public inbox for gentoo-commits@lists.gentoo.org
 help / color / mirror / Atom feed
* [gentoo-commits] proj/linux-patches:4.5 commit in: /
@ 2016-03-09 17:05 Mike Pagano
  0 siblings, 0 replies; 12+ messages in thread
From: Mike Pagano @ 2016-03-09 17:05 UTC (permalink / raw
  To: gentoo-commits

commit:     3fd79bcc90be2c6568e911e390079ab09d8009ec
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Mar  9 17:05:41 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Mar  9 17:05:41 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3fd79bcc

Enable link security restrictions by default. ACPI: Disable Windows 8 compatibility for some Lenovo ThinkPads. Ensure that /dev/root doesn't appear in /proc/mounts when bootint without an initramfs. Bootsplash ported by Marco. (Bug #539616). Add Gentoo Linux support config settings and defaults.Kernel patch enables gcc < v4.9 optimizations for additional CPUs. Kernel patch enables gcc >= v4.9 optimizations for additional CPUs.

 0000_README                                        |   24 +
 ...ble-link-security-restrictions-by-default.patch |   22 +
 2700_ThinkPad-30-brightness-control-fix.patch      |   67 +
 2900_dev-root-proc-mount-fix.patch                 |   38 +
 4200_fbcondecor-3.19.patch                         | 2119 ++++++++++++++++++++
 ...able-additional-cpu-optimizations-for-gcc.patch |  327 +++
 ...-additional-cpu-optimizations-for-gcc-4.9.patch |  402 ++++
 7 files changed, 2999 insertions(+)

diff --git a/0000_README b/0000_README
index 9018993..1eb82e8 100644
--- a/0000_README
+++ b/0000_README
@@ -43,6 +43,30 @@ EXPERIMENTAL
 Individual Patch Descriptions:
 --------------------------------------------------------------------------
 
+Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
+From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
+Desc:   Enable link security restrictions by default.
+
+Patch:  2700_ThinkPad-30-brightness-control-fix.patch
+From:   Seth Forshee <seth.forshee@canonical.com>
+Desc:   ACPI: Disable Windows 8 compatibility for some Lenovo ThinkPads.
+
+Patch:  2900_dev-root-proc-mount-fix.patch
+From:   https://bugs.gentoo.org/show_bug.cgi?id=438380
+Desc:   Ensure that /dev/root doesn't appear in /proc/mounts when bootint without an initramfs.
+
+Patch:  4200_fbcondecor-3.19.patch
+From:   http://www.mepiscommunity.org/fbcondecor
+Desc:   Bootsplash ported by Marco. (Bug #539616)
+
 Patch:  4567_distro-Gentoo-Kconfig.patch
 From:   Tom Wijsman <TomWij@gentoo.org>
 Desc:   Add Gentoo Linux support config settings and defaults.
+
+Patch:  5000_enable-additional-cpu-optimizations-for-gcc.patch
+From:   https://github.com/graysky2/kernel_gcc_patch/
+Desc:   Kernel patch enables gcc < v4.9 optimizations for additional CPUs.
+
+Patch:  5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
+From:   https://github.com/graysky2/kernel_gcc_patch/
+Desc:   Kernel patch enables gcc >= v4.9 optimizations for additional CPUs.

diff --git a/1510_fs-enable-link-security-restrictions-by-default.patch b/1510_fs-enable-link-security-restrictions-by-default.patch
new file mode 100644
index 0000000..639fb3c
--- /dev/null
+++ b/1510_fs-enable-link-security-restrictions-by-default.patch
@@ -0,0 +1,22 @@
+From: Ben Hutchings <ben@decadent.org.uk>
+Subject: fs: Enable link security restrictions by default
+Date: Fri, 02 Nov 2012 05:32:06 +0000
+Bug-Debian: https://bugs.debian.org/609455
+Forwarded: not-needed
+
+This reverts commit 561ec64ae67ef25cac8d72bb9c4bfc955edfd415
+('VFS: don't do protected {sym,hard}links by default').
+
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -651,8 +651,8 @@ static inline void put_link(struct namei
+ 	path_put(link);
+ }
+ 
+-int sysctl_protected_symlinks __read_mostly = 0;
+-int sysctl_protected_hardlinks __read_mostly = 0;
++int sysctl_protected_symlinks __read_mostly = 1;
++int sysctl_protected_hardlinks __read_mostly = 1;
+ 
+ /**
+  * may_follow_link - Check symlink following for unsafe situations

diff --git a/2700_ThinkPad-30-brightness-control-fix.patch b/2700_ThinkPad-30-brightness-control-fix.patch
new file mode 100644
index 0000000..b548c6d
--- /dev/null
+++ b/2700_ThinkPad-30-brightness-control-fix.patch
@@ -0,0 +1,67 @@
+diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
+index cb96296..6c242ed 100644
+--- a/drivers/acpi/blacklist.c
++++ b/drivers/acpi/blacklist.c
+@@ -269,6 +276,61 @@  static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
+ 	},
+ 
+ 	/*
++	 * The following Lenovo models have a broken workaround in the
++	 * acpi_video backlight implementation to meet the Windows 8
++	 * requirement of 101 backlight levels. Reverting to pre-Win8
++	 * behavior fixes the problem.
++	 */
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "Lenovo ThinkPad L430",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L430"),
++		},
++	},
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "Lenovo ThinkPad T430s",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430s"),
++		},
++	},
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "Lenovo ThinkPad T530",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T530"),
++		},
++	},
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "Lenovo ThinkPad W530",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W530"),
++		},
++	},
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "Lenovo ThinkPad X1 Carbon",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X1 Carbon"),
++		},
++	},
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "Lenovo ThinkPad X230",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X230"),
++		},
++	},
++
++	/*
+ 	 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
+ 	 * Linux ignores it, except for the machines enumerated below.
+ 	 */
+

diff --git a/2900_dev-root-proc-mount-fix.patch b/2900_dev-root-proc-mount-fix.patch
new file mode 100644
index 0000000..60af1eb
--- /dev/null
+++ b/2900_dev-root-proc-mount-fix.patch
@@ -0,0 +1,38 @@
+--- a/init/do_mounts.c	2015-08-19 10:27:16.753852576 -0400
++++ b/init/do_mounts.c	2015-08-19 10:34:25.473850353 -0400
+@@ -490,7 +490,11 @@ void __init change_floppy(char *fmt, ...
+ 	va_start(args, fmt);
+ 	vsprintf(buf, fmt, args);
+ 	va_end(args);
+-	fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
++	if (saved_root_name[0])
++		fd = sys_open(saved_root_name, O_RDWR | O_NDELAY, 0);
++	else
++		fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
++
+ 	if (fd >= 0) {
+ 		sys_ioctl(fd, FDEJECT, 0);
+ 		sys_close(fd);
+@@ -534,11 +538,17 @@ void __init mount_root(void)
+ #endif
+ #ifdef CONFIG_BLOCK
+ 	{
+-		int err = create_dev("/dev/root", ROOT_DEV);
+-
+-		if (err < 0)
+-			pr_emerg("Failed to create /dev/root: %d\n", err);
+-		mount_block_root("/dev/root", root_mountflags);
++		if (saved_root_name[0] == '/') {
++	       	int err = create_dev(saved_root_name, ROOT_DEV);
++			if (err < 0)
++				pr_emerg("Failed to create %s: %d\n", saved_root_name, err);
++			mount_block_root(saved_root_name, root_mountflags);
++		} else {
++			int err = create_dev("/dev/root", ROOT_DEV);
++			if (err < 0)
++				pr_emerg("Failed to create /dev/root: %d\n", err);
++			mount_block_root("/dev/root", root_mountflags);
++		}
+ 	}
+ #endif
+ }

diff --git a/4200_fbcondecor-3.19.patch b/4200_fbcondecor-3.19.patch
new file mode 100644
index 0000000..29c379f
--- /dev/null
+++ b/4200_fbcondecor-3.19.patch
@@ -0,0 +1,2119 @@
+diff --git a/Documentation/fb/00-INDEX b/Documentation/fb/00-INDEX
+index fe85e7c..2230930 100644
+--- a/Documentation/fb/00-INDEX
++++ b/Documentation/fb/00-INDEX
+@@ -23,6 +23,8 @@ ep93xx-fb.txt
+ 	- info on the driver for EP93xx LCD controller.
+ fbcon.txt
+ 	- intro to and usage guide for the framebuffer console (fbcon).
++fbcondecor.txt
++	- info on the Framebuffer Console Decoration
+ framebuffer.txt
+ 	- introduction to frame buffer devices.
+ gxfb.txt
+diff --git a/Documentation/fb/fbcondecor.txt b/Documentation/fb/fbcondecor.txt
+new file mode 100644
+index 0000000..3388c61
+--- /dev/null
++++ b/Documentation/fb/fbcondecor.txt
+@@ -0,0 +1,207 @@
++What is it?
++-----------
++
++The framebuffer decorations are a kernel feature which allows displaying a 
++background picture on selected consoles.
++
++What do I need to get it to work?
++---------------------------------
++
++To get fbcondecor up-and-running you will have to:
++ 1) get a copy of splashutils [1] or a similar program
++ 2) get some fbcondecor themes
++ 3) build the kernel helper program
++ 4) build your kernel with the FB_CON_DECOR option enabled.
++
++To get fbcondecor operational right after fbcon initialization is finished, you
++will have to include a theme and the kernel helper into your initramfs image.
++Please refer to splashutils documentation for instructions on how to do that.
++
++[1] The splashutils package can be downloaded from:
++    http://github.com/alanhaggai/fbsplash
++
++The userspace helper
++--------------------
++
++The userspace fbcondecor helper (by default: /sbin/fbcondecor_helper) is called by the
++kernel whenever an important event occurs and the kernel needs some kind of
++job to be carried out. Important events include console switches and video
++mode switches (the kernel requests background images and configuration
++parameters for the current console). The fbcondecor helper must be accessible at
++all times. If it's not, fbcondecor will be switched off automatically.
++
++It's possible to set path to the fbcondecor helper by writing it to
++/proc/sys/kernel/fbcondecor.
++
++*****************************************************************************
++
++The information below is mostly technical stuff. There's probably no need to
++read it unless you plan to develop a userspace helper.
++
++The fbcondecor protocol
++-----------------------
++
++The fbcondecor protocol defines a communication interface between the kernel and
++the userspace fbcondecor helper.
++
++The kernel side is responsible for:
++
++ * rendering console text, using an image as a background (instead of a
++   standard solid color fbcon uses),
++ * accepting commands from the user via ioctls on the fbcondecor device,
++ * calling the userspace helper to set things up as soon as the fb subsystem 
++   is initialized.
++
++The userspace helper is responsible for everything else, including parsing
++configuration files, decompressing the image files whenever the kernel needs
++it, and communicating with the kernel if necessary.
++
++The fbcondecor protocol specifies how communication is done in both ways:
++kernel->userspace and userspace->helper.
++  
++Kernel -> Userspace
++-------------------
++
++The kernel communicates with the userspace helper by calling it and specifying
++the task to be done in a series of arguments.
++
++The arguments follow the pattern:
++<fbcondecor protocol version> <command> <parameters>
++
++All commands defined in fbcondecor protocol v2 have the following parameters:
++ virtual console
++ framebuffer number
++ theme
++
++Fbcondecor protocol v1 specified an additional 'fbcondecor mode' after the
++framebuffer number. Fbcondecor protocol v1 is deprecated and should not be used.
++
++Fbcondecor protocol v2 specifies the following commands:
++
++getpic
++------
++ The kernel issues this command to request image data. It's up to the 
++ userspace  helper to find a background image appropriate for the specified 
++ theme and the current resolution. The userspace helper should respond by 
++ issuing the FBIOCONDECOR_SETPIC ioctl.
++
++init
++----
++ The kernel issues this command after the fbcondecor device is created and
++ the fbcondecor interface is initialized. Upon receiving 'init', the userspace
++ helper should parse the kernel command line (/proc/cmdline) or otherwise
++ decide whether fbcondecor is to be activated.
++
++ To activate fbcondecor on the first console the helper should issue the
++ FBIOCONDECOR_SETCFG, FBIOCONDECOR_SETPIC and FBIOCONDECOR_SETSTATE commands,
++ in the above-mentioned order.
++
++ When the userspace helper is called in an early phase of the boot process
++ (right after the initialization of fbcon), no filesystems will be mounted.
++ The helper program should mount sysfs and then create the appropriate
++ framebuffer, fbcondecor and tty0 devices (if they don't already exist) to get
++ current display settings and to be able to communicate with the kernel side.
++ It should probably also mount the procfs to be able to parse the kernel
++ command line parameters.
++
++ Note that the console sem is not held when the kernel calls fbcondecor_helper
++ with the 'init' command. The fbcondecor helper should perform all ioctls with
++ origin set to FBCON_DECOR_IO_ORIG_USER.
++
++modechange
++----------
++ The kernel issues this command on a mode change. The helper's response should
++ be similar to the response to the 'init' command. Note that this time the
++ console sem is held and all ioctls must be performed with origin set to
++ FBCON_DECOR_IO_ORIG_KERNEL.
++
++
++Userspace -> Kernel
++-------------------
++
++Userspace programs can communicate with fbcondecor via ioctls on the
++fbcondecor device. These ioctls are to be used by both the userspace helper
++(called only by the kernel) and userspace configuration tools (run by the users).
++
++The fbcondecor helper should set the origin field to FBCON_DECOR_IO_ORIG_KERNEL
++when doing the appropriate ioctls. All userspace configuration tools should
++use FBCON_DECOR_IO_ORIG_USER. Failure to set the appropriate value in the origin
++field when performing ioctls from the kernel helper will most likely result
++in a console deadlock.
++
++FBCON_DECOR_IO_ORIG_KERNEL instructs fbcondecor not to try to acquire the console
++semaphore. Not surprisingly, FBCON_DECOR_IO_ORIG_USER instructs it to acquire
++the console sem.
++
++The framebuffer console decoration provides the following ioctls (all defined in 
++linux/fb.h):
++
++FBIOCONDECOR_SETPIC
++description: loads a background picture for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: struct fb_image*
++notes: 
++If called for consoles other than the current foreground one, the picture data
++will be ignored.
++
++If the current virtual console is running in a 8-bpp mode, the cmap substruct
++of fb_image has to be filled appropriately: start should be set to 16 (first
++16 colors are reserved for fbcon), len to a value <= 240 and red, green and
++blue should point to valid cmap data. The transp field is ingored. The fields
++dx, dy, bg_color, fg_color in fb_image are ignored as well.
++
++FBIOCONDECOR_SETCFG
++description: sets the fbcondecor config for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: struct vc_decor*
++notes: The structure has to be filled with valid data.
++
++FBIOCONDECOR_GETCFG
++description: gets the fbcondecor config for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: struct vc_decor*
++
++FBIOCONDECOR_SETSTATE
++description: sets the fbcondecor state for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: unsigned int*
++          values: 0 = disabled, 1 = enabled.
++
++FBIOCONDECOR_GETSTATE
++description: gets the fbcondecor state for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: unsigned int*
++          values: as in FBIOCONDECOR_SETSTATE
++
++Info on used structures:
++
++Definition of struct vc_decor can be found in linux/console_decor.h. It's
++heavily commented. Note that the 'theme' field should point to a string
++no longer than FBCON_DECOR_THEME_LEN. When FBIOCONDECOR_GETCFG call is
++performed, the theme field should point to a char buffer of length
++FBCON_DECOR_THEME_LEN.
++
++Definition of struct fbcon_decor_iowrapper can be found in linux/fb.h.
++The fields in this struct have the following meaning:
++
++vc: 
++Virtual console number.
++
++origin: 
++Specifies if the ioctl is performed as a response to a kernel request. The
++fbcondecor helper should set this field to FBCON_DECOR_IO_ORIG_KERNEL, userspace
++programs should set it to FBCON_DECOR_IO_ORIG_USER. This field is necessary to
++avoid console semaphore deadlocks.
++
++data: 
++Pointer to a data structure appropriate for the performed ioctl. Type of
++the data struct is specified in the ioctls description.
++
++*****************************************************************************
++
++Credit
++------
++
++Original 'bootsplash' project & implementation by:
++  Volker Poplawski <volker@poplawski.de>, Stefan Reinauer <stepan@suse.de>,
++  Steffen Winterfeldt <snwint@suse.de>, Michael Schroeder <mls@suse.de>,
++  Ken Wimer <wimer@suse.de>.
++
++Fbcondecor, fbcondecor protocol design, current implementation & docs by:
++  Michal Januszewski <michalj+fbcondecor@gmail.com>
++
+diff --git a/drivers/Makefile b/drivers/Makefile
+index 7183b6a..d576148 100644
+--- a/drivers/Makefile
++++ b/drivers/Makefile
+@@ -17,6 +17,10 @@ obj-y				+= pwm/
+ obj-$(CONFIG_PCI)		+= pci/
+ obj-$(CONFIG_PARISC)		+= parisc/
+ obj-$(CONFIG_RAPIDIO)		+= rapidio/
++# tty/ comes before char/ so that the VT console is the boot-time
++# default.
++obj-y				+= tty/
++obj-y				+= char/
+ obj-y				+= video/
+ obj-y				+= idle/
+ 
+@@ -42,11 +46,6 @@ obj-$(CONFIG_REGULATOR)		+= regulator/
+ # reset controllers early, since gpu drivers might rely on them to initialize
+ obj-$(CONFIG_RESET_CONTROLLER)	+= reset/
+ 
+-# tty/ comes before char/ so that the VT console is the boot-time
+-# default.
+-obj-y				+= tty/
+-obj-y				+= char/
+-
+ # iommu/ comes before gpu as gpu are using iommu controllers
+ obj-$(CONFIG_IOMMU_SUPPORT) += iommu/
+
+diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
+index fe1cd01..6d2e87a 100644
+--- a/drivers/video/console/Kconfig
++++ b/drivers/video/console/Kconfig
+@@ -126,6 +126,19 @@ config FRAMEBUFFER_CONSOLE_ROTATION
+          such that other users of the framebuffer will remain normally
+          oriented.
+ 
++config FB_CON_DECOR
++	bool "Support for the Framebuffer Console Decorations"
++	depends on FRAMEBUFFER_CONSOLE=y && !FB_TILEBLITTING
++	default n
++	---help---
++	  This option enables support for framebuffer console decorations which
++	  makes it possible to display images in the background of the system
++	  consoles.  Note that userspace utilities are necessary in order to take 
++	  advantage of these features. Refer to Documentation/fb/fbcondecor.txt 
++	  for more information.
++
++	  If unsure, say N.
++
+ config STI_CONSOLE
+         bool "STI text console"
+         depends on PARISC
+diff --git a/drivers/video/console/Makefile b/drivers/video/console/Makefile
+index 43bfa48..cc104b6f 100644
+--- a/drivers/video/console/Makefile
++++ b/drivers/video/console/Makefile
+@@ -16,4 +16,5 @@ obj-$(CONFIG_FRAMEBUFFER_CONSOLE)     += fbcon_rotate.o fbcon_cw.o fbcon_ud.o \
+                                          fbcon_ccw.o
+ endif
+ 
++obj-$(CONFIG_FB_CON_DECOR)     	  += fbcondecor.o cfbcondecor.o
+ obj-$(CONFIG_FB_STI)              += sticore.o
+diff --git a/drivers/video/console/bitblit.c b/drivers/video/console/bitblit.c
+index 61b182b..984384b 100644
+--- a/drivers/video/console/bitblit.c
++++ b/drivers/video/console/bitblit.c
+@@ -18,6 +18,7 @@
+ #include <linux/console.h>
+ #include <asm/types.h>
+ #include "fbcon.h"
++#include "fbcondecor.h"
+ 
+ /*
+  * Accelerated handlers.
+@@ -55,6 +56,13 @@ static void bit_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ 	area.height = height * vc->vc_font.height;
+ 	area.width = width * vc->vc_font.width;
+ 
++	if (fbcon_decor_active(info, vc)) {
++ 		area.sx += vc->vc_decor.tx;
++ 		area.sy += vc->vc_decor.ty;
++ 		area.dx += vc->vc_decor.tx;
++ 		area.dy += vc->vc_decor.ty;
++ 	}
++
+ 	info->fbops->fb_copyarea(info, &area);
+ }
+ 
+@@ -380,11 +388,15 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
+ 	cursor.image.depth = 1;
+ 	cursor.rop = ROP_XOR;
+ 
+-	if (info->fbops->fb_cursor)
+-		err = info->fbops->fb_cursor(info, &cursor);
++	if (fbcon_decor_active(info, vc)) {
++		fbcon_decor_cursor(info, &cursor);
++	} else {
++		if (info->fbops->fb_cursor)
++			err = info->fbops->fb_cursor(info, &cursor);
+ 
+-	if (err)
+-		soft_cursor(info, &cursor);
++		if (err)
++			soft_cursor(info, &cursor);
++	}
+ 
+ 	ops->cursor_reset = 0;
+ }
+diff --git a/drivers/video/console/cfbcondecor.c b/drivers/video/console/cfbcondecor.c
+new file mode 100644
+index 0000000..a2b4497
+--- /dev/null
++++ b/drivers/video/console/cfbcondecor.c
+@@ -0,0 +1,471 @@
++/*
++ *  linux/drivers/video/cfbcon_decor.c -- Framebuffer decor render functions
++ *
++ *  Copyright (C) 2004 Michal Januszewski <michalj+fbcondecor@gmail.com>
++ *
++ *  Code based upon "Bootdecor" (C) 2001-2003
++ *       Volker Poplawski <volker@poplawski.de>,
++ *       Stefan Reinauer <stepan@suse.de>,
++ *       Steffen Winterfeldt <snwint@suse.de>,
++ *       Michael Schroeder <mls@suse.de>,
++ *       Ken Wimer <wimer@suse.de>.
++ *
++ *  This file is subject to the terms and conditions of the GNU General Public
++ *  License.  See the file COPYING in the main directory of this archive for
++ *  more details.
++ */
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/fb.h>
++#include <linux/selection.h>
++#include <linux/slab.h>
++#include <linux/vt_kern.h>
++#include <asm/irq.h>
++
++#include "fbcon.h"
++#include "fbcondecor.h"
++
++#define parse_pixel(shift,bpp,type)						\
++	do {									\
++		if (d & (0x80 >> (shift)))					\
++			dd2[(shift)] = fgx;					\
++		else								\
++			dd2[(shift)] = transparent ? *(type *)decor_src : bgx;	\
++		decor_src += (bpp);						\
++	} while (0)								\
++
++extern int get_color(struct vc_data *vc, struct fb_info *info,
++		     u16 c, int is_fg);
++
++void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc)
++{
++	int i, j, k;
++	int minlen = min(min(info->var.red.length, info->var.green.length),
++			     info->var.blue.length);
++	u32 col;
++
++	for (j = i = 0; i < 16; i++) {
++		k = color_table[i];
++
++		col = ((vc->vc_palette[j++]  >> (8-minlen))
++			<< info->var.red.offset);
++		col |= ((vc->vc_palette[j++] >> (8-minlen))
++			<< info->var.green.offset);
++		col |= ((vc->vc_palette[j++] >> (8-minlen))
++			<< info->var.blue.offset);
++			((u32 *)info->pseudo_palette)[k] = col;
++	}
++}
++
++void fbcon_decor_renderc(struct fb_info *info, int ypos, int xpos, int height,
++		      int width, u8* src, u32 fgx, u32 bgx, u8 transparent)
++{
++	unsigned int x, y;
++	u32 dd;
++	int bytespp = ((info->var.bits_per_pixel + 7) >> 3);
++	unsigned int d = ypos * info->fix.line_length + xpos * bytespp;
++	unsigned int ds = (ypos * info->var.xres + xpos) * bytespp;
++	u16 dd2[4];
++
++	u8* decor_src = (u8 *)(info->bgdecor.data + ds);
++	u8* dst = (u8 *)(info->screen_base + d);
++
++	if ((ypos + height) > info->var.yres || (xpos + width) > info->var.xres)
++		return;
++
++	for (y = 0; y < height; y++) {
++		switch (info->var.bits_per_pixel) {
++
++		case 32:
++			for (x = 0; x < width; x++) {
++
++				if ((x & 7) == 0)
++					d = *src++;
++				if (d & 0x80)
++					dd = fgx;
++				else
++					dd = transparent ?
++					     *(u32 *)decor_src : bgx;
++
++				d <<= 1;
++				decor_src += 4;
++				fb_writel(dd, dst);
++				dst += 4;
++			}
++			break;
++		case 24:
++			for (x = 0; x < width; x++) {
++
++				if ((x & 7) == 0)
++					d = *src++;
++				if (d & 0x80)
++					dd = fgx;
++				else
++					dd = transparent ?
++					     (*(u32 *)decor_src & 0xffffff) : bgx;
++
++				d <<= 1;
++				decor_src += 3;
++#ifdef __LITTLE_ENDIAN
++				fb_writew(dd & 0xffff, dst);
++				dst += 2;
++				fb_writeb((dd >> 16), dst);
++#else
++				fb_writew(dd >> 8, dst);
++				dst += 2;
++				fb_writeb(dd & 0xff, dst);
++#endif
++				dst++;
++			}
++			break;
++		case 16:
++			for (x = 0; x < width; x += 2) {
++				if ((x & 7) == 0)
++					d = *src++;
++
++				parse_pixel(0, 2, u16);
++				parse_pixel(1, 2, u16);
++#ifdef __LITTLE_ENDIAN
++				dd = dd2[0] | (dd2[1] << 16);
++#else
++				dd = dd2[1] | (dd2[0] << 16);
++#endif
++				d <<= 2;
++				fb_writel(dd, dst);
++				dst += 4;
++			}
++			break;
++
++		case 8:
++			for (x = 0; x < width; x += 4) {
++				if ((x & 7) == 0)
++					d = *src++;
++
++				parse_pixel(0, 1, u8);
++				parse_pixel(1, 1, u8);
++				parse_pixel(2, 1, u8);
++				parse_pixel(3, 1, u8);
++
++#ifdef __LITTLE_ENDIAN
++				dd = dd2[0] | (dd2[1] << 8) | (dd2[2] << 16) | (dd2[3] << 24);
++#else
++				dd = dd2[3] | (dd2[2] << 8) | (dd2[1] << 16) | (dd2[0] << 24);
++#endif
++				d <<= 4;
++				fb_writel(dd, dst);
++				dst += 4;
++			}
++		}
++
++		dst += info->fix.line_length - width * bytespp;
++		decor_src += (info->var.xres - width) * bytespp;
++	}
++}
++
++#define cc2cx(a) 						\
++	((info->fix.visual == FB_VISUAL_TRUECOLOR || 		\
++	  info->fix.visual == FB_VISUAL_DIRECTCOLOR) ? 		\
++	 ((u32*)info->pseudo_palette)[a] : a)
++
++void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info,
++		   const unsigned short *s, int count, int yy, int xx)
++{
++	unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
++	struct fbcon_ops *ops = info->fbcon_par;
++	int fg_color, bg_color, transparent;
++	u8 *src;
++	u32 bgx, fgx;
++	u16 c = scr_readw(s);
++
++	fg_color = get_color(vc, info, c, 1);
++        bg_color = get_color(vc, info, c, 0);
++
++	/* Don't paint the background image if console is blanked */
++	transparent = ops->blank_state ? 0 :
++		(vc->vc_decor.bg_color == bg_color);
++
++	xx = xx * vc->vc_font.width + vc->vc_decor.tx;
++	yy = yy * vc->vc_font.height + vc->vc_decor.ty;
++
++	fgx = cc2cx(fg_color);
++	bgx = cc2cx(bg_color);
++
++	while (count--) {
++		c = scr_readw(s++);
++		src = vc->vc_font.data + (c & charmask) * vc->vc_font.height *
++		      ((vc->vc_font.width + 7) >> 3);
++
++		fbcon_decor_renderc(info, yy, xx, vc->vc_font.height,
++			       vc->vc_font.width, src, fgx, bgx, transparent);
++		xx += vc->vc_font.width;
++	}
++}
++
++void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor)
++{
++	int i;
++	unsigned int dsize, s_pitch;
++	struct fbcon_ops *ops = info->fbcon_par;
++	struct vc_data* vc;
++	u8 *src;
++
++	/* we really don't need any cursors while the console is blanked */
++	if (info->state != FBINFO_STATE_RUNNING || ops->blank_state)
++		return;
++
++	vc = vc_cons[ops->currcon].d;
++
++	src = kmalloc(64 + sizeof(struct fb_image), GFP_ATOMIC);
++	if (!src)
++		return;
++
++	s_pitch = (cursor->image.width + 7) >> 3;
++	dsize = s_pitch * cursor->image.height;
++	if (cursor->enable) {
++		switch (cursor->rop) {
++		case ROP_XOR:
++			for (i = 0; i < dsize; i++)
++				src[i] = cursor->image.data[i] ^ cursor->mask[i];
++                        break;
++		case ROP_COPY:
++		default:
++			for (i = 0; i < dsize; i++)
++				src[i] = cursor->image.data[i] & cursor->mask[i];
++			break;
++		}
++	} else
++		memcpy(src, cursor->image.data, dsize);
++
++	fbcon_decor_renderc(info,
++			cursor->image.dy + vc->vc_decor.ty,
++			cursor->image.dx + vc->vc_decor.tx,
++			cursor->image.height,
++			cursor->image.width,
++			(u8*)src,
++			cc2cx(cursor->image.fg_color),
++			cc2cx(cursor->image.bg_color),
++			cursor->image.bg_color == vc->vc_decor.bg_color);
++
++	kfree(src);
++}
++
++static void decorset(u8 *dst, int height, int width, int dstbytes,
++		        u32 bgx, int bpp)
++{
++	int i;
++
++	if (bpp == 8)
++		bgx |= bgx << 8;
++	if (bpp == 16 || bpp == 8)
++		bgx |= bgx << 16;
++
++	while (height-- > 0) {
++		u8 *p = dst;
++
++		switch (bpp) {
++
++		case 32:
++			for (i=0; i < width; i++) {
++				fb_writel(bgx, p); p += 4;
++			}
++			break;
++		case 24:
++			for (i=0; i < width; i++) {
++#ifdef __LITTLE_ENDIAN
++				fb_writew((bgx & 0xffff),(u16*)p); p += 2;
++				fb_writeb((bgx >> 16),p++);
++#else
++				fb_writew((bgx >> 8),(u16*)p); p += 2;
++				fb_writeb((bgx & 0xff),p++);
++#endif
++			}
++		case 16:
++			for (i=0; i < width/4; i++) {
++				fb_writel(bgx,p); p += 4;
++				fb_writel(bgx,p); p += 4;
++			}
++			if (width & 2) {
++				fb_writel(bgx,p); p += 4;
++			}
++			if (width & 1)
++				fb_writew(bgx,(u16*)p);
++			break;
++		case 8:
++			for (i=0; i < width/4; i++) {
++				fb_writel(bgx,p); p += 4;
++			}
++
++			if (width & 2) {
++				fb_writew(bgx,p); p += 2;
++			}
++			if (width & 1)
++				fb_writeb(bgx,(u8*)p);
++			break;
++
++		}
++		dst += dstbytes;
++	}
++}
++
++void fbcon_decor_copy(u8 *dst, u8 *src, int height, int width, int linebytes,
++		   int srclinebytes, int bpp)
++{
++	int i;
++
++	while (height-- > 0) {
++		u32 *p = (u32 *)dst;
++		u32 *q = (u32 *)src;
++
++		switch (bpp) {
++
++		case 32:
++			for (i=0; i < width; i++)
++				fb_writel(*q++, p++);
++			break;
++		case 24:
++			for (i=0; i < (width*3/4); i++)
++				fb_writel(*q++, p++);
++			if ((width*3) % 4) {
++				if (width & 2) {
++					fb_writeb(*(u8*)q, (u8*)p);
++				} else if (width & 1) {
++					fb_writew(*(u16*)q, (u16*)p);
++					fb_writeb(*(u8*)((u16*)q+1),(u8*)((u16*)p+2));
++				}
++			}
++			break;
++		case 16:
++			for (i=0; i < width/4; i++) {
++				fb_writel(*q++, p++);
++				fb_writel(*q++, p++);
++			}
++			if (width & 2)
++				fb_writel(*q++, p++);
++			if (width & 1)
++				fb_writew(*(u16*)q, (u16*)p);
++			break;
++		case 8:
++			for (i=0; i < width/4; i++)
++				fb_writel(*q++, p++);
++
++			if (width & 2) {
++				fb_writew(*(u16*)q, (u16*)p);
++				q = (u32*) ((u16*)q + 1);
++				p = (u32*) ((u16*)p + 1);
++			}
++			if (width & 1)
++				fb_writeb(*(u8*)q, (u8*)p);
++			break;
++		}
++
++		dst += linebytes;
++		src += srclinebytes;
++	}
++}
++
++static void decorfill(struct fb_info *info, int sy, int sx, int height,
++		       int width)
++{
++	int bytespp = ((info->var.bits_per_pixel + 7) >> 3);
++	int d  = sy * info->fix.line_length + sx * bytespp;
++	int ds = (sy * info->var.xres + sx) * bytespp;
++
++	fbcon_decor_copy((u8 *)(info->screen_base + d), (u8 *)(info->bgdecor.data + ds),
++		    height, width, info->fix.line_length, info->var.xres * bytespp,
++		    info->var.bits_per_pixel);
++}
++
++void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx,
++		    int height, int width)
++{
++	int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
++	struct fbcon_ops *ops = info->fbcon_par;
++	u8 *dst;
++	int transparent, bg_color = attr_bgcol_ec(bgshift, vc, info);
++
++	transparent = (vc->vc_decor.bg_color == bg_color);
++	sy = sy * vc->vc_font.height + vc->vc_decor.ty;
++	sx = sx * vc->vc_font.width + vc->vc_decor.tx;
++	height *= vc->vc_font.height;
++	width *= vc->vc_font.width;
++
++	/* Don't paint the background image if console is blanked */
++	if (transparent && !ops->blank_state) {
++		decorfill(info, sy, sx, height, width);
++	} else {
++		dst = (u8 *)(info->screen_base + sy * info->fix.line_length +
++			     sx * ((info->var.bits_per_pixel + 7) >> 3));
++		decorset(dst, height, width, info->fix.line_length, cc2cx(bg_color),
++			  info->var.bits_per_pixel);
++	}
++}
++
++void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info,
++			    int bottom_only)
++{
++	unsigned int tw = vc->vc_cols*vc->vc_font.width;
++	unsigned int th = vc->vc_rows*vc->vc_font.height;
++
++	if (!bottom_only) {
++		/* top margin */
++		decorfill(info, 0, 0, vc->vc_decor.ty, info->var.xres);
++		/* left margin */
++		decorfill(info, vc->vc_decor.ty, 0, th, vc->vc_decor.tx);
++		/* right margin */
++		decorfill(info, vc->vc_decor.ty, vc->vc_decor.tx + tw, th, 
++			   info->var.xres - vc->vc_decor.tx - tw);
++	}
++	decorfill(info, vc->vc_decor.ty + th, 0, 
++		   info->var.yres - vc->vc_decor.ty - th, info->var.xres);
++}
++
++void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, 
++			   int sx, int dx, int width)
++{
++	u16 *d = (u16 *) (vc->vc_origin + vc->vc_size_row * y + dx * 2);
++	u16 *s = d + (dx - sx);
++	u16 *start = d;
++	u16 *ls = d;
++	u16 *le = d + width;
++	u16 c;
++	int x = dx;
++	u16 attr = 1;
++
++	do {
++		c = scr_readw(d);
++		if (attr != (c & 0xff00)) {
++			attr = c & 0xff00;
++			if (d > start) {
++				fbcon_decor_putcs(vc, info, start, d - start, y, x);
++				x += d - start;
++				start = d;
++			}
++		}
++		if (s >= ls && s < le && c == scr_readw(s)) {
++			if (d > start) {
++				fbcon_decor_putcs(vc, info, start, d - start, y, x);
++				x += d - start + 1;
++				start = d + 1;
++			} else {
++				x++;
++				start++;
++			}
++		}
++		s++;
++		d++;
++	} while (d < le);
++	if (d > start)
++		fbcon_decor_putcs(vc, info, start, d - start, y, x);
++}
++
++void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank)
++{
++	if (blank) {
++		decorset((u8 *)info->screen_base, info->var.yres, info->var.xres,
++			  info->fix.line_length, 0, info->var.bits_per_pixel);
++	} else {
++		update_screen(vc);
++		fbcon_decor_clear_margins(vc, info, 0);
++	}
++}
++
+diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
+index f447734..da50d61 100644
+--- a/drivers/video/console/fbcon.c
++++ b/drivers/video/console/fbcon.c
+@@ -79,6 +79,7 @@
+ #include <asm/irq.h>
+ 
+ #include "fbcon.h"
++#include "../console/fbcondecor.h"
+ 
+ #ifdef FBCONDEBUG
+ #  define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
+@@ -94,7 +95,7 @@ enum {
+ 
+ static struct display fb_display[MAX_NR_CONSOLES];
+ 
+-static signed char con2fb_map[MAX_NR_CONSOLES];
++signed char con2fb_map[MAX_NR_CONSOLES];
+ static signed char con2fb_map_boot[MAX_NR_CONSOLES];
+ 
+ static int logo_lines;
+@@ -286,7 +287,7 @@ static inline int fbcon_is_inactive(struct vc_data *vc, struct fb_info *info)
+ 		!vt_force_oops_output(vc);
+ }
+ 
+-static int get_color(struct vc_data *vc, struct fb_info *info,
++int get_color(struct vc_data *vc, struct fb_info *info,
+ 	      u16 c, int is_fg)
+ {
+ 	int depth = fb_get_color_depth(&info->var, &info->fix);
+@@ -551,6 +552,9 @@ static int do_fbcon_takeover(int show_logo)
+ 		info_idx = -1;
+ 	} else {
+ 		fbcon_has_console_bind = 1;
++#ifdef CONFIG_FB_CON_DECOR
++		fbcon_decor_init();
++#endif
+ 	}
+ 
+ 	return err;
+@@ -1007,6 +1011,12 @@ static const char *fbcon_startup(void)
+ 	rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ 	cols /= vc->vc_font.width;
+ 	rows /= vc->vc_font.height;
++
++	if (fbcon_decor_active(info, vc)) {
++		cols = vc->vc_decor.twidth / vc->vc_font.width;
++		rows = vc->vc_decor.theight / vc->vc_font.height;
++	}
++
+ 	vc_resize(vc, cols, rows);
+ 
+ 	DPRINTK("mode:   %s\n", info->fix.id);
+@@ -1036,7 +1046,7 @@ static void fbcon_init(struct vc_data *vc, int init)
+ 	cap = info->flags;
+ 
+ 	if (vc != svc || logo_shown == FBCON_LOGO_DONTSHOW ||
+-	    (info->fix.type == FB_TYPE_TEXT))
++	    (info->fix.type == FB_TYPE_TEXT) || fbcon_decor_active(info, vc))
+ 		logo = 0;
+ 
+ 	if (var_to_display(p, &info->var, info))
+@@ -1260,6 +1270,11 @@ static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height,
+ 		fbcon_clear_margins(vc, 0);
+ 	}
+ 
++ 	if (fbcon_decor_active(info, vc)) {
++ 		fbcon_decor_clear(vc, info, sy, sx, height, width);
++ 		return;
++ 	}
++
+ 	/* Split blits that cross physical y_wrap boundary */
+ 
+ 	y_break = p->vrows - p->yscroll;
+@@ -1279,10 +1294,15 @@ static void fbcon_putcs(struct vc_data *vc, const unsigned short *s,
+ 	struct display *p = &fb_display[vc->vc_num];
+ 	struct fbcon_ops *ops = info->fbcon_par;
+ 
+-	if (!fbcon_is_inactive(vc, info))
+-		ops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
+-			   get_color(vc, info, scr_readw(s), 1),
+-			   get_color(vc, info, scr_readw(s), 0));
++	if (!fbcon_is_inactive(vc, info)) {
++
++		if (fbcon_decor_active(info, vc))
++			fbcon_decor_putcs(vc, info, s, count, ypos, xpos);
++		else
++			ops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
++				   get_color(vc, info, scr_readw(s), 1),
++				   get_color(vc, info, scr_readw(s), 0));
++	}
+ }
+ 
+ static void fbcon_putc(struct vc_data *vc, int c, int ypos, int xpos)
+@@ -1298,8 +1318,13 @@ static void fbcon_clear_margins(struct vc_data *vc, int bottom_only)
+ 	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ 	struct fbcon_ops *ops = info->fbcon_par;
+ 
+-	if (!fbcon_is_inactive(vc, info))
+-		ops->clear_margins(vc, info, bottom_only);
++	if (!fbcon_is_inactive(vc, info)) {
++	 	if (fbcon_decor_active(info, vc)) {
++	 		fbcon_decor_clear_margins(vc, info, bottom_only);
++ 		} else {
++			ops->clear_margins(vc, info, bottom_only);
++		}
++	}
+ }
+ 
+ static void fbcon_cursor(struct vc_data *vc, int mode)
+@@ -1819,7 +1844,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
+ 			count = vc->vc_rows;
+ 		if (softback_top)
+ 			fbcon_softback_note(vc, t, count);
+-		if (logo_shown >= 0)
++		if (logo_shown >= 0 || fbcon_decor_active(info, vc))
+ 			goto redraw_up;
+ 		switch (p->scrollmode) {
+ 		case SCROLL_MOVE:
+@@ -1912,6 +1937,8 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
+ 			count = vc->vc_rows;
+ 		if (logo_shown >= 0)
+ 			goto redraw_down;
++		if (fbcon_decor_active(info, vc))
++			goto redraw_down;
+ 		switch (p->scrollmode) {
+ 		case SCROLL_MOVE:
+ 			fbcon_redraw_blit(vc, info, p, b - 1, b - t - count,
+@@ -2060,6 +2087,13 @@ static void fbcon_bmove_rec(struct vc_data *vc, struct display *p, int sy, int s
+ 		}
+ 		return;
+ 	}
++
++	if (fbcon_decor_active(info, vc) && sy == dy && height == 1) {
++ 		/* must use slower redraw bmove to keep background pic intact */
++ 		fbcon_decor_bmove_redraw(vc, info, sy, sx, dx, width);
++ 		return;
++ 	}
++
+ 	ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx,
+ 		   height, width);
+ }
+@@ -2130,8 +2164,8 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
+ 	var.yres = virt_h * virt_fh;
+ 	x_diff = info->var.xres - var.xres;
+ 	y_diff = info->var.yres - var.yres;
+-	if (x_diff < 0 || x_diff > virt_fw ||
+-	    y_diff < 0 || y_diff > virt_fh) {
++	if ((x_diff < 0 || x_diff > virt_fw ||
++		y_diff < 0 || y_diff > virt_fh) && !vc->vc_decor.state) {
+ 		const struct fb_videomode *mode;
+ 
+ 		DPRINTK("attempting resize %ix%i\n", var.xres, var.yres);
+@@ -2167,6 +2201,21 @@ static int fbcon_switch(struct vc_data *vc)
+ 
+ 	info = registered_fb[con2fb_map[vc->vc_num]];
+ 	ops = info->fbcon_par;
++	prev_console = ops->currcon;
++	if (prev_console != -1)
++		old_info = registered_fb[con2fb_map[prev_console]];
++
++#ifdef CONFIG_FB_CON_DECOR
++	if (!fbcon_decor_active_vc(vc) && info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
++		struct vc_data *vc_curr = vc_cons[prev_console].d;
++		if (vc_curr && fbcon_decor_active_vc(vc_curr)) {
++			/* Clear the screen to avoid displaying funky colors during
++			 * palette updates. */
++			memset((u8*)info->screen_base + info->fix.line_length * info->var.yoffset,
++			       0, info->var.yres * info->fix.line_length);
++		}
++	}
++#endif
+ 
+ 	if (softback_top) {
+ 		if (softback_lines)
+@@ -2185,9 +2234,6 @@ static int fbcon_switch(struct vc_data *vc)
+ 		logo_shown = FBCON_LOGO_CANSHOW;
+ 	}
+ 
+-	prev_console = ops->currcon;
+-	if (prev_console != -1)
+-		old_info = registered_fb[con2fb_map[prev_console]];
+ 	/*
+ 	 * FIXME: If we have multiple fbdev's loaded, we need to
+ 	 * update all info->currcon.  Perhaps, we can place this
+@@ -2231,6 +2277,18 @@ static int fbcon_switch(struct vc_data *vc)
+ 			fbcon_del_cursor_timer(old_info);
+ 	}
+ 
++	if (fbcon_decor_active_vc(vc)) {
++		struct vc_data *vc_curr = vc_cons[prev_console].d;
++
++		if (!vc_curr->vc_decor.theme ||
++			strcmp(vc->vc_decor.theme, vc_curr->vc_decor.theme) ||
++			(fbcon_decor_active_nores(info, vc_curr) &&
++			 !fbcon_decor_active(info, vc_curr))) {
++			fbcon_decor_disable(vc, 0);
++			fbcon_decor_call_helper("modechange", vc->vc_num);
++		}
++	}
++
+ 	if (fbcon_is_inactive(vc, info) ||
+ 	    ops->blank_state != FB_BLANK_UNBLANK)
+ 		fbcon_del_cursor_timer(info);
+@@ -2339,15 +2397,20 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
+ 		}
+ 	}
+ 
+- 	if (!fbcon_is_inactive(vc, info)) {
++	if (!fbcon_is_inactive(vc, info)) {
+ 		if (ops->blank_state != blank) {
+ 			ops->blank_state = blank;
+ 			fbcon_cursor(vc, blank ? CM_ERASE : CM_DRAW);
+ 			ops->cursor_flash = (!blank);
+ 
+-			if (!(info->flags & FBINFO_MISC_USEREVENT))
+-				if (fb_blank(info, blank))
+-					fbcon_generic_blank(vc, info, blank);
++			if (!(info->flags & FBINFO_MISC_USEREVENT)) {
++				if (fb_blank(info, blank)) {
++					if (fbcon_decor_active(info, vc))
++						fbcon_decor_blank(vc, info, blank);
++					else
++						fbcon_generic_blank(vc, info, blank);
++				}
++			}
+ 		}
+ 
+ 		if (!blank)
+@@ -2522,13 +2585,22 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
+ 	}
+ 
+ 	if (resize) {
++		/* reset wrap/pan */
+ 		int cols, rows;
+ 
+ 		cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
+ 		rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
++
++		if (fbcon_decor_active(info, vc)) {
++			info->var.xoffset = info->var.yoffset = p->yscroll = 0;
++			cols = vc->vc_decor.twidth;
++			rows = vc->vc_decor.theight;
++		}
+ 		cols /= w;
+ 		rows /= h;
++
+ 		vc_resize(vc, cols, rows);
++
+ 		if (CON_IS_VISIBLE(vc) && softback_buf)
+ 			fbcon_update_softback(vc);
+ 	} else if (CON_IS_VISIBLE(vc)
+@@ -2657,7 +2729,11 @@ static int fbcon_set_palette(struct vc_data *vc, unsigned char *table)
+ 	int i, j, k, depth;
+ 	u8 val;
+ 
+-	if (fbcon_is_inactive(vc, info))
++	if (fbcon_is_inactive(vc, info)
++#ifdef CONFIG_FB_CON_DECOR
++			|| vc->vc_num != fg_console
++#endif
++		)
+ 		return -EINVAL;
+ 
+ 	if (!CON_IS_VISIBLE(vc))
+@@ -2683,14 +2759,56 @@ static int fbcon_set_palette(struct vc_data *vc, unsigned char *table)
+ 	} else
+ 		fb_copy_cmap(fb_default_cmap(1 << depth), &palette_cmap);
+ 
+-	return fb_set_cmap(&palette_cmap, info);
++	if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
++	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
++
++		u16 *red, *green, *blue;
++		int minlen = min(min(info->var.red.length, info->var.green.length),
++				     info->var.blue.length);
++		int h;
++
++		struct fb_cmap cmap = {
++			.start = 0,
++			.len = (1 << minlen),
++			.red = NULL,
++			.green = NULL,
++			.blue = NULL,
++			.transp = NULL
++		};
++
++		red = kmalloc(256 * sizeof(u16) * 3, GFP_KERNEL);
++
++		if (!red)
++			goto out;
++
++		green = red + 256;
++		blue = green + 256;
++		cmap.red = red;
++		cmap.green = green;
++		cmap.blue = blue;
++
++		for (i = 0; i < cmap.len; i++) {
++			red[i] = green[i] = blue[i] = (0xffff * i)/(cmap.len-1);
++		}
++
++		h = fb_set_cmap(&cmap, info);
++		fbcon_decor_fix_pseudo_pal(info, vc_cons[fg_console].d);
++		kfree(red);
++
++		return h;
++
++	} else if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
++		   info->var.bits_per_pixel == 8 && info->bgdecor.cmap.red != NULL)
++		fb_set_cmap(&info->bgdecor.cmap, info);
++
++out:	return fb_set_cmap(&palette_cmap, info);
+ }
+ 
+ static u16 *fbcon_screen_pos(struct vc_data *vc, int offset)
+ {
+ 	unsigned long p;
+ 	int line;
+-	
++
+ 	if (vc->vc_num != fg_console || !softback_lines)
+ 		return (u16 *) (vc->vc_origin + offset);
+ 	line = offset / vc->vc_size_row;
+@@ -2909,7 +3027,14 @@ static void fbcon_modechanged(struct fb_info *info)
+ 		rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ 		cols /= vc->vc_font.width;
+ 		rows /= vc->vc_font.height;
+-		vc_resize(vc, cols, rows);
++
++		if (!fbcon_decor_active_nores(info, vc)) {
++			vc_resize(vc, cols, rows);
++		} else {
++			fbcon_decor_disable(vc, 0);
++			fbcon_decor_call_helper("modechange", vc->vc_num);
++		}
++
+ 		updatescrollmode(p, info, vc);
+ 		scrollback_max = 0;
+ 		scrollback_current = 0;
+@@ -2954,7 +3079,9 @@ static void fbcon_set_all_vcs(struct fb_info *info)
+ 		rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ 		cols /= vc->vc_font.width;
+ 		rows /= vc->vc_font.height;
+-		vc_resize(vc, cols, rows);
++		if (!fbcon_decor_active_nores(info, vc)) {
++			vc_resize(vc, cols, rows);
++		}
+ 	}
+ 
+ 	if (fg != -1)
+@@ -3596,6 +3723,7 @@ static void fbcon_exit(void)
+ 		}
+ 	}
+ 
++	fbcon_decor_exit();
+ 	fbcon_has_exited = 1;
+ }
+ 
+diff --git a/drivers/video/console/fbcondecor.c b/drivers/video/console/fbcondecor.c
+new file mode 100644
+index 0000000..babc8c5
+--- /dev/null
++++ b/drivers/video/console/fbcondecor.c
+@@ -0,0 +1,555 @@
++/*
++ *  linux/drivers/video/console/fbcondecor.c -- Framebuffer console decorations
++ *
++ *  Copyright (C) 2004-2009 Michal Januszewski <michalj+fbcondecor@gmail.com>
++ *
++ *  Code based upon "Bootsplash" (C) 2001-2003
++ *       Volker Poplawski <volker@poplawski.de>,
++ *       Stefan Reinauer <stepan@suse.de>,
++ *       Steffen Winterfeldt <snwint@suse.de>,
++ *       Michael Schroeder <mls@suse.de>,
++ *       Ken Wimer <wimer@suse.de>.
++ *
++ *  Compat ioctl support by Thorsten Klein <TK@Thorsten-Klein.de>.
++ *
++ *  This file is subject to the terms and conditions of the GNU General Public
++ *  License.  See the file COPYING in the main directory of this archive for
++ *  more details.
++ *
++ */
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/fb.h>
++#include <linux/vt_kern.h>
++#include <linux/vmalloc.h>
++#include <linux/unistd.h>
++#include <linux/syscalls.h>
++#include <linux/init.h>
++#include <linux/proc_fs.h>
++#include <linux/workqueue.h>
++#include <linux/kmod.h>
++#include <linux/miscdevice.h>
++#include <linux/device.h>
++#include <linux/fs.h>
++#include <linux/compat.h>
++#include <linux/console.h>
++
++#include <asm/uaccess.h>
++#include <asm/irq.h>
++
++#include "fbcon.h"
++#include "fbcondecor.h"
++
++extern signed char con2fb_map[];
++static int fbcon_decor_enable(struct vc_data *vc);
++char fbcon_decor_path[KMOD_PATH_LEN] = "/sbin/fbcondecor_helper";
++static int initialized = 0;
++
++int fbcon_decor_call_helper(char* cmd, unsigned short vc)
++{
++	char *envp[] = {
++		"HOME=/",
++		"PATH=/sbin:/bin",
++		NULL
++	};
++
++	char tfb[5];
++	char tcons[5];
++	unsigned char fb = (int) con2fb_map[vc];
++
++	char *argv[] = {
++		fbcon_decor_path,
++		"2",
++		cmd,
++		tcons,
++		tfb,
++		vc_cons[vc].d->vc_decor.theme,
++		NULL
++	};
++
++	snprintf(tfb,5,"%d",fb);
++	snprintf(tcons,5,"%d",vc);
++
++	return call_usermodehelper(fbcon_decor_path, argv, envp, UMH_WAIT_EXEC);
++}
++
++/* Disables fbcondecor on a virtual console; called with console sem held. */
++int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw)
++{
++	struct fb_info* info;
++
++	if (!vc->vc_decor.state)
++		return -EINVAL;
++
++	info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++	if (info == NULL)
++		return -EINVAL;
++
++	vc->vc_decor.state = 0;
++	vc_resize(vc, info->var.xres / vc->vc_font.width,
++		  info->var.yres / vc->vc_font.height);
++
++	if (fg_console == vc->vc_num && redraw) {
++		redraw_screen(vc, 0);
++		update_region(vc, vc->vc_origin +
++			      vc->vc_size_row * vc->vc_top,
++			      vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
++	}
++
++	printk(KERN_INFO "fbcondecor: switched decor state to 'off' on console %d\n",
++			 vc->vc_num);
++
++	return 0;
++}
++
++/* Enables fbcondecor on a virtual console; called with console sem held. */
++static int fbcon_decor_enable(struct vc_data *vc)
++{
++	struct fb_info* info;
++
++	info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++	if (vc->vc_decor.twidth == 0 || vc->vc_decor.theight == 0 ||
++	    info == NULL || vc->vc_decor.state || (!info->bgdecor.data &&
++	    vc->vc_num == fg_console))
++		return -EINVAL;
++
++	vc->vc_decor.state = 1;
++	vc_resize(vc, vc->vc_decor.twidth / vc->vc_font.width,
++		  vc->vc_decor.theight / vc->vc_font.height);
++
++	if (fg_console == vc->vc_num) {
++		redraw_screen(vc, 0);
++		update_region(vc, vc->vc_origin +
++			      vc->vc_size_row * vc->vc_top,
++			      vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
++		fbcon_decor_clear_margins(vc, info, 0);
++	}
++
++	printk(KERN_INFO "fbcondecor: switched decor state to 'on' on console %d\n",
++			 vc->vc_num);
++
++	return 0;
++}
++
++static inline int fbcon_decor_ioctl_dosetstate(struct vc_data *vc, unsigned int state, unsigned char origin)
++{
++	int ret;
++
++//	if (origin == FBCON_DECOR_IO_ORIG_USER)
++		console_lock();
++	if (!state)
++		ret = fbcon_decor_disable(vc, 1);
++	else
++		ret = fbcon_decor_enable(vc);
++//	if (origin == FBCON_DECOR_IO_ORIG_USER)
++		console_unlock();
++
++	return ret;
++}
++
++static inline void fbcon_decor_ioctl_dogetstate(struct vc_data *vc, unsigned int *state)
++{
++	*state = vc->vc_decor.state;
++}
++
++static int fbcon_decor_ioctl_dosetcfg(struct vc_data *vc, struct vc_decor *cfg, unsigned char origin)
++{
++	struct fb_info *info;
++	int len;
++	char *tmp;
++
++	info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++	if (info == NULL || !cfg->twidth || !cfg->theight ||
++	    cfg->tx + cfg->twidth  > info->var.xres ||
++	    cfg->ty + cfg->theight > info->var.yres)
++		return -EINVAL;
++
++	len = strlen_user(cfg->theme);
++	if (!len || len > FBCON_DECOR_THEME_LEN)
++		return -EINVAL;
++	tmp = kmalloc(len, GFP_KERNEL);
++	if (!tmp)
++		return -ENOMEM;
++	if (copy_from_user(tmp, (void __user *)cfg->theme, len))
++		return -EFAULT;
++	cfg->theme = tmp;
++	cfg->state = 0;
++
++	/* If this ioctl is a response to a request from kernel, the console sem
++	 * is already held; we also don't need to disable decor because either the
++	 * new config and background picture will be successfully loaded, and the
++	 * decor will stay on, or in case of a failure it'll be turned off in fbcon. */
++//	if (origin == FBCON_DECOR_IO_ORIG_USER) {
++		console_lock();
++		if (vc->vc_decor.state)
++			fbcon_decor_disable(vc, 1);
++//	}
++
++	if (vc->vc_decor.theme)
++		kfree(vc->vc_decor.theme);
++
++	vc->vc_decor = *cfg;
++
++//	if (origin == FBCON_DECOR_IO_ORIG_USER)
++		console_unlock();
++
++	printk(KERN_INFO "fbcondecor: console %d using theme '%s'\n",
++			 vc->vc_num, vc->vc_decor.theme);
++	return 0;
++}
++
++static int fbcon_decor_ioctl_dogetcfg(struct vc_data *vc, struct vc_decor *decor)
++{
++	char __user *tmp;
++
++	tmp = decor->theme;
++	*decor = vc->vc_decor;
++	decor->theme = tmp;
++
++	if (vc->vc_decor.theme) {
++		if (copy_to_user(tmp, vc->vc_decor.theme, strlen(vc->vc_decor.theme) + 1))
++			return -EFAULT;
++	} else
++		if (put_user(0, tmp))
++			return -EFAULT;
++
++	return 0;
++}
++
++static int fbcon_decor_ioctl_dosetpic(struct vc_data *vc, struct fb_image *img, unsigned char origin)
++{
++	struct fb_info *info;
++	int len;
++	u8 *tmp;
++
++	if (vc->vc_num != fg_console)
++		return -EINVAL;
++
++	info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++	if (info == NULL)
++		return -EINVAL;
++
++	if (img->width != info->var.xres || img->height != info->var.yres) {
++		printk(KERN_ERR "fbcondecor: picture dimensions mismatch\n");
++		printk(KERN_ERR "%dx%d vs %dx%d\n", img->width, img->height, info->var.xres, info->var.yres);
++		return -EINVAL;
++	}
++
++	if (img->depth != info->var.bits_per_pixel) {
++		printk(KERN_ERR "fbcondecor: picture depth mismatch\n");
++		return -EINVAL;
++	}
++
++	if (img->depth == 8) {
++		if (!img->cmap.len || !img->cmap.red || !img->cmap.green ||
++		    !img->cmap.blue)
++			return -EINVAL;
++
++		tmp = vmalloc(img->cmap.len * 3 * 2);
++		if (!tmp)
++			return -ENOMEM;
++
++		if (copy_from_user(tmp,
++			    	   (void __user*)img->cmap.red, (img->cmap.len << 1)) ||
++		    copy_from_user(tmp + (img->cmap.len << 1),
++			    	   (void __user*)img->cmap.green, (img->cmap.len << 1)) ||
++		    copy_from_user(tmp + (img->cmap.len << 2),
++			    	   (void __user*)img->cmap.blue, (img->cmap.len << 1))) {
++			vfree(tmp);
++			return -EFAULT;
++		}
++
++		img->cmap.transp = NULL;
++		img->cmap.red = (u16*)tmp;
++		img->cmap.green = img->cmap.red + img->cmap.len;
++		img->cmap.blue = img->cmap.green + img->cmap.len;
++	} else {
++		img->cmap.red = NULL;
++	}
++
++	len = ((img->depth + 7) >> 3) * img->width * img->height;
++
++	/*
++	 * Allocate an additional byte so that we never go outside of the
++	 * buffer boundaries in the rendering functions in a 24 bpp mode.
++	 */
++	tmp = vmalloc(len + 1);
++
++	if (!tmp)
++		goto out;
++
++	if (copy_from_user(tmp, (void __user*)img->data, len))
++		goto out;
++
++	img->data = tmp;
++
++	/* If this ioctl is a response to a request from kernel, the console sem
++	 * is already held. */
++//	if (origin == FBCON_DECOR_IO_ORIG_USER)
++		console_lock();
++
++	if (info->bgdecor.data)
++		vfree((u8*)info->bgdecor.data);
++	if (info->bgdecor.cmap.red)
++		vfree(info->bgdecor.cmap.red);
++
++	info->bgdecor = *img;
++
++	if (fbcon_decor_active_vc(vc) && fg_console == vc->vc_num) {
++		redraw_screen(vc, 0);
++		update_region(vc, vc->vc_origin +
++			      vc->vc_size_row * vc->vc_top,
++			      vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
++		fbcon_decor_clear_margins(vc, info, 0);
++	}
++
++//	if (origin == FBCON_DECOR_IO_ORIG_USER)
++		console_unlock();
++
++	return 0;
++
++out:	if (img->cmap.red)
++		vfree(img->cmap.red);
++
++	if (tmp)
++		vfree(tmp);
++	return -ENOMEM;
++}
++
++static long fbcon_decor_ioctl(struct file *filp, u_int cmd, u_long arg)
++{
++	struct fbcon_decor_iowrapper __user *wrapper = (void __user*) arg;
++	struct vc_data *vc = NULL;
++	unsigned short vc_num = 0;
++	unsigned char origin = 0;
++	void __user *data = NULL;
++
++	if (!access_ok(VERIFY_READ, wrapper,
++			sizeof(struct fbcon_decor_iowrapper)))
++		return -EFAULT;
++
++	__get_user(vc_num, &wrapper->vc);
++	__get_user(origin, &wrapper->origin);
++	__get_user(data, &wrapper->data);
++
++	if (!vc_cons_allocated(vc_num))
++		return -EINVAL;
++
++	vc = vc_cons[vc_num].d;
++
++	switch (cmd) {
++	case FBIOCONDECOR_SETPIC:
++	{
++		struct fb_image img;
++		if (copy_from_user(&img, (struct fb_image __user *)data, sizeof(struct fb_image)))
++			return -EFAULT;
++
++		return fbcon_decor_ioctl_dosetpic(vc, &img, origin);
++	}
++	case FBIOCONDECOR_SETCFG:
++	{
++		struct vc_decor cfg;
++		if (copy_from_user(&cfg, (struct vc_decor __user *)data, sizeof(struct vc_decor)))
++			return -EFAULT;
++
++		return fbcon_decor_ioctl_dosetcfg(vc, &cfg, origin);
++	}
++	case FBIOCONDECOR_GETCFG:
++	{
++		int rval;
++		struct vc_decor cfg;
++
++		if (copy_from_user(&cfg, (struct vc_decor __user *)data, sizeof(struct vc_decor)))
++			return -EFAULT;
++
++		rval = fbcon_decor_ioctl_dogetcfg(vc, &cfg);
++
++		if (copy_to_user(data, &cfg, sizeof(struct vc_decor)))
++			return -EFAULT;
++		return rval;
++	}
++	case FBIOCONDECOR_SETSTATE:
++	{
++		unsigned int state = 0;
++		if (get_user(state, (unsigned int __user *)data))
++			return -EFAULT;
++		return fbcon_decor_ioctl_dosetstate(vc, state, origin);
++	}
++	case FBIOCONDECOR_GETSTATE:
++	{
++		unsigned int state = 0;
++		fbcon_decor_ioctl_dogetstate(vc, &state);
++		return put_user(state, (unsigned int __user *)data);
++	}
++
++	default:
++		return -ENOIOCTLCMD;
++	}
++}
++
++#ifdef CONFIG_COMPAT
++
++static long fbcon_decor_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) {
++
++	struct fbcon_decor_iowrapper32 __user *wrapper = (void __user *)arg;
++	struct vc_data *vc = NULL;
++	unsigned short vc_num = 0;
++	unsigned char origin = 0;
++	compat_uptr_t data_compat = 0;
++	void __user *data = NULL;
++
++	if (!access_ok(VERIFY_READ, wrapper,
++                       sizeof(struct fbcon_decor_iowrapper32)))
++		return -EFAULT;
++
++	__get_user(vc_num, &wrapper->vc);
++	__get_user(origin, &wrapper->origin);
++	__get_user(data_compat, &wrapper->data);
++	data = compat_ptr(data_compat);
++
++	if (!vc_cons_allocated(vc_num))
++		return -EINVAL;
++
++	vc = vc_cons[vc_num].d;
++
++	switch (cmd) {
++	case FBIOCONDECOR_SETPIC32:
++	{
++		struct fb_image32 img_compat;
++		struct fb_image img;
++
++		if (copy_from_user(&img_compat, (struct fb_image32 __user *)data, sizeof(struct fb_image32)))
++			return -EFAULT;
++
++		fb_image_from_compat(img, img_compat);
++
++		return fbcon_decor_ioctl_dosetpic(vc, &img, origin);
++	}
++
++	case FBIOCONDECOR_SETCFG32:
++	{
++		struct vc_decor32 cfg_compat;
++		struct vc_decor cfg;
++
++		if (copy_from_user(&cfg_compat, (struct vc_decor32 __user *)data, sizeof(struct vc_decor32)))
++			return -EFAULT;
++
++		vc_decor_from_compat(cfg, cfg_compat);
++
++		return fbcon_decor_ioctl_dosetcfg(vc, &cfg, origin);
++	}
++
++	case FBIOCONDECOR_GETCFG32:
++	{
++		int rval;
++		struct vc_decor32 cfg_compat;
++		struct vc_decor cfg;
++
++		if (copy_from_user(&cfg_compat, (struct vc_decor32 __user *)data, sizeof(struct vc_decor32)))
++			return -EFAULT;
++		cfg.theme = compat_ptr(cfg_compat.theme);
++
++		rval = fbcon_decor_ioctl_dogetcfg(vc, &cfg);
++
++		vc_decor_to_compat(cfg_compat, cfg);
++
++		if (copy_to_user((struct vc_decor32 __user *)data, &cfg_compat, sizeof(struct vc_decor32)))
++			return -EFAULT;
++		return rval;
++	}
++
++	case FBIOCONDECOR_SETSTATE32:
++	{
++		compat_uint_t state_compat = 0;
++		unsigned int state = 0;
++
++		if (get_user(state_compat, (compat_uint_t __user *)data))
++			return -EFAULT;
++
++		state = (unsigned int)state_compat;
++
++		return fbcon_decor_ioctl_dosetstate(vc, state, origin);
++	}
++
++	case FBIOCONDECOR_GETSTATE32:
++	{
++		compat_uint_t state_compat = 0;
++		unsigned int state = 0;
++
++		fbcon_decor_ioctl_dogetstate(vc, &state);
++		state_compat = (compat_uint_t)state;
++
++		return put_user(state_compat, (compat_uint_t __user *)data);
++	}
++
++	default:
++		return -ENOIOCTLCMD;
++	}
++}
++#else
++  #define fbcon_decor_compat_ioctl NULL
++#endif
++
++static struct file_operations fbcon_decor_ops = {
++	.owner = THIS_MODULE,
++	.unlocked_ioctl = fbcon_decor_ioctl,
++	.compat_ioctl = fbcon_decor_compat_ioctl
++};
++
++static struct miscdevice fbcon_decor_dev = {
++	.minor = MISC_DYNAMIC_MINOR,
++	.name = "fbcondecor",
++	.fops = &fbcon_decor_ops
++};
++
++void fbcon_decor_reset(void)
++{
++	int i;
++
++	for (i = 0; i < num_registered_fb; i++) {
++		registered_fb[i]->bgdecor.data = NULL;
++		registered_fb[i]->bgdecor.cmap.red = NULL;
++	}
++
++	for (i = 0; i < MAX_NR_CONSOLES && vc_cons[i].d; i++) {
++		vc_cons[i].d->vc_decor.state = vc_cons[i].d->vc_decor.twidth =
++						vc_cons[i].d->vc_decor.theight = 0;
++		vc_cons[i].d->vc_decor.theme = NULL;
++	}
++
++	return;
++}
++
++int fbcon_decor_init(void)
++{
++	int i;
++
++	fbcon_decor_reset();
++
++	if (initialized)
++		return 0;
++
++	i = misc_register(&fbcon_decor_dev);
++	if (i) {
++		printk(KERN_ERR "fbcondecor: failed to register device\n");
++		return i;
++	}
++
++	fbcon_decor_call_helper("init", 0);
++	initialized = 1;
++	return 0;
++}
++
++int fbcon_decor_exit(void)
++{
++	fbcon_decor_reset();
++	return 0;
++}
++
++EXPORT_SYMBOL(fbcon_decor_path);
+diff --git a/drivers/video/console/fbcondecor.h b/drivers/video/console/fbcondecor.h
+new file mode 100644
+index 0000000..3b3724b
+--- /dev/null
++++ b/drivers/video/console/fbcondecor.h
+@@ -0,0 +1,78 @@
++/* 
++ *  linux/drivers/video/console/fbcondecor.h -- Framebuffer Console Decoration headers
++ *
++ *  Copyright (C) 2004 Michal Januszewski <michalj+fbcondecor@gmail.com>
++ *
++ */
++
++#ifndef __FBCON_DECOR_H
++#define __FBCON_DECOR_H
++
++#ifndef _LINUX_FB_H
++#include <linux/fb.h>
++#endif
++
++/* This is needed for vc_cons in fbcmap.c */
++#include <linux/vt_kern.h>
++
++struct fb_cursor;
++struct fb_info;
++struct vc_data;
++
++#ifdef CONFIG_FB_CON_DECOR
++/* fbcondecor.c */
++int fbcon_decor_init(void);
++int fbcon_decor_exit(void);
++int fbcon_decor_call_helper(char* cmd, unsigned short cons);
++int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw);
++
++/* cfbcondecor.c */
++void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info, const unsigned short *s, int count, int yy, int xx);
++void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor);
++void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width);
++void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info, int bottom_only);
++void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank);
++void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, int sx, int dx, int width);
++void fbcon_decor_copy(u8 *dst, u8 *src, int height, int width, int linebytes, int srclinesbytes, int bpp);
++void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc);
++
++/* vt.c */
++void acquire_console_sem(void);
++void release_console_sem(void);
++void do_unblank_screen(int entering_gfx);
++
++/* struct vc_data *y */
++#define fbcon_decor_active_vc(y) (y->vc_decor.state && y->vc_decor.theme) 
++
++/* struct fb_info *x, struct vc_data *y */
++#define fbcon_decor_active_nores(x,y) (x->bgdecor.data && fbcon_decor_active_vc(y))
++
++/* struct fb_info *x, struct vc_data *y */
++#define fbcon_decor_active(x,y) (fbcon_decor_active_nores(x,y) &&		\
++			      x->bgdecor.width == x->var.xres && 	\
++			      x->bgdecor.height == x->var.yres &&	\
++			      x->bgdecor.depth == x->var.bits_per_pixel)
++
++
++#else /* CONFIG_FB_CON_DECOR */
++
++static inline void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info, const unsigned short *s, int count, int yy, int xx) {}
++static inline void fbcon_decor_putc(struct vc_data *vc, struct fb_info *info, int c, int ypos, int xpos) {}
++static inline void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor) {}
++static inline void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) {}
++static inline void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info, int bottom_only) {}
++static inline void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank) {}
++static inline void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, int sx, int dx, int width) {}
++static inline void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc) {}
++static inline int fbcon_decor_call_helper(char* cmd, unsigned short cons) { return 0; }
++static inline int fbcon_decor_init(void) { return 0; }
++static inline int fbcon_decor_exit(void) { return 0; }
++static inline int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw) { return 0; }
++
++#define fbcon_decor_active_vc(y) (0)
++#define fbcon_decor_active_nores(x,y) (0)
++#define fbcon_decor_active(x,y) (0)
++
++#endif /* CONFIG_FB_CON_DECOR */
++
++#endif /* __FBCON_DECOR_H */
+diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
+index e1f4727..2952e33 100644
+--- a/drivers/video/fbdev/Kconfig
++++ b/drivers/video/fbdev/Kconfig
+@@ -1204,7 +1204,6 @@ config FB_MATROX
+ 	select FB_CFB_FILLRECT
+ 	select FB_CFB_COPYAREA
+ 	select FB_CFB_IMAGEBLIT
+-	select FB_TILEBLITTING
+ 	select FB_MACMODES if PPC_PMAC
+ 	---help---
+ 	  Say Y here if you have a Matrox Millennium, Matrox Millennium II,
+diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
+index f89245b..05e036c 100644
+--- a/drivers/video/fbdev/core/fbcmap.c
++++ b/drivers/video/fbdev/core/fbcmap.c
+@@ -17,6 +17,8 @@
+ #include <linux/slab.h>
+ #include <linux/uaccess.h>
+ 
++#include "../../console/fbcondecor.h"
++
+ static u16 red2[] __read_mostly = {
+     0x0000, 0xaaaa
+ };
+@@ -249,14 +251,17 @@ int fb_set_cmap(struct fb_cmap *cmap, struct fb_info *info)
+ 			if (transp)
+ 				htransp = *transp++;
+ 			if (info->fbops->fb_setcolreg(start++,
+-						      hred, hgreen, hblue,
++						      hred, hgreen, hblue, 
+ 						      htransp, info))
+ 				break;
+ 		}
+ 	}
+-	if (rc == 0)
++	if (rc == 0) {
+ 		fb_copy_cmap(cmap, &info->cmap);
+-
++		if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
++		    info->fix.visual == FB_VISUAL_DIRECTCOLOR)
++			fbcon_decor_fix_pseudo_pal(info, vc_cons[fg_console].d);
++	}
+ 	return rc;
+ }
+ 
+diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
+index b6d5008..d6703f2 100644
+--- a/drivers/video/fbdev/core/fbmem.c
++++ b/drivers/video/fbdev/core/fbmem.c
+@@ -1250,15 +1250,6 @@ struct fb_fix_screeninfo32 {
+ 	u16			reserved[3];
+ };
+ 
+-struct fb_cmap32 {
+-	u32			start;
+-	u32			len;
+-	compat_caddr_t	red;
+-	compat_caddr_t	green;
+-	compat_caddr_t	blue;
+-	compat_caddr_t	transp;
+-};
+-
+ static int fb_getput_cmap(struct fb_info *info, unsigned int cmd,
+ 			  unsigned long arg)
+ {
+diff --git a/include/linux/console_decor.h b/include/linux/console_decor.h
+new file mode 100644
+index 0000000..04b8d80
+--- /dev/null
++++ b/include/linux/console_decor.h
+@@ -0,0 +1,46 @@
++#ifndef _LINUX_CONSOLE_DECOR_H_
++#define _LINUX_CONSOLE_DECOR_H_ 1
++
++/* A structure used by the framebuffer console decorations (drivers/video/console/fbcondecor.c) */
++struct vc_decor {
++	__u8 bg_color;				/* The color that is to be treated as transparent */
++	__u8 state;				/* Current decor state: 0 = off, 1 = on */
++	__u16 tx, ty;				/* Top left corner coordinates of the text field */
++	__u16 twidth, theight;			/* Width and height of the text field */
++	char* theme;
++};
++
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++#include <linux/compat.h>
++
++struct vc_decor32 {
++	__u8 bg_color;				/* The color that is to be treated as transparent */
++	__u8 state;				/* Current decor state: 0 = off, 1 = on */
++	__u16 tx, ty;				/* Top left corner coordinates of the text field */
++	__u16 twidth, theight;			/* Width and height of the text field */
++	compat_uptr_t theme;
++};
++
++#define vc_decor_from_compat(to, from) \
++	(to).bg_color = (from).bg_color; \
++	(to).state    = (from).state; \
++	(to).tx       = (from).tx; \
++	(to).ty       = (from).ty; \
++	(to).twidth   = (from).twidth; \
++	(to).theight  = (from).theight; \
++	(to).theme    = compat_ptr((from).theme)
++
++#define vc_decor_to_compat(to, from) \
++	(to).bg_color = (from).bg_color; \
++	(to).state    = (from).state; \
++	(to).tx       = (from).tx; \
++	(to).ty       = (from).ty; \
++	(to).twidth   = (from).twidth; \
++	(to).theight  = (from).theight; \
++	(to).theme    = ptr_to_compat((from).theme)
++
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
++#endif
+diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
+index 7f0c329..98f5d60 100644
+--- a/include/linux/console_struct.h
++++ b/include/linux/console_struct.h
+@@ -19,6 +19,7 @@
+ struct vt_struct;
+ 
+ #define NPAR 16
++#include <linux/console_decor.h>
+ 
+ struct vc_data {
+ 	struct tty_port port;			/* Upper level data */
+@@ -107,6 +108,8 @@ struct vc_data {
+ 	unsigned long	vc_uni_pagedir;
+ 	unsigned long	*vc_uni_pagedir_loc;  /* [!] Location of uni_pagedir variable for this console */
+ 	bool vc_panic_force_write; /* when oops/panic this VC can accept forced output/blanking */
++
++	struct vc_decor vc_decor;
+ 	/* additional information is in vt_kern.h */
+ };
+ 
+diff --git a/include/linux/fb.h b/include/linux/fb.h
+index fe6ac95..1e36b03 100644
+--- a/include/linux/fb.h
++++ b/include/linux/fb.h
+@@ -219,6 +219,34 @@ struct fb_deferred_io {
+ };
+ #endif
+ 
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++struct fb_image32 {
++	__u32 dx;			/* Where to place image */
++	__u32 dy;
++	__u32 width;			/* Size of image */
++	__u32 height;
++	__u32 fg_color;			/* Only used when a mono bitmap */
++	__u32 bg_color;
++	__u8  depth;			/* Depth of the image */
++	const compat_uptr_t data;	/* Pointer to image data */
++	struct fb_cmap32 cmap;		/* color map info */
++};
++
++#define fb_image_from_compat(to, from) \
++	(to).dx       = (from).dx; \
++	(to).dy       = (from).dy; \
++	(to).width    = (from).width; \
++	(to).height   = (from).height; \
++	(to).fg_color = (from).fg_color; \
++	(to).bg_color = (from).bg_color; \
++	(to).depth    = (from).depth; \
++	(to).data     = compat_ptr((from).data); \
++	fb_cmap_from_compat((to).cmap, (from).cmap)
++
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
+ /*
+  * Frame buffer operations
+  *
+@@ -489,6 +517,9 @@ struct fb_info {
+ #define FBINFO_STATE_SUSPENDED	1
+ 	u32 state;			/* Hardware state i.e suspend */
+ 	void *fbcon_par;                /* fbcon use-only private area */
++
++	struct fb_image bgdecor;
++
+ 	/* From here on everything is device dependent */
+ 	void *par;
+ 	/* we need the PCI or similar aperture base/size not
+diff --git a/include/uapi/linux/fb.h b/include/uapi/linux/fb.h
+index fb795c3..dc77a03 100644
+--- a/include/uapi/linux/fb.h
++++ b/include/uapi/linux/fb.h
+@@ -8,6 +8,25 @@
+ 
+ #define FB_MAX			32	/* sufficient for now */
+ 
++struct fbcon_decor_iowrapper
++{
++	unsigned short vc;		/* Virtual console */
++	unsigned char origin;		/* Point of origin of the request */
++	void *data;
++};
++
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++#include <linux/compat.h>
++struct fbcon_decor_iowrapper32
++{
++	unsigned short vc;		/* Virtual console */
++	unsigned char origin;		/* Point of origin of the request */
++	compat_uptr_t data;
++};
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
+ /* ioctls
+    0x46 is 'F'								*/
+ #define FBIOGET_VSCREENINFO	0x4600
+@@ -35,6 +54,25 @@
+ #define FBIOGET_DISPINFO        0x4618
+ #define FBIO_WAITFORVSYNC	_IOW('F', 0x20, __u32)
+ 
++#define FBIOCONDECOR_SETCFG	_IOWR('F', 0x19, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_GETCFG	_IOR('F', 0x1A, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_SETSTATE	_IOWR('F', 0x1B, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_GETSTATE	_IOR('F', 0x1C, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_SETPIC 	_IOWR('F', 0x1D, struct fbcon_decor_iowrapper)
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++#define FBIOCONDECOR_SETCFG32	_IOWR('F', 0x19, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_GETCFG32	_IOR('F', 0x1A, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_SETSTATE32	_IOWR('F', 0x1B, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_GETSTATE32	_IOR('F', 0x1C, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_SETPIC32	_IOWR('F', 0x1D, struct fbcon_decor_iowrapper32)
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
++#define FBCON_DECOR_THEME_LEN		128	/* Maximum lenght of a theme name */
++#define FBCON_DECOR_IO_ORIG_KERNEL	0	/* Kernel ioctl origin */
++#define FBCON_DECOR_IO_ORIG_USER	1	/* User ioctl origin */
++ 
+ #define FB_TYPE_PACKED_PIXELS		0	/* Packed Pixels	*/
+ #define FB_TYPE_PLANES			1	/* Non interleaved planes */
+ #define FB_TYPE_INTERLEAVED_PLANES	2	/* Interleaved planes	*/
+@@ -277,6 +315,29 @@ struct fb_var_screeninfo {
+ 	__u32 reserved[4];		/* Reserved for future compatibility */
+ };
+ 
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++struct fb_cmap32 {
++	__u32 start;
++	__u32 len;			/* Number of entries */
++	compat_uptr_t red;		/* Red values	*/
++	compat_uptr_t green;
++	compat_uptr_t blue;
++	compat_uptr_t transp;		/* transparency, can be NULL */
++};
++
++#define fb_cmap_from_compat(to, from) \
++	(to).start  = (from).start; \
++	(to).len    = (from).len; \
++	(to).red    = compat_ptr((from).red); \
++	(to).green  = compat_ptr((from).green); \
++	(to).blue   = compat_ptr((from).blue); \
++	(to).transp = compat_ptr((from).transp)
++
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
++
+ struct fb_cmap {
+ 	__u32 start;			/* First entry	*/
+ 	__u32 len;			/* Number of entries */
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 74f5b58..6386ab0 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -146,6 +146,10 @@ static const int cap_last_cap = CAP_LAST_CAP;
+ static unsigned long hung_task_timeout_max = (LONG_MAX/HZ);
+ #endif
+ 
++#ifdef CONFIG_FB_CON_DECOR
++extern char fbcon_decor_path[];
++#endif
++
+ #ifdef CONFIG_INOTIFY_USER
+ #include <linux/inotify.h>
+ #endif
+@@ -255,6 +259,15 @@ static struct ctl_table sysctl_base_table[] = {
+ 		.mode		= 0555,
+ 		.child		= dev_table,
+ 	},
++#ifdef CONFIG_FB_CON_DECOR
++	{
++		.procname	= "fbcondecor",
++		.data		= &fbcon_decor_path,
++		.maxlen		= KMOD_PATH_LEN,
++		.mode		= 0644,
++		.proc_handler	= &proc_dostring,
++	},
++#endif
+ 	{ }
+ };
+ 

diff --git a/5000_enable-additional-cpu-optimizations-for-gcc.patch b/5000_enable-additional-cpu-optimizations-for-gcc.patch
new file mode 100644
index 0000000..f7ab6f0
--- /dev/null
+++ b/5000_enable-additional-cpu-optimizations-for-gcc.patch
@@ -0,0 +1,327 @@
+This patch has been tested on and known to work with kernel versions from 3.2
+up to the latest git version (pulled on 12/14/2013).
+
+This patch will expand the number of microarchitectures to include new
+processors including: AMD K10-family, AMD Family 10h (Barcelona), AMD Family
+14h (Bobcat), AMD Family 15h (Bulldozer), AMD Family 15h (Piledriver), AMD
+Family 16h (Jaguar), Intel 1st Gen Core i3/i5/i7 (Nehalem), Intel 2nd Gen Core
+i3/i5/i7 (Sandybridge), Intel 3rd Gen Core i3/i5/i7 (Ivybridge), and Intel 4th
+Gen Core i3/i5/i7 (Haswell). It also offers the compiler the 'native' flag.
+
+Small but real speed increases are measurable using a make endpoint comparing
+a generic kernel to one built with one of the respective microarchs.
+
+See the following experimental evidence supporting this statement:
+https://github.com/graysky2/kernel_gcc_patch
+
+REQUIREMENTS
+linux version >=3.15
+gcc version <4.9
+
+---
+diff -uprN a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
+--- a/arch/x86/include/asm/module.h	2013-11-03 18:41:51.000000000 -0500
++++ b/arch/x86/include/asm/module.h	2013-12-15 06:21:24.351122516 -0500
+@@ -15,6 +15,16 @@
+ #define MODULE_PROC_FAMILY "586MMX "
+ #elif defined CONFIG_MCORE2
+ #define MODULE_PROC_FAMILY "CORE2 "
++#elif defined CONFIG_MNATIVE
++#define MODULE_PROC_FAMILY "NATIVE "
++#elif defined CONFIG_MCOREI7
++#define MODULE_PROC_FAMILY "COREI7 "
++#elif defined CONFIG_MCOREI7AVX
++#define MODULE_PROC_FAMILY "COREI7AVX "
++#elif defined CONFIG_MCOREAVXI
++#define MODULE_PROC_FAMILY "COREAVXI "
++#elif defined CONFIG_MCOREAVX2
++#define MODULE_PROC_FAMILY "COREAVX2 "
+ #elif defined CONFIG_MATOM
+ #define MODULE_PROC_FAMILY "ATOM "
+ #elif defined CONFIG_M686
+@@ -33,6 +43,18 @@
+ #define MODULE_PROC_FAMILY "K7 "
+ #elif defined CONFIG_MK8
+ #define MODULE_PROC_FAMILY "K8 "
++#elif defined CONFIG_MK10
++#define MODULE_PROC_FAMILY "K10 "
++#elif defined CONFIG_MBARCELONA
++#define MODULE_PROC_FAMILY "BARCELONA "
++#elif defined CONFIG_MBOBCAT
++#define MODULE_PROC_FAMILY "BOBCAT "
++#elif defined CONFIG_MBULLDOZER
++#define MODULE_PROC_FAMILY "BULLDOZER "
++#elif defined CONFIG_MPILEDRIVER
++#define MODULE_PROC_FAMILY "PILEDRIVER "
++#elif defined CONFIG_MJAGUAR
++#define MODULE_PROC_FAMILY "JAGUAR "
+ #elif defined CONFIG_MELAN
+ #define MODULE_PROC_FAMILY "ELAN "
+ #elif defined CONFIG_MCRUSOE
+diff -uprN a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
+--- a/arch/x86/Kconfig.cpu	2013-11-03 18:41:51.000000000 -0500
++++ b/arch/x86/Kconfig.cpu	2013-12-15 06:21:24.351122516 -0500
+@@ -139,7 +139,7 @@ config MPENTIUM4
+ 
+ 
+ config MK6
+-	bool "K6/K6-II/K6-III"
++	bool "AMD K6/K6-II/K6-III"
+ 	depends on X86_32
+ 	---help---
+ 	  Select this for an AMD K6-family processor.  Enables use of
+@@ -147,7 +147,7 @@ config MK6
+ 	  flags to GCC.
+ 
+ config MK7
+-	bool "Athlon/Duron/K7"
++	bool "AMD Athlon/Duron/K7"
+ 	depends on X86_32
+ 	---help---
+ 	  Select this for an AMD Athlon K7-family processor.  Enables use of
+@@ -155,12 +155,55 @@ config MK7
+ 	  flags to GCC.
+ 
+ config MK8
+-	bool "Opteron/Athlon64/Hammer/K8"
++	bool "AMD Opteron/Athlon64/Hammer/K8"
+ 	---help---
+ 	  Select this for an AMD Opteron or Athlon64 Hammer-family processor.
+ 	  Enables use of some extended instructions, and passes appropriate
+ 	  optimization flags to GCC.
+ 
++config MK10
++	bool "AMD 61xx/7x50/PhenomX3/X4/II/K10"
++	---help---
++	  Select this for an AMD 61xx Eight-Core Magny-Cours, Athlon X2 7x50,
++		Phenom X3/X4/II, Athlon II X2/X3/X4, or Turion II-family processor.
++	  Enables use of some extended instructions, and passes appropriate
++	  optimization flags to GCC.
++
++config MBARCELONA
++	bool "AMD Barcelona"
++	---help---
++	  Select this for AMD Barcelona and newer processors.
++
++	  Enables -march=barcelona
++
++config MBOBCAT
++	bool "AMD Bobcat"
++	---help---
++	  Select this for AMD Bobcat processors.
++
++	  Enables -march=btver1
++
++config MBULLDOZER
++	bool "AMD Bulldozer"
++	---help---
++	  Select this for AMD Bulldozer processors.
++
++	  Enables -march=bdver1
++
++config MPILEDRIVER
++	bool "AMD Piledriver"
++	---help---
++	  Select this for AMD Piledriver processors.
++
++	  Enables -march=bdver2
++
++config MJAGUAR
++	bool "AMD Jaguar"
++	---help---
++	  Select this for AMD Jaguar processors.
++
++	  Enables -march=btver2
++
+ config MCRUSOE
+ 	bool "Crusoe"
+ 	depends on X86_32
+@@ -251,8 +294,17 @@ config MPSC
+ 	  using the cpu family field
+ 	  in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
+ 
++config MATOM
++	bool "Intel Atom"
++	---help---
++
++	  Select this for the Intel Atom platform. Intel Atom CPUs have an
++	  in-order pipelining architecture and thus can benefit from
++	  accordingly optimized code. Use a recent GCC with specific Atom
++	  support in order to fully benefit from selecting this option.
++
+ config MCORE2
+-	bool "Core 2/newer Xeon"
++	bool "Intel Core 2"
+ 	---help---
+ 
+ 	  Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
+@@ -260,14 +312,40 @@ config MCORE2
+ 	  family in /proc/cpuinfo. Newer ones have 6 and older ones 15
+ 	  (not a typo)
+ 
+-config MATOM
+-	bool "Intel Atom"
++	  Enables -march=core2
++
++config MCOREI7
++	bool "Intel Core i7"
+ 	---help---
+ 
+-	  Select this for the Intel Atom platform. Intel Atom CPUs have an
+-	  in-order pipelining architecture and thus can benefit from
+-	  accordingly optimized code. Use a recent GCC with specific Atom
+-	  support in order to fully benefit from selecting this option.
++	  Select this for the Intel Nehalem platform. Intel Nehalem proecessors
++	  include Core i3, i5, i7, Xeon: 34xx, 35xx, 55xx, 56xx, 75xx processors.
++
++	  Enables -march=corei7
++
++config MCOREI7AVX
++	bool "Intel Core 2nd Gen AVX"
++	---help---
++
++	  Select this for 2nd Gen Core processors including Sandy Bridge.
++
++	  Enables -march=corei7-avx
++
++config MCOREAVXI
++	bool "Intel Core 3rd Gen AVX"
++	---help---
++
++	  Select this for 3rd Gen Core processors including Ivy Bridge.
++
++	  Enables -march=core-avx-i
++
++config MCOREAVX2
++	bool "Intel Core AVX2"
++	---help---
++
++	  Select this for AVX2 enabled processors including Haswell.
++
++	  Enables -march=core-avx2
+ 
+ config GENERIC_CPU
+ 	bool "Generic-x86-64"
+@@ -276,6 +354,19 @@ config GENERIC_CPU
+ 	  Generic x86-64 CPU.
+ 	  Run equally well on all x86-64 CPUs.
+ 
++config MNATIVE
++ bool "Native optimizations autodetected by GCC"
++ ---help---
++
++   GCC 4.2 and above support -march=native, which automatically detects
++   the optimum settings to use based on your processor. -march=native
++   also detects and applies additional settings beyond -march specific
++   to your CPU, (eg. -msse4). Unless you have a specific reason not to
++   (e.g. distcc cross-compiling), you should probably be using
++   -march=native rather than anything listed below.
++
++   Enables -march=native
++
+ endchoice
+ 
+ config X86_GENERIC
+@@ -300,7 +391,7 @@ config X86_INTERNODE_CACHE_SHIFT
+ config X86_L1_CACHE_SHIFT
+ 	int
+ 	default "7" if MPENTIUM4 || MPSC
+-	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
++	default "6" if MK7 || MK8 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MPENTIUMM || MCORE2 || MCOREI7 || MCOREI7AVX || MCOREAVXI || MCOREAVX2 || MATOM || MVIAC7 || X86_GENERIC || MNATIVE || GENERIC_CPU
+ 	default "4" if MELAN || M486 || MGEODEGX1
+ 	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
+ 
+@@ -331,11 +422,11 @@ config X86_ALIGNMENT_16
+ 
+ config X86_INTEL_USERCOPY
+ 	def_bool y
+-	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
++	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || MNATIVE || X86_GENERIC || MK8 || MK7 || MK10 || MBARCELONA || MEFFICEON || MCORE2 || MCOREI7 || MCOREI7AVX || MCOREAVXI || MCOREAVX2
+ 
+ config X86_USE_PPRO_CHECKSUM
+ 	def_bool y
+-	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
++	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MK10 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MCOREI7 || MCOREI7AVX || MCOREAVXI || MCOREAVX2 || MATOM || MNATIVE
+ 
+ config X86_USE_3DNOW
+ 	def_bool y
+@@ -363,17 +454,17 @@ config X86_P6_NOP
+ 
+ config X86_TSC
+ 	def_bool y
+-	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
++	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MCOREI7 || MCOREI7-AVX || MATOM) || X86_64 || MNATIVE
+ 
+ config X86_CMPXCHG64
+ 	def_bool y
+-	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM
++	depends on X86_PAE || X86_64 || MCORE2 || MCOREI7 || MCOREI7AVX || MCOREAVXI || MCOREAVX2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM || MNATIVE
+ 
+ # this should be set for all -march=.. options where the compiler
+ # generates cmov.
+ config X86_CMOV
+ 	def_bool y
+-	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
++	depends on (MK8 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MK7 || MCORE2 || MCOREI7 || MCOREI7AVX || MCOREAVXI || MCOREAVX2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MNATIVE || MATOM || MGEODE_LX)
+ 
+ config X86_MINIMUM_CPU_FAMILY
+ 	int
+diff -uprN a/arch/x86/Makefile b/arch/x86/Makefile
+--- a/arch/x86/Makefile	2013-11-03 18:41:51.000000000 -0500
++++ b/arch/x86/Makefile	2013-12-15 06:21:24.354455723 -0500
+@@ -61,11 +61,26 @@ else
+ 	KBUILD_CFLAGS += $(call cc-option,-mno-sse -mpreferred-stack-boundary=3)
+ 
+         # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
++        cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
+         cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
++        cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10)
++        cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona)
++        cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1)
++        cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1)
++        cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2)
++        cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2)
+         cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
+ 
+         cflags-$(CONFIG_MCORE2) += \
+-                $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
++                $(call cc-option,-march=core2,$(call cc-option,-mtune=core2))
++        cflags-$(CONFIG_MCOREI7) += \
++                $(call cc-option,-march=corei7,$(call cc-option,-mtune=corei7))
++        cflags-$(CONFIG_MCOREI7AVX) += \
++                $(call cc-option,-march=corei7-avx,$(call cc-option,-mtune=corei7-avx))
++        cflags-$(CONFIG_MCOREAVXI) += \
++                $(call cc-option,-march=core-avx-i,$(call cc-option,-mtune=core-avx-i))
++        cflags-$(CONFIG_MCOREAVX2) += \
++                $(call cc-option,-march=core-avx2,$(call cc-option,-mtune=core-avx2))
+ 	cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
+ 		$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
+         cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
+diff -uprN a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu
+--- a/arch/x86/Makefile_32.cpu	2013-11-03 18:41:51.000000000 -0500
++++ b/arch/x86/Makefile_32.cpu	2013-12-15 06:21:24.354455723 -0500
+@@ -23,7 +23,14 @@ cflags-$(CONFIG_MK6)		+= -march=k6
+ # Please note, that patches that add -march=athlon-xp and friends are pointless.
+ # They make zero difference whatsosever to performance at this time.
+ cflags-$(CONFIG_MK7)		+= -march=athlon
++cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
+ cflags-$(CONFIG_MK8)		+= $(call cc-option,-march=k8,-march=athlon)
++cflags-$(CONFIG_MK10)	+= $(call cc-option,-march=amdfam10,-march=athlon)
++cflags-$(CONFIG_MBARCELONA)	+= $(call cc-option,-march=barcelona,-march=athlon)
++cflags-$(CONFIG_MBOBCAT)	+= $(call cc-option,-march=btver1,-march=athlon)
++cflags-$(CONFIG_MBULLDOZER)	+= $(call cc-option,-march=bdver1,-march=athlon)
++cflags-$(CONFIG_MPILEDRIVER)	+= $(call cc-option,-march=bdver2,-march=athlon)
++cflags-$(CONFIG_MJAGUAR)	+= $(call cc-option,-march=btver2,-march=athlon)
+ cflags-$(CONFIG_MCRUSOE)	+= -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
+ cflags-$(CONFIG_MEFFICEON)	+= -march=i686 $(call tune,pentium3) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
+ cflags-$(CONFIG_MWINCHIPC6)	+= $(call cc-option,-march=winchip-c6,-march=i586)
+@@ -32,6 +39,10 @@ cflags-$(CONFIG_MCYRIXIII)	+= $(call cc-
+ cflags-$(CONFIG_MVIAC3_2)	+= $(call cc-option,-march=c3-2,-march=i686)
+ cflags-$(CONFIG_MVIAC7)		+= -march=i686
+ cflags-$(CONFIG_MCORE2)		+= -march=i686 $(call tune,core2)
++cflags-$(CONFIG_MCOREI7)	+= -march=i686 $(call tune,corei7)
++cflags-$(CONFIG_MCOREI7AVX)	+= -march=i686 $(call tune,corei7-avx)
++cflags-$(CONFIG_MCOREAVXI)	+= -march=i686 $(call tune,core-avx-i)
++cflags-$(CONFIG_MCOREAVX2)	+= -march=i686 $(call tune,core-avx2)
+ cflags-$(CONFIG_MATOM)		+= $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \
+ 	$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))

diff --git a/5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch b/5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
new file mode 100644
index 0000000..c4efd06
--- /dev/null
+++ b/5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
@@ -0,0 +1,402 @@
+WARNING - this version of the patch works with version 4.9+ of gcc and with
+kernel version 3.15.x+ and should NOT be applied when compiling on older
+versions due to name changes of the flags with the 4.9 release of gcc.
+Use the older version of this patch hosted on the same github for older
+versions of gcc. For example:
+
+corei7 --> nehalem
+corei7-avx --> sandybridge
+core-avx-i --> ivybridge
+core-avx2 --> haswell
+
+For more, see: https://gcc.gnu.org/gcc-4.9/changes.html
+
+It also changes 'atom' to 'bonnell' in accordance with the gcc v4.9 changes.
+Note that upstream is using the deprecated 'match=atom' flags when I believe it
+should use the newer 'march=bonnell' flag for atom processors.
+
+I have made that change to this patch set as well.  See the following kernel
+bug report to see if I'm right: https://bugzilla.kernel.org/show_bug.cgi?id=77461
+
+This patch will expand the number of microarchitectures to include newer
+processors including: AMD K10-family, AMD Family 10h (Barcelona), AMD Family
+14h (Bobcat), AMD Family 15h (Bulldozer), AMD Family 15h (Piledriver), AMD
+Family 16h (Jaguar), Intel 1st Gen Core i3/i5/i7 (Nehalem), Intel 1.5 Gen Core
+i3/i5/i7 (Westmere), Intel 2nd Gen Core i3/i5/i7 (Sandybridge), Intel 3rd Gen
+Core i3/i5/i7 (Ivybridge), Intel 4th Gen Core i3/i5/i7 (Haswell), Intel 5th
+Gen Core i3/i5/i7 (Broadwell), and the low power Silvermont series of Atom
+processors (Silvermont). It also offers the compiler the 'native' flag.
+
+Small but real speed increases are measurable using a make endpoint comparing
+a generic kernel to one built with one of the respective microarchs.
+
+See the following experimental evidence supporting this statement:
+https://github.com/graysky2/kernel_gcc_patch
+
+REQUIREMENTS
+linux version >=3.15
+gcc version >=4.9
+
+--- a/arch/x86/include/asm/module.h	2014-06-16 16:44:27.000000000 -0400
++++ b/arch/x86/include/asm/module.h	2015-03-07 03:27:32.556672424 -0500
+@@ -15,6 +15,22 @@
+ #define MODULE_PROC_FAMILY "586MMX "
+ #elif defined CONFIG_MCORE2
+ #define MODULE_PROC_FAMILY "CORE2 "
++#elif defined CONFIG_MNATIVE
++#define MODULE_PROC_FAMILY "NATIVE "
++#elif defined CONFIG_MNEHALEM
++#define MODULE_PROC_FAMILY "NEHALEM "
++#elif defined CONFIG_MWESTMERE
++#define MODULE_PROC_FAMILY "WESTMERE "
++#elif defined CONFIG_MSILVERMONT
++#define MODULE_PROC_FAMILY "SILVERMONT "
++#elif defined CONFIG_MSANDYBRIDGE
++#define MODULE_PROC_FAMILY "SANDYBRIDGE "
++#elif defined CONFIG_MIVYBRIDGE
++#define MODULE_PROC_FAMILY "IVYBRIDGE "
++#elif defined CONFIG_MHASWELL
++#define MODULE_PROC_FAMILY "HASWELL "
++#elif defined CONFIG_MBROADWELL
++#define MODULE_PROC_FAMILY "BROADWELL "
+ #elif defined CONFIG_MATOM
+ #define MODULE_PROC_FAMILY "ATOM "
+ #elif defined CONFIG_M686
+@@ -33,6 +49,20 @@
+ #define MODULE_PROC_FAMILY "K7 "
+ #elif defined CONFIG_MK8
+ #define MODULE_PROC_FAMILY "K8 "
++#elif defined CONFIG_MK8SSE3
++#define MODULE_PROC_FAMILY "K8SSE3 "
++#elif defined CONFIG_MK10
++#define MODULE_PROC_FAMILY "K10 "
++#elif defined CONFIG_MBARCELONA
++#define MODULE_PROC_FAMILY "BARCELONA "
++#elif defined CONFIG_MBOBCAT
++#define MODULE_PROC_FAMILY "BOBCAT "
++#elif defined CONFIG_MBULLDOZER
++#define MODULE_PROC_FAMILY "BULLDOZER "
++#elif defined CONFIG_MPILEDRIVER
++#define MODULE_PROC_FAMILY "PILEDRIVER "
++#elif defined CONFIG_MJAGUAR
++#define MODULE_PROC_FAMILY "JAGUAR "
+ #elif defined CONFIG_MELAN
+ #define MODULE_PROC_FAMILY "ELAN "
+ #elif defined CONFIG_MCRUSOE
+--- a/arch/x86/Kconfig.cpu	2014-06-16 16:44:27.000000000 -0400
++++ b/arch/x86/Kconfig.cpu	2015-03-07 03:32:14.337713226 -0500
+@@ -137,9 +137,8 @@ config MPENTIUM4
+ 		-Paxville
+ 		-Dempsey
+ 
+-
+ config MK6
+-	bool "K6/K6-II/K6-III"
++	bool "AMD K6/K6-II/K6-III"
+ 	depends on X86_32
+ 	---help---
+ 	  Select this for an AMD K6-family processor.  Enables use of
+@@ -147,7 +146,7 @@ config MK6
+ 	  flags to GCC.
+ 
+ config MK7
+-	bool "Athlon/Duron/K7"
++	bool "AMD Athlon/Duron/K7"
+ 	depends on X86_32
+ 	---help---
+ 	  Select this for an AMD Athlon K7-family processor.  Enables use of
+@@ -155,12 +154,62 @@ config MK7
+ 	  flags to GCC.
+ 
+ config MK8
+-	bool "Opteron/Athlon64/Hammer/K8"
++	bool "AMD Opteron/Athlon64/Hammer/K8"
+ 	---help---
+ 	  Select this for an AMD Opteron or Athlon64 Hammer-family processor.
+ 	  Enables use of some extended instructions, and passes appropriate
+ 	  optimization flags to GCC.
+ 
++config MK8SSE3
++	bool "AMD Opteron/Athlon64/Hammer/K8 with SSE3"
++	---help---
++	  Select this for improved AMD Opteron or Athlon64 Hammer-family processors.
++	  Enables use of some extended instructions, and passes appropriate
++	  optimization flags to GCC.
++
++config MK10
++	bool "AMD 61xx/7x50/PhenomX3/X4/II/K10"
++	---help---
++	  Select this for an AMD 61xx Eight-Core Magny-Cours, Athlon X2 7x50,
++		Phenom X3/X4/II, Athlon II X2/X3/X4, or Turion II-family processor.
++	  Enables use of some extended instructions, and passes appropriate
++	  optimization flags to GCC.
++
++config MBARCELONA
++	bool "AMD Barcelona"
++	---help---
++	  Select this for AMD Barcelona and newer processors.
++
++	  Enables -march=barcelona
++
++config MBOBCAT
++	bool "AMD Bobcat"
++	---help---
++	  Select this for AMD Bobcat processors.
++
++	  Enables -march=btver1
++
++config MBULLDOZER
++	bool "AMD Bulldozer"
++	---help---
++	  Select this for AMD Bulldozer processors.
++
++	  Enables -march=bdver1
++
++config MPILEDRIVER
++	bool "AMD Piledriver"
++	---help---
++	  Select this for AMD Piledriver processors.
++
++	  Enables -march=bdver2
++
++config MJAGUAR
++	bool "AMD Jaguar"
++	---help---
++	  Select this for AMD Jaguar processors.
++
++	  Enables -march=btver2
++
+ config MCRUSOE
+ 	bool "Crusoe"
+ 	depends on X86_32
+@@ -251,8 +300,17 @@ config MPSC
+ 	  using the cpu family field
+ 	  in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
+ 
++config MATOM
++	bool "Intel Atom"
++	---help---
++
++	  Select this for the Intel Atom platform. Intel Atom CPUs have an
++	  in-order pipelining architecture and thus can benefit from
++	  accordingly optimized code. Use a recent GCC with specific Atom
++	  support in order to fully benefit from selecting this option.
++
+ config MCORE2
+-	bool "Core 2/newer Xeon"
++	bool "Intel Core 2"
+ 	---help---
+ 
+ 	  Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
+@@ -260,14 +318,63 @@ config MCORE2
+ 	  family in /proc/cpuinfo. Newer ones have 6 and older ones 15
+ 	  (not a typo)
+ 
+-config MATOM
+-	bool "Intel Atom"
++	  Enables -march=core2
++
++config MNEHALEM
++	bool "Intel Nehalem"
+ 	---help---
+ 
+-	  Select this for the Intel Atom platform. Intel Atom CPUs have an
+-	  in-order pipelining architecture and thus can benefit from
+-	  accordingly optimized code. Use a recent GCC with specific Atom
+-	  support in order to fully benefit from selecting this option.
++	  Select this for 1st Gen Core processors in the Nehalem family.
++
++	  Enables -march=nehalem
++
++config MWESTMERE
++	bool "Intel Westmere"
++	---help---
++
++	  Select this for the Intel Westmere formerly Nehalem-C family.
++
++	  Enables -march=westmere
++
++config MSILVERMONT
++	bool "Intel Silvermont"
++	---help---
++
++	  Select this for the Intel Silvermont platform.
++
++	  Enables -march=silvermont
++
++config MSANDYBRIDGE
++	bool "Intel Sandy Bridge"
++	---help---
++
++	  Select this for 2nd Gen Core processors in the Sandy Bridge family.
++
++	  Enables -march=sandybridge
++
++config MIVYBRIDGE
++	bool "Intel Ivy Bridge"
++	---help---
++
++	  Select this for 3rd Gen Core processors in the Ivy Bridge family.
++
++	  Enables -march=ivybridge
++
++config MHASWELL
++	bool "Intel Haswell"
++	---help---
++
++	  Select this for 4th Gen Core processors in the Haswell family.
++
++	  Enables -march=haswell
++
++config MBROADWELL
++	bool "Intel Broadwell"
++	---help---
++
++	  Select this for 5th Gen Core processors in the Broadwell family.
++
++	  Enables -march=broadwell
+ 
+ config GENERIC_CPU
+ 	bool "Generic-x86-64"
+@@ -276,6 +383,19 @@ config GENERIC_CPU
+ 	  Generic x86-64 CPU.
+ 	  Run equally well on all x86-64 CPUs.
+ 
++config MNATIVE
++ bool "Native optimizations autodetected by GCC"
++ ---help---
++
++   GCC 4.2 and above support -march=native, which automatically detects
++   the optimum settings to use based on your processor. -march=native 
++   also detects and applies additional settings beyond -march specific
++   to your CPU, (eg. -msse4). Unless you have a specific reason not to
++   (e.g. distcc cross-compiling), you should probably be using
++   -march=native rather than anything listed below.
++
++   Enables -march=native
++
+ endchoice
+ 
+ config X86_GENERIC
+@@ -300,7 +420,7 @@ config X86_INTERNODE_CACHE_SHIFT
+ config X86_L1_CACHE_SHIFT
+ 	int
+ 	default "7" if MPENTIUM4 || MPSC
+-	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
++	default "6" if MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MPENTIUMM || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || BROADWELL || MNATIVE || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
+ 	default "4" if MELAN || M486 || MGEODEGX1
+ 	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
+ 
+@@ -331,11 +451,11 @@ config X86_ALIGNMENT_16
+ 
+ config X86_INTEL_USERCOPY
+ 	def_bool y
+-	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
++	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK8SSE3 || MK7 || MEFFICEON || MCORE2 || MK10 || MBARCELONA || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MNATIVE
+ 
+ config X86_USE_PPRO_CHECKSUM
+ 	def_bool y
+-	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
++	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MK10 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MATOM || MNATIVE
+ 
+ config X86_USE_3DNOW
+ 	def_bool y
+@@ -359,17 +479,17 @@ config X86_P6_NOP
+ 
+ config X86_TSC
+ 	def_bool y
+-	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
++	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MNATIVE || MATOM) || X86_64
+ 
+ config X86_CMPXCHG64
+ 	def_bool y
+-	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM
++	depends on X86_PAE || X86_64 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM || MNATIVE
+ 
+ # this should be set for all -march=.. options where the compiler
+ # generates cmov.
+ config X86_CMOV
+ 	def_bool y
+-	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
++	depends on (MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MK7 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MNATIVE || MATOM || MGEODE_LX)
+ 
+ config X86_MINIMUM_CPU_FAMILY
+ 	int
+--- a/arch/x86/Makefile	2014-06-16 16:44:27.000000000 -0400
++++ b/arch/x86/Makefile	2015-03-07 03:33:27.650843211 -0500
+@@ -92,13 +92,35 @@ else
+ 	KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
+ 
+         # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
++        cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
+         cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
++        cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3,-mtune=k8)
++        cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10)
++        cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona)
++        cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1)
++        cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1)
++        cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2)
++        cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2)
+         cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
+ 
+         cflags-$(CONFIG_MCORE2) += \
+-                $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
+-	cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
+-		$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
++                $(call cc-option,-march=core2,$(call cc-option,-mtune=core2))
++        cflags-$(CONFIG_MNEHALEM) += \
++                $(call cc-option,-march=nehalem,$(call cc-option,-mtune=nehalem))
++        cflags-$(CONFIG_MWESTMERE) += \
++                $(call cc-option,-march=westmere,$(call cc-option,-mtune=westmere))
++        cflags-$(CONFIG_MSILVERMONT) += \
++                $(call cc-option,-march=silvermont,$(call cc-option,-mtune=silvermont))
++        cflags-$(CONFIG_MSANDYBRIDGE) += \
++                $(call cc-option,-march=sandybridge,$(call cc-option,-mtune=sandybridge))
++        cflags-$(CONFIG_MIVYBRIDGE) += \
++                $(call cc-option,-march=ivybridge,$(call cc-option,-mtune=ivybridge))
++        cflags-$(CONFIG_MHASWELL) += \
++                $(call cc-option,-march=haswell,$(call cc-option,-mtune=haswell))
++        cflags-$(CONFIG_MBROADWELL) += \
++                $(call cc-option,-march=broadwell,$(call cc-option,-mtune=broadwell))
++        cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell) \
++                $(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic))
+         cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
+         KBUILD_CFLAGS += $(cflags-y)
+ 
+--- a/arch/x86/Makefile_32.cpu	2014-06-16 16:44:27.000000000 -0400
++++ b/arch/x86/Makefile_32.cpu	2015-03-07 03:34:15.203586024 -0500
+@@ -23,7 +23,15 @@ cflags-$(CONFIG_MK6)		+= -march=k6
+ # Please note, that patches that add -march=athlon-xp and friends are pointless.
+ # They make zero difference whatsosever to performance at this time.
+ cflags-$(CONFIG_MK7)		+= -march=athlon
++cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
+ cflags-$(CONFIG_MK8)		+= $(call cc-option,-march=k8,-march=athlon)
++cflags-$(CONFIG_MK8SSE3)		+= $(call cc-option,-march=k8-sse3,-march=athlon)
++cflags-$(CONFIG_MK10)	+= $(call cc-option,-march=amdfam10,-march=athlon)
++cflags-$(CONFIG_MBARCELONA)	+= $(call cc-option,-march=barcelona,-march=athlon)
++cflags-$(CONFIG_MBOBCAT)	+= $(call cc-option,-march=btver1,-march=athlon)
++cflags-$(CONFIG_MBULLDOZER)	+= $(call cc-option,-march=bdver1,-march=athlon)
++cflags-$(CONFIG_MPILEDRIVER)	+= $(call cc-option,-march=bdver2,-march=athlon)
++cflags-$(CONFIG_MJAGUAR)	+= $(call cc-option,-march=btver2,-march=athlon)
+ cflags-$(CONFIG_MCRUSOE)	+= -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
+ cflags-$(CONFIG_MEFFICEON)	+= -march=i686 $(call tune,pentium3) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
+ cflags-$(CONFIG_MWINCHIPC6)	+= $(call cc-option,-march=winchip-c6,-march=i586)
+@@ -32,8 +40,15 @@ cflags-$(CONFIG_MCYRIXIII)	+= $(call cc-
+ cflags-$(CONFIG_MVIAC3_2)	+= $(call cc-option,-march=c3-2,-march=i686)
+ cflags-$(CONFIG_MVIAC7)		+= -march=i686
+ cflags-$(CONFIG_MCORE2)		+= -march=i686 $(call tune,core2)
+-cflags-$(CONFIG_MATOM)		+= $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \
+-	$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
++cflags-$(CONFIG_MNEHALEM)	+= -march=i686 $(call tune,nehalem)
++cflags-$(CONFIG_MWESTMERE)	+= -march=i686 $(call tune,westmere)
++cflags-$(CONFIG_MSILVERMONT)	+= -march=i686 $(call tune,silvermont)
++cflags-$(CONFIG_MSANDYBRIDGE)	+= -march=i686 $(call tune,sandybridge)
++cflags-$(CONFIG_MIVYBRIDGE)	+= -march=i686 $(call tune,ivybridge)
++cflags-$(CONFIG_MHASWELL)	+= -march=i686 $(call tune,haswell)
++cflags-$(CONFIG_MBROADWELL)	+= -march=i686 $(call tune,broadwell)
++cflags-$(CONFIG_MATOM)		+= $(call cc-option,-march=bonnell,$(call cc-option,-march=core2,-march=i686)) \
++	$(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic))
+ 
+ # AMD Elan support
+ cflags-$(CONFIG_MELAN)		+= -march=i486
+


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [gentoo-commits] proj/linux-patches:4.5 commit in: /
@ 2016-03-22 22:46 Mike Pagano
  0 siblings, 0 replies; 12+ messages in thread
From: Mike Pagano @ 2016-03-22 22:46 UTC (permalink / raw
  To: gentoo-commits

commit:     04c6d4296a591520052d98fabf73b9f4e98ad20e
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Mar 22 22:46:47 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Mar 22 22:46:47 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=04c6d429

Update for gcc 4.9 CPU optimization patch. See bug #572108

 5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch b/5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
index c4efd06..418201d 100644
--- a/5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
+++ b/5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
@@ -283,7 +283,7 @@ gcc version >=4.9
  	int
  	default "7" if MPENTIUM4 || MPSC
 -	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
-+	default "6" if MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MPENTIUMM || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || BROADWELL || MNATIVE || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
++	default "6" if MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MPENTIUMM || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MNATIVE || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
  	default "4" if MELAN || M486 || MGEODEGX1
  	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
  


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [gentoo-commits] proj/linux-patches:4.5 commit in: /
@ 2016-03-27 19:35 Anthony G. Basile
  0 siblings, 0 replies; 12+ messages in thread
From: Anthony G. Basile @ 2016-03-27 19:35 UTC (permalink / raw
  To: gentoo-commits

commit:     86d8d1264cf8a1f897ca565d2b5b08c375f67f01
Author:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Sun Mar 27 19:42:09 2016 +0000
Commit:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Sun Mar 27 19:42:09 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=86d8d126

Forward port 1500_XATTR_USER_PREFIX.patch to 4.5 kernels

 0000_README                  |  4 ++++
 1500_XATTR_USER_PREFIX.patch | 54 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 58 insertions(+)

diff --git a/0000_README b/0000_README
index 1eb82e8..8e70e78 100644
--- a/0000_README
+++ b/0000_README
@@ -43,6 +43,10 @@ EXPERIMENTAL
 Individual Patch Descriptions:
 --------------------------------------------------------------------------
 
+Patch:  1500_XATTR_USER_PREFIX.patch
+From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
+Desc:   Support for namespace user.pax.* on tmpfs.
+
 Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.

diff --git a/1500_XATTR_USER_PREFIX.patch b/1500_XATTR_USER_PREFIX.patch
new file mode 100644
index 0000000..cc15cd5
--- /dev/null
+++ b/1500_XATTR_USER_PREFIX.patch
@@ -0,0 +1,54 @@
+From: Anthony G. Basile <blueness@gentoo.org>
+
+This patch adds support for a restricted user-controlled namespace on
+tmpfs filesystem used to house PaX flags.  The namespace must be of the
+form user.pax.* and its value cannot exceed a size of 8 bytes.
+
+This is needed even on all Gentoo systems so that XATTR_PAX flags
+are preserved for users who might build packages using portage on
+a tmpfs system with a non-hardened kernel and then switch to a
+hardened kernel with XATTR_PAX enabled.
+
+The namespace is added to any user with Extended Attribute support
+enabled for tmpfs.  Users who do not enable xattrs will not have
+the XATTR_PAX flags preserved.
+
+diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
+index e4629b9..6958086 100644
+--- a/include/uapi/linux/xattr.h
++++ b/include/uapi/linux/xattr.h
+@@ -63,5 +63,9 @@
+ #define XATTR_POSIX_ACL_DEFAULT  "posix_acl_default"
+ #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
+ 
++/* User namespace */
++#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
++#define XATTR_PAX_FLAGS_SUFFIX "flags"
++#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
+ 
+ #endif /* _UAPI_LINUX_XATTR_H */
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 1c44af7..f23bb1b 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -2201,6 +2201,7 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
+ static int shmem_xattr_validate(const char *name)
+ {
+ 	struct { const char *prefix; size_t len; } arr[] = {
++		{ XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
+ 		{ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
+ 		{ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
+ 	};
+@@ -2256,6 +2257,12 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
+ 	if (err)
+ 		return err;
+ 
++	if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
++		if (strcmp(name, XATTR_NAME_PAX_FLAGS))
++			return -EOPNOTSUPP;
++		if (size > 8)
++			return -EINVAL;
++	}
+ 	return simple_xattr_set(&info->xattrs, name, value, size, flags);
+ }
+ 


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [gentoo-commits] proj/linux-patches:4.5 commit in: /
@ 2016-03-29  0:45 Anthony G. Basile
  0 siblings, 0 replies; 12+ messages in thread
From: Anthony G. Basile @ 2016-03-29  0:45 UTC (permalink / raw
  To: gentoo-commits

commit:     dfb0a0a1ae8d29fac2ec39450ba0895328e6ffb6
Author:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Tue Mar 29 00:52:35 2016 +0000
Commit:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Tue Mar 29 00:52:35 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=dfb0a0a1

Update 1500_XATTR_USER_PREFIX.patch for the new struct syntax in 4.5 kernels

 1500_XATTR_USER_PREFIX.patch | 43 +++++++++++++++++++++++++++++--------------
 1 file changed, 29 insertions(+), 14 deletions(-)

diff --git a/1500_XATTR_USER_PREFIX.patch b/1500_XATTR_USER_PREFIX.patch
index cc15cd5..bacd032 100644
--- a/1500_XATTR_USER_PREFIX.patch
+++ b/1500_XATTR_USER_PREFIX.patch
@@ -14,10 +14,10 @@ enabled for tmpfs.  Users who do not enable xattrs will not have
 the XATTR_PAX flags preserved.
 
 diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
-index e4629b9..6958086 100644
+index 1590c49..5eab462 100644
 --- a/include/uapi/linux/xattr.h
 +++ b/include/uapi/linux/xattr.h
-@@ -63,5 +63,9 @@
+@@ -73,5 +73,9 @@
  #define XATTR_POSIX_ACL_DEFAULT  "posix_acl_default"
  #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
  
@@ -28,27 +28,42 @@ index e4629b9..6958086 100644
  
  #endif /* _UAPI_LINUX_XATTR_H */
 diff --git a/mm/shmem.c b/mm/shmem.c
-index 1c44af7..f23bb1b 100644
+index 440e2a7..c377172 100644
 --- a/mm/shmem.c
 +++ b/mm/shmem.c
-@@ -2201,6 +2201,7 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
- static int shmem_xattr_validate(const char *name)
- {
- 	struct { const char *prefix; size_t len; } arr[] = {
-+		{ XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
- 		{ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
- 		{ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
- 	};
-@@ -2256,6 +2257,12 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
- 	if (err)
- 		return err;
+@@ -2667,6 +2667,14 @@ static int shmem_xattr_handler_set(const struct xattr_handler *handler,
+ 	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
  
+ 	name = xattr_full_name(handler, name);
++
 +	if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
 +		if (strcmp(name, XATTR_NAME_PAX_FLAGS))
 +			return -EOPNOTSUPP;
 +		if (size > 8)
 +			return -EINVAL;
 +	}
++
  	return simple_xattr_set(&info->xattrs, name, value, size, flags);
  }
  
+@@ -2682,6 +2690,12 @@ static const struct xattr_handler shmem_trusted_xattr_handler = {
+ 	.set = shmem_xattr_handler_set,
+ };
+ 
++static const struct xattr_handler shmem_user_xattr_handler = {
++	.prefix = XATTR_USER_PREFIX,
++	.get = shmem_xattr_handler_get,
++	.set = shmem_xattr_handler_set,
++};
++
+ static const struct xattr_handler *shmem_xattr_handlers[] = {
+ #ifdef CONFIG_TMPFS_POSIX_ACL
+ 	&posix_acl_access_xattr_handler,
+@@ -2689,6 +2703,7 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
+ #endif
+ 	&shmem_security_xattr_handler,
+ 	&shmem_trusted_xattr_handler,
++	&shmem_user_xattr_handler,
+ 	NULL
+ };
+ 


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [gentoo-commits] proj/linux-patches:4.5 commit in: /
@ 2016-04-12 18:53 Mike Pagano
  0 siblings, 0 replies; 12+ messages in thread
From: Mike Pagano @ 2016-04-12 18:53 UTC (permalink / raw
  To: gentoo-commits

commit:     2d1f73dbbb04d3586c686226598e8acadd11021f
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Apr 12 18:52:57 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Apr 12 18:52:57 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=2d1f73db

Linux patch 4.5.1

 0000_README            |    4 +
 1000_linux-4.5.1.patch | 9372 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 9376 insertions(+)

diff --git a/0000_README b/0000_README
index 8e70e78..40d6c2b 100644
--- a/0000_README
+++ b/0000_README
@@ -43,6 +43,10 @@ EXPERIMENTAL
 Individual Patch Descriptions:
 --------------------------------------------------------------------------
 
+Patch:  1000_linux-4.5.1.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.5.1
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1000_linux-4.5.1.patch b/1000_linux-4.5.1.patch
new file mode 100644
index 0000000..4b27f5d
--- /dev/null
+++ b/1000_linux-4.5.1.patch
@@ -0,0 +1,9372 @@
+diff --git a/Documentation/cgroup-v2.txt b/Documentation/cgroup-v2.txt
+index ff49cf901148..81eb378210c6 100644
+--- a/Documentation/cgroup-v2.txt
++++ b/Documentation/cgroup-v2.txt
+@@ -1368,6 +1368,12 @@ system than killing the group.  Otherwise, memory.max is there to
+ limit this type of spillover and ultimately contain buggy or even
+ malicious applications.
+ 
++Setting the original memory.limit_in_bytes below the current usage was
++subject to a race condition, where concurrent charges could cause the
++limit setting to fail. memory.max on the other hand will first set the
++limit to prevent new charges, and then reclaim and OOM kill until the
++new limit is met - or the task writing to memory.max is killed.
++
+ The combined memory+swap accounting and limiting is replaced by real
+ control over swap space.
+ 
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 6ee06ea47be4..5a389bc68e0e 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -228,13 +228,13 @@ F:	kernel/sys_ni.c
+ 
+ ABIT UGURU 1,2 HARDWARE MONITOR DRIVER
+ M:	Hans de Goede <hdegoede@redhat.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	drivers/hwmon/abituguru.c
+ 
+ ABIT UGURU 3 HARDWARE MONITOR DRIVER
+ M:	Alistair John Strachan <alistair@devzero.co.uk>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	drivers/hwmon/abituguru3.c
+ 
+@@ -386,14 +386,14 @@ F:	Documentation/devicetree/bindings/net/ieee802154/adf7242.txt
+ 
+ ADM1025 HARDWARE MONITOR DRIVER
+ M:	Jean Delvare <jdelvare@suse.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/adm1025
+ F:	drivers/hwmon/adm1025.c
+ 
+ ADM1029 HARDWARE MONITOR DRIVER
+ M:	Corentin Labbe <clabbe.montjoie@gmail.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	drivers/hwmon/adm1029.c
+ 
+@@ -438,7 +438,7 @@ F:	drivers/video/backlight/adp8860_bl.c
+ 
+ ADS1015 HARDWARE MONITOR DRIVER
+ M:	Dirk Eibach <eibach@gdsys.de>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/ads1015
+ F:	drivers/hwmon/ads1015.c
+@@ -451,7 +451,7 @@ F:	drivers/macintosh/therm_adt746x.c
+ 
+ ADT7475 HARDWARE MONITOR DRIVER
+ M:	Jean Delvare <jdelvare@suse.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/adt7475
+ F:	drivers/hwmon/adt7475.c
+@@ -628,7 +628,7 @@ F:	include/linux/ccp.h
+ 
+ AMD FAM15H PROCESSOR POWER MONITORING DRIVER
+ M:	Huang Rui <ray.huang@amd.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Supported
+ F:	Documentation/hwmon/fam15h_power
+ F:	drivers/hwmon/fam15h_power.c
+@@ -786,7 +786,7 @@ F:	drivers/input/mouse/bcm5974.c
+ 
+ APPLE SMC DRIVER
+ M:	Henrik Rydberg <rydberg@bitmath.org>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Odd fixes
+ F:	drivers/hwmon/applesmc.c
+ 
+@@ -1825,7 +1825,7 @@ F:	include/media/i2c/as3645a.h
+ 
+ ASC7621 HARDWARE MONITOR DRIVER
+ M:	George Joseph <george.joseph@fairview5.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/asc7621
+ F:	drivers/hwmon/asc7621.c
+@@ -1918,7 +1918,7 @@ F:	drivers/net/wireless/ath/carl9170/
+ 
+ ATK0110 HWMON DRIVER
+ M:	Luca Tettamanti <kronos.it@gmail.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	drivers/hwmon/asus_atk0110.c
+ 
+@@ -3037,7 +3037,7 @@ F:	mm/swap_cgroup.c
+ 
+ CORETEMP HARDWARE MONITORING DRIVER
+ M:	Fenghua Yu <fenghua.yu@intel.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/coretemp
+ F:	drivers/hwmon/coretemp.c
+@@ -3625,7 +3625,7 @@ T:	git git://git.infradead.org/users/vkoul/slave-dma.git
+ 
+ DME1737 HARDWARE MONITOR DRIVER
+ M:	Juerg Haefliger <juergh@gmail.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/dme1737
+ F:	drivers/hwmon/dme1737.c
+@@ -4322,7 +4322,7 @@ F:	include/video/exynos_mipi*
+ 
+ F71805F HARDWARE MONITORING DRIVER
+ M:	Jean Delvare <jdelvare@suse.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/f71805f
+ F:	drivers/hwmon/f71805f.c
+@@ -4401,7 +4401,7 @@ F:	fs/*
+ 
+ FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER
+ M:	Riku Voipio <riku.voipio@iki.fi>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	drivers/hwmon/f75375s.c
+ F:	include/linux/f75375s.h
+@@ -4958,8 +4958,8 @@ F:	drivers/media/usb/hackrf/
+ HARDWARE MONITORING
+ M:	Jean Delvare <jdelvare@suse.com>
+ M:	Guenter Roeck <linux@roeck-us.net>
+-L:	lm-sensors@lm-sensors.org
+-W:	http://www.lm-sensors.org/
++L:	linux-hwmon@vger.kernel.org
++W:	http://hwmon.wiki.kernel.org/
+ T:	quilt http://jdelvare.nerim.net/devel/linux/jdelvare-hwmon/
+ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
+ S:	Maintained
+@@ -5484,7 +5484,7 @@ F:	drivers/usb/atm/ueagle-atm.c
+ 
+ INA209 HARDWARE MONITOR DRIVER
+ M:	Guenter Roeck <linux@roeck-us.net>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/ina209
+ F:	Documentation/devicetree/bindings/i2c/ina209.txt
+@@ -5492,7 +5492,7 @@ F:	drivers/hwmon/ina209.c
+ 
+ INA2XX HARDWARE MONITOR DRIVER
+ M:	Guenter Roeck <linux@roeck-us.net>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/ina2xx
+ F:	drivers/hwmon/ina2xx.c
+@@ -5985,7 +5985,7 @@ F:	drivers/isdn/hardware/eicon/
+ 
+ IT87 HARDWARE MONITORING DRIVER
+ M:	Jean Delvare <jdelvare@suse.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/it87
+ F:	drivers/hwmon/it87.c
+@@ -6021,7 +6021,7 @@ F:	drivers/media/dvb-frontends/ix2505v*
+ 
+ JC42.4 TEMPERATURE SENSOR DRIVER
+ M:	Guenter Roeck <linux@roeck-us.net>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	drivers/hwmon/jc42.c
+ F:	Documentation/hwmon/jc42
+@@ -6071,14 +6071,14 @@ F:	drivers/tty/serial/jsm/
+ 
+ K10TEMP HARDWARE MONITORING DRIVER
+ M:	Clemens Ladisch <clemens@ladisch.de>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/k10temp
+ F:	drivers/hwmon/k10temp.c
+ 
+ K8TEMP HARDWARE MONITORING DRIVER
+ M:	Rudolf Marek <r.marek@assembler.cz>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/k8temp
+ F:	drivers/hwmon/k8temp.c
+@@ -6605,27 +6605,27 @@ F:	net/llc/
+ 
+ LM73 HARDWARE MONITOR DRIVER
+ M:	Guillaume Ligneul <guillaume.ligneul@gmail.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	drivers/hwmon/lm73.c
+ 
+ LM78 HARDWARE MONITOR DRIVER
+ M:	Jean Delvare <jdelvare@suse.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/lm78
+ F:	drivers/hwmon/lm78.c
+ 
+ LM83 HARDWARE MONITOR DRIVER
+ M:	Jean Delvare <jdelvare@suse.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/lm83
+ F:	drivers/hwmon/lm83.c
+ 
+ LM90 HARDWARE MONITOR DRIVER
+ M:	Jean Delvare <jdelvare@suse.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/lm90
+ F:	Documentation/devicetree/bindings/hwmon/lm90.txt
+@@ -6633,7 +6633,7 @@ F:	drivers/hwmon/lm90.c
+ 
+ LM95234 HARDWARE MONITOR DRIVER
+ M:	Guenter Roeck <linux@roeck-us.net>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/lm95234
+ F:	drivers/hwmon/lm95234.c
+@@ -6700,7 +6700,7 @@ F:	drivers/scsi/sym53c8xx_2/
+ 
+ LTC4261 HARDWARE MONITOR DRIVER
+ M:	Guenter Roeck <linux@roeck-us.net>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/ltc4261
+ F:	drivers/hwmon/ltc4261.c
+@@ -6870,28 +6870,28 @@ F:	include/uapi/linux/matroxfb.h
+ 
+ MAX16065 HARDWARE MONITOR DRIVER
+ M:	Guenter Roeck <linux@roeck-us.net>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/max16065
+ F:	drivers/hwmon/max16065.c
+ 
+ MAX20751 HARDWARE MONITOR DRIVER
+ M:	Guenter Roeck <linux@roeck-us.net>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/max20751
+ F:	drivers/hwmon/max20751.c
+ 
+ MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER
+ M:	"Hans J. Koch" <hjk@hansjkoch.de>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/max6650
+ F:	drivers/hwmon/max6650.c
+ 
+ MAX6697 HARDWARE MONITOR DRIVER
+ M:	Guenter Roeck <linux@roeck-us.net>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/max6697
+ F:	Documentation/devicetree/bindings/i2c/max6697.txt
+@@ -7455,7 +7455,7 @@ F:	drivers/scsi/NCR_D700.*
+ 
+ NCT6775 HARDWARE MONITOR DRIVER
+ M:	Guenter Roeck <linux@roeck-us.net>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/nct6775
+ F:	drivers/hwmon/nct6775.c
+@@ -8235,7 +8235,7 @@ F:	drivers/video/logo/logo_parisc*
+ 
+ PC87360 HARDWARE MONITORING DRIVER
+ M:	Jim Cromie <jim.cromie@gmail.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/pc87360
+ F:	drivers/hwmon/pc87360.c
+@@ -8247,7 +8247,7 @@ F:	drivers/char/pc8736x_gpio.c
+ 
+ PC87427 HARDWARE MONITORING DRIVER
+ M:	Jean Delvare <jdelvare@suse.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/pc87427
+ F:	drivers/hwmon/pc87427.c
+@@ -8601,8 +8601,8 @@ F:	drivers/rtc/rtc-puv3.c
+ 
+ PMBUS HARDWARE MONITORING DRIVERS
+ M:	Guenter Roeck <linux@roeck-us.net>
+-L:	lm-sensors@lm-sensors.org
+-W:	http://www.lm-sensors.org/
++L:	linux-hwmon@vger.kernel.org
++W:	http://hwmon.wiki.kernel.org/
+ W:	http://www.roeck-us.net/linux/drivers/
+ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
+ S:	Maintained
+@@ -8807,7 +8807,7 @@ F:	drivers/media/usb/pwc/*
+ 
+ PWM FAN DRIVER
+ M:	Kamil Debski <k.debski@samsung.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Supported
+ F:	Documentation/devicetree/bindings/hwmon/pwm-fan.txt
+ F:	Documentation/hwmon/pwm-fan
+@@ -10113,28 +10113,28 @@ F:	Documentation/devicetree/bindings/media/i2c/nokia,smia.txt
+ 
+ SMM665 HARDWARE MONITOR DRIVER
+ M:	Guenter Roeck <linux@roeck-us.net>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/smm665
+ F:	drivers/hwmon/smm665.c
+ 
+ SMSC EMC2103 HARDWARE MONITOR DRIVER
+ M:	Steve Glendinning <steve.glendinning@shawell.net>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/emc2103
+ F:	drivers/hwmon/emc2103.c
+ 
+ SMSC SCH5627 HARDWARE MONITOR DRIVER
+ M:	Hans de Goede <hdegoede@redhat.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Supported
+ F:	Documentation/hwmon/sch5627
+ F:	drivers/hwmon/sch5627.c
+ 
+ SMSC47B397 HARDWARE MONITOR DRIVER
+ M:	Jean Delvare <jdelvare@suse.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/smsc47b397
+ F:	drivers/hwmon/smsc47b397.c
+@@ -11067,7 +11067,7 @@ F:	include/linux/mmc/sh_mobile_sdhi.h
+ 
+ TMP401 HARDWARE MONITOR DRIVER
+ M:	Guenter Roeck <linux@roeck-us.net>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/tmp401
+ F:	drivers/hwmon/tmp401.c
+@@ -11812,14 +11812,14 @@ F:	Documentation/networking/vrf.txt
+ 
+ VT1211 HARDWARE MONITOR DRIVER
+ M:	Juerg Haefliger <juergh@gmail.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/vt1211
+ F:	drivers/hwmon/vt1211.c
+ 
+ VT8231 HARDWARE MONITOR DRIVER
+ M:	Roger Lucas <vt8231@hiddenengine.co.uk>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	drivers/hwmon/vt8231.c
+ 
+@@ -11838,21 +11838,21 @@ F:	drivers/w1/
+ 
+ W83791D HARDWARE MONITORING DRIVER
+ M:	Marc Hulsman <m.hulsman@tudelft.nl>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/w83791d
+ F:	drivers/hwmon/w83791d.c
+ 
+ W83793 HARDWARE MONITORING DRIVER
+ M:	Rudolf Marek <r.marek@assembler.cz>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/w83793
+ F:	drivers/hwmon/w83793.c
+ 
+ W83795 HARDWARE MONITORING DRIVER
+ M:	Jean Delvare <jdelvare@suse.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	drivers/hwmon/w83795.c
+ 
+diff --git a/Makefile b/Makefile
+index 7b3ecdcdc6c1..c621889b8827 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 5
+-SUBLEVEL = 0
++SUBLEVEL = 1
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+ 
+diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
+index 44a578c10732..ab5d5701e11d 100644
+--- a/arch/arc/boot/dts/axs10x_mb.dtsi
++++ b/arch/arc/boot/dts/axs10x_mb.dtsi
+@@ -47,6 +47,14 @@
+ 			clocks = <&apbclk>;
+ 			clock-names = "stmmaceth";
+ 			max-speed = <100>;
++			mdio0 {
++				#address-cells = <1>;
++				#size-cells = <0>;
++				compatible = "snps,dwmac-mdio";
++				phy1: ethernet-phy@1 {
++					reg = <1>;
++				};
++			};
+ 		};
+ 
+ 		ehci@0x40000 {
+diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
+index 57c1f33844d4..0352fb8d21b9 100644
+--- a/arch/arc/include/asm/bitops.h
++++ b/arch/arc/include/asm/bitops.h
+@@ -35,21 +35,6 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
+ 									\
+ 	m += nr >> 5;							\
+ 									\
+-	/*								\
+-	 * ARC ISA micro-optimization:					\
+-	 *								\
+-	 * Instructions dealing with bitpos only consider lower 5 bits	\
+-	 * e.g (x << 33) is handled like (x << 1) by ASL instruction	\
+-	 *  (mem pointer still needs adjustment to point to next word)	\
+-	 *								\
+-	 * Hence the masking to clamp @nr arg can be elided in general.	\
+-	 *								\
+-	 * However if @nr is a constant (above assumed in a register),	\
+-	 * and greater than 31, gcc can optimize away (x << 33) to 0,	\
+-	 * as overflow, given the 32-bit ISA. Thus masking needs to be	\
+-	 * done for const @nr, but no code is generated due to gcc	\
+-	 * const prop.							\
+-	 */								\
+ 	nr &= 0x1f;							\
+ 									\
+ 	__asm__ __volatile__(						\
+diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
+index 694ece8a0243..27b17adea50d 100644
+--- a/arch/arc/include/asm/io.h
++++ b/arch/arc/include/asm/io.h
+@@ -129,15 +129,23 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
+ #define writel(v,c)		({ __iowmb(); writel_relaxed(v,c); })
+ 
+ /*
+- * Relaxed API for drivers which can handle any ordering themselves
++ * Relaxed API for drivers which can handle barrier ordering themselves
++ *
++ * Also these are defined to perform little endian accesses.
++ * To provide the typical device register semantics of fixed endian,
++ * swap the byte order for Big Endian
++ *
++ * http://lkml.kernel.org/r/201603100845.30602.arnd@arndb.de
+  */
+ #define readb_relaxed(c)	__raw_readb(c)
+-#define readw_relaxed(c)	__raw_readw(c)
+-#define readl_relaxed(c)	__raw_readl(c)
++#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
++					__raw_readw(c)); __r; })
++#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
++					__raw_readl(c)); __r; })
+ 
+ #define writeb_relaxed(v,c)	__raw_writeb(v,c)
+-#define writew_relaxed(v,c)	__raw_writew(v,c)
+-#define writel_relaxed(v,c)	__raw_writel(v,c)
++#define writew_relaxed(v,c)	__raw_writew((__force u16) cpu_to_le16(v),c)
++#define writel_relaxed(v,c)	__raw_writel((__force u32) cpu_to_le32(v),c)
+ 
+ #include <asm-generic/io.h>
+ 
+diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+index ff888d21c786..f3e2b96c06a3 100644
+--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
++++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+@@ -303,6 +303,7 @@
+ 		regulator-name = "mmc0-card-supply";
+ 		regulator-min-microvolt = <3300000>;
+ 		regulator-max-microvolt = <3300000>;
++		regulator-always-on;
+ 	};
+ 
+ 	gpio_keys {
+diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
+index 569026e8f96c..da84e65b56ef 100644
+--- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts
++++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
+@@ -268,5 +268,6 @@
+ 		regulator-min-microvolt = <3300000>;
+ 		regulator-max-microvolt = <3300000>;
+ 		vin-supply = <&vcc_3v3_reg>;
++		regulator-always-on;
+ 	};
+ };
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index 819aff5d593f..7273210782d5 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -279,7 +279,7 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
+ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ 			      pte_t *ptep, pte_t pte)
+ {
+-	if (pte_valid(pte)) {
++	if (pte_present(pte)) {
+ 		if (pte_sw_dirty(pte) && pte_write(pte))
+ 			pte_val(pte) &= ~PTE_RDONLY;
+ 		else
+@@ -649,6 +649,7 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
+  *	bits 0-1:	present (must be zero)
+  *	bits 2-7:	swap type
+  *	bits 8-57:	swap offset
++ *	bit  58:	PTE_PROT_NONE (must be zero)
+  */
+ #define __SWP_TYPE_SHIFT	2
+ #define __SWP_TYPE_BITS		6
+diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
+index a865d2a04f75..5de673ac9cb1 100644
+--- a/arch/ia64/include/asm/io.h
++++ b/arch/ia64/include/asm/io.h
+@@ -433,6 +433,7 @@ static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned lo
+ 	return ioremap(phys_addr, size);
+ }
+ #define ioremap_cache ioremap_cache
++#define ioremap_uc ioremap_nocache
+ 
+ 
+ /*
+diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
+index c873e682b67f..2b2ced9dc00a 100644
+--- a/arch/s390/include/asm/pci.h
++++ b/arch/s390/include/asm/pci.h
+@@ -45,7 +45,7 @@ struct zpci_fmb {
+ 	u64 rpcit_ops;
+ 	u64 dma_rbytes;
+ 	u64 dma_wbytes;
+-} __packed __aligned(16);
++} __packed __aligned(64);
+ 
+ enum zpci_state {
+ 	ZPCI_FN_STATE_RESERVED,
+diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
+index cd5a191381b9..c920b81be5bb 100644
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -1199,114 +1199,12 @@ cleanup_critical:
+ 	.quad	.Lpsw_idle_lpsw
+ 
+ .Lcleanup_save_fpu_regs:
+-	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
+-	bor	%r14
+-	clg	%r9,BASED(.Lcleanup_save_fpu_regs_done)
+-	jhe	5f
+-	clg	%r9,BASED(.Lcleanup_save_fpu_regs_fp)
+-	jhe	4f
+-	clg	%r9,BASED(.Lcleanup_save_fpu_regs_vx_high)
+-	jhe	3f
+-	clg	%r9,BASED(.Lcleanup_save_fpu_regs_vx_low)
+-	jhe	2f
+-	clg	%r9,BASED(.Lcleanup_save_fpu_fpc_end)
+-	jhe	1f
+-	lg	%r2,__LC_CURRENT
+-	aghi	%r2,__TASK_thread
+-0:	# Store floating-point controls
+-	stfpc	__THREAD_FPU_fpc(%r2)
+-1:	# Load register save area and check if VX is active
+-	lg	%r3,__THREAD_FPU_regs(%r2)
+-	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
+-	jz	4f			  # no VX -> store FP regs
+-2:	# Store vector registers (V0-V15)
+-	VSTM	%v0,%v15,0,%r3		  # vstm 0,15,0(3)
+-3:	# Store vector registers (V16-V31)
+-	VSTM	%v16,%v31,256,%r3	  # vstm 16,31,256(3)
+-	j	5f			  # -> done, set CIF_FPU flag
+-4:	# Store floating-point registers
+-	std	0,0(%r3)
+-	std	1,8(%r3)
+-	std	2,16(%r3)
+-	std	3,24(%r3)
+-	std	4,32(%r3)
+-	std	5,40(%r3)
+-	std	6,48(%r3)
+-	std	7,56(%r3)
+-	std	8,64(%r3)
+-	std	9,72(%r3)
+-	std	10,80(%r3)
+-	std	11,88(%r3)
+-	std	12,96(%r3)
+-	std	13,104(%r3)
+-	std	14,112(%r3)
+-	std	15,120(%r3)
+-5:	# Set CIF_FPU flag
+-	oi	__LC_CPU_FLAGS+7,_CIF_FPU
+-	lg	%r9,48(%r11)		# return from save_fpu_regs
++	larl	%r9,save_fpu_regs
+ 	br	%r14
+-.Lcleanup_save_fpu_fpc_end:
+-	.quad	.Lsave_fpu_regs_fpc_end
+-.Lcleanup_save_fpu_regs_vx_low:
+-	.quad	.Lsave_fpu_regs_vx_low
+-.Lcleanup_save_fpu_regs_vx_high:
+-	.quad	.Lsave_fpu_regs_vx_high
+-.Lcleanup_save_fpu_regs_fp:
+-	.quad	.Lsave_fpu_regs_fp
+-.Lcleanup_save_fpu_regs_done:
+-	.quad	.Lsave_fpu_regs_done
+ 
+ .Lcleanup_load_fpu_regs:
+-	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
+-	bnor	%r14
+-	clg	%r9,BASED(.Lcleanup_load_fpu_regs_done)
+-	jhe	1f
+-	clg	%r9,BASED(.Lcleanup_load_fpu_regs_fp)
+-	jhe	2f
+-	clg	%r9,BASED(.Lcleanup_load_fpu_regs_vx_high)
+-	jhe	3f
+-	clg	%r9,BASED(.Lcleanup_load_fpu_regs_vx)
+-	jhe	4f
+-	lg	%r4,__LC_CURRENT
+-	aghi	%r4,__TASK_thread
+-	lfpc	__THREAD_FPU_fpc(%r4)
+-	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
+-	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
+-	jz	2f				# -> no VX, load FP regs
+-4:	# Load V0 ..V15 registers
+-	VLM	%v0,%v15,0,%r4
+-3:	# Load V16..V31 registers
+-	VLM	%v16,%v31,256,%r4
+-	j	1f
+-2:	# Load floating-point registers
+-	ld	0,0(%r4)
+-	ld	1,8(%r4)
+-	ld	2,16(%r4)
+-	ld	3,24(%r4)
+-	ld	4,32(%r4)
+-	ld	5,40(%r4)
+-	ld	6,48(%r4)
+-	ld	7,56(%r4)
+-	ld	8,64(%r4)
+-	ld	9,72(%r4)
+-	ld	10,80(%r4)
+-	ld	11,88(%r4)
+-	ld	12,96(%r4)
+-	ld	13,104(%r4)
+-	ld	14,112(%r4)
+-	ld	15,120(%r4)
+-1:	# Clear CIF_FPU bit
+-	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU
+-	lg	%r9,48(%r11)		# return from load_fpu_regs
++	larl	%r9,load_fpu_regs
+ 	br	%r14
+-.Lcleanup_load_fpu_regs_vx:
+-	.quad	.Lload_fpu_regs_vx
+-.Lcleanup_load_fpu_regs_vx_high:
+-	.quad	.Lload_fpu_regs_vx_high
+-.Lcleanup_load_fpu_regs_fp:
+-	.quad	.Lload_fpu_regs_fp
+-.Lcleanup_load_fpu_regs_done:
+-	.quad	.Lload_fpu_regs_done
+ 
+ /*
+  * Integer constants
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index 9220db5c996a..93fc63ef6e95 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -327,6 +327,7 @@ static void __init setup_lowcore(void)
+ 		+ PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
+ 	lc->current_task = (unsigned long) init_thread_union.thread_info.task;
+ 	lc->thread_info = (unsigned long) &init_thread_union;
++	lc->lpp = LPP_MAGIC;
+ 	lc->machine_flags = S390_lowcore.machine_flags;
+ 	lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
+ 	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
+diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
+index 8f19c8f9d660..8f75edc998ff 100644
+--- a/arch/s390/pci/pci.c
++++ b/arch/s390/pci/pci.c
+@@ -864,8 +864,11 @@ static inline int barsize(u8 size)
+ 
+ static int zpci_mem_init(void)
+ {
++	BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
++		     __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));
++
+ 	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
+-				16, 0, NULL);
++					   __alignof__(struct zpci_fmb), 0, NULL);
+ 	if (!zdev_fmb_cache)
+ 		goto error_fmb;
+ 
+diff --git a/arch/sh/mm/kmap.c b/arch/sh/mm/kmap.c
+index ec29e14ec5a8..bf25d7c79a2d 100644
+--- a/arch/sh/mm/kmap.c
++++ b/arch/sh/mm/kmap.c
+@@ -36,6 +36,7 @@ void *kmap_coherent(struct page *page, unsigned long addr)
+ 
+ 	BUG_ON(!test_bit(PG_dcache_clean, &page->flags));
+ 
++	preempt_disable();
+ 	pagefault_disable();
+ 
+ 	idx = FIX_CMAP_END -
+@@ -64,4 +65,5 @@ void kunmap_coherent(void *kvaddr)
+ 	}
+ 
+ 	pagefault_enable();
++	preempt_enable();
+ }
+diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
+index b821b13d343a..8a6b57108ac2 100644
+--- a/arch/um/drivers/mconsole_kern.c
++++ b/arch/um/drivers/mconsole_kern.c
+@@ -133,7 +133,7 @@ void mconsole_proc(struct mc_request *req)
+ 	ptr += strlen("proc");
+ 	ptr = skip_spaces(ptr);
+ 
+-	file = file_open_root(mnt->mnt_root, mnt, ptr, O_RDONLY);
++	file = file_open_root(mnt->mnt_root, mnt, ptr, O_RDONLY, 0);
+ 	if (IS_ERR(file)) {
+ 		mconsole_reply(req, "Failed to open file", 1, 0);
+ 		printk(KERN_ERR "open /proc/%s: %ld\n", ptr, PTR_ERR(file));
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index c46662f64c39..3bf45a0cd69e 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -1160,22 +1160,23 @@ config MICROCODE
+ 	bool "CPU microcode loading support"
+ 	default y
+ 	depends on CPU_SUP_AMD || CPU_SUP_INTEL
+-	depends on BLK_DEV_INITRD
+ 	select FW_LOADER
+ 	---help---
+-
+ 	  If you say Y here, you will be able to update the microcode on
+-	  certain Intel and AMD processors. The Intel support is for the
+-	  IA32 family, e.g. Pentium Pro, Pentium II, Pentium III, Pentium 4,
+-	  Xeon etc. The AMD support is for families 0x10 and later. You will
+-	  obviously need the actual microcode binary data itself which is not
+-	  shipped with the Linux kernel.
+-
+-	  This option selects the general module only, you need to select
+-	  at least one vendor specific module as well.
+-
+-	  To compile this driver as a module, choose M here: the module
+-	  will be called microcode.
++	  Intel and AMD processors. The Intel support is for the IA32 family,
++	  e.g. Pentium Pro, Pentium II, Pentium III, Pentium 4, Xeon etc. The
++	  AMD support is for families 0x10 and later. You will obviously need
++	  the actual microcode binary data itself which is not shipped with
++	  the Linux kernel.
++
++	  The preferred method to load microcode from a detached initrd is described
++	  in Documentation/x86/early-microcode.txt. For that you need to enable
++	  CONFIG_BLK_DEV_INITRD in order for the loader to be able to scan the
++	  initrd for microcode blobs.
++
++	  In addition, you can build-in the microcode into the kernel. For that you
++	  need to enable FIRMWARE_IN_KERNEL and add the vendor-supplied microcode
++	  to the CONFIG_EXTRA_FIRMWARE config option.
+ 
+ config MICROCODE_INTEL
+ 	bool "Intel microcode loading support"
+diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
+index 03663740c866..1a4477cedc49 100644
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -268,6 +268,7 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
+ /* Called with IRQs disabled. */
+ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
+ {
++	struct thread_info *ti = pt_regs_to_thread_info(regs);
+ 	u32 cached_flags;
+ 
+ 	if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
+@@ -275,12 +276,22 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
+ 
+ 	lockdep_sys_exit();
+ 
+-	cached_flags =
+-		READ_ONCE(pt_regs_to_thread_info(regs)->flags);
++	cached_flags = READ_ONCE(ti->flags);
+ 
+ 	if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
+ 		exit_to_usermode_loop(regs, cached_flags);
+ 
++#ifdef CONFIG_COMPAT
++	/*
++	 * Compat syscalls set TS_COMPAT.  Make sure we clear it before
++	 * returning to user mode.  We need to clear it *after* signal
++	 * handling, because syscall restart has a fixup for compat
++	 * syscalls.  The fixup is exercised by the ptrace_syscall_32
++	 * selftest.
++	 */
++	ti->status &= ~TS_COMPAT;
++#endif
++
+ 	user_enter();
+ }
+ 
+@@ -332,14 +343,6 @@ __visible inline void syscall_return_slowpath(struct pt_regs *regs)
+ 	if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
+ 		syscall_slow_exit_work(regs, cached_flags);
+ 
+-#ifdef CONFIG_COMPAT
+-	/*
+-	 * Compat syscalls set TS_COMPAT.  Make sure we clear it before
+-	 * returning to user mode.
+-	 */
+-	ti->status &= ~TS_COMPAT;
+-#endif
+-
+ 	local_irq_disable();
+ 	prepare_exit_to_usermode(regs);
+ }
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index c80f6b6f3da2..e8c4fba52d3d 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -644,8 +644,8 @@ static inline void entering_irq(void)
+ 
+ static inline void entering_ack_irq(void)
+ {
+-	ack_APIC_irq();
+ 	entering_irq();
++	ack_APIC_irq();
+ }
+ 
+ static inline void ipi_entering_ack_irq(void)
+diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
+index 1815b736269d..84b3d194a958 100644
+--- a/arch/x86/include/asm/hw_irq.h
++++ b/arch/x86/include/asm/hw_irq.h
+@@ -141,6 +141,7 @@ struct irq_alloc_info {
+ struct irq_cfg {
+ 	unsigned int		dest_apicid;
+ 	u8			vector;
++	u8			old_vector;
+ };
+ 
+ extern struct irq_cfg *irq_cfg(unsigned int irq);
+diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
+index 1e1b07a5a738..9d3a96c4da78 100644
+--- a/arch/x86/include/asm/microcode.h
++++ b/arch/x86/include/asm/microcode.h
+@@ -3,6 +3,7 @@
+ 
+ #include <asm/cpu.h>
+ #include <linux/earlycpio.h>
++#include <linux/initrd.h>
+ 
+ #define native_rdmsr(msr, val1, val2)			\
+ do {							\
+@@ -143,4 +144,29 @@ static inline void reload_early_microcode(void)			{ }
+ static inline bool
+ get_builtin_firmware(struct cpio_data *cd, const char *name)	{ return false; }
+ #endif
++
++static inline unsigned long get_initrd_start(void)
++{
++#ifdef CONFIG_BLK_DEV_INITRD
++	return initrd_start;
++#else
++	return 0;
++#endif
++}
++
++static inline unsigned long get_initrd_start_addr(void)
++{
++#ifdef CONFIG_BLK_DEV_INITRD
++#ifdef CONFIG_X86_32
++	unsigned long *initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
++
++	return (unsigned long)__pa_nodebug(*initrd_start_p);
++#else
++	return get_initrd_start();
++#endif
++#else /* CONFIG_BLK_DEV_INITRD */
++	return 0;
++#endif
++}
++
+ #endif /* _ASM_X86_MICROCODE_H */
+diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
+index 7bcb861a04e5..5a2ed3ed2f26 100644
+--- a/arch/x86/include/asm/perf_event.h
++++ b/arch/x86/include/asm/perf_event.h
+@@ -165,6 +165,7 @@ struct x86_pmu_capability {
+ #define GLOBAL_STATUS_ASIF				BIT_ULL(60)
+ #define GLOBAL_STATUS_COUNTERS_FROZEN			BIT_ULL(59)
+ #define GLOBAL_STATUS_LBRS_FROZEN			BIT_ULL(58)
++#define GLOBAL_STATUS_TRACE_TOPAPMI			BIT_ULL(55)
+ 
+ /*
+  * IBS cpuid feature detection
+diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
+index 8b2d4bea9962..39171b3646bb 100644
+--- a/arch/x86/include/asm/xen/hypervisor.h
++++ b/arch/x86/include/asm/xen/hypervisor.h
+@@ -62,4 +62,6 @@ void xen_arch_register_cpu(int num);
+ void xen_arch_unregister_cpu(int num);
+ #endif
+ 
++extern void xen_set_iopl_mask(unsigned mask);
++
+ #endif /* _ASM_X86_XEN_HYPERVISOR_H */
+diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
+index 3b670df4ba7b..ad59d70bcb1a 100644
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -213,6 +213,7 @@ update:
+ 	 */
+ 	cpumask_and(d->old_domain, d->old_domain, cpu_online_mask);
+ 	d->move_in_progress = !cpumask_empty(d->old_domain);
++	d->cfg.old_vector = d->move_in_progress ? d->cfg.vector : 0;
+ 	d->cfg.vector = vector;
+ 	cpumask_copy(d->domain, vector_cpumask);
+ success:
+@@ -655,46 +656,97 @@ void irq_complete_move(struct irq_cfg *cfg)
+ }
+ 
+ /*
+- * Called with @desc->lock held and interrupts disabled.
++ * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
+  */
+ void irq_force_complete_move(struct irq_desc *desc)
+ {
+ 	struct irq_data *irqdata = irq_desc_get_irq_data(desc);
+ 	struct apic_chip_data *data = apic_chip_data(irqdata);
+ 	struct irq_cfg *cfg = data ? &data->cfg : NULL;
++	unsigned int cpu;
+ 
+ 	if (!cfg)
+ 		return;
+ 
+-	__irq_complete_move(cfg, cfg->vector);
+-
+ 	/*
+ 	 * This is tricky. If the cleanup of @data->old_domain has not been
+ 	 * done yet, then the following setaffinity call will fail with
+ 	 * -EBUSY. This can leave the interrupt in a stale state.
+ 	 *
+-	 * The cleanup cannot make progress because we hold @desc->lock. So in
+-	 * case @data->old_domain is not yet cleaned up, we need to drop the
+-	 * lock and acquire it again. @desc cannot go away, because the
+-	 * hotplug code holds the sparse irq lock.
++	 * All CPUs are stuck in stop machine with interrupts disabled so
++	 * calling __irq_complete_move() would be completely pointless.
+ 	 */
+ 	raw_spin_lock(&vector_lock);
+-	/* Clean out all offline cpus (including ourself) first. */
++	/*
++	 * Clean out all offline cpus (including the outgoing one) from the
++	 * old_domain mask.
++	 */
+ 	cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
+-	while (!cpumask_empty(data->old_domain)) {
++
++	/*
++	 * If move_in_progress is cleared and the old_domain mask is empty,
++	 * then there is nothing to cleanup. fixup_irqs() will take care of
++	 * the stale vectors on the outgoing cpu.
++	 */
++	if (!data->move_in_progress && cpumask_empty(data->old_domain)) {
+ 		raw_spin_unlock(&vector_lock);
+-		raw_spin_unlock(&desc->lock);
+-		cpu_relax();
+-		raw_spin_lock(&desc->lock);
++		return;
++	}
++
++	/*
++	 * 1) The interrupt is in move_in_progress state. That means that we
++	 *    have not seen an interrupt since the io_apic was reprogrammed to
++	 *    the new vector.
++	 *
++	 * 2) The interrupt has fired on the new vector, but the cleanup IPIs
++	 *    have not been processed yet.
++	 */
++	if (data->move_in_progress) {
+ 		/*
+-		 * Reevaluate apic_chip_data. It might have been cleared after
+-		 * we dropped @desc->lock.
++		 * In theory there is a race:
++		 *
++		 * set_ioapic(new_vector) <-- Interrupt is raised before update
++		 *			      is effective, i.e. it's raised on
++		 *			      the old vector.
++		 *
++		 * So if the target cpu cannot handle that interrupt before
++		 * the old vector is cleaned up, we get a spurious interrupt
++		 * and in the worst case the ioapic irq line becomes stale.
++		 *
++		 * But in case of cpu hotplug this should be a non issue
++		 * because if the affinity update happens right before all
++		 * cpus rendevouz in stop machine, there is no way that the
++		 * interrupt can be blocked on the target cpu because all cpus
++		 * loops first with interrupts enabled in stop machine, so the
++		 * old vector is not yet cleaned up when the interrupt fires.
++		 *
++		 * So the only way to run into this issue is if the delivery
++		 * of the interrupt on the apic/system bus would be delayed
++		 * beyond the point where the target cpu disables interrupts
++		 * in stop machine. I doubt that it can happen, but at least
++		 * there is a theroretical chance. Virtualization might be
++		 * able to expose this, but AFAICT the IOAPIC emulation is not
++		 * as stupid as the real hardware.
++		 *
++		 * Anyway, there is nothing we can do about that at this point
++		 * w/o refactoring the whole fixup_irq() business completely.
++		 * We print at least the irq number and the old vector number,
++		 * so we have the necessary information when a problem in that
++		 * area arises.
+ 		 */
+-		data = apic_chip_data(irqdata);
+-		if (!data)
+-			return;
+-		raw_spin_lock(&vector_lock);
++		pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
++			irqdata->irq, cfg->old_vector);
+ 	}
++	/*
++	 * If old_domain is not empty, then other cpus still have the irq
++	 * descriptor set in their vector array. Clean it up.
++	 */
++	for_each_cpu(cpu, data->old_domain)
++		per_cpu(vector_irq, cpu)[cfg->old_vector] = VECTOR_UNUSED;
++
++	/* Cleanup the left overs of the (half finished) move */
++	cpumask_clear(data->old_domain);
++	data->move_in_progress = 0;
+ 	raw_spin_unlock(&vector_lock);
+ }
+ #endif
+diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
+index ee81c544ee0d..4f4735bd8698 100644
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -551,10 +551,14 @@ scan_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
+ 	cd.data = NULL;
+ 	cd.size = 0;
+ 
+-	cd = find_cpio_data(p, (void *)start, size, &offset);
+-	if (!cd.data) {
++	/* try built-in microcode if no initrd */
++	if (!size) {
+ 		if (!load_builtin_intel_microcode(&cd))
+ 			return UCODE_ERROR;
++	} else {
++		cd = find_cpio_data(p, (void *)start, size, &offset);
++		if (!cd.data)
++			return UCODE_ERROR;
+ 	}
+ 
+ 	return get_matching_model_microcode(0, start, cd.data, cd.size,
+@@ -690,7 +694,7 @@ int __init save_microcode_in_initrd_intel(void)
+ 	if (count == 0)
+ 		return ret;
+ 
+-	copy_initrd_ptrs(mc_saved, mc_saved_in_initrd, initrd_start, count);
++	copy_initrd_ptrs(mc_saved, mc_saved_in_initrd, get_initrd_start(), count);
+ 	ret = save_microcode(&mc_saved_data, mc_saved, count);
+ 	if (ret)
+ 		pr_err("Cannot save microcode patches from initrd.\n");
+@@ -728,16 +732,20 @@ void __init load_ucode_intel_bsp(void)
+ 	struct boot_params *p;
+ 
+ 	p	= (struct boot_params *)__pa_nodebug(&boot_params);
+-	start	= p->hdr.ramdisk_image;
+ 	size	= p->hdr.ramdisk_size;
+ 
+-	_load_ucode_intel_bsp(
+-			(struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
+-			(unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
+-			start, size);
++	/*
++	 * Set start only if we have an initrd image. We cannot use initrd_start
++	 * because it is not set that early yet.
++	 */
++	start	= (size ? p->hdr.ramdisk_image : 0);
++
++	_load_ucode_intel_bsp((struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
++			      (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
++			      start, size);
+ #else
+-	start	= boot_params.hdr.ramdisk_image + PAGE_OFFSET;
+ 	size	= boot_params.hdr.ramdisk_size;
++	start	= (size ? boot_params.hdr.ramdisk_image + PAGE_OFFSET : 0);
+ 
+ 	_load_ucode_intel_bsp(&mc_saved_data, mc_saved_in_initrd, start, size);
+ #endif
+@@ -748,20 +756,14 @@ void load_ucode_intel_ap(void)
+ 	struct mc_saved_data *mc_saved_data_p;
+ 	struct ucode_cpu_info uci;
+ 	unsigned long *mc_saved_in_initrd_p;
+-	unsigned long initrd_start_addr;
+ 	enum ucode_state ret;
+ #ifdef CONFIG_X86_32
+-	unsigned long *initrd_start_p;
+ 
+-	mc_saved_in_initrd_p =
+-		(unsigned long *)__pa_nodebug(mc_saved_in_initrd);
++	mc_saved_in_initrd_p = (unsigned long *)__pa_nodebug(mc_saved_in_initrd);
+ 	mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
+-	initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
+-	initrd_start_addr = (unsigned long)__pa_nodebug(*initrd_start_p);
+ #else
+-	mc_saved_data_p = &mc_saved_data;
+ 	mc_saved_in_initrd_p = mc_saved_in_initrd;
+-	initrd_start_addr = initrd_start;
++	mc_saved_data_p = &mc_saved_data;
+ #endif
+ 
+ 	/*
+@@ -773,7 +775,7 @@ void load_ucode_intel_ap(void)
+ 
+ 	collect_cpu_info_early(&uci);
+ 	ret = load_microcode(mc_saved_data_p, mc_saved_in_initrd_p,
+-			     initrd_start_addr, &uci);
++			     get_initrd_start_addr(), &uci);
+ 
+ 	if (ret != UCODE_OK)
+ 		return;
+diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
+index 1b443db2db50..6532f5b40646 100644
+--- a/arch/x86/kernel/cpu/perf_event.c
++++ b/arch/x86/kernel/cpu/perf_event.c
+@@ -596,6 +596,19 @@ void x86_pmu_disable_all(void)
+ 	}
+ }
+ 
++/*
++ * There may be PMI landing after enabled=0. The PMI hitting could be before or
++ * after disable_all.
++ *
++ * If PMI hits before disable_all, the PMU will be disabled in the NMI handler.
++ * It will not be re-enabled in the NMI handler again, because enabled=0. After
++ * handling the NMI, disable_all will be called, which will not change the
++ * state either. If PMI hits after disable_all, the PMU is already disabled
++ * before entering NMI handler. The NMI handler will not change the state
++ * either.
++ *
++ * So either situation is harmless.
++ */
+ static void x86_pmu_disable(struct pmu *pmu)
+ {
+ 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
+index 7bb61e32fb29..98be6d6d32fa 100644
+--- a/arch/x86/kernel/cpu/perf_event.h
++++ b/arch/x86/kernel/cpu/perf_event.h
+@@ -586,6 +586,7 @@ struct x86_pmu {
+ 			pebs_broken	:1,
+ 			pebs_prec_dist	:1;
+ 	int		pebs_record_size;
++	int		pebs_buffer_size;
+ 	void		(*drain_pebs)(struct pt_regs *regs);
+ 	struct event_constraint *pebs_constraints;
+ 	void		(*pebs_aliases)(struct perf_event *event);
+@@ -904,6 +905,8 @@ void intel_pmu_lbr_init_skl(void);
+ 
+ void intel_pmu_lbr_init_knl(void);
+ 
++void intel_pmu_pebs_data_source_nhm(void);
++
+ int intel_pmu_setup_lbr_filter(struct perf_event *event);
+ 
+ void intel_pt_interrupt(void);
+diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
+index fed2ab1f1065..760aec1e8f82 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel.c
++++ b/arch/x86/kernel/cpu/perf_event_intel.c
+@@ -1502,7 +1502,15 @@ static __initconst const u64 knl_hw_cache_extra_regs
+ };
+ 
+ /*
+- * Use from PMIs where the LBRs are already disabled.
++ * Used from PMIs where the LBRs are already disabled.
++ *
++ * This function could be called consecutively. It is required to remain in
++ * disabled state if called consecutively.
++ *
++ * During consecutive calls, the same disable value will be written to related
++ * registers, so the PMU state remains unchanged. hw.state in
++ * intel_bts_disable_local will remain PERF_HES_STOPPED too in consecutive
++ * calls.
+  */
+ static void __intel_pmu_disable_all(void)
+ {
+@@ -1884,6 +1892,16 @@ again:
+ 	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
+ 		handled++;
+ 		x86_pmu.drain_pebs(regs);
++		/*
++		 * There are cases where, even though, the PEBS ovfl bit is set
++		 * in GLOBAL_OVF_STATUS, the PEBS events may also have their
++		 * overflow bits set for their counters. We must clear them
++		 * here because they have been processed as exact samples in
++		 * the drain_pebs() routine. They must not be processed again
++		 * in the for_each_bit_set() loop for regular samples below.
++		 */
++		status &= ~cpuc->pebs_enabled;
++		status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
+ 	}
+ 
+ 	/*
+@@ -1929,7 +1947,10 @@ again:
+ 		goto again;
+ 
+ done:
+-	__intel_pmu_enable_all(0, true);
++	/* Only restore PMU state when it's active. See x86_pmu_disable(). */
++	if (cpuc->enabled)
++		__intel_pmu_enable_all(0, true);
++
+ 	/*
+ 	 * Only unmask the NMI after the overflow counters
+ 	 * have been reset. This avoids spurious NMIs on
+@@ -3396,6 +3417,7 @@ __init int intel_pmu_init(void)
+ 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
+ 			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
+ 
++		intel_pmu_pebs_data_source_nhm();
+ 		x86_add_quirk(intel_nehalem_quirk);
+ 
+ 		pr_cont("Nehalem events, ");
+@@ -3459,6 +3481,7 @@ __init int intel_pmu_init(void)
+ 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
+ 			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
+ 
++		intel_pmu_pebs_data_source_nhm();
+ 		pr_cont("Westmere events, ");
+ 		break;
+ 
+diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
+index 10602f0a438f..955140140fd4 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
+@@ -51,7 +51,8 @@ union intel_x86_pebs_dse {
+ #define OP_LH (P(OP, LOAD) | P(LVL, HIT))
+ #define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))
+ 
+-static const u64 pebs_data_source[] = {
++/* Version for Sandy Bridge and later */
++static u64 pebs_data_source[] = {
+ 	P(OP, LOAD) | P(LVL, MISS) | P(LVL, L3) | P(SNOOP, NA),/* 0x00:ukn L3 */
+ 	OP_LH | P(LVL, L1)  | P(SNOOP, NONE),	/* 0x01: L1 local */
+ 	OP_LH | P(LVL, LFB) | P(SNOOP, NONE),	/* 0x02: LFB hit */
+@@ -70,6 +71,14 @@ static const u64 pebs_data_source[] = {
+ 	OP_LH | P(LVL, UNC) | P(SNOOP, NONE), /* 0x0f: uncached */
+ };
+ 
++/* Patch up minor differences in the bits */
++void __init intel_pmu_pebs_data_source_nhm(void)
++{
++	pebs_data_source[0x05] = OP_LH | P(LVL, L3)  | P(SNOOP, HIT);
++	pebs_data_source[0x06] = OP_LH | P(LVL, L3)  | P(SNOOP, HITM);
++	pebs_data_source[0x07] = OP_LH | P(LVL, L3)  | P(SNOOP, HITM);
++}
++
+ static u64 precise_store_data(u64 status)
+ {
+ 	union intel_x86_pebs_dse dse;
+@@ -269,7 +278,7 @@ static int alloc_pebs_buffer(int cpu)
+ 	if (!x86_pmu.pebs)
+ 		return 0;
+ 
+-	buffer = kzalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL, node);
++	buffer = kzalloc_node(x86_pmu.pebs_buffer_size, GFP_KERNEL, node);
+ 	if (unlikely(!buffer))
+ 		return -ENOMEM;
+ 
+@@ -286,7 +295,7 @@ static int alloc_pebs_buffer(int cpu)
+ 		per_cpu(insn_buffer, cpu) = ibuffer;
+ 	}
+ 
+-	max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;
++	max = x86_pmu.pebs_buffer_size / x86_pmu.pebs_record_size;
+ 
+ 	ds->pebs_buffer_base = (u64)(unsigned long)buffer;
+ 	ds->pebs_index = ds->pebs_buffer_base;
+@@ -1319,6 +1328,7 @@ void __init intel_ds_init(void)
+ 
+ 	x86_pmu.bts  = boot_cpu_has(X86_FEATURE_BTS);
+ 	x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
++	x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
+ 	if (x86_pmu.pebs) {
+ 		char pebs_type = x86_pmu.intel_cap.pebs_trap ?  '+' : '-';
+ 		int format = x86_pmu.intel_cap.pebs_format;
+@@ -1327,6 +1337,14 @@ void __init intel_ds_init(void)
+ 		case 0:
+ 			printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
+ 			x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
++			/*
++			 * Using >PAGE_SIZE buffers makes the WRMSR to
++			 * PERF_GLOBAL_CTRL in intel_pmu_enable_all()
++			 * mysteriously hang on Core2.
++			 *
++			 * As a workaround, we don't do this.
++			 */
++			x86_pmu.pebs_buffer_size = PAGE_SIZE;
+ 			x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
+ 			break;
+ 
+diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
+index 33acb884ccf1..4547b2cca71b 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
+@@ -2875,11 +2875,13 @@ static struct intel_uncore_type bdx_uncore_sbox = {
+ 	.format_group		= &hswep_uncore_sbox_format_group,
+ };
+ 
++#define BDX_MSR_UNCORE_SBOX	3
++
+ static struct intel_uncore_type *bdx_msr_uncores[] = {
+ 	&bdx_uncore_ubox,
+ 	&bdx_uncore_cbox,
+-	&bdx_uncore_sbox,
+ 	&hswep_uncore_pcu,
++	&bdx_uncore_sbox,
+ 	NULL,
+ };
+ 
+@@ -2888,6 +2890,10 @@ void bdx_uncore_cpu_init(void)
+ 	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
+ 		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
+ 	uncore_msr_uncores = bdx_msr_uncores;
++
++	/* BDX-DE doesn't have SBOX */
++	if (boot_cpu_data.x86_model == 86)
++		uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
+ }
+ 
+ static struct intel_uncore_type bdx_uncore_ha = {
+diff --git a/arch/x86/kernel/cpu/perf_event_knc.c b/arch/x86/kernel/cpu/perf_event_knc.c
+index 5b0c232d1ee6..b931095e86d4 100644
+--- a/arch/x86/kernel/cpu/perf_event_knc.c
++++ b/arch/x86/kernel/cpu/perf_event_knc.c
+@@ -263,7 +263,9 @@ again:
+ 		goto again;
+ 
+ done:
+-	knc_pmu_enable_all(0);
++	/* Only restore PMU state when it's active. See x86_pmu_disable(). */
++	if (cpuc->enabled)
++		knc_pmu_enable_all(0);
+ 
+ 	return handled;
+ }
+diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
+index 37dae792dbbe..589b3193f102 100644
+--- a/arch/x86/kernel/ioport.c
++++ b/arch/x86/kernel/ioport.c
+@@ -96,9 +96,14 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+ SYSCALL_DEFINE1(iopl, unsigned int, level)
+ {
+ 	struct pt_regs *regs = current_pt_regs();
+-	unsigned int old = (regs->flags >> 12) & 3;
+ 	struct thread_struct *t = &current->thread;
+ 
++	/*
++	 * Careful: the IOPL bits in regs->flags are undefined under Xen PV
++	 * and changing them has no effect.
++	 */
++	unsigned int old = t->iopl >> X86_EFLAGS_IOPL_BIT;
++
+ 	if (level > 3)
+ 		return -EINVAL;
+ 	/* Trying to gain more privileges? */
+@@ -106,8 +111,9 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
+ 		if (!capable(CAP_SYS_RAWIO))
+ 			return -EPERM;
+ 	}
+-	regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
+-	t->iopl = level << 12;
++	regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) |
++		(level << X86_EFLAGS_IOPL_BIT);
++	t->iopl = level << X86_EFLAGS_IOPL_BIT;
+ 	set_iopl_mask(t->iopl);
+ 
+ 	return 0;
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index b9d99e0f82c4..9f751876066f 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -48,6 +48,7 @@
+ #include <asm/syscalls.h>
+ #include <asm/debugreg.h>
+ #include <asm/switch_to.h>
++#include <asm/xen/hypervisor.h>
+ 
+ asmlinkage extern void ret_from_fork(void);
+ 
+@@ -411,6 +412,17 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ 		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
+ 		__switch_to_xtra(prev_p, next_p, tss);
+ 
++#ifdef CONFIG_XEN
++	/*
++	 * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
++	 * current_pt_regs()->flags may not match the current task's
++	 * intended IOPL.  We need to switch it manually.
++	 */
++	if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
++		     prev->iopl != next->iopl))
++		xen_set_iopl_mask(next->iopl);
++#endif
++
+ 	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
+ 		/*
+ 		 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
+diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
+index b0ea42b78ccd..ab5318727579 100644
+--- a/arch/x86/kvm/i8254.c
++++ b/arch/x86/kvm/i8254.c
+@@ -245,7 +245,7 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
+ 		 * PIC is being reset.  Handle it gracefully here
+ 		 */
+ 		atomic_inc(&ps->pending);
+-	else if (value > 0)
++	else if (value > 0 && ps->reinject)
+ 		/* in this case, we had multiple outstanding pit interrupts
+ 		 * that we needed to inject.  Reinject
+ 		 */
+@@ -288,7 +288,9 @@ static void pit_do_work(struct kthread_work *work)
+ 	 * last one has been acked.
+ 	 */
+ 	spin_lock(&ps->inject_lock);
+-	if (ps->irq_ack) {
++	if (!ps->reinject)
++		inject = 1;
++	else if (ps->irq_ack) {
+ 		ps->irq_ack = 0;
+ 		inject = 1;
+ 	}
+@@ -317,10 +319,10 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
+ 	struct kvm_kpit_state *ps = container_of(data, struct kvm_kpit_state, timer);
+ 	struct kvm_pit *pt = ps->kvm->arch.vpit;
+ 
+-	if (ps->reinject || !atomic_read(&ps->pending)) {
++	if (ps->reinject)
+ 		atomic_inc(&ps->pending);
+-		queue_kthread_work(&pt->worker, &pt->expired);
+-	}
++
++	queue_kthread_work(&pt->worker, &pt->expired);
+ 
+ 	if (ps->is_periodic) {
+ 		hrtimer_add_expires_ns(&ps->timer, ps->period);
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 9bd8f44baded..539062e24de1 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -2702,8 +2702,15 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
+ 	} else
+ 		vmx->nested.nested_vmx_ept_caps = 0;
+ 
++	/*
++	 * Old versions of KVM use the single-context version without
++	 * checking for support, so declare that it is supported even
++	 * though it is treated as global context.  The alternative is
++	 * not failing the single-context invvpid, and it is worse.
++	 */
+ 	if (enable_vpid)
+ 		vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
++				VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |
+ 				VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
+ 	else
+ 		vmx->nested.nested_vmx_vpid_caps = 0;
+@@ -7398,6 +7405,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
+ 	if (!(types & (1UL << type))) {
+ 		nested_vmx_failValid(vcpu,
+ 				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
++		skip_emulated_instruction(vcpu);
+ 		return 1;
+ 	}
+ 
+@@ -7456,6 +7464,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
+ 	if (!(types & (1UL << type))) {
+ 		nested_vmx_failValid(vcpu,
+ 			VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
++		skip_emulated_instruction(vcpu);
+ 		return 1;
+ 	}
+ 
+@@ -7472,12 +7481,17 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
+ 	}
+ 
+ 	switch (type) {
++	case VMX_VPID_EXTENT_SINGLE_CONTEXT:
++		/*
++		 * Old versions of KVM use the single-context version so we
++		 * have to support it; just treat it the same as all-context.
++		 */
+ 	case VMX_VPID_EXTENT_ALL_CONTEXT:
+ 		__vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
+ 		nested_vmx_succeed(vcpu);
+ 		break;
+ 	default:
+-		/* Trap single context invalidation invvpid calls */
++		/* Trap individual address invalidation invvpid calls */
+ 		BUG_ON(1);
+ 		break;
+ 	}
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index eaf6ee8c28b8..d47d231e0d4b 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -2752,6 +2752,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ 	}
+ 
+ 	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
++	vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
+ }
+ 
+ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
+index 8f4cc3dfac32..5fb6adaaa796 100644
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -106,8 +106,6 @@ static void flush_tlb_func(void *info)
+ 
+ 	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
+ 		return;
+-	if (!f->flush_end)
+-		f->flush_end = f->flush_start + PAGE_SIZE;
+ 
+ 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+ 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
+@@ -135,12 +133,20 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
+ 				 unsigned long end)
+ {
+ 	struct flush_tlb_info info;
++
++	if (end == 0)
++		end = start + PAGE_SIZE;
+ 	info.flush_mm = mm;
+ 	info.flush_start = start;
+ 	info.flush_end = end;
+ 
+ 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
+-	trace_tlb_flush(TLB_REMOTE_SEND_IPI, end - start);
++	if (end == TLB_FLUSH_ALL)
++		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
++	else
++		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
++				(end - start) >> PAGE_SHIFT);
++
+ 	if (is_uv_system()) {
+ 		unsigned int cpu;
+ 
+diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
+index e58565556703..0ae7e9fa348d 100644
+--- a/arch/x86/pci/fixup.c
++++ b/arch/x86/pci/fixup.c
+@@ -540,3 +540,10 @@ static void twinhead_reserve_killing_zone(struct pci_dev *dev)
+         }
+ }
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
++
++static void pci_bdwep_bar(struct pci_dev *dev)
++{
++	dev->non_compliant_bars = 1;
++}
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_bdwep_bar);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_bdwep_bar);
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index d09e4c9d7cc5..e3679db17545 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -961,7 +961,7 @@ static void xen_load_sp0(struct tss_struct *tss,
+ 	tss->x86_tss.sp0 = thread->sp0;
+ }
+ 
+-static void xen_set_iopl_mask(unsigned mask)
++void xen_set_iopl_mask(unsigned mask)
+ {
+ 	struct physdev_set_iopl set_iopl;
+ 
+diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
+index 9ed55649ac8e..05e1df943856 100644
+--- a/arch/xtensa/kernel/head.S
++++ b/arch/xtensa/kernel/head.S
+@@ -128,7 +128,7 @@ ENTRY(_startup)
+ 	wsr	a0, icountlevel
+ 
+ 	.set	_index, 0
+-	.rept	XCHAL_NUM_DBREAK - 1
++	.rept	XCHAL_NUM_DBREAK
+ 	wsr	a0, SREG_DBREAKC + _index
+ 	.set	_index, _index + 1
+ 	.endr
+diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
+index d75aa1476da7..1a804a2f9a5b 100644
+--- a/arch/xtensa/mm/cache.c
++++ b/arch/xtensa/mm/cache.c
+@@ -97,11 +97,11 @@ void clear_user_highpage(struct page *page, unsigned long vaddr)
+ 	unsigned long paddr;
+ 	void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);
+ 
+-	pagefault_disable();
++	preempt_disable();
+ 	kmap_invalidate_coherent(page, vaddr);
+ 	set_bit(PG_arch_1, &page->flags);
+ 	clear_page_alias(kvaddr, paddr);
+-	pagefault_enable();
++	preempt_enable();
+ }
+ 
+ void copy_user_highpage(struct page *dst, struct page *src,
+@@ -113,11 +113,11 @@ void copy_user_highpage(struct page *dst, struct page *src,
+ 	void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
+ 					  &src_paddr);
+ 
+-	pagefault_disable();
++	preempt_disable();
+ 	kmap_invalidate_coherent(dst, vaddr);
+ 	set_bit(PG_arch_1, &dst->flags);
+ 	copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
+-	pagefault_enable();
++	preempt_enable();
+ }
+ 
+ #endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
+diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
+index 70cb408bc20d..92d785fefb6d 100644
+--- a/arch/xtensa/platforms/iss/console.c
++++ b/arch/xtensa/platforms/iss/console.c
+@@ -100,21 +100,23 @@ static void rs_poll(unsigned long priv)
+ {
+ 	struct tty_port *port = (struct tty_port *)priv;
+ 	int i = 0;
++	int rd = 1;
+ 	unsigned char c;
+ 
+ 	spin_lock(&timer_lock);
+ 
+ 	while (simc_poll(0)) {
+-		simc_read(0, &c, 1);
++		rd = simc_read(0, &c, 1);
++		if (rd <= 0)
++			break;
+ 		tty_insert_flip_char(port, c, TTY_NORMAL);
+ 		i++;
+ 	}
+ 
+ 	if (i)
+ 		tty_flip_buffer_push(port);
+-
+-
+-	mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
++	if (rd)
++		mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
+ 	spin_unlock(&timer_lock);
+ }
+ 
+diff --git a/block/blk-core.c b/block/blk-core.c
+index b83d29755b5a..45f4d7efbf34 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -2198,7 +2198,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
+ 	if (q->mq_ops) {
+ 		if (blk_queue_io_stat(q))
+ 			blk_account_io_start(rq, true);
+-		blk_mq_insert_request(rq, false, true, true);
++		blk_mq_insert_request(rq, false, true, false);
+ 		return 0;
+ 	}
+ 
+diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
+index 021d39c0ba75..13c4e5a5fe8c 100644
+--- a/crypto/asymmetric_keys/x509_cert_parser.c
++++ b/crypto/asymmetric_keys/x509_cert_parser.c
+@@ -494,7 +494,7 @@ int x509_decode_time(time64_t *_t,  size_t hdrlen,
+ 		     unsigned char tag,
+ 		     const unsigned char *value, size_t vlen)
+ {
+-	static const unsigned char month_lengths[] = { 31, 29, 31, 30, 31, 30,
++	static const unsigned char month_lengths[] = { 31, 28, 31, 30, 31, 30,
+ 						       31, 31, 30, 31, 30, 31 };
+ 	const unsigned char *p = value;
+ 	unsigned year, mon, day, hour, min, sec, mon_len;
+@@ -540,9 +540,9 @@ int x509_decode_time(time64_t *_t,  size_t hdrlen,
+ 		if (year % 4 == 0) {
+ 			mon_len = 29;
+ 			if (year % 100 == 0) {
+-				year /= 100;
+-				if (year % 4 != 0)
+-					mon_len = 28;
++				mon_len = 28;
++				if (year % 400 == 0)
++					mon_len = 29;
+ 			}
+ 		}
+ 	}
+diff --git a/crypto/keywrap.c b/crypto/keywrap.c
+index b1d106ce55f3..72014f963ba7 100644
+--- a/crypto/keywrap.c
++++ b/crypto/keywrap.c
+@@ -212,7 +212,7 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
+ 			  SEMIBSIZE))
+ 		ret = -EBADMSG;
+ 
+-	memzero_explicit(&block, sizeof(struct crypto_kw_block));
++	memzero_explicit(block, sizeof(struct crypto_kw_block));
+ 
+ 	return ret;
+ }
+@@ -297,7 +297,7 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
+ 	/* establish the IV for the caller to pick up */
+ 	memcpy(desc->info, block->A, SEMIBSIZE);
+ 
+-	memzero_explicit(&block, sizeof(struct crypto_kw_block));
++	memzero_explicit(block, sizeof(struct crypto_kw_block));
+ 
+ 	return 0;
+ }
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index d02fd53042a5..56241eb341f4 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -27,8 +27,20 @@
+ 
+ #ifdef CONFIG_X86
+ #define valid_IRQ(i) (((i) != 0) && ((i) != 2))
++static inline bool acpi_iospace_resource_valid(struct resource *res)
++{
++	/* On X86 IO space is limited to the [0 - 64K] IO port range */
++	return res->end < 0x10003;
++}
+ #else
+ #define valid_IRQ(i) (true)
++/*
++ * ACPI IO descriptors on arches other than X86 contain MMIO CPU physical
++ * addresses mapping IO space in CPU physical address space, IO space
++ * resources can be placed anywhere in the 64-bit physical address space.
++ */
++static inline bool
++acpi_iospace_resource_valid(struct resource *res) { return true; }
+ #endif
+ 
+ static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io)
+@@ -127,7 +139,7 @@ static void acpi_dev_ioresource_flags(struct resource *res, u64 len,
+ 	if (!acpi_dev_resource_len_valid(res->start, res->end, len, true))
+ 		res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
+ 
+-	if (res->end >= 0x10003)
++	if (!acpi_iospace_resource_valid(res))
+ 		res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
+ 
+ 	if (io_decode == ACPI_DECODE_16)
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index 9cb975200cac..f054cadf30d8 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -714,6 +714,7 @@ static int acpi_hibernation_enter(void)
+ 
+ static void acpi_hibernation_leave(void)
+ {
++	pm_set_resume_via_firmware();
+ 	/*
+ 	 * If ACPI is not enabled by the BIOS and the boot kernel, we need to
+ 	 * enable it here.
+diff --git a/drivers/block/brd.c b/drivers/block/brd.c
+index cb27190e9f39..f7ecc287d733 100644
+--- a/drivers/block/brd.c
++++ b/drivers/block/brd.c
+@@ -341,7 +341,7 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
+ 
+ 	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
+ 		if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) ||
+-		    bio->bi_iter.bi_size & PAGE_MASK)
++		    bio->bi_iter.bi_size & ~PAGE_MASK)
+ 			goto io_error;
+ 		discard_from_brd(brd, sector, bio->bi_iter.bi_size);
+ 		goto out;
+diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
+index 9b180dbbd03c..1c330b61f05d 100644
+--- a/drivers/block/mtip32xx/mtip32xx.c
++++ b/drivers/block/mtip32xx/mtip32xx.c
+@@ -173,7 +173,13 @@ static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
+ {
+ 	struct request *rq;
+ 
++	if (mtip_check_surprise_removal(dd->pdev))
++		return NULL;
++
+ 	rq = blk_mq_alloc_request(dd->queue, 0, BLK_MQ_REQ_RESERVED);
++	if (IS_ERR(rq))
++		return NULL;
++
+ 	return blk_mq_rq_to_pdu(rq);
+ }
+ 
+@@ -233,15 +239,9 @@ static void mtip_async_complete(struct mtip_port *port,
+ 			"Command tag %d failed due to TFE\n", tag);
+ 	}
+ 
+-	/* Unmap the DMA scatter list entries */
+-	dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents, cmd->direction);
+-
+ 	rq = mtip_rq_from_tag(dd, tag);
+ 
+-	if (unlikely(cmd->unaligned))
+-		up(&port->cmd_slot_unal);
+-
+-	blk_mq_end_request(rq, status ? -EIO : 0);
++	blk_mq_complete_request(rq, status);
+ }
+ 
+ /*
+@@ -581,6 +581,8 @@ static void mtip_completion(struct mtip_port *port,
+ 		dev_warn(&port->dd->pdev->dev,
+ 			"Internal command %d completed with TFE\n", tag);
+ 
++	command->comp_func = NULL;
++	command->comp_data = NULL;
+ 	complete(waiting);
+ }
+ 
+@@ -618,8 +620,6 @@ static void mtip_handle_tfe(struct driver_data *dd)
+ 
+ 	port = dd->port;
+ 
+-	set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
+-
+ 	if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
+ 		cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
+ 		dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
+@@ -628,7 +628,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
+ 			cmd->comp_func(port, MTIP_TAG_INTERNAL,
+ 					cmd, PORT_IRQ_TF_ERR);
+ 		}
+-		goto handle_tfe_exit;
++		return;
+ 	}
+ 
+ 	/* clear the tag accumulator */
+@@ -701,7 +701,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
+ 			fail_reason = "thermal shutdown";
+ 		}
+ 		if (buf[288] == 0xBF) {
+-			set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
++			set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
+ 			dev_info(&dd->pdev->dev,
+ 				"Drive indicates rebuild has failed. Secure erase required.\n");
+ 			fail_all_ncq_cmds = 1;
+@@ -771,11 +771,6 @@ static void mtip_handle_tfe(struct driver_data *dd)
+ 		}
+ 	}
+ 	print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
+-
+-handle_tfe_exit:
+-	/* clear eh_active */
+-	clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
+-	wake_up_interruptible(&port->svc_wait);
+ }
+ 
+ /*
+@@ -1007,6 +1002,7 @@ static bool mtip_pause_ncq(struct mtip_port *port,
+ 			(fis->features == 0x27 || fis->features == 0x72 ||
+ 			 fis->features == 0x62 || fis->features == 0x26))) {
+ 		clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
++		clear_bit(MTIP_DDF_REBUILD_FAILED_BIT, &port->dd->dd_flag);
+ 		/* Com reset after secure erase or lowlevel format */
+ 		mtip_restart_port(port);
+ 		clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
+@@ -1021,12 +1017,14 @@ static bool mtip_pause_ncq(struct mtip_port *port,
+  *
+  * @port    Pointer to port data structure
+  * @timeout Max duration to wait (ms)
++ * @atomic  gfp_t flag to indicate blockable context or not
+  *
+  * return value
+  *	0	Success
+  *	-EBUSY  Commands still active
+  */
+-static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
++static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout,
++								gfp_t atomic)
+ {
+ 	unsigned long to;
+ 	unsigned int n;
+@@ -1037,16 +1035,21 @@ static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
+ 	to = jiffies + msecs_to_jiffies(timeout);
+ 	do {
+ 		if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) &&
+-			test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
++			test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags) &&
++			atomic == GFP_KERNEL) {
+ 			msleep(20);
+ 			continue; /* svc thd is actively issuing commands */
+ 		}
+ 
+-		msleep(100);
++		if (atomic == GFP_KERNEL)
++			msleep(100);
++		else {
++			cpu_relax();
++			udelay(100);
++		}
++
+ 		if (mtip_check_surprise_removal(port->dd->pdev))
+ 			goto err_fault;
+-		if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
+-			goto err_fault;
+ 
+ 		/*
+ 		 * Ignore s_active bit 0 of array element 0.
+@@ -1099,6 +1102,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
+ 	struct mtip_cmd *int_cmd;
+ 	struct driver_data *dd = port->dd;
+ 	int rv = 0;
++	unsigned long start;
+ 
+ 	/* Make sure the buffer is 8 byte aligned. This is asic specific. */
+ 	if (buffer & 0x00000007) {
+@@ -1107,6 +1111,10 @@ static int mtip_exec_internal_command(struct mtip_port *port,
+ 	}
+ 
+ 	int_cmd = mtip_get_int_command(dd);
++	if (!int_cmd) {
++		dbg_printk(MTIP_DRV_NAME "Unable to allocate tag for PIO cmd\n");
++		return -EFAULT;
++	}
+ 
+ 	set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
+ 
+@@ -1119,7 +1127,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
+ 		if (fis->command != ATA_CMD_STANDBYNOW1) {
+ 			/* wait for io to complete if non atomic */
+ 			if (mtip_quiesce_io(port,
+-					MTIP_QUIESCE_IO_TIMEOUT_MS) < 0) {
++				MTIP_QUIESCE_IO_TIMEOUT_MS, atomic) < 0) {
+ 				dev_warn(&dd->pdev->dev,
+ 					"Failed to quiesce IO\n");
+ 				mtip_put_int_command(dd, int_cmd);
+@@ -1162,6 +1170,8 @@ static int mtip_exec_internal_command(struct mtip_port *port,
+ 	/* Populate the command header */
+ 	int_cmd->command_header->byte_count = 0;
+ 
++	start = jiffies;
++
+ 	/* Issue the command to the hardware */
+ 	mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);
+ 
+@@ -1170,10 +1180,12 @@ static int mtip_exec_internal_command(struct mtip_port *port,
+ 		if ((rv = wait_for_completion_interruptible_timeout(
+ 				&wait,
+ 				msecs_to_jiffies(timeout))) <= 0) {
++
+ 			if (rv == -ERESTARTSYS) { /* interrupted */
+ 				dev_err(&dd->pdev->dev,
+-					"Internal command [%02X] was interrupted after %lu ms\n",
+-					fis->command, timeout);
++					"Internal command [%02X] was interrupted after %u ms\n",
++					fis->command,
++					jiffies_to_msecs(jiffies - start));
+ 				rv = -EINTR;
+ 				goto exec_ic_exit;
+ 			} else if (rv == 0) /* timeout */
+@@ -2890,6 +2902,42 @@ static int mtip_ftl_rebuild_poll(struct driver_data *dd)
+ 	return -EFAULT;
+ }
+ 
++static void mtip_softirq_done_fn(struct request *rq)
++{
++	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
++	struct driver_data *dd = rq->q->queuedata;
++
++	/* Unmap the DMA scatter list entries */
++	dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents,
++							cmd->direction);
++
++	if (unlikely(cmd->unaligned))
++		up(&dd->port->cmd_slot_unal);
++
++	blk_mq_end_request(rq, rq->errors);
++}
++
++static void mtip_abort_cmd(struct request *req, void *data,
++							bool reserved)
++{
++	struct driver_data *dd = data;
++
++	dbg_printk(MTIP_DRV_NAME " Aborting request, tag = %d\n", req->tag);
++
++	clear_bit(req->tag, dd->port->cmds_to_issue);
++	req->errors = -EIO;
++	mtip_softirq_done_fn(req);
++}
++
++static void mtip_queue_cmd(struct request *req, void *data,
++							bool reserved)
++{
++	struct driver_data *dd = data;
++
++	set_bit(req->tag, dd->port->cmds_to_issue);
++	blk_abort_request(req);
++}
++
+ /*
+  * service thread to issue queued commands
+  *
+@@ -2902,7 +2950,7 @@ static int mtip_ftl_rebuild_poll(struct driver_data *dd)
+ static int mtip_service_thread(void *data)
+ {
+ 	struct driver_data *dd = (struct driver_data *)data;
+-	unsigned long slot, slot_start, slot_wrap;
++	unsigned long slot, slot_start, slot_wrap, to;
+ 	unsigned int num_cmd_slots = dd->slot_groups * 32;
+ 	struct mtip_port *port = dd->port;
+ 
+@@ -2917,9 +2965,7 @@ static int mtip_service_thread(void *data)
+ 		 * is in progress nor error handling is active
+ 		 */
+ 		wait_event_interruptible(port->svc_wait, (port->flags) &&
+-			!(port->flags & MTIP_PF_PAUSE_IO));
+-
+-		set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
++			(port->flags & MTIP_PF_SVC_THD_WORK));
+ 
+ 		if (kthread_should_stop() ||
+ 			test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
+@@ -2929,6 +2975,8 @@ static int mtip_service_thread(void *data)
+ 				&dd->dd_flag)))
+ 			goto st_out;
+ 
++		set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
++
+ restart_eh:
+ 		/* Demux bits: start with error handling */
+ 		if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags)) {
+@@ -2939,6 +2987,32 @@ restart_eh:
+ 		if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags))
+ 			goto restart_eh;
+ 
++		if (test_bit(MTIP_PF_TO_ACTIVE_BIT, &port->flags)) {
++			to = jiffies + msecs_to_jiffies(5000);
++
++			do {
++				mdelay(100);
++			} while (atomic_read(&dd->irq_workers_active) != 0 &&
++				time_before(jiffies, to));
++
++			if (atomic_read(&dd->irq_workers_active) != 0)
++				dev_warn(&dd->pdev->dev,
++					"Completion workers still active!");
++
++			spin_lock(dd->queue->queue_lock);
++			blk_mq_all_tag_busy_iter(*dd->tags.tags,
++							mtip_queue_cmd, dd);
++			spin_unlock(dd->queue->queue_lock);
++
++			set_bit(MTIP_PF_ISSUE_CMDS_BIT, &dd->port->flags);
++
++			if (mtip_device_reset(dd))
++				blk_mq_all_tag_busy_iter(*dd->tags.tags,
++							mtip_abort_cmd, dd);
++
++			clear_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags);
++		}
++
+ 		if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
+ 			slot = 1;
+ 			/* used to restrict the loop to one iteration */
+@@ -2971,10 +3045,8 @@ restart_eh:
+ 		}
+ 
+ 		if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) {
+-			if (mtip_ftl_rebuild_poll(dd) < 0)
+-				set_bit(MTIP_DDF_REBUILD_FAILED_BIT,
+-							&dd->dd_flag);
+-			clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
++			if (mtip_ftl_rebuild_poll(dd) == 0)
++				clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
+ 		}
+ 	}
+ 
+@@ -3089,7 +3161,7 @@ static int mtip_hw_get_identify(struct driver_data *dd)
+ 		if (buf[288] == 0xBF) {
+ 			dev_info(&dd->pdev->dev,
+ 				"Drive indicates rebuild has failed.\n");
+-			/* TODO */
++			set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
+ 		}
+ 	}
+ 
+@@ -3263,20 +3335,25 @@ out1:
+ 	return rv;
+ }
+ 
+-static void mtip_standby_drive(struct driver_data *dd)
++static int mtip_standby_drive(struct driver_data *dd)
+ {
+-	if (dd->sr)
+-		return;
++	int rv = 0;
+ 
++	if (dd->sr || !dd->port)
++		return -ENODEV;
+ 	/*
+ 	 * Send standby immediate (E0h) to the drive so that it
+ 	 * saves its state.
+ 	 */
+ 	if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) &&
+-	    !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))
+-		if (mtip_standby_immediate(dd->port))
++	    !test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag) &&
++	    !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)) {
++		rv = mtip_standby_immediate(dd->port);
++		if (rv)
+ 			dev_warn(&dd->pdev->dev,
+ 				"STANDBY IMMEDIATE failed\n");
++	}
++	return rv;
+ }
+ 
+ /*
+@@ -3289,10 +3366,6 @@ static void mtip_standby_drive(struct driver_data *dd)
+  */
+ static int mtip_hw_exit(struct driver_data *dd)
+ {
+-	/*
+-	 * Send standby immediate (E0h) to the drive so that it
+-	 * saves its state.
+-	 */
+ 	if (!dd->sr) {
+ 		/* de-initialize the port. */
+ 		mtip_deinit_port(dd->port);
+@@ -3334,8 +3407,7 @@ static int mtip_hw_shutdown(struct driver_data *dd)
+ 	 * Send standby immediate (E0h) to the drive so that it
+ 	 * saves its state.
+ 	 */
+-	if (!dd->sr && dd->port)
+-		mtip_standby_immediate(dd->port);
++	mtip_standby_drive(dd);
+ 
+ 	return 0;
+ }
+@@ -3358,7 +3430,7 @@ static int mtip_hw_suspend(struct driver_data *dd)
+ 	 * Send standby immediate (E0h) to the drive
+ 	 * so that it saves its state.
+ 	 */
+-	if (mtip_standby_immediate(dd->port) != 0) {
++	if (mtip_standby_drive(dd) != 0) {
+ 		dev_err(&dd->pdev->dev,
+ 			"Failed standby-immediate command\n");
+ 		return -EFAULT;
+@@ -3596,6 +3668,28 @@ static int mtip_block_getgeo(struct block_device *dev,
+ 	return 0;
+ }
+ 
++static int mtip_block_open(struct block_device *dev, fmode_t mode)
++{
++	struct driver_data *dd;
++
++	if (dev && dev->bd_disk) {
++		dd = (struct driver_data *) dev->bd_disk->private_data;
++
++		if (dd) {
++			if (test_bit(MTIP_DDF_REMOVAL_BIT,
++							&dd->dd_flag)) {
++				return -ENODEV;
++			}
++			return 0;
++		}
++	}
++	return -ENODEV;
++}
++
++void mtip_block_release(struct gendisk *disk, fmode_t mode)
++{
++}
++
+ /*
+  * Block device operation function.
+  *
+@@ -3603,6 +3697,8 @@ static int mtip_block_getgeo(struct block_device *dev,
+  * layer.
+  */
+ static const struct block_device_operations mtip_block_ops = {
++	.open		= mtip_block_open,
++	.release	= mtip_block_release,
+ 	.ioctl		= mtip_block_ioctl,
+ #ifdef CONFIG_COMPAT
+ 	.compat_ioctl	= mtip_block_compat_ioctl,
+@@ -3664,10 +3760,9 @@ static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
+ 				rq_data_dir(rq))) {
+ 			return -ENODATA;
+ 		}
+-		if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)))
++		if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag) ||
++			test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)))
+ 			return -ENODATA;
+-		if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
+-			return -ENXIO;
+ 	}
+ 
+ 	if (rq->cmd_flags & REQ_DISCARD) {
+@@ -3779,11 +3874,33 @@ static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx,
+ 	return 0;
+ }
+ 
++static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req,
++								bool reserved)
++{
++	struct driver_data *dd = req->q->queuedata;
++	int ret = BLK_EH_RESET_TIMER;
++
++	if (reserved)
++		goto exit_handler;
++
++	if (test_bit(req->tag, dd->port->cmds_to_issue))
++		goto exit_handler;
++
++	if (test_and_set_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags))
++		goto exit_handler;
++
++	wake_up_interruptible(&dd->port->svc_wait);
++exit_handler:
++	return ret;
++}
++
+ static struct blk_mq_ops mtip_mq_ops = {
+ 	.queue_rq	= mtip_queue_rq,
+ 	.map_queue	= blk_mq_map_queue,
+ 	.init_request	= mtip_init_cmd,
+ 	.exit_request	= mtip_free_cmd,
++	.complete	= mtip_softirq_done_fn,
++	.timeout        = mtip_cmd_timeout,
+ };
+ 
+ /*
+@@ -3850,7 +3967,6 @@ static int mtip_block_initialize(struct driver_data *dd)
+ 
+ 	mtip_hw_debugfs_init(dd);
+ 
+-skip_create_disk:
+ 	memset(&dd->tags, 0, sizeof(dd->tags));
+ 	dd->tags.ops = &mtip_mq_ops;
+ 	dd->tags.nr_hw_queues = 1;
+@@ -3860,12 +3976,13 @@ skip_create_disk:
+ 	dd->tags.numa_node = dd->numa_node;
+ 	dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
+ 	dd->tags.driver_data = dd;
++	dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;
+ 
+ 	rv = blk_mq_alloc_tag_set(&dd->tags);
+ 	if (rv) {
+ 		dev_err(&dd->pdev->dev,
+ 			"Unable to allocate request queue\n");
+-		goto block_queue_alloc_init_error;
++		goto block_queue_alloc_tag_error;
+ 	}
+ 
+ 	/* Allocate the request queue. */
+@@ -3880,6 +3997,7 @@ skip_create_disk:
+ 	dd->disk->queue		= dd->queue;
+ 	dd->queue->queuedata	= dd;
+ 
++skip_create_disk:
+ 	/* Initialize the protocol layer. */
+ 	wait_for_rebuild = mtip_hw_get_identify(dd);
+ 	if (wait_for_rebuild < 0) {
+@@ -3976,8 +4094,9 @@ kthread_run_error:
+ read_capacity_error:
+ init_hw_cmds_error:
+ 	blk_cleanup_queue(dd->queue);
+-	blk_mq_free_tag_set(&dd->tags);
+ block_queue_alloc_init_error:
++	blk_mq_free_tag_set(&dd->tags);
++block_queue_alloc_tag_error:
+ 	mtip_hw_debugfs_exit(dd);
+ disk_index_error:
+ 	spin_lock(&rssd_index_lock);
+@@ -3994,6 +4113,22 @@ protocol_init_error:
+ 	return rv;
+ }
+ 
++static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
++{
++	struct driver_data *dd = (struct driver_data *)data;
++	struct mtip_cmd *cmd;
++
++	if (likely(!reserv))
++		blk_mq_complete_request(rq, -ENODEV);
++	else if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &dd->port->flags)) {
++
++		cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
++		if (cmd->comp_func)
++			cmd->comp_func(dd->port, MTIP_TAG_INTERNAL,
++					cmd, -ENODEV);
++	}
++}
++
+ /*
+  * Block layer deinitialization function.
+  *
+@@ -4025,12 +4160,23 @@ static int mtip_block_remove(struct driver_data *dd)
+ 		}
+ 	}
+ 
+-	if (!dd->sr)
+-		mtip_standby_drive(dd);
++	if (!dd->sr) {
++		/*
++		 * Explicitly wait here for IOs to quiesce,
++		 * as mtip_standby_drive usually won't wait for IOs.
++		 */
++		if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS,
++								GFP_KERNEL))
++			mtip_standby_drive(dd);
++	}
+ 	else
+ 		dev_info(&dd->pdev->dev, "device %s surprise removal\n",
+ 						dd->disk->disk_name);
+ 
++	blk_mq_freeze_queue_start(dd->queue);
++	blk_mq_stop_hw_queues(dd->queue);
++	blk_mq_all_tag_busy_iter(dd->tags.tags[0], mtip_no_dev_cleanup, dd);
++
+ 	/*
+ 	 * Delete our gendisk structure. This also removes the device
+ 	 * from /dev
+@@ -4040,7 +4186,8 @@ static int mtip_block_remove(struct driver_data *dd)
+ 		dd->bdev = NULL;
+ 	}
+ 	if (dd->disk) {
+-		del_gendisk(dd->disk);
++		if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
++			del_gendisk(dd->disk);
+ 		if (dd->disk->queue) {
+ 			blk_cleanup_queue(dd->queue);
+ 			blk_mq_free_tag_set(&dd->tags);
+@@ -4081,7 +4228,8 @@ static int mtip_block_shutdown(struct driver_data *dd)
+ 		dev_info(&dd->pdev->dev,
+ 			"Shutting down %s ...\n", dd->disk->disk_name);
+ 
+-		del_gendisk(dd->disk);
++		if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
++			del_gendisk(dd->disk);
+ 		if (dd->disk->queue) {
+ 			blk_cleanup_queue(dd->queue);
+ 			blk_mq_free_tag_set(&dd->tags);
+@@ -4426,7 +4574,7 @@ static void mtip_pci_remove(struct pci_dev *pdev)
+ 	struct driver_data *dd = pci_get_drvdata(pdev);
+ 	unsigned long flags, to;
+ 
+-	set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
++	set_bit(MTIP_DDF_REMOVAL_BIT, &dd->dd_flag);
+ 
+ 	spin_lock_irqsave(&dev_lock, flags);
+ 	list_del_init(&dd->online_list);
+@@ -4443,12 +4591,17 @@ static void mtip_pci_remove(struct pci_dev *pdev)
+ 	} while (atomic_read(&dd->irq_workers_active) != 0 &&
+ 		time_before(jiffies, to));
+ 
++	if (!dd->sr)
++		fsync_bdev(dd->bdev);
++
+ 	if (atomic_read(&dd->irq_workers_active) != 0) {
+ 		dev_warn(&dd->pdev->dev,
+ 			"Completion workers still active!\n");
+ 	}
+ 
+-	blk_mq_stop_hw_queues(dd->queue);
++	blk_set_queue_dying(dd->queue);
++	set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
++
+ 	/* Clean up the block layer. */
+ 	mtip_block_remove(dd);
+ 
+diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
+index 3274784008eb..7617888f7944 100644
+--- a/drivers/block/mtip32xx/mtip32xx.h
++++ b/drivers/block/mtip32xx/mtip32xx.h
+@@ -134,16 +134,24 @@ enum {
+ 	MTIP_PF_EH_ACTIVE_BIT       = 1, /* error handling */
+ 	MTIP_PF_SE_ACTIVE_BIT       = 2, /* secure erase */
+ 	MTIP_PF_DM_ACTIVE_BIT       = 3, /* download microcde */
++	MTIP_PF_TO_ACTIVE_BIT       = 9, /* timeout handling */
+ 	MTIP_PF_PAUSE_IO      =	((1 << MTIP_PF_IC_ACTIVE_BIT) |
+ 				(1 << MTIP_PF_EH_ACTIVE_BIT) |
+ 				(1 << MTIP_PF_SE_ACTIVE_BIT) |
+-				(1 << MTIP_PF_DM_ACTIVE_BIT)),
++				(1 << MTIP_PF_DM_ACTIVE_BIT) |
++				(1 << MTIP_PF_TO_ACTIVE_BIT)),
+ 
+ 	MTIP_PF_SVC_THD_ACTIVE_BIT  = 4,
+ 	MTIP_PF_ISSUE_CMDS_BIT      = 5,
+ 	MTIP_PF_REBUILD_BIT         = 6,
+ 	MTIP_PF_SVC_THD_STOP_BIT    = 8,
+ 
++	MTIP_PF_SVC_THD_WORK	= ((1 << MTIP_PF_EH_ACTIVE_BIT) |
++				  (1 << MTIP_PF_ISSUE_CMDS_BIT) |
++				  (1 << MTIP_PF_REBUILD_BIT) |
++				  (1 << MTIP_PF_SVC_THD_STOP_BIT) |
++				  (1 << MTIP_PF_TO_ACTIVE_BIT)),
++
+ 	/* below are bit numbers in 'dd_flag' defined in driver_data */
+ 	MTIP_DDF_SEC_LOCK_BIT	    = 0,
+ 	MTIP_DDF_REMOVE_PENDING_BIT = 1,
+@@ -153,6 +161,7 @@ enum {
+ 	MTIP_DDF_RESUME_BIT         = 6,
+ 	MTIP_DDF_INIT_DONE_BIT      = 7,
+ 	MTIP_DDF_REBUILD_FAILED_BIT = 8,
++	MTIP_DDF_REMOVAL_BIT	    = 9,
+ 
+ 	MTIP_DDF_STOP_IO      = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) |
+ 				(1 << MTIP_DDF_SEC_LOCK_BIT) |
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index fa893c3ec408..0beaa52df66b 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -82,6 +82,7 @@ static const struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x0489, 0xe05f) },
+ 	{ USB_DEVICE(0x0489, 0xe076) },
+ 	{ USB_DEVICE(0x0489, 0xe078) },
++	{ USB_DEVICE(0x0489, 0xe095) },
+ 	{ USB_DEVICE(0x04c5, 0x1330) },
+ 	{ USB_DEVICE(0x04CA, 0x3004) },
+ 	{ USB_DEVICE(0x04CA, 0x3005) },
+@@ -92,6 +93,7 @@ static const struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x04CA, 0x300d) },
+ 	{ USB_DEVICE(0x04CA, 0x300f) },
+ 	{ USB_DEVICE(0x04CA, 0x3010) },
++	{ USB_DEVICE(0x04CA, 0x3014) },
+ 	{ USB_DEVICE(0x0930, 0x0219) },
+ 	{ USB_DEVICE(0x0930, 0x021c) },
+ 	{ USB_DEVICE(0x0930, 0x0220) },
+@@ -113,10 +115,12 @@ static const struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3362) },
+ 	{ USB_DEVICE(0x13d3, 0x3375) },
+ 	{ USB_DEVICE(0x13d3, 0x3393) },
++	{ USB_DEVICE(0x13d3, 0x3395) },
+ 	{ USB_DEVICE(0x13d3, 0x3402) },
+ 	{ USB_DEVICE(0x13d3, 0x3408) },
+ 	{ USB_DEVICE(0x13d3, 0x3423) },
+ 	{ USB_DEVICE(0x13d3, 0x3432) },
++	{ USB_DEVICE(0x13d3, 0x3472) },
+ 	{ USB_DEVICE(0x13d3, 0x3474) },
+ 
+ 	/* Atheros AR5BBU12 with sflash firmware */
+@@ -144,6 +148,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+@@ -154,6 +159,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
+@@ -175,10 +181,12 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
+ 
+ 	/* Atheros AR5BBU22 with sflash firmware */
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index a191e318fab8..0d4e372e426d 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -196,6 +196,7 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+@@ -206,6 +207,7 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
+@@ -227,10 +229,12 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
+ 
+ 	/* Atheros AR5BBU12 with sflash firmware */
+diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
+index 45cc39aabeee..252142524ff2 100644
+--- a/drivers/char/tpm/tpm-chip.c
++++ b/drivers/char/tpm/tpm-chip.c
+@@ -136,11 +136,13 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
+ 	chip->cdev.owner = chip->pdev->driver->owner;
+ 	chip->cdev.kobj.parent = &chip->dev.kobj;
+ 
++	devm_add_action(dev, (void (*)(void *)) put_device, &chip->dev);
++
+ 	return chip;
+ }
+ EXPORT_SYMBOL_GPL(tpmm_chip_alloc);
+ 
+-static int tpm_dev_add_device(struct tpm_chip *chip)
++static int tpm_add_char_device(struct tpm_chip *chip)
+ {
+ 	int rc;
+ 
+@@ -151,7 +153,6 @@ static int tpm_dev_add_device(struct tpm_chip *chip)
+ 			chip->devname, MAJOR(chip->dev.devt),
+ 			MINOR(chip->dev.devt), rc);
+ 
+-		device_unregister(&chip->dev);
+ 		return rc;
+ 	}
+ 
+@@ -162,16 +163,17 @@ static int tpm_dev_add_device(struct tpm_chip *chip)
+ 			chip->devname, MAJOR(chip->dev.devt),
+ 			MINOR(chip->dev.devt), rc);
+ 
++		cdev_del(&chip->cdev);
+ 		return rc;
+ 	}
+ 
+ 	return rc;
+ }
+ 
+-static void tpm_dev_del_device(struct tpm_chip *chip)
++static void tpm_del_char_device(struct tpm_chip *chip)
+ {
+ 	cdev_del(&chip->cdev);
+-	device_unregister(&chip->dev);
++	device_del(&chip->dev);
+ }
+ 
+ static int tpm1_chip_register(struct tpm_chip *chip)
+@@ -222,7 +224,7 @@ int tpm_chip_register(struct tpm_chip *chip)
+ 
+ 	tpm_add_ppi(chip);
+ 
+-	rc = tpm_dev_add_device(chip);
++	rc = tpm_add_char_device(chip);
+ 	if (rc)
+ 		goto out_err;
+ 
+@@ -274,6 +276,6 @@ void tpm_chip_unregister(struct tpm_chip *chip)
+ 		sysfs_remove_link(&chip->pdev->kobj, "ppi");
+ 
+ 	tpm1_chip_unregister(chip);
+-	tpm_dev_del_device(chip);
++	tpm_del_char_device(chip);
+ }
+ EXPORT_SYMBOL_GPL(tpm_chip_unregister);
+diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
+index 8342cf51ffdc..26bab5a2959f 100644
+--- a/drivers/char/tpm/tpm_crb.c
++++ b/drivers/char/tpm/tpm_crb.c
+@@ -302,11 +302,11 @@ static int crb_acpi_remove(struct acpi_device *device)
+ 	struct device *dev = &device->dev;
+ 	struct tpm_chip *chip = dev_get_drvdata(dev);
+ 
+-	tpm_chip_unregister(chip);
+-
+ 	if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ 		tpm2_shutdown(chip, TPM2_SU_CLEAR);
+ 
++	tpm_chip_unregister(chip);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
+index bd72fb04225e..4e6940acf639 100644
+--- a/drivers/char/tpm/tpm_eventlog.c
++++ b/drivers/char/tpm/tpm_eventlog.c
+@@ -232,7 +232,7 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
+ {
+ 	struct tcpa_event *event = v;
+ 	struct tcpa_event temp_event;
+-	char *tempPtr;
++	char *temp_ptr;
+ 	int i;
+ 
+ 	memcpy(&temp_event, event, sizeof(struct tcpa_event));
+@@ -242,10 +242,16 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
+ 	temp_event.event_type = do_endian_conversion(event->event_type);
+ 	temp_event.event_size = do_endian_conversion(event->event_size);
+ 
+-	tempPtr = (char *)&temp_event;
++	temp_ptr = (char *) &temp_event;
+ 
+-	for (i = 0; i < sizeof(struct tcpa_event) + temp_event.event_size; i++)
+-		seq_putc(m, tempPtr[i]);
++	for (i = 0; i < (sizeof(struct tcpa_event) - 1) ; i++)
++		seq_putc(m, temp_ptr[i]);
++
++	temp_ptr = (char *) v;
++
++	for (i = (sizeof(struct tcpa_event) - 1);
++	     i < (sizeof(struct tcpa_event) + temp_event.event_size); i++)
++		seq_putc(m, temp_ptr[i]);
+ 
+ 	return 0;
+ 
+diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
+index 015e687ffabe..9f4df8f645f8 100644
+--- a/drivers/clk/bcm/clk-bcm2835.c
++++ b/drivers/clk/bcm/clk-bcm2835.c
+@@ -1107,13 +1107,15 @@ static int bcm2835_pll_divider_set_rate(struct clk_hw *hw,
+ 	struct bcm2835_pll_divider *divider = bcm2835_pll_divider_from_hw(hw);
+ 	struct bcm2835_cprman *cprman = divider->cprman;
+ 	const struct bcm2835_pll_divider_data *data = divider->data;
+-	u32 cm;
+-	int ret;
++	u32 cm, div, max_div = 1 << A2W_PLL_DIV_BITS;
+ 
+-	ret = clk_divider_ops.set_rate(hw, rate, parent_rate);
+-	if (ret)
+-		return ret;
++	div = DIV_ROUND_UP_ULL(parent_rate, rate);
++
++	div = min(div, max_div);
++	if (div == max_div)
++		div = 0;
+ 
++	cprman_write(cprman, data->a2w_reg, div);
+ 	cm = cprman_read(cprman, data->cm_reg);
+ 	cprman_write(cprman, data->cm_reg, cm | data->load_mask);
+ 	cprman_write(cprman, data->cm_reg, cm & ~data->load_mask);
+diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
+index 7f7444cbf6fc..05263571c223 100644
+--- a/drivers/clk/rockchip/clk-rk3188.c
++++ b/drivers/clk/rockchip/clk-rk3188.c
+@@ -748,6 +748,7 @@ static const char *const rk3188_critical_clocks[] __initconst = {
+ 	"hclk_peri",
+ 	"pclk_cpu",
+ 	"pclk_peri",
++	"hclk_cpubus"
+ };
+ 
+ static void __init rk3188_common_clk_init(struct device_node *np)
+diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
+index 21f3ea909fab..57acb625c8ff 100644
+--- a/drivers/clk/rockchip/clk-rk3368.c
++++ b/drivers/clk/rockchip/clk-rk3368.c
+@@ -165,7 +165,7 @@ static const struct rockchip_cpuclk_reg_data rk3368_cpuclkb_data = {
+ 	.core_reg = RK3368_CLKSEL_CON(0),
+ 	.div_core_shift = 0,
+ 	.div_core_mask = 0x1f,
+-	.mux_core_shift = 15,
++	.mux_core_shift = 7,
+ };
+ 
+ static const struct rockchip_cpuclk_reg_data rk3368_cpuclkl_data = {
+@@ -218,29 +218,29 @@ static const struct rockchip_cpuclk_reg_data rk3368_cpuclkl_data = {
+ 	}
+ 
+ static struct rockchip_cpuclk_rate_table rk3368_cpuclkb_rates[] __initdata = {
+-	RK3368_CPUCLKB_RATE(1512000000, 2, 6, 6),
+-	RK3368_CPUCLKB_RATE(1488000000, 2, 5, 5),
+-	RK3368_CPUCLKB_RATE(1416000000, 2, 5, 5),
+-	RK3368_CPUCLKB_RATE(1200000000, 2, 4, 4),
+-	RK3368_CPUCLKB_RATE(1008000000, 2, 4, 4),
+-	RK3368_CPUCLKB_RATE( 816000000, 2, 3, 3),
+-	RK3368_CPUCLKB_RATE( 696000000, 2, 3, 3),
+-	RK3368_CPUCLKB_RATE( 600000000, 2, 2, 2),
+-	RK3368_CPUCLKB_RATE( 408000000, 2, 2, 2),
+-	RK3368_CPUCLKB_RATE( 312000000, 2, 2, 2),
++	RK3368_CPUCLKB_RATE(1512000000, 1, 5, 5),
++	RK3368_CPUCLKB_RATE(1488000000, 1, 4, 4),
++	RK3368_CPUCLKB_RATE(1416000000, 1, 4, 4),
++	RK3368_CPUCLKB_RATE(1200000000, 1, 3, 3),
++	RK3368_CPUCLKB_RATE(1008000000, 1, 3, 3),
++	RK3368_CPUCLKB_RATE( 816000000, 1, 2, 2),
++	RK3368_CPUCLKB_RATE( 696000000, 1, 2, 2),
++	RK3368_CPUCLKB_RATE( 600000000, 1, 1, 1),
++	RK3368_CPUCLKB_RATE( 408000000, 1, 1, 1),
++	RK3368_CPUCLKB_RATE( 312000000, 1, 1, 1),
+ };
+ 
+ static struct rockchip_cpuclk_rate_table rk3368_cpuclkl_rates[] __initdata = {
+-	RK3368_CPUCLKL_RATE(1512000000, 2, 7, 7),
+-	RK3368_CPUCLKL_RATE(1488000000, 2, 6, 6),
+-	RK3368_CPUCLKL_RATE(1416000000, 2, 6, 6),
+-	RK3368_CPUCLKL_RATE(1200000000, 2, 5, 5),
+-	RK3368_CPUCLKL_RATE(1008000000, 2, 5, 5),
+-	RK3368_CPUCLKL_RATE( 816000000, 2, 4, 4),
+-	RK3368_CPUCLKL_RATE( 696000000, 2, 3, 3),
+-	RK3368_CPUCLKL_RATE( 600000000, 2, 3, 3),
+-	RK3368_CPUCLKL_RATE( 408000000, 2, 2, 2),
+-	RK3368_CPUCLKL_RATE( 312000000, 2, 2, 2),
++	RK3368_CPUCLKL_RATE(1512000000, 1, 6, 6),
++	RK3368_CPUCLKL_RATE(1488000000, 1, 5, 5),
++	RK3368_CPUCLKL_RATE(1416000000, 1, 5, 5),
++	RK3368_CPUCLKL_RATE(1200000000, 1, 4, 4),
++	RK3368_CPUCLKL_RATE(1008000000, 1, 4, 4),
++	RK3368_CPUCLKL_RATE( 816000000, 1, 3, 3),
++	RK3368_CPUCLKL_RATE( 696000000, 1, 2, 2),
++	RK3368_CPUCLKL_RATE( 600000000, 1, 2, 2),
++	RK3368_CPUCLKL_RATE( 408000000, 1, 1, 1),
++	RK3368_CPUCLKL_RATE( 312000000, 1, 1, 1),
+ };
+ 
+ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
+@@ -384,10 +384,10 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
+ 	 * Clock-Architecture Diagram 3
+ 	 */
+ 
+-	COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_usb_p, 0,
++	COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_npll_usb_p, 0,
+ 			RK3368_CLKSEL_CON(15), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ 			RK3368_CLKGATE_CON(4), 6, GFLAGS),
+-	COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_usb_p, 0,
++	COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_npll_usb_p, 0,
+ 			RK3368_CLKSEL_CON(15), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ 			RK3368_CLKGATE_CON(4), 7, GFLAGS),
+ 
+@@ -442,7 +442,7 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
+ 	GATE(SCLK_HDMI_HDCP, "sclk_hdmi_hdcp", "xin24m", 0,
+ 			RK3368_CLKGATE_CON(4), 13, GFLAGS),
+ 	GATE(SCLK_HDMI_CEC, "sclk_hdmi_cec", "xin32k", 0,
+-			RK3368_CLKGATE_CON(5), 12, GFLAGS),
++			RK3368_CLKGATE_CON(4), 12, GFLAGS),
+ 
+ 	COMPOSITE_NODIV(0, "vip_src", mux_pll_src_cpll_gpll_p, 0,
+ 			RK3368_CLKSEL_CON(21), 15, 1, MFLAGS,
+diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
+index 3eb3f1279fb7..7de007abe46e 100644
+--- a/drivers/crypto/atmel-aes.c
++++ b/drivers/crypto/atmel-aes.c
+@@ -2085,9 +2085,9 @@ static int atmel_aes_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
+-	if (!aes_dd->io_base) {
++	if (IS_ERR(aes_dd->io_base)) {
+ 		dev_err(dev, "can't ioremap\n");
+-		err = -ENOMEM;
++		err = PTR_ERR(aes_dd->io_base);
+ 		goto res_err;
+ 	}
+ 
+diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
+index 8bf9914d4d15..68d47a2da4a1 100644
+--- a/drivers/crypto/atmel-sha.c
++++ b/drivers/crypto/atmel-sha.c
+@@ -1404,9 +1404,9 @@ static int atmel_sha_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
+-	if (!sha_dd->io_base) {
++	if (IS_ERR(sha_dd->io_base)) {
+ 		dev_err(dev, "can't ioremap\n");
+-		err = -ENOMEM;
++		err = PTR_ERR(sha_dd->io_base);
+ 		goto res_err;
+ 	}
+ 
+diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
+index 2c7a628d0375..bf467d7be35c 100644
+--- a/drivers/crypto/atmel-tdes.c
++++ b/drivers/crypto/atmel-tdes.c
+@@ -1417,9 +1417,9 @@ static int atmel_tdes_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
+-	if (!tdes_dd->io_base) {
++	if (IS_ERR(tdes_dd->io_base)) {
+ 		dev_err(dev, "can't ioremap\n");
+-		err = -ENOMEM;
++		err = PTR_ERR(tdes_dd->io_base);
+ 		goto res_err;
+ 	}
+ 
+diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+index d89f20c04266..3d9acc53d247 100644
+--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
++++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+@@ -220,6 +220,39 @@ static int ccp_aes_cmac_digest(struct ahash_request *req)
+ 	return ccp_aes_cmac_finup(req);
+ }
+ 
++static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
++{
++	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
++	struct ccp_aes_cmac_exp_ctx state;
++
++	state.null_msg = rctx->null_msg;
++	memcpy(state.iv, rctx->iv, sizeof(state.iv));
++	state.buf_count = rctx->buf_count;
++	memcpy(state.buf, rctx->buf, sizeof(state.buf));
++
++	/* 'out' may not be aligned so memcpy from local variable */
++	memcpy(out, &state, sizeof(state));
++
++	return 0;
++}
++
++static int ccp_aes_cmac_import(struct ahash_request *req, const void *in)
++{
++	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
++	struct ccp_aes_cmac_exp_ctx state;
++
++	/* 'in' may not be aligned so memcpy to local variable */
++	memcpy(&state, in, sizeof(state));
++
++	memset(rctx, 0, sizeof(*rctx));
++	rctx->null_msg = state.null_msg;
++	memcpy(rctx->iv, state.iv, sizeof(rctx->iv));
++	rctx->buf_count = state.buf_count;
++	memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
++
++	return 0;
++}
++
+ static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+ 			       unsigned int key_len)
+ {
+@@ -352,10 +385,13 @@ int ccp_register_aes_cmac_algs(struct list_head *head)
+ 	alg->final = ccp_aes_cmac_final;
+ 	alg->finup = ccp_aes_cmac_finup;
+ 	alg->digest = ccp_aes_cmac_digest;
++	alg->export = ccp_aes_cmac_export;
++	alg->import = ccp_aes_cmac_import;
+ 	alg->setkey = ccp_aes_cmac_setkey;
+ 
+ 	halg = &alg->halg;
+ 	halg->digestsize = AES_BLOCK_SIZE;
++	halg->statesize = sizeof(struct ccp_aes_cmac_exp_ctx);
+ 
+ 	base = &halg->base;
+ 	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
+diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
+index d14b3f28e010..8ef06fad8b14 100644
+--- a/drivers/crypto/ccp/ccp-crypto-sha.c
++++ b/drivers/crypto/ccp/ccp-crypto-sha.c
+@@ -207,6 +207,43 @@ static int ccp_sha_digest(struct ahash_request *req)
+ 	return ccp_sha_finup(req);
+ }
+ 
++static int ccp_sha_export(struct ahash_request *req, void *out)
++{
++	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
++	struct ccp_sha_exp_ctx state;
++
++	state.type = rctx->type;
++	state.msg_bits = rctx->msg_bits;
++	state.first = rctx->first;
++	memcpy(state.ctx, rctx->ctx, sizeof(state.ctx));
++	state.buf_count = rctx->buf_count;
++	memcpy(state.buf, rctx->buf, sizeof(state.buf));
++
++	/* 'out' may not be aligned so memcpy from local variable */
++	memcpy(out, &state, sizeof(state));
++
++	return 0;
++}
++
++static int ccp_sha_import(struct ahash_request *req, const void *in)
++{
++	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
++	struct ccp_sha_exp_ctx state;
++
++	/* 'in' may not be aligned so memcpy to local variable */
++	memcpy(&state, in, sizeof(state));
++
++	memset(rctx, 0, sizeof(*rctx));
++	rctx->type = state.type;
++	rctx->msg_bits = state.msg_bits;
++	rctx->first = state.first;
++	memcpy(rctx->ctx, state.ctx, sizeof(rctx->ctx));
++	rctx->buf_count = state.buf_count;
++	memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
++
++	return 0;
++}
++
+ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
+ 			  unsigned int key_len)
+ {
+@@ -403,9 +440,12 @@ static int ccp_register_sha_alg(struct list_head *head,
+ 	alg->final = ccp_sha_final;
+ 	alg->finup = ccp_sha_finup;
+ 	alg->digest = ccp_sha_digest;
++	alg->export = ccp_sha_export;
++	alg->import = ccp_sha_import;
+ 
+ 	halg = &alg->halg;
+ 	halg->digestsize = def->digest_size;
++	halg->statesize = sizeof(struct ccp_sha_exp_ctx);
+ 
+ 	base = &halg->base;
+ 	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
+index 76a96f0f44c6..a326ec20bfa8 100644
+--- a/drivers/crypto/ccp/ccp-crypto.h
++++ b/drivers/crypto/ccp/ccp-crypto.h
+@@ -129,6 +129,15 @@ struct ccp_aes_cmac_req_ctx {
+ 	struct ccp_cmd cmd;
+ };
+ 
++struct ccp_aes_cmac_exp_ctx {
++	unsigned int null_msg;
++
++	u8 iv[AES_BLOCK_SIZE];
++
++	unsigned int buf_count;
++	u8 buf[AES_BLOCK_SIZE];
++};
++
+ /***** SHA related defines *****/
+ #define MAX_SHA_CONTEXT_SIZE	SHA256_DIGEST_SIZE
+ #define MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE
+@@ -171,6 +180,19 @@ struct ccp_sha_req_ctx {
+ 	struct ccp_cmd cmd;
+ };
+ 
++struct ccp_sha_exp_ctx {
++	enum ccp_sha_type type;
++
++	u64 msg_bits;
++
++	unsigned int first;
++
++	u8 ctx[MAX_SHA_CONTEXT_SIZE];
++
++	unsigned int buf_count;
++	u8 buf[MAX_SHA_BLOCK_SIZE];
++};
++
+ /***** Common Context Structure *****/
+ struct ccp_ctx {
+ 	int (*complete)(struct crypto_async_request *req, int ret);
+diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
+index c0656e7f37b5..80239ae69527 100644
+--- a/drivers/crypto/marvell/cesa.c
++++ b/drivers/crypto/marvell/cesa.c
+@@ -420,7 +420,7 @@ static int mv_cesa_probe(struct platform_device *pdev)
+ 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+ 	cesa->regs = devm_ioremap_resource(dev, res);
+ 	if (IS_ERR(cesa->regs))
+-		return -ENOMEM;
++		return PTR_ERR(cesa->regs);
+ 
+ 	ret = mv_cesa_dev_dma_init(cesa);
+ 	if (ret)
+diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
+index 4c243c1ffc7f..790f7cadc1ed 100644
+--- a/drivers/crypto/ux500/cryp/cryp_core.c
++++ b/drivers/crypto/ux500/cryp/cryp_core.c
+@@ -1440,9 +1440,9 @@ static int ux500_cryp_probe(struct platform_device *pdev)
+ 
+ 	device_data->phybase = res->start;
+ 	device_data->base = devm_ioremap_resource(dev, res);
+-	if (!device_data->base) {
++	if (IS_ERR(device_data->base)) {
+ 		dev_err(dev, "[%s]: ioremap failed!", __func__);
+-		ret = -ENOMEM;
++		ret = PTR_ERR(device_data->base);
+ 		goto out;
+ 	}
+ 
+diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
+index d6fdc583ce5d..574e87c7f2b8 100644
+--- a/drivers/crypto/ux500/hash/hash_core.c
++++ b/drivers/crypto/ux500/hash/hash_core.c
+@@ -1659,9 +1659,9 @@ static int ux500_hash_probe(struct platform_device *pdev)
+ 
+ 	device_data->phybase = res->start;
+ 	device_data->base = devm_ioremap_resource(dev, res);
+-	if (!device_data->base) {
++	if (IS_ERR(device_data->base)) {
+ 		dev_err(dev, "%s: ioremap() failed!\n", __func__);
+-		ret = -ENOMEM;
++		ret = PTR_ERR(device_data->base);
+ 		goto out;
+ 	}
+ 	spin_lock_init(&device_data->ctx_lock);
+diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
+index 9eee13ef83a5..d87a47547ba5 100644
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -1452,7 +1452,7 @@ static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
+ 	u64 chan_off;
+ 	u64 dram_base		= get_dram_base(pvt, range);
+ 	u64 hole_off		= f10_dhar_offset(pvt);
+-	u64 dct_sel_base_off	= (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
++	u64 dct_sel_base_off	= (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
+ 
+ 	if (hi_rng) {
+ 		/*
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index f5c6b97c8958..93f0d4120289 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -1839,8 +1839,8 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
+ 		edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
+ 			 n_tads, gb, (mb*1000)/1024,
+ 			 ((u64)tmp_mb) << 20L,
+-			 (u32)TAD_SOCK(reg),
+-			 (u32)TAD_CH(reg),
++			 (u32)(1 << TAD_SOCK(reg)),
++			 (u32)TAD_CH(reg) + 1,
+ 			 (u32)TAD_TGT0(reg),
+ 			 (u32)TAD_TGT1(reg),
+ 			 (u32)TAD_TGT2(reg),
+@@ -2118,7 +2118,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
+ 	}
+ 
+ 	ch_way = TAD_CH(reg) + 1;
+-	sck_way = TAD_SOCK(reg) + 1;
++	sck_way = 1 << TAD_SOCK(reg);
+ 
+ 	if (ch_way == 3)
+ 		idx = addr >> 6;
+@@ -2175,7 +2175,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
+ 		 n_tads,
+ 		 addr,
+ 		 limit,
+-		 (u32)TAD_SOCK(reg),
++		 sck_way,
+ 		 ch_way,
+ 		 offset,
+ 		 idx,
+@@ -2190,18 +2190,12 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
+ 			offset, addr);
+ 		return -EINVAL;
+ 	}
+-	addr -= offset;
+-	/* Store the low bits [0:6] of the addr */
+-	ch_addr = addr & 0x7f;
+-	/* Remove socket wayness and remove 6 bits */
+-	addr >>= 6;
+-	addr = div_u64(addr, sck_xch);
+-#if 0
+-	/* Divide by channel way */
+-	addr = addr / ch_way;
+-#endif
+-	/* Recover the last 6 bits */
+-	ch_addr |= addr << 6;
++
++	ch_addr = addr - offset;
++	ch_addr >>= (6 + shiftup);
++	ch_addr /= ch_way * sck_way;
++	ch_addr <<= (6 + shiftup);
++	ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
+ 
+ 	/*
+ 	 * Step 3) Decode rank
+diff --git a/drivers/firmware/broadcom/bcm47xx_nvram.c b/drivers/firmware/broadcom/bcm47xx_nvram.c
+index 0c2f0a61b0ea..0b631e5b5b84 100644
+--- a/drivers/firmware/broadcom/bcm47xx_nvram.c
++++ b/drivers/firmware/broadcom/bcm47xx_nvram.c
+@@ -94,15 +94,14 @@ static int nvram_find_and_copy(void __iomem *iobase, u32 lim)
+ 
+ found:
+ 	__ioread32_copy(nvram_buf, header, sizeof(*header) / 4);
+-	header = (struct nvram_header *)nvram_buf;
+-	nvram_len = header->len;
++	nvram_len = ((struct nvram_header *)(nvram_buf))->len;
+ 	if (nvram_len > size) {
+ 		pr_err("The nvram size according to the header seems to be bigger than the partition on flash\n");
+ 		nvram_len = size;
+ 	}
+ 	if (nvram_len >= NVRAM_SPACE) {
+ 		pr_err("nvram on flash (%i bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n",
+-		       header->len, NVRAM_SPACE - 1);
++		       nvram_len, NVRAM_SPACE - 1);
+ 		nvram_len = NVRAM_SPACE - 1;
+ 	}
+ 	/* proceed reading data after header */
+diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
+index 23196c5fc17c..99b375c95998 100644
+--- a/drivers/gpio/gpio-pca953x.c
++++ b/drivers/gpio/gpio-pca953x.c
+@@ -367,9 +367,11 @@ static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
+ 	memcpy(reg_val, chip->reg_output, NBANK(chip));
+ 	mutex_lock(&chip->i2c_lock);
+ 	for(bank=0; bank<NBANK(chip); bank++) {
+-		unsigned bankmask = mask[bank/4] >> ((bank % 4) * 8);
++		unsigned bankmask = mask[bank / sizeof(*mask)] >>
++				    ((bank % sizeof(*mask)) * 8);
+ 		if(bankmask) {
+-			unsigned bankval  = bits[bank/4] >> ((bank % 4) * 8);
++			unsigned bankval  = bits[bank / sizeof(*bits)] >>
++					    ((bank % sizeof(*bits)) * 8);
+ 			reg_val[bank] = (reg_val[bank] & ~bankmask) | bankval;
+ 		}
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+index 3c895863fcf5..81dc6b65436f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+@@ -63,6 +63,10 @@ bool amdgpu_has_atpx(void) {
+ 	return amdgpu_atpx_priv.atpx_detected;
+ }
+ 
++bool amdgpu_has_atpx_dgpu_power_cntl(void) {
++	return amdgpu_atpx_priv.atpx.functions.power_cntl;
++}
++
+ /**
+  * amdgpu_atpx_call - call an ATPX method
+  *
+@@ -142,10 +146,6 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
+  */
+ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
+ {
+-	/* make sure required functions are enabled */
+-	/* dGPU power control is required */
+-	atpx->functions.power_cntl = true;
+-
+ 	if (atpx->functions.px_params) {
+ 		union acpi_object *info;
+ 		struct atpx_px_params output;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 51bfc114584e..d6c68d00cbb0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -62,6 +62,12 @@ static const char *amdgpu_asic_name[] = {
+ 	"LAST",
+ };
+ 
++#if defined(CONFIG_VGA_SWITCHEROO)
++bool amdgpu_has_atpx_dgpu_power_cntl(void);
++#else
++static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
++#endif
++
+ bool amdgpu_device_is_px(struct drm_device *dev)
+ {
+ 	struct amdgpu_device *adev = dev->dev_private;
+@@ -1511,7 +1517,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
+ 
+ 	if (amdgpu_runtime_pm == 1)
+ 		runtime = true;
+-	if (amdgpu_device_is_px(ddev))
++	if (amdgpu_device_is_px(ddev) && amdgpu_has_atpx_dgpu_power_cntl())
+ 		runtime = true;
+ 	vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
+ 	if (runtime)
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+index 2cf50180cc51..b1c7a9b3631b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+@@ -32,8 +32,8 @@
+ #include "oss/oss_2_4_d.h"
+ #include "oss/oss_2_4_sh_mask.h"
+ 
+-#include "gmc/gmc_8_1_d.h"
+-#include "gmc/gmc_8_1_sh_mask.h"
++#include "gmc/gmc_7_1_d.h"
++#include "gmc/gmc_7_1_sh_mask.h"
+ 
+ #include "gca/gfx_8_0_d.h"
+ #include "gca/gfx_8_0_enum.h"
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+index cf01177ca3b5..2ea012e88991 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+@@ -241,6 +241,11 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
+ 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ 					PHM_PlatformCaps_DynamicUVDState);
+ 
++	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++			PHM_PlatformCaps_UVDDPM);
++	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++			PHM_PlatformCaps_VCEDPM);
++
+ 	cz_hwmgr->cc6_settings.cpu_cc6_disable = false;
+ 	cz_hwmgr->cc6_settings.cpu_pstate_disable = false;
+ 	cz_hwmgr->cc6_settings.nb_pstate_switch_disable = false;
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index 01b20e14a247..6104d7d7449e 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -892,8 +892,6 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
+ 			else
+ 				args.v1.ucLaneNum = 4;
+ 
+-			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
+-				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+ 			switch (radeon_encoder->encoder_id) {
+ 			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ 				args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
+@@ -910,6 +908,10 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
+ 				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
+ 			else
+ 				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
++
++			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
++				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
++
+ 			break;
+ 		case 2:
+ 		case 3:
+diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+index c4b4f298a283..9bc408c9f9f6 100644
+--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
++++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+@@ -62,6 +62,10 @@ bool radeon_has_atpx(void) {
+ 	return radeon_atpx_priv.atpx_detected;
+ }
+ 
++bool radeon_has_atpx_dgpu_power_cntl(void) {
++	return radeon_atpx_priv.atpx.functions.power_cntl;
++}
++
+ /**
+  * radeon_atpx_call - call an ATPX method
+  *
+@@ -141,10 +145,6 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
+  */
+ static int radeon_atpx_validate(struct radeon_atpx *atpx)
+ {
+-	/* make sure required functions are enabled */
+-	/* dGPU power control is required */
+-	atpx->functions.power_cntl = true;
+-
+ 	if (atpx->functions.px_params) {
+ 		union acpi_object *info;
+ 		struct atpx_px_params output;
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index 4197ca1bb1e4..e2396336f9e8 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -103,6 +103,12 @@ static const char radeon_family_name[][16] = {
+ 	"LAST",
+ };
+ 
++#if defined(CONFIG_VGA_SWITCHEROO)
++bool radeon_has_atpx_dgpu_power_cntl(void);
++#else
++static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
++#endif
++
+ #define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)
+ #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
+ 
+@@ -1433,7 +1439,7 @@ int radeon_device_init(struct radeon_device *rdev,
+ 	 * ignore it */
+ 	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
+ 
+-	if (rdev->flags & RADEON_IS_PX)
++	if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl())
+ 		runtime = true;
+ 	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
+ 	if (runtime)
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index 2d9196a447fd..bfcef4db8138 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -1683,10 +1683,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
+ 	/* setup afmt */
+ 	radeon_afmt_init(rdev);
+ 
+-	if (!list_empty(&rdev->ddev->mode_config.connector_list)) {
+-		radeon_fbdev_init(rdev);
+-		drm_kms_helper_poll_init(rdev->ddev);
+-	}
++	radeon_fbdev_init(rdev);
++	drm_kms_helper_poll_init(rdev->ddev);
+ 
+ 	/* do pm late init */
+ 	ret = radeon_pm_late_init(rdev);
+diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
+index df7a1719c841..9d210bbcab50 100644
+--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
++++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
+@@ -525,17 +525,9 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
+ 	drm_mode_set_crtcinfo(adjusted_mode, 0);
+ 	{
+ 	  struct radeon_connector_atom_dig *dig_connector;
+-	  int ret;
+-
+ 	  dig_connector = mst_enc->connector->con_priv;
+-	  ret = radeon_dp_get_dp_link_config(&mst_enc->connector->base,
+-					     dig_connector->dpcd, adjusted_mode->clock,
+-					     &dig_connector->dp_lane_count,
+-					     &dig_connector->dp_clock);
+-	  if (ret) {
+-		  dig_connector->dp_lane_count = 0;
+-		  dig_connector->dp_clock = 0;
+-	  }
++	  dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd);
++	  dig_connector->dp_clock = drm_dp_max_link_rate(dig_connector->dpcd);
+ 	  DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
+ 			dig_connector->dp_lane_count, dig_connector->dp_clock);
+ 	}
+diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
+index d2e628eea53d..d179596334a7 100644
+--- a/drivers/gpu/drm/radeon/radeon_fb.c
++++ b/drivers/gpu/drm/radeon/radeon_fb.c
+@@ -292,7 +292,8 @@ out_unref:
+ 
+ void radeon_fb_output_poll_changed(struct radeon_device *rdev)
+ {
+-	drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
++	if (rdev->mode_info.rfbdev)
++		drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
+ }
+ 
+ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
+@@ -325,6 +326,10 @@ int radeon_fbdev_init(struct radeon_device *rdev)
+ 	int bpp_sel = 32;
+ 	int ret;
+ 
++	/* don't enable fbdev if no connectors */
++	if (list_empty(&rdev->ddev->mode_config.connector_list))
++		return 0;
++
+ 	/* select 8 bpp console on RN50 or 16MB cards */
+ 	if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
+ 		bpp_sel = 8;
+@@ -377,11 +382,15 @@ void radeon_fbdev_fini(struct radeon_device *rdev)
+ 
+ void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
+ {
+-	fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
++	if (rdev->mode_info.rfbdev)
++		fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
+ }
+ 
+ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
+ {
++	if (!rdev->mode_info.rfbdev)
++		return false;
++
+ 	if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj))
+ 		return true;
+ 	return false;
+@@ -389,12 +398,14 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
+ 
+ void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector)
+ {
+-	drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
++	if (rdev->mode_info.rfbdev)
++		drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
+ }
+ 
+ void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector)
+ {
+-	drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
++	if (rdev->mode_info.rfbdev)
++		drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
+ }
+ 
+ void radeon_fbdev_restore_mode(struct radeon_device *rdev)
+diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
+index 22278bcfc60e..ac8eafea6361 100644
+--- a/drivers/gpu/drm/vc4/vc4_bo.c
++++ b/drivers/gpu/drm/vc4/vc4_bo.c
+@@ -499,11 +499,12 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
+ 	if (IS_ERR(bo))
+ 		return PTR_ERR(bo);
+ 
+-	ret = copy_from_user(bo->base.vaddr,
++	if (copy_from_user(bo->base.vaddr,
+ 			     (void __user *)(uintptr_t)args->data,
+-			     args->size);
+-	if (ret != 0)
++			     args->size)) {
++		ret = -EFAULT;
+ 		goto fail;
++	}
+ 	/* Clear the rest of the memory from allocating from the BO
+ 	 * cache.
+ 	 */
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 7e89288b1537..99446ffd71fb 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1891,6 +1891,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DUAL_ACTION) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
+@@ -2615,9 +2616,10 @@ int hid_add_device(struct hid_device *hdev)
+ 	/*
+ 	 * Scan generic devices for group information
+ 	 */
+-	if (hid_ignore_special_drivers ||
+-	    (!hdev->group &&
+-	     !hid_match_id(hdev, hid_have_special_driver))) {
++	if (hid_ignore_special_drivers) {
++		hdev->group = HID_GROUP_GENERIC;
++	} else if (!hdev->group &&
++		   !hid_match_id(hdev, hid_have_special_driver)) {
+ 		ret = hid_scan_report(hdev);
+ 		if (ret)
+ 			hid_warn(hdev, "bad device descriptor (%d)\n", ret);
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 296d4991560e..a20fc604ffd8 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -396,6 +396,11 @@ static void mt_feature_mapping(struct hid_device *hdev,
+ 			td->is_buttonpad = true;
+ 
+ 		break;
++	case 0xff0000c5:
++		/* Retrieve the Win8 blob once to enable some devices */
++		if (usage->usage_index == 0)
++			mt_get_feature(hdev, field->report);
++		break;
+ 	}
+ }
+ 
+diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
+index b9216938a718..bb897497f008 100644
+--- a/drivers/hid/i2c-hid/i2c-hid.c
++++ b/drivers/hid/i2c-hid/i2c-hid.c
+@@ -283,17 +283,21 @@ static int i2c_hid_set_or_send_report(struct i2c_client *client, u8 reportType,
+ 	u16 dataRegister = le16_to_cpu(ihid->hdesc.wDataRegister);
+ 	u16 outputRegister = le16_to_cpu(ihid->hdesc.wOutputRegister);
+ 	u16 maxOutputLength = le16_to_cpu(ihid->hdesc.wMaxOutputLength);
++	u16 size;
++	int args_len;
++	int index = 0;
++
++	i2c_hid_dbg(ihid, "%s\n", __func__);
++
++	if (data_len > ihid->bufsize)
++		return -EINVAL;
+ 
+-	/* hid_hw_* already checked that data_len < HID_MAX_BUFFER_SIZE */
+-	u16 size =	2			/* size */ +
++	size =		2			/* size */ +
+ 			(reportID ? 1 : 0)	/* reportID */ +
+ 			data_len		/* buf */;
+-	int args_len =	(reportID >= 0x0F ? 1 : 0) /* optional third byte */ +
++	args_len =	(reportID >= 0x0F ? 1 : 0) /* optional third byte */ +
+ 			2			/* dataRegister */ +
+ 			size			/* args */;
+-	int index = 0;
+-
+-	i2c_hid_dbg(ihid, "%s\n", __func__);
+ 
+ 	if (!use_data && maxOutputLength == 0)
+ 		return -ENOSYS;
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index cd4510a63375..146eed70bdf4 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -65,7 +65,7 @@
+ #include <asm/mwait.h>
+ #include <asm/msr.h>
+ 
+-#define INTEL_IDLE_VERSION "0.4"
++#define INTEL_IDLE_VERSION "0.4.1"
+ #define PREFIX "intel_idle: "
+ 
+ static struct cpuidle_driver intel_idle_driver = {
+@@ -994,36 +994,92 @@ static void intel_idle_cpuidle_devices_uninit(void)
+ }
+ 
+ /*
+- * intel_idle_state_table_update()
+- *
+- * Update the default state_table for this CPU-id
++ * ivt_idle_state_table_update(void)
+  *
+- * Currently used to access tuned IVT multi-socket targets
++ * Tune IVT multi-socket targets
+  * Assumption: num_sockets == (max_package_num + 1)
+  */
+-void intel_idle_state_table_update(void)
++static void ivt_idle_state_table_update(void)
+ {
+ 	/* IVT uses a different table for 1-2, 3-4, and > 4 sockets */
+-	if (boot_cpu_data.x86_model == 0x3e) { /* IVT */
+-		int cpu, package_num, num_sockets = 1;
+-
+-		for_each_online_cpu(cpu) {
+-			package_num = topology_physical_package_id(cpu);
+-			if (package_num + 1 > num_sockets) {
+-				num_sockets = package_num + 1;
+-
+-				if (num_sockets > 4) {
+-					cpuidle_state_table = ivt_cstates_8s;
+-					return;
+-				}
++	int cpu, package_num, num_sockets = 1;
++
++	for_each_online_cpu(cpu) {
++		package_num = topology_physical_package_id(cpu);
++		if (package_num + 1 > num_sockets) {
++			num_sockets = package_num + 1;
++
++			if (num_sockets > 4) {
++				cpuidle_state_table = ivt_cstates_8s;
++				return;
+ 			}
+ 		}
++	}
++
++	if (num_sockets > 2)
++		cpuidle_state_table = ivt_cstates_4s;
++
++	/* else, 1 and 2 socket systems use default ivt_cstates */
++}
++/*
++ * sklh_idle_state_table_update(void)
++ *
++ * On SKL-H (model 0x5e) disable C8 and C9 if:
++ * C10 is enabled and SGX disabled
++ */
++static void sklh_idle_state_table_update(void)
++{
++	unsigned long long msr;
++	unsigned int eax, ebx, ecx, edx;
++
++
++	/* if PC10 disabled via cmdline intel_idle.max_cstate=7 or shallower */
++	if (max_cstate <= 7)
++		return;
++
++	/* if PC10 not present in CPUID.MWAIT.EDX */
++	if ((mwait_substates & (0xF << 28)) == 0)
++		return;
++
++	rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr);
++
++	/* PC10 is not enabled in PKG C-state limit */
++	if ((msr & 0xF) != 8)
++		return;
++
++	ecx = 0;
++	cpuid(7, &eax, &ebx, &ecx, &edx);
++
++	/* if SGX is present */
++	if (ebx & (1 << 2)) {
+ 
+-		if (num_sockets > 2)
+-			cpuidle_state_table = ivt_cstates_4s;
+-		/* else, 1 and 2 socket systems use default ivt_cstates */
++		rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
++
++		/* if SGX is enabled */
++		if (msr & (1 << 18))
++			return;
++	}
++
++	skl_cstates[5].disabled = 1;	/* C8-SKL */
++	skl_cstates[6].disabled = 1;	/* C9-SKL */
++}
++/*
++ * intel_idle_state_table_update()
++ *
++ * Update the default state_table for this CPU-id
++ */
++
++static void intel_idle_state_table_update(void)
++{
++	switch (boot_cpu_data.x86_model) {
++
++	case 0x3e: /* IVT */
++		ivt_idle_state_table_update();
++		break;
++	case 0x5e: /* SKL-H */
++		sklh_idle_state_table_update();
++		break;
+ 	}
+-	return;
+ }
+ 
+ /*
+@@ -1063,6 +1119,14 @@ static int __init intel_idle_cpuidle_driver_init(void)
+ 		if (num_substates == 0)
+ 			continue;
+ 
++		/* if state marked as disabled, skip it */
++		if (cpuidle_state_table[cstate].disabled != 0) {
++			pr_debug(PREFIX "state %s is disabled",
++				cpuidle_state_table[cstate].name);
++			continue;
++		}
++
++
+ 		if (((mwait_cstate + 1) > 2) &&
+ 			!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+ 			mark_tsc_unstable("TSC halts in idle"
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index f121e6129339..0e1a802c3618 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -65,6 +65,7 @@ isert_rdma_accept(struct isert_conn *isert_conn);
+ struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
+ 
+ static void isert_release_work(struct work_struct *work);
++static void isert_wait4flush(struct isert_conn *isert_conn);
+ 
+ static inline bool
+ isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
+@@ -820,12 +821,31 @@ isert_put_conn(struct isert_conn *isert_conn)
+ 	kref_put(&isert_conn->kref, isert_release_kref);
+ }
+ 
++static void
++isert_handle_unbound_conn(struct isert_conn *isert_conn)
++{
++	struct isert_np *isert_np = isert_conn->cm_id->context;
++
++	mutex_lock(&isert_np->mutex);
++	if (!list_empty(&isert_conn->node)) {
++		/*
++		 * This means iscsi doesn't know this connection
++		 * so schedule a cleanup ourselves
++		 */
++		list_del_init(&isert_conn->node);
++		isert_put_conn(isert_conn);
++		complete(&isert_conn->wait);
++		queue_work(isert_release_wq, &isert_conn->release_work);
++	}
++	mutex_unlock(&isert_np->mutex);
++}
++
+ /**
+  * isert_conn_terminate() - Initiate connection termination
+  * @isert_conn: isert connection struct
+  *
+  * Notes:
+- * In case the connection state is FULL_FEATURE, move state
++ * In case the connection state is BOUND, move state
+  * to TEMINATING and start teardown sequence (rdma_disconnect).
+  * In case the connection state is UP, complete flush as well.
+  *
+@@ -837,23 +857,19 @@ isert_conn_terminate(struct isert_conn *isert_conn)
+ {
+ 	int err;
+ 
+-	switch (isert_conn->state) {
+-	case ISER_CONN_TERMINATING:
+-		break;
+-	case ISER_CONN_UP:
+-	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
+-		isert_info("Terminating conn %p state %d\n",
+-			   isert_conn, isert_conn->state);
+-		isert_conn->state = ISER_CONN_TERMINATING;
+-		err = rdma_disconnect(isert_conn->cm_id);
+-		if (err)
+-			isert_warn("Failed rdma_disconnect isert_conn %p\n",
+-				   isert_conn);
+-		break;
+-	default:
+-		isert_warn("conn %p teminating in state %d\n",
+-			   isert_conn, isert_conn->state);
+-	}
++	if (isert_conn->state >= ISER_CONN_TERMINATING)
++		return;
++
++	isert_info("Terminating conn %p state %d\n",
++		   isert_conn, isert_conn->state);
++	isert_conn->state = ISER_CONN_TERMINATING;
++	err = rdma_disconnect(isert_conn->cm_id);
++	if (err)
++		isert_warn("Failed rdma_disconnect isert_conn %p\n",
++			   isert_conn);
++
++	isert_info("conn %p completing wait\n", isert_conn);
++	complete(&isert_conn->wait);
+ }
+ 
+ static int
+@@ -887,35 +903,27 @@ static int
+ isert_disconnected_handler(struct rdma_cm_id *cma_id,
+ 			   enum rdma_cm_event_type event)
+ {
+-	struct isert_np *isert_np = cma_id->context;
+-	struct isert_conn *isert_conn;
+-	bool terminating = false;
+-
+-	if (isert_np->cm_id == cma_id)
+-		return isert_np_cma_handler(cma_id->context, event);
+-
+-	isert_conn = cma_id->qp->qp_context;
++	struct isert_conn *isert_conn = cma_id->qp->qp_context;
+ 
+ 	mutex_lock(&isert_conn->mutex);
+-	terminating = (isert_conn->state == ISER_CONN_TERMINATING);
+-	isert_conn_terminate(isert_conn);
+-	mutex_unlock(&isert_conn->mutex);
+-
+-	isert_info("conn %p completing wait\n", isert_conn);
+-	complete(&isert_conn->wait);
+-
+-	if (terminating)
+-		goto out;
+-
+-	mutex_lock(&isert_np->mutex);
+-	if (!list_empty(&isert_conn->node)) {
+-		list_del_init(&isert_conn->node);
+-		isert_put_conn(isert_conn);
+-		queue_work(isert_release_wq, &isert_conn->release_work);
++	switch (isert_conn->state) {
++	case ISER_CONN_TERMINATING:
++		break;
++	case ISER_CONN_UP:
++		isert_conn_terminate(isert_conn);
++		isert_wait4flush(isert_conn);
++		isert_handle_unbound_conn(isert_conn);
++		break;
++	case ISER_CONN_BOUND:
++	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
++		iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
++		break;
++	default:
++		isert_warn("conn %p teminating in state %d\n",
++			   isert_conn, isert_conn->state);
+ 	}
+-	mutex_unlock(&isert_np->mutex);
++	mutex_unlock(&isert_conn->mutex);
+ 
+-out:
+ 	return 0;
+ }
+ 
+@@ -934,12 +942,16 @@ isert_connect_error(struct rdma_cm_id *cma_id)
+ static int
+ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ {
++	struct isert_np *isert_np = cma_id->context;
+ 	int ret = 0;
+ 
+ 	isert_info("%s (%d): status %d id %p np %p\n",
+ 		   rdma_event_msg(event->event), event->event,
+ 		   event->status, cma_id, cma_id->context);
+ 
++	if (isert_np->cm_id == cma_id)
++		return isert_np_cma_handler(cma_id->context, event->event);
++
+ 	switch (event->event) {
+ 	case RDMA_CM_EVENT_CONNECT_REQUEST:
+ 		ret = isert_connect_request(cma_id, event);
+@@ -985,13 +997,10 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
+ 	rx_wr--;
+ 	rx_wr->next = NULL; /* mark end of work requests list */
+ 
+-	isert_conn->post_recv_buf_count += count;
+ 	ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
+ 			   &rx_wr_failed);
+-	if (ret) {
++	if (ret)
+ 		isert_err("ib_post_recv() failed with ret: %d\n", ret);
+-		isert_conn->post_recv_buf_count -= count;
+-	}
+ 
+ 	return ret;
+ }
+@@ -1007,12 +1016,9 @@ isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
+ 	rx_wr.num_sge = 1;
+ 	rx_wr.next = NULL;
+ 
+-	isert_conn->post_recv_buf_count++;
+ 	ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed);
+-	if (ret) {
++	if (ret)
+ 		isert_err("ib_post_recv() failed with ret: %d\n", ret);
+-		isert_conn->post_recv_buf_count--;
+-	}
+ 
+ 	return ret;
+ }
+@@ -1132,12 +1138,9 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn)
+ 	rx_wr.sg_list = &sge;
+ 	rx_wr.num_sge = 1;
+ 
+-	isert_conn->post_recv_buf_count++;
+ 	ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
+-	if (ret) {
++	if (ret)
+ 		isert_err("ib_post_recv() failed: %d\n", ret);
+-		isert_conn->post_recv_buf_count--;
+-	}
+ 
+ 	return ret;
+ }
+@@ -1633,7 +1636,6 @@ isert_rcv_completion(struct iser_rx_desc *desc,
+ 	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
+ 				      DMA_FROM_DEVICE);
+ 
+-	isert_conn->post_recv_buf_count--;
+ }
+ 
+ static int
+@@ -2048,7 +2050,8 @@ is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
+ 	void *start = isert_conn->rx_descs;
+ 	int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->rx_descs);
+ 
+-	if (wr_id >= start && wr_id < start + len)
++	if ((wr_id >= start && wr_id < start + len) ||
++	    (wr_id == isert_conn->login_req_buf))
+ 		return false;
+ 
+ 	return true;
+@@ -2072,10 +2075,6 @@ isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
+ 			isert_unmap_tx_desc(desc, ib_dev);
+ 		else
+ 			isert_completion_put(desc, isert_cmd, ib_dev, true);
+-	} else {
+-		isert_conn->post_recv_buf_count--;
+-		if (!isert_conn->post_recv_buf_count)
+-			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+ 	}
+ }
+ 
+@@ -3214,6 +3213,7 @@ accept_wait:
+ 
+ 	conn->context = isert_conn;
+ 	isert_conn->conn = conn;
++	isert_conn->state = ISER_CONN_BOUND;
+ 
+ 	isert_set_conn_info(np, conn, isert_conn);
+ 
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
+index 8d50453eef66..1aa019ab9d78 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.h
++++ b/drivers/infiniband/ulp/isert/ib_isert.h
+@@ -84,6 +84,7 @@ enum iser_ib_op_code {
+ enum iser_conn_state {
+ 	ISER_CONN_INIT,
+ 	ISER_CONN_UP,
++	ISER_CONN_BOUND,
+ 	ISER_CONN_FULL_FEATURE,
+ 	ISER_CONN_TERMINATING,
+ 	ISER_CONN_DOWN,
+@@ -179,7 +180,6 @@ struct isert_device;
+ 
+ struct isert_conn {
+ 	enum iser_conn_state	state;
+-	int			post_recv_buf_count;
+ 	u32			responder_resources;
+ 	u32			initiator_depth;
+ 	bool			pi_support;
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index 0c37fee363b1..4328679a67a7 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -1670,47 +1670,6 @@ send_sense:
+ 	return -1;
+ }
+ 
+-/**
+- * srpt_rx_mgmt_fn_tag() - Process a task management function by tag.
+- * @ch: RDMA channel of the task management request.
+- * @fn: Task management function to perform.
+- * @req_tag: Tag of the SRP task management request.
+- * @mgmt_ioctx: I/O context of the task management request.
+- *
+- * Returns zero if the target core will process the task management
+- * request asynchronously.
+- *
+- * Note: It is assumed that the initiator serializes tag-based task management
+- * requests.
+- */
+-static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
+-{
+-	struct srpt_device *sdev;
+-	struct srpt_rdma_ch *ch;
+-	struct srpt_send_ioctx *target;
+-	int ret, i;
+-
+-	ret = -EINVAL;
+-	ch = ioctx->ch;
+-	BUG_ON(!ch);
+-	BUG_ON(!ch->sport);
+-	sdev = ch->sport->sdev;
+-	BUG_ON(!sdev);
+-	spin_lock_irq(&sdev->spinlock);
+-	for (i = 0; i < ch->rq_size; ++i) {
+-		target = ch->ioctx_ring[i];
+-		if (target->cmd.se_lun == ioctx->cmd.se_lun &&
+-		    target->cmd.tag == tag &&
+-		    srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
+-			ret = 0;
+-			/* now let the target core abort &target->cmd; */
+-			break;
+-		}
+-	}
+-	spin_unlock_irq(&sdev->spinlock);
+-	return ret;
+-}
+-
+ static int srp_tmr_to_tcm(int fn)
+ {
+ 	switch (fn) {
+@@ -1745,7 +1704,6 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
+ 	struct se_cmd *cmd;
+ 	struct se_session *sess = ch->sess;
+ 	uint64_t unpacked_lun;
+-	uint32_t tag = 0;
+ 	int tcm_tmr;
+ 	int rc;
+ 
+@@ -1761,25 +1719,10 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
+ 	srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
+ 	send_ioctx->cmd.tag = srp_tsk->tag;
+ 	tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
+-	if (tcm_tmr < 0) {
+-		send_ioctx->cmd.se_tmr_req->response =
+-			TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
+-		goto fail;
+-	}
+ 	unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
+ 				       sizeof(srp_tsk->lun));
+-
+-	if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK) {
+-		rc = srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
+-		if (rc < 0) {
+-			send_ioctx->cmd.se_tmr_req->response =
+-					TMR_TASK_DOES_NOT_EXIST;
+-			goto fail;
+-		}
+-		tag = srp_tsk->task_tag;
+-	}
+ 	rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL, unpacked_lun,
+-				srp_tsk, tcm_tmr, GFP_KERNEL, tag,
++				srp_tsk, tcm_tmr, GFP_KERNEL, srp_tsk->task_tag,
+ 				TARGET_SCF_ACK_KREF);
+ 	if (rc != 0) {
+ 		send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
+diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
+index cfd58e87da26..1c5914cae853 100644
+--- a/drivers/input/misc/ati_remote2.c
++++ b/drivers/input/misc/ati_remote2.c
+@@ -817,26 +817,49 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
+ 
+ 	ar2->udev = udev;
+ 
++	/* Sanity check, first interface must have an endpoint */
++	if (alt->desc.bNumEndpoints < 1 || !alt->endpoint) {
++		dev_err(&interface->dev,
++			"%s(): interface 0 must have an endpoint\n", __func__);
++		r = -ENODEV;
++		goto fail1;
++	}
+ 	ar2->intf[0] = interface;
+ 	ar2->ep[0] = &alt->endpoint[0].desc;
+ 
++	/* Sanity check, the device must have two interfaces */
+ 	ar2->intf[1] = usb_ifnum_to_if(udev, 1);
++	if ((udev->actconfig->desc.bNumInterfaces < 2) || !ar2->intf[1]) {
++		dev_err(&interface->dev, "%s(): need 2 interfaces, found %d\n",
++			__func__, udev->actconfig->desc.bNumInterfaces);
++		r = -ENODEV;
++		goto fail1;
++	}
++
+ 	r = usb_driver_claim_interface(&ati_remote2_driver, ar2->intf[1], ar2);
+ 	if (r)
+ 		goto fail1;
++
++	/* Sanity check, second interface must have an endpoint */
+ 	alt = ar2->intf[1]->cur_altsetting;
++	if (alt->desc.bNumEndpoints < 1 || !alt->endpoint) {
++		dev_err(&interface->dev,
++			"%s(): interface 1 must have an endpoint\n", __func__);
++		r = -ENODEV;
++		goto fail2;
++	}
+ 	ar2->ep[1] = &alt->endpoint[0].desc;
+ 
+ 	r = ati_remote2_urb_init(ar2);
+ 	if (r)
+-		goto fail2;
++		goto fail3;
+ 
+ 	ar2->channel_mask = channel_mask;
+ 	ar2->mode_mask = mode_mask;
+ 
+ 	r = ati_remote2_setup(ar2, ar2->channel_mask);
+ 	if (r)
+-		goto fail2;
++		goto fail3;
+ 
+ 	usb_make_path(udev, ar2->phys, sizeof(ar2->phys));
+ 	strlcat(ar2->phys, "/input0", sizeof(ar2->phys));
+@@ -845,11 +868,11 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
+ 
+ 	r = sysfs_create_group(&udev->dev.kobj, &ati_remote2_attr_group);
+ 	if (r)
+-		goto fail2;
++		goto fail3;
+ 
+ 	r = ati_remote2_input_init(ar2);
+ 	if (r)
+-		goto fail3;
++		goto fail4;
+ 
+ 	usb_set_intfdata(interface, ar2);
+ 
+@@ -857,10 +880,11 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
+ 
+ 	return 0;
+ 
+- fail3:
++ fail4:
+ 	sysfs_remove_group(&udev->dev.kobj, &ati_remote2_attr_group);
+- fail2:
++ fail3:
+ 	ati_remote2_urb_cleanup(ar2);
++ fail2:
+ 	usb_driver_release_interface(&ati_remote2_driver, ar2->intf[1]);
+  fail1:
+ 	kfree(ar2);
+diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
+index ac1fa5f44580..9c0ea36913b4 100644
+--- a/drivers/input/misc/ims-pcu.c
++++ b/drivers/input/misc/ims-pcu.c
+@@ -1663,6 +1663,8 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
+ 
+ 	pcu->ctrl_intf = usb_ifnum_to_if(pcu->udev,
+ 					 union_desc->bMasterInterface0);
++	if (!pcu->ctrl_intf)
++		return -EINVAL;
+ 
+ 	alt = pcu->ctrl_intf->cur_altsetting;
+ 	pcu->ep_ctrl = &alt->endpoint[0].desc;
+@@ -1670,6 +1672,8 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
+ 
+ 	pcu->data_intf = usb_ifnum_to_if(pcu->udev,
+ 					 union_desc->bSlaveInterface0);
++	if (!pcu->data_intf)
++		return -EINVAL;
+ 
+ 	alt = pcu->data_intf->cur_altsetting;
+ 	if (alt->desc.bNumEndpoints != 2) {
+diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c
+index 63b539d3daba..84909a12ff36 100644
+--- a/drivers/input/misc/powermate.c
++++ b/drivers/input/misc/powermate.c
+@@ -307,6 +307,9 @@ static int powermate_probe(struct usb_interface *intf, const struct usb_device_i
+ 	int error = -ENOMEM;
+ 
+ 	interface = intf->cur_altsetting;
++	if (interface->desc.bNumEndpoints < 1)
++		return -EINVAL;
++
+ 	endpoint = &interface->endpoint[0].desc;
+ 	if (!usb_endpoint_is_int_in(endpoint))
+ 		return -EIO;
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 6025eb430c0a..a41d8328c064 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -862,8 +862,9 @@ static void synaptics_report_ext_buttons(struct psmouse *psmouse,
+ 	if (!SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap))
+ 		return;
+ 
+-	/* Bug in FW 8.1, buttons are reported only when ExtBit is 1 */
+-	if (SYN_ID_FULL(priv->identity) == 0x801 &&
++	/* Bug in FW 8.1 & 8.2, buttons are reported only when ExtBit is 1 */
++	if ((SYN_ID_FULL(priv->identity) == 0x801 ||
++	     SYN_ID_FULL(priv->identity) == 0x802) &&
+ 	    !((psmouse->packet[0] ^ psmouse->packet[3]) & 0x02))
+ 		return;
+ 
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 8d0ead98eb6e..a296425a7270 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1015,8 +1015,12 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
+ 	 */
+ 	atomic_set(&dc->count, 1);
+ 
+-	if (bch_cached_dev_writeback_start(dc))
++	/* Block writeback thread, but spawn it */
++	down_write(&dc->writeback_lock);
++	if (bch_cached_dev_writeback_start(dc)) {
++		up_write(&dc->writeback_lock);
+ 		return -ENOMEM;
++	}
+ 
+ 	if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
+ 		bch_sectors_dirty_init(dc);
+@@ -1028,6 +1032,9 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
+ 	bch_cached_dev_run(dc);
+ 	bcache_device_link(&dc->disk, c, "bdev");
+ 
++	/* Allow the writeback thread to proceed */
++	up_write(&dc->writeback_lock);
++
+ 	pr_info("Caching %s as %s on set %pU",
+ 		bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
+ 		dc->disk.c->sb.set_uuid);
+@@ -1366,6 +1373,9 @@ static void cache_set_flush(struct closure *cl)
+ 	struct btree *b;
+ 	unsigned i;
+ 
++	if (!c)
++		closure_return(cl);
++
+ 	bch_cache_accounting_destroy(&c->accounting);
+ 
+ 	kobject_put(&c->internal);
+@@ -1828,11 +1838,12 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
+ 	return 0;
+ }
+ 
+-static void register_cache(struct cache_sb *sb, struct page *sb_page,
++static int register_cache(struct cache_sb *sb, struct page *sb_page,
+ 				struct block_device *bdev, struct cache *ca)
+ {
+ 	char name[BDEVNAME_SIZE];
+-	const char *err = "cannot allocate memory";
++	const char *err = NULL;
++	int ret = 0;
+ 
+ 	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
+ 	ca->bdev = bdev;
+@@ -1847,27 +1858,35 @@ static void register_cache(struct cache_sb *sb, struct page *sb_page,
+ 	if (blk_queue_discard(bdev_get_queue(ca->bdev)))
+ 		ca->discard = CACHE_DISCARD(&ca->sb);
+ 
+-	if (cache_alloc(sb, ca) != 0)
++	ret = cache_alloc(sb, ca);
++	if (ret != 0)
+ 		goto err;
+ 
+-	err = "error creating kobject";
+-	if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache"))
+-		goto err;
++	if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) {
++		err = "error calling kobject_add";
++		ret = -ENOMEM;
++		goto out;
++	}
+ 
+ 	mutex_lock(&bch_register_lock);
+ 	err = register_cache_set(ca);
+ 	mutex_unlock(&bch_register_lock);
+ 
+-	if (err)
+-		goto err;
++	if (err) {
++		ret = -ENODEV;
++		goto out;
++	}
+ 
+ 	pr_info("registered cache device %s", bdevname(bdev, name));
++
+ out:
+ 	kobject_put(&ca->kobj);
+-	return;
++
+ err:
+-	pr_notice("error opening %s: %s", bdevname(bdev, name), err);
+-	goto out;
++	if (err)
++		pr_notice("error opening %s: %s", bdevname(bdev, name), err);
++
++	return ret;
+ }
+ 
+ /* Global interfaces/init */
+@@ -1965,7 +1984,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
+ 		if (!ca)
+ 			goto err_close;
+ 
+-		register_cache(sb, sb_page, bdev, ca);
++		if (register_cache(sb, sb_page, bdev, ca) != 0)
++			goto err_close;
+ 	}
+ out:
+ 	if (sb_page)
+diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
+index f6543f3a970f..27f2ef300f8b 100644
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -867,19 +867,40 @@ static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
+ 	return 0;
+ }
+ 
+-#define WRITE_LOCK(cmd) \
+-	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) \
++#define WRITE_LOCK(cmd)	\
++	down_write(&cmd->root_lock); \
++	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
++		up_write(&cmd->root_lock); \
+ 		return -EINVAL; \
+-	down_write(&cmd->root_lock)
++	}
+ 
+ #define WRITE_LOCK_VOID(cmd) \
+-	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) \
++	down_write(&cmd->root_lock); \
++	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
++		up_write(&cmd->root_lock); \
+ 		return; \
+-	down_write(&cmd->root_lock)
++	}
+ 
+ #define WRITE_UNLOCK(cmd) \
+ 	up_write(&cmd->root_lock)
+ 
++#define READ_LOCK(cmd) \
++	down_read(&cmd->root_lock); \
++	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
++		up_read(&cmd->root_lock); \
++		return -EINVAL; \
++	}
++
++#define READ_LOCK_VOID(cmd)	\
++	down_read(&cmd->root_lock); \
++	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
++		up_read(&cmd->root_lock); \
++		return; \
++	}
++
++#define READ_UNLOCK(cmd) \
++	up_read(&cmd->root_lock)
++
+ int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
+ {
+ 	int r;
+@@ -1015,22 +1036,20 @@ int dm_cache_load_discards(struct dm_cache_metadata *cmd,
+ {
+ 	int r;
+ 
+-	down_read(&cmd->root_lock);
++	READ_LOCK(cmd);
+ 	r = __load_discards(cmd, fn, context);
+-	up_read(&cmd->root_lock);
++	READ_UNLOCK(cmd);
+ 
+ 	return r;
+ }
+ 
+-dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd)
++int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result)
+ {
+-	dm_cblock_t r;
++	READ_LOCK(cmd);
++	*result = cmd->cache_blocks;
++	READ_UNLOCK(cmd);
+ 
+-	down_read(&cmd->root_lock);
+-	r = cmd->cache_blocks;
+-	up_read(&cmd->root_lock);
+-
+-	return r;
++	return 0;
+ }
+ 
+ static int __remove(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
+@@ -1188,9 +1207,9 @@ int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
+ {
+ 	int r;
+ 
+-	down_read(&cmd->root_lock);
++	READ_LOCK(cmd);
+ 	r = __load_mappings(cmd, policy, fn, context);
+-	up_read(&cmd->root_lock);
++	READ_UNLOCK(cmd);
+ 
+ 	return r;
+ }
+@@ -1215,18 +1234,18 @@ static int __dump_mappings(struct dm_cache_metadata *cmd)
+ 
+ void dm_cache_dump(struct dm_cache_metadata *cmd)
+ {
+-	down_read(&cmd->root_lock);
++	READ_LOCK_VOID(cmd);
+ 	__dump_mappings(cmd);
+-	up_read(&cmd->root_lock);
++	READ_UNLOCK(cmd);
+ }
+ 
+ int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd)
+ {
+ 	int r;
+ 
+-	down_read(&cmd->root_lock);
++	READ_LOCK(cmd);
+ 	r = cmd->changed;
+-	up_read(&cmd->root_lock);
++	READ_UNLOCK(cmd);
+ 
+ 	return r;
+ }
+@@ -1276,9 +1295,9 @@ int dm_cache_set_dirty(struct dm_cache_metadata *cmd,
+ void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
+ 				 struct dm_cache_statistics *stats)
+ {
+-	down_read(&cmd->root_lock);
++	READ_LOCK_VOID(cmd);
+ 	*stats = cmd->stats;
+-	up_read(&cmd->root_lock);
++	READ_UNLOCK(cmd);
+ }
+ 
+ void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
+@@ -1312,9 +1331,9 @@ int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
+ {
+ 	int r = -EINVAL;
+ 
+-	down_read(&cmd->root_lock);
++	READ_LOCK(cmd);
+ 	r = dm_sm_get_nr_free(cmd->metadata_sm, result);
+-	up_read(&cmd->root_lock);
++	READ_UNLOCK(cmd);
+ 
+ 	return r;
+ }
+@@ -1324,9 +1343,9 @@ int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
+ {
+ 	int r = -EINVAL;
+ 
+-	down_read(&cmd->root_lock);
++	READ_LOCK(cmd);
+ 	r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
+-	up_read(&cmd->root_lock);
++	READ_UNLOCK(cmd);
+ 
+ 	return r;
+ }
+@@ -1417,7 +1436,13 @@ int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *
+ 
+ int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result)
+ {
+-	return blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
++	int r;
++
++	READ_LOCK(cmd);
++	r = blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
++	READ_UNLOCK(cmd);
++
++	return r;
+ }
+ 
+ void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd)
+@@ -1440,10 +1465,7 @@ int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
+ 	struct dm_block *sblock;
+ 	struct cache_disk_superblock *disk_super;
+ 
+-	/*
+-	 * We ignore fail_io for this function.
+-	 */
+-	down_write(&cmd->root_lock);
++	WRITE_LOCK(cmd);
+ 	set_bit(NEEDS_CHECK, &cmd->flags);
+ 
+ 	r = superblock_lock(cmd, &sblock);
+@@ -1458,19 +1480,17 @@ int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
+ 	dm_bm_unlock(sblock);
+ 
+ out:
+-	up_write(&cmd->root_lock);
++	WRITE_UNLOCK(cmd);
+ 	return r;
+ }
+ 
+-bool dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd)
++int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result)
+ {
+-	bool needs_check;
++	READ_LOCK(cmd);
++	*result = !!test_bit(NEEDS_CHECK, &cmd->flags);
++	READ_UNLOCK(cmd);
+ 
+-	down_read(&cmd->root_lock);
+-	needs_check = !!test_bit(NEEDS_CHECK, &cmd->flags);
+-	up_read(&cmd->root_lock);
+-
+-	return needs_check;
++	return 0;
+ }
+ 
+ int dm_cache_metadata_abort(struct dm_cache_metadata *cmd)
+diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h
+index 2ffee21f318d..8528744195e5 100644
+--- a/drivers/md/dm-cache-metadata.h
++++ b/drivers/md/dm-cache-metadata.h
+@@ -66,7 +66,7 @@ void dm_cache_metadata_close(struct dm_cache_metadata *cmd);
+  * origin blocks to map to.
+  */
+ int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size);
+-dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd);
++int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result);
+ 
+ int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
+ 				   sector_t discard_block_size,
+@@ -137,7 +137,7 @@ int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *
+  */
+ int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result);
+ 
+-bool dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd);
++int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result);
+ int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd);
+ void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd);
+ void dm_cache_metadata_set_read_write(struct dm_cache_metadata *cmd);
+diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
+index 5780accffa30..bb9b92ebbf8e 100644
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -984,9 +984,14 @@ static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mod
+ 
+ static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mode)
+ {
+-	bool needs_check = dm_cache_metadata_needs_check(cache->cmd);
++	bool needs_check;
+ 	enum cache_metadata_mode old_mode = get_cache_mode(cache);
+ 
++	if (dm_cache_metadata_needs_check(cache->cmd, &needs_check)) {
++		DMERR("unable to read needs_check flag, setting failure mode");
++		new_mode = CM_FAIL;
++	}
++
+ 	if (new_mode == CM_WRITE && needs_check) {
+ 		DMERR("%s: unable to switch cache to write mode until repaired.",
+ 		      cache_device_name(cache));
+@@ -3510,6 +3515,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
+ 	char buf[BDEVNAME_SIZE];
+ 	struct cache *cache = ti->private;
+ 	dm_cblock_t residency;
++	bool needs_check;
+ 
+ 	switch (type) {
+ 	case STATUSTYPE_INFO:
+@@ -3583,7 +3589,9 @@ static void cache_status(struct dm_target *ti, status_type_t type,
+ 		else
+ 			DMEMIT("rw ");
+ 
+-		if (dm_cache_metadata_needs_check(cache->cmd))
++		r = dm_cache_metadata_needs_check(cache->cmd, &needs_check);
++
++		if (r || needs_check)
+ 			DMEMIT("needs_check ");
+ 		else
+ 			DMEMIT("- ");
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index 3766386080a4..e4d1bafe78c1 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -1105,6 +1105,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ 	int i;
+ 	int r = -EINVAL;
+ 	char *origin_path, *cow_path;
++	dev_t origin_dev, cow_dev;
+ 	unsigned args_used, num_flush_bios = 1;
+ 	fmode_t origin_mode = FMODE_READ;
+ 
+@@ -1135,11 +1136,19 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ 		ti->error = "Cannot get origin device";
+ 		goto bad_origin;
+ 	}
++	origin_dev = s->origin->bdev->bd_dev;
+ 
+ 	cow_path = argv[0];
+ 	argv++;
+ 	argc--;
+ 
++	cow_dev = dm_get_dev_t(cow_path);
++	if (cow_dev && cow_dev == origin_dev) {
++		ti->error = "COW device cannot be the same as origin device";
++		r = -EINVAL;
++		goto bad_cow;
++	}
++
+ 	r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
+ 	if (r) {
+ 		ti->error = "Cannot get COW device";
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 061152a43730..cb5d0daf53bb 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -365,6 +365,26 @@ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
+ }
+ 
+ /*
++ * Convert the path to a device
++ */
++dev_t dm_get_dev_t(const char *path)
++{
++	dev_t uninitialized_var(dev);
++	struct block_device *bdev;
++
++	bdev = lookup_bdev(path);
++	if (IS_ERR(bdev))
++		dev = name_to_dev_t(path);
++	else {
++		dev = bdev->bd_dev;
++		bdput(bdev);
++	}
++
++	return dev;
++}
++EXPORT_SYMBOL_GPL(dm_get_dev_t);
++
++/*
+  * Add a device to the list, or just increment the usage count if
+  * it's already present.
+  */
+@@ -372,23 +392,15 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
+ 		  struct dm_dev **result)
+ {
+ 	int r;
+-	dev_t uninitialized_var(dev);
++	dev_t dev;
+ 	struct dm_dev_internal *dd;
+ 	struct dm_table *t = ti->table;
+-	struct block_device *bdev;
+ 
+ 	BUG_ON(!t);
+ 
+-	/* convert the path to a device */
+-	bdev = lookup_bdev(path);
+-	if (IS_ERR(bdev)) {
+-		dev = name_to_dev_t(path);
+-		if (!dev)
+-			return -ENODEV;
+-	} else {
+-		dev = bdev->bd_dev;
+-		bdput(bdev);
+-	}
++	dev = dm_get_dev_t(path);
++	if (!dev)
++		return -ENODEV;
+ 
+ 	dd = find_device(&t->devices, dev);
+ 	if (!dd) {
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index f962d6453afd..185010d9cccc 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -1981,5 +1981,8 @@ bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd)
+ 
+ void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd)
+ {
+-	dm_tm_issue_prefetches(pmd->tm);
++	down_read(&pmd->root_lock);
++	if (!pmd->fail_io)
++		dm_tm_issue_prefetches(pmd->tm);
++	up_read(&pmd->root_lock);
+ }
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index dd834927bc66..c338aebb4ccd 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1109,12 +1109,8 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
+ 	 * back into ->request_fn() could deadlock attempting to grab the
+ 	 * queue lock again.
+ 	 */
+-	if (run_queue) {
+-		if (md->queue->mq_ops)
+-			blk_mq_run_hw_queues(md->queue, true);
+-		else
+-			blk_run_queue_async(md->queue);
+-	}
++	if (!md->queue->mq_ops && run_queue)
++		blk_run_queue_async(md->queue);
+ 
+ 	/*
+ 	 * dm_put() must be at the end of this function. See the comment above
+@@ -1214,9 +1210,9 @@ static void dm_requeue_original_request(struct mapped_device *md,
+ {
+ 	int rw = rq_data_dir(rq);
+ 
++	rq_end_stats(md, rq);
+ 	dm_unprep_request(rq);
+ 
+-	rq_end_stats(md, rq);
+ 	if (!rq->q->mq_ops)
+ 		old_requeue_request(rq);
+ 	else {
+@@ -1336,7 +1332,10 @@ static void dm_complete_request(struct request *rq, int error)
+ 	struct dm_rq_target_io *tio = tio_from_request(rq);
+ 
+ 	tio->error = error;
+-	blk_complete_request(rq);
++	if (!rq->q->mq_ops)
++		blk_complete_request(rq);
++	else
++		blk_mq_complete_request(rq, error);
+ }
+ 
+ /*
+diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
+index 0a72ab6e6c20..dd483bb2e111 100644
+--- a/drivers/md/multipath.c
++++ b/drivers/md/multipath.c
+@@ -129,7 +129,9 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
+ 	}
+ 	multipath = conf->multipaths + mp_bh->path;
+ 
+-	mp_bh->bio = *bio;
++	bio_init(&mp_bh->bio);
++	__bio_clone_fast(&mp_bh->bio, bio);
++
+ 	mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
+ 	mp_bh->bio.bi_bdev = multipath->rdev->bdev;
+ 	mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 4e3843f7d245..bb5bce059eb4 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -2274,6 +2274,7 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
+ 	if (fail) {
+ 		spin_lock_irq(&conf->device_lock);
+ 		list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
++		conf->nr_queued++;
+ 		spin_unlock_irq(&conf->device_lock);
+ 		md_wakeup_thread(conf->mddev->thread);
+ 	} else {
+@@ -2391,8 +2392,10 @@ static void raid1d(struct md_thread *thread)
+ 		LIST_HEAD(tmp);
+ 		spin_lock_irqsave(&conf->device_lock, flags);
+ 		if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+-			list_add(&tmp, &conf->bio_end_io_list);
+-			list_del_init(&conf->bio_end_io_list);
++			while (!list_empty(&conf->bio_end_io_list)) {
++				list_move(conf->bio_end_io_list.prev, &tmp);
++				conf->nr_queued--;
++			}
+ 		}
+ 		spin_unlock_irqrestore(&conf->device_lock, flags);
+ 		while (!list_empty(&tmp)) {
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 1c1447dd3417..e3fd725d5c4d 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -2664,6 +2664,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
+ 		if (fail) {
+ 			spin_lock_irq(&conf->device_lock);
+ 			list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
++			conf->nr_queued++;
+ 			spin_unlock_irq(&conf->device_lock);
+ 			md_wakeup_thread(conf->mddev->thread);
+ 		} else {
+@@ -2691,8 +2692,10 @@ static void raid10d(struct md_thread *thread)
+ 		LIST_HEAD(tmp);
+ 		spin_lock_irqsave(&conf->device_lock, flags);
+ 		if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+-			list_add(&tmp, &conf->bio_end_io_list);
+-			list_del_init(&conf->bio_end_io_list);
++			while (!list_empty(&conf->bio_end_io_list)) {
++				list_move(conf->bio_end_io_list.prev, &tmp);
++				conf->nr_queued--;
++			}
+ 		}
+ 		spin_unlock_irqrestore(&conf->device_lock, flags);
+ 		while (!list_empty(&tmp)) {
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index b4f02c9959f2..32d52878f182 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -340,8 +340,7 @@ static void release_inactive_stripe_list(struct r5conf *conf,
+ 					 int hash)
+ {
+ 	int size;
+-	unsigned long do_wakeup = 0;
+-	int i = 0;
++	bool do_wakeup = false;
+ 	unsigned long flags;
+ 
+ 	if (hash == NR_STRIPE_HASH_LOCKS) {
+@@ -362,19 +361,15 @@ static void release_inactive_stripe_list(struct r5conf *conf,
+ 			    !list_empty(list))
+ 				atomic_dec(&conf->empty_inactive_list_nr);
+ 			list_splice_tail_init(list, conf->inactive_list + hash);
+-			do_wakeup |= 1 << hash;
++			do_wakeup = true;
+ 			spin_unlock_irqrestore(conf->hash_locks + hash, flags);
+ 		}
+ 		size--;
+ 		hash--;
+ 	}
+ 
+-	for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
+-		if (do_wakeup & (1 << i))
+-			wake_up(&conf->wait_for_stripe[i]);
+-	}
+-
+ 	if (do_wakeup) {
++		wake_up(&conf->wait_for_stripe);
+ 		if (atomic_read(&conf->active_stripes) == 0)
+ 			wake_up(&conf->wait_for_quiescent);
+ 		if (conf->retry_read_aligned)
+@@ -687,15 +682,14 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
+ 			if (!sh) {
+ 				set_bit(R5_INACTIVE_BLOCKED,
+ 					&conf->cache_state);
+-				wait_event_exclusive_cmd(
+-					conf->wait_for_stripe[hash],
++				wait_event_lock_irq(
++					conf->wait_for_stripe,
+ 					!list_empty(conf->inactive_list + hash) &&
+ 					(atomic_read(&conf->active_stripes)
+ 					 < (conf->max_nr_stripes * 3 / 4)
+ 					 || !test_bit(R5_INACTIVE_BLOCKED,
+ 						      &conf->cache_state)),
+-					spin_unlock_irq(conf->hash_locks + hash),
+-					spin_lock_irq(conf->hash_locks + hash));
++					*(conf->hash_locks + hash));
+ 				clear_bit(R5_INACTIVE_BLOCKED,
+ 					  &conf->cache_state);
+ 			} else {
+@@ -720,9 +714,6 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
+ 		}
+ 	} while (sh == NULL);
+ 
+-	if (!list_empty(conf->inactive_list + hash))
+-		wake_up(&conf->wait_for_stripe[hash]);
+-
+ 	spin_unlock_irq(conf->hash_locks + hash);
+ 	return sh;
+ }
+@@ -2089,6 +2080,14 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
+ 	unsigned long cpu;
+ 	int err = 0;
+ 
++	/*
++	 * Never shrink. And mddev_suspend() could deadlock if this is called
++	 * from raid5d. In that case, scribble_disks and scribble_sectors
++	 * should equal to new_disks and new_sectors
++	 */
++	if (conf->scribble_disks >= new_disks &&
++	    conf->scribble_sectors >= new_sectors)
++		return 0;
+ 	mddev_suspend(conf->mddev);
+ 	get_online_cpus();
+ 	for_each_present_cpu(cpu) {
+@@ -2110,6 +2109,10 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
+ 	}
+ 	put_online_cpus();
+ 	mddev_resume(conf->mddev);
++	if (!err) {
++		conf->scribble_disks = new_disks;
++		conf->scribble_sectors = new_sectors;
++	}
+ 	return err;
+ }
+ 
+@@ -2190,7 +2193,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
+ 	cnt = 0;
+ 	list_for_each_entry(nsh, &newstripes, lru) {
+ 		lock_device_hash_lock(conf, hash);
+-		wait_event_exclusive_cmd(conf->wait_for_stripe[hash],
++		wait_event_cmd(conf->wait_for_stripe,
+ 				    !list_empty(conf->inactive_list + hash),
+ 				    unlock_device_hash_lock(conf, hash),
+ 				    lock_device_hash_lock(conf, hash));
+@@ -4236,7 +4239,6 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
+ 		WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) |
+ 					  (1 << STRIPE_SYNCING) |
+ 					  (1 << STRIPE_REPLACED) |
+-					  (1 << STRIPE_PREREAD_ACTIVE) |
+ 					  (1 << STRIPE_DELAYED) |
+ 					  (1 << STRIPE_BIT_DELAY) |
+ 					  (1 << STRIPE_FULL_WRITE) |
+@@ -4251,6 +4253,7 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
+ 					      (1 << STRIPE_REPLACED)));
+ 
+ 		set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
++					    (1 << STRIPE_PREREAD_ACTIVE) |
+ 					    (1 << STRIPE_DEGRADED)),
+ 			      head_sh->state & (1 << STRIPE_INSYNC));
+ 
+@@ -6413,6 +6416,12 @@ static int raid5_alloc_percpu(struct r5conf *conf)
+ 	}
+ 	put_online_cpus();
+ 
++	if (!err) {
++		conf->scribble_disks = max(conf->raid_disks,
++			conf->previous_raid_disks);
++		conf->scribble_sectors = max(conf->chunk_sectors,
++			conf->prev_chunk_sectors);
++	}
+ 	return err;
+ }
+ 
+@@ -6503,9 +6512,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
+ 	seqcount_init(&conf->gen_lock);
+ 	mutex_init(&conf->cache_size_mutex);
+ 	init_waitqueue_head(&conf->wait_for_quiescent);
+-	for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
+-		init_waitqueue_head(&conf->wait_for_stripe[i]);
+-	}
++	init_waitqueue_head(&conf->wait_for_stripe);
+ 	init_waitqueue_head(&conf->wait_for_overlap);
+ 	INIT_LIST_HEAD(&conf->handle_list);
+ 	INIT_LIST_HEAD(&conf->hold_list);
+@@ -7014,8 +7021,8 @@ static int raid5_run(struct mddev *mddev)
+ 		}
+ 
+ 		if (discard_supported &&
+-		   mddev->queue->limits.max_discard_sectors >= stripe &&
+-		   mddev->queue->limits.discard_granularity >= stripe)
++		    mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
++		    mddev->queue->limits.discard_granularity >= stripe)
+ 			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+ 						mddev->queue);
+ 		else
+diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
+index a415e1cd39b8..517d4b68a1be 100644
+--- a/drivers/md/raid5.h
++++ b/drivers/md/raid5.h
+@@ -510,6 +510,8 @@ struct r5conf {
+ 					      * conversions
+ 					      */
+ 	} __percpu *percpu;
++	int scribble_disks;
++	int scribble_sectors;
+ #ifdef CONFIG_HOTPLUG_CPU
+ 	struct notifier_block	cpu_notify;
+ #endif
+@@ -522,7 +524,7 @@ struct r5conf {
+ 	atomic_t		empty_inactive_list_nr;
+ 	struct llist_head	released_stripes;
+ 	wait_queue_head_t	wait_for_quiescent;
+-	wait_queue_head_t	wait_for_stripe[NR_STRIPE_HASH_LOCKS];
++	wait_queue_head_t	wait_for_stripe;
+ 	wait_queue_head_t	wait_for_overlap;
+ 	unsigned long		cache_state;
+ #define R5_INACTIVE_BLOCKED	1	/* release of inactive stripes blocked,
+diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
+index 471fd23b5c5c..08d2c6bf7341 100644
+--- a/drivers/media/i2c/adv7511.c
++++ b/drivers/media/i2c/adv7511.c
+@@ -1161,12 +1161,23 @@ static void adv7511_dbg_dump_edid(int lvl, int debug, struct v4l2_subdev *sd, in
+ 	}
+ }
+ 
++static void adv7511_notify_no_edid(struct v4l2_subdev *sd)
++{
++	struct adv7511_state *state = get_adv7511_state(sd);
++	struct adv7511_edid_detect ed;
++
++	/* We failed to read the EDID, so send an event for this. */
++	ed.present = false;
++	ed.segment = adv7511_rd(sd, 0xc4);
++	v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
++	v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x0);
++}
++
+ static void adv7511_edid_handler(struct work_struct *work)
+ {
+ 	struct delayed_work *dwork = to_delayed_work(work);
+ 	struct adv7511_state *state = container_of(dwork, struct adv7511_state, edid_handler);
+ 	struct v4l2_subdev *sd = &state->sd;
+-	struct adv7511_edid_detect ed;
+ 
+ 	v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+ 
+@@ -1191,9 +1202,7 @@ static void adv7511_edid_handler(struct work_struct *work)
+ 	}
+ 
+ 	/* We failed to read the EDID, so send an event for this. */
+-	ed.present = false;
+-	ed.segment = adv7511_rd(sd, 0xc4);
+-	v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
++	adv7511_notify_no_edid(sd);
+ 	v4l2_dbg(1, debug, sd, "%s: no edid found\n", __func__);
+ }
+ 
+@@ -1264,7 +1273,6 @@ static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
+ 	/* update read only ctrls */
+ 	v4l2_ctrl_s_ctrl(state->hotplug_ctrl, adv7511_have_hotplug(sd) ? 0x1 : 0x0);
+ 	v4l2_ctrl_s_ctrl(state->rx_sense_ctrl, adv7511_have_rx_sense(sd) ? 0x1 : 0x0);
+-	v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0);
+ 
+ 	if ((status & MASK_ADV7511_HPD_DETECT) && ((status & MASK_ADV7511_MSEN_DETECT) || state->edid.segments)) {
+ 		v4l2_dbg(1, debug, sd, "%s: hotplug and (rx-sense or edid)\n", __func__);
+@@ -1294,6 +1302,7 @@ static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
+ 		}
+ 		adv7511_s_power(sd, false);
+ 		memset(&state->edid, 0, sizeof(struct adv7511_state_edid));
++		adv7511_notify_no_edid(sd);
+ 	}
+ }
+ 
+@@ -1370,6 +1379,7 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
+ 		}
+ 		/* one more segment read ok */
+ 		state->edid.segments = segment + 1;
++		v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x1);
+ 		if (((state->edid.data[0x7e] >> 1) + 1) > state->edid.segments) {
+ 			/* Request next EDID segment */
+ 			v4l2_dbg(1, debug, sd, "%s: request segment %d\n", __func__, state->edid.segments);
+@@ -1389,7 +1399,6 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
+ 		ed.present = true;
+ 		ed.segment = 0;
+ 		state->edid_detect_counter++;
+-		v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0);
+ 		v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
+ 		return ed.present;
+ 	}
+diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
+index 9400e996087b..bedbd51fb77c 100644
+--- a/drivers/media/pci/bt8xx/bttv-driver.c
++++ b/drivers/media/pci/bt8xx/bttv-driver.c
+@@ -2334,6 +2334,19 @@ static int bttv_g_fmt_vid_overlay(struct file *file, void *priv,
+ 	return 0;
+ }
+ 
++static void bttv_get_width_mask_vid_cap(const struct bttv_format *fmt,
++					unsigned int *width_mask,
++					unsigned int *width_bias)
++{
++	if (fmt->flags & FORMAT_FLAGS_PLANAR) {
++		*width_mask = ~15; /* width must be a multiple of 16 pixels */
++		*width_bias = 8;   /* nearest */
++	} else {
++		*width_mask = ~3; /* width must be a multiple of 4 pixels */
++		*width_bias = 2;  /* nearest */
++	}
++}
++
+ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
+ 						struct v4l2_format *f)
+ {
+@@ -2343,6 +2356,7 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
+ 	enum v4l2_field field;
+ 	__s32 width, height;
+ 	__s32 height2;
++	unsigned int width_mask, width_bias;
+ 	int rc;
+ 
+ 	fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+@@ -2375,9 +2389,9 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
+ 	width = f->fmt.pix.width;
+ 	height = f->fmt.pix.height;
+ 
++	bttv_get_width_mask_vid_cap(fmt, &width_mask, &width_bias);
+ 	rc = limit_scaled_size_lock(fh, &width, &height, field,
+-			       /* width_mask: 4 pixels */ ~3,
+-			       /* width_bias: nearest */ 2,
++			       width_mask, width_bias,
+ 			       /* adjust_size */ 1,
+ 			       /* adjust_crop */ 0);
+ 	if (0 != rc)
+@@ -2410,6 +2424,7 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
+ 	struct bttv_fh *fh = priv;
+ 	struct bttv *btv = fh->btv;
+ 	__s32 width, height;
++	unsigned int width_mask, width_bias;
+ 	enum v4l2_field field;
+ 
+ 	retval = bttv_switch_type(fh, f->type);
+@@ -2424,9 +2439,10 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
+ 	height = f->fmt.pix.height;
+ 	field = f->fmt.pix.field;
+ 
++	fmt = format_by_fourcc(f->fmt.pix.pixelformat);
++	bttv_get_width_mask_vid_cap(fmt, &width_mask, &width_bias);
+ 	retval = limit_scaled_size_lock(fh, &width, &height, f->fmt.pix.field,
+-			       /* width_mask: 4 pixels */ ~3,
+-			       /* width_bias: nearest */ 2,
++			       width_mask, width_bias,
+ 			       /* adjust_size */ 1,
+ 			       /* adjust_crop */ 1);
+ 	if (0 != retval)
+@@ -2434,8 +2450,6 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
+ 
+ 	f->fmt.pix.field = field;
+ 
+-	fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+-
+ 	/* update our state informations */
+ 	fh->fmt              = fmt;
+ 	fh->cap.field        = f->fmt.pix.field;
+diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
+index a63c1366a64e..1293563b7dce 100644
+--- a/drivers/media/pci/saa7134/saa7134-video.c
++++ b/drivers/media/pci/saa7134/saa7134-video.c
+@@ -1219,10 +1219,13 @@ static int saa7134_g_fmt_vid_cap(struct file *file, void *priv,
+ 	f->fmt.pix.height       = dev->height;
+ 	f->fmt.pix.field        = dev->field;
+ 	f->fmt.pix.pixelformat  = dev->fmt->fourcc;
+-	f->fmt.pix.bytesperline =
+-		(f->fmt.pix.width * dev->fmt->depth) >> 3;
++	if (dev->fmt->planar)
++		f->fmt.pix.bytesperline = f->fmt.pix.width;
++	else
++		f->fmt.pix.bytesperline =
++			(f->fmt.pix.width * dev->fmt->depth) / 8;
+ 	f->fmt.pix.sizeimage =
+-		f->fmt.pix.height * f->fmt.pix.bytesperline;
++		(f->fmt.pix.height * f->fmt.pix.width * dev->fmt->depth) / 8;
+ 	f->fmt.pix.colorspace   = V4L2_COLORSPACE_SMPTE170M;
+ 	return 0;
+ }
+@@ -1298,10 +1301,13 @@ static int saa7134_try_fmt_vid_cap(struct file *file, void *priv,
+ 	if (f->fmt.pix.height > maxh)
+ 		f->fmt.pix.height = maxh;
+ 	f->fmt.pix.width &= ~0x03;
+-	f->fmt.pix.bytesperline =
+-		(f->fmt.pix.width * fmt->depth) >> 3;
++	if (fmt->planar)
++		f->fmt.pix.bytesperline = f->fmt.pix.width;
++	else
++		f->fmt.pix.bytesperline =
++			(f->fmt.pix.width * fmt->depth) / 8;
+ 	f->fmt.pix.sizeimage =
+-		f->fmt.pix.height * f->fmt.pix.bytesperline;
++		(f->fmt.pix.height * f->fmt.pix.width * fmt->depth) / 8;
+ 	f->fmt.pix.colorspace   = V4L2_COLORSPACE_SMPTE170M;
+ 
+ 	return 0;
+diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
+index 7d28899f89ce..6efe9d002961 100644
+--- a/drivers/media/platform/coda/coda-bit.c
++++ b/drivers/media/platform/coda/coda-bit.c
+@@ -1342,7 +1342,7 @@ static void coda_finish_encode(struct coda_ctx *ctx)
+ 
+ 	/* Calculate bytesused field */
+ 	if (dst_buf->sequence == 0) {
+-		vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
++		vb2_set_plane_payload(&dst_buf->vb2_buf, 0, wr_ptr - start_ptr +
+ 					ctx->vpu_header_size[0] +
+ 					ctx->vpu_header_size[1] +
+ 					ctx->vpu_header_size[2]);
+diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
+index 086cf1c7bd7d..18aed5dd325e 100644
+--- a/drivers/media/usb/pwc/pwc-if.c
++++ b/drivers/media/usb/pwc/pwc-if.c
+@@ -91,6 +91,7 @@ static const struct usb_device_id pwc_device_table [] = {
+ 	{ USB_DEVICE(0x0471, 0x0312) },
+ 	{ USB_DEVICE(0x0471, 0x0313) }, /* the 'new' 720K */
+ 	{ USB_DEVICE(0x0471, 0x0329) }, /* Philips SPC 900NC PC Camera */
++	{ USB_DEVICE(0x0471, 0x032C) }, /* Philips SPC 880NC PC Camera */
+ 	{ USB_DEVICE(0x069A, 0x0001) }, /* Askey */
+ 	{ USB_DEVICE(0x046D, 0x08B0) }, /* Logitech QuickCam Pro 3000 */
+ 	{ USB_DEVICE(0x046D, 0x08B1) }, /* Logitech QuickCam Notebook Pro */
+@@ -810,6 +811,11 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
+ 			name = "Philips SPC 900NC webcam";
+ 			type_id = 740;
+ 			break;
++		case 0x032C:
++			PWC_INFO("Philips SPC 880NC USB webcam detected.\n");
++			name = "Philips SPC 880NC webcam";
++			type_id = 740;
++			break;
+ 		default:
+ 			return -ENODEV;
+ 			break;
+diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+index 8fd84a67478a..019644ff627d 100644
+--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
++++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+@@ -415,7 +415,8 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
+ 		get_user(kp->index, &up->index) ||
+ 		get_user(kp->type, &up->type) ||
+ 		get_user(kp->flags, &up->flags) ||
+-		get_user(kp->memory, &up->memory))
++		get_user(kp->memory, &up->memory) ||
++		get_user(kp->length, &up->length))
+ 			return -EFAULT;
+ 
+ 	if (V4L2_TYPE_IS_OUTPUT(kp->type))
+@@ -427,9 +428,6 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
+ 			return -EFAULT;
+ 
+ 	if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
+-		if (get_user(kp->length, &up->length))
+-			return -EFAULT;
+-
+ 		num_planes = kp->length;
+ 		if (num_planes == 0) {
+ 			kp->m.planes = NULL;
+@@ -462,16 +460,14 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
+ 	} else {
+ 		switch (kp->memory) {
+ 		case V4L2_MEMORY_MMAP:
+-			if (get_user(kp->length, &up->length) ||
+-				get_user(kp->m.offset, &up->m.offset))
++			if (get_user(kp->m.offset, &up->m.offset))
+ 				return -EFAULT;
+ 			break;
+ 		case V4L2_MEMORY_USERPTR:
+ 			{
+ 			compat_long_t tmp;
+ 
+-			if (get_user(kp->length, &up->length) ||
+-			    get_user(tmp, &up->m.userptr))
++			if (get_user(tmp, &up->m.userptr))
+ 				return -EFAULT;
+ 
+ 			kp->m.userptr = (unsigned long)compat_ptr(tmp);
+@@ -513,7 +509,8 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
+ 		copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) ||
+ 		put_user(kp->sequence, &up->sequence) ||
+ 		put_user(kp->reserved2, &up->reserved2) ||
+-		put_user(kp->reserved, &up->reserved))
++		put_user(kp->reserved, &up->reserved) ||
++		put_user(kp->length, &up->length))
+ 			return -EFAULT;
+ 
+ 	if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
+@@ -536,13 +533,11 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
+ 	} else {
+ 		switch (kp->memory) {
+ 		case V4L2_MEMORY_MMAP:
+-			if (put_user(kp->length, &up->length) ||
+-				put_user(kp->m.offset, &up->m.offset))
++			if (put_user(kp->m.offset, &up->m.offset))
+ 				return -EFAULT;
+ 			break;
+ 		case V4L2_MEMORY_USERPTR:
+-			if (put_user(kp->length, &up->length) ||
+-				put_user(kp->m.userptr, &up->m.userptr))
++			if (put_user(kp->m.userptr, &up->m.userptr))
+ 				return -EFAULT;
+ 			break;
+ 		case V4L2_MEMORY_OVERLAY:
+diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
+index 0b05aa938799..1a173d0af694 100644
+--- a/drivers/misc/mei/bus.c
++++ b/drivers/misc/mei/bus.c
+@@ -53,6 +53,11 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
+ 	bus = cl->dev;
+ 
+ 	mutex_lock(&bus->device_lock);
++	if (bus->dev_state != MEI_DEV_ENABLED) {
++		rets = -ENODEV;
++		goto out;
++	}
++
+ 	if (!mei_cl_is_connected(cl)) {
+ 		rets = -ENODEV;
+ 		goto out;
+@@ -109,6 +114,10 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
+ 	bus = cl->dev;
+ 
+ 	mutex_lock(&bus->device_lock);
++	if (bus->dev_state != MEI_DEV_ENABLED) {
++		rets = -ENODEV;
++		goto out;
++	}
+ 
+ 	cb = mei_cl_read_cb(cl, NULL);
+ 	if (cb)
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index fe207e542032..5fbffdb6b854 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -589,6 +589,14 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
+ 	struct mmc_card *card;
+ 	int err = 0, ioc_err = 0;
+ 
++	/*
++	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
++	 * whole block device, not on a partition.  This prevents overspray
++	 * between sibling partitions.
++	 */
++	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
++		return -EPERM;
++
+ 	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
+ 	if (IS_ERR(idata))
+ 		return PTR_ERR(idata);
+@@ -631,6 +639,14 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
+ 	int i, err = 0, ioc_err = 0;
+ 	__u64 num_of_cmds;
+ 
++	/*
++	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
++	 * whole block device, not on a partition.  This prevents overspray
++	 * between sibling partitions.
++	 */
++	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
++		return -EPERM;
++
+ 	if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
+ 			   sizeof(num_of_cmds)))
+ 		return -EFAULT;
+@@ -688,14 +704,6 @@ cmd_err:
+ static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
+ 	unsigned int cmd, unsigned long arg)
+ {
+-	/*
+-	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
+-	 * whole block device, not on a partition.  This prevents overspray
+-	 * between sibling partitions.
+-	 */
+-	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
+-		return -EPERM;
+-
+ 	switch (cmd) {
+ 	case MMC_IOC_CMD:
+ 		return mmc_blk_ioctl_cmd(bdev,
+diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
+index 851ccd9ac868..25c179592125 100644
+--- a/drivers/mmc/host/atmel-mci.c
++++ b/drivers/mmc/host/atmel-mci.c
+@@ -2443,7 +2443,7 @@ static int atmci_configure_dma(struct atmel_mci *host)
+ 		struct mci_platform_data *pdata = host->pdev->dev.platform_data;
+ 		dma_cap_mask_t mask;
+ 
+-		if (!pdata->dma_filter)
++		if (!pdata || !pdata->dma_filter)
+ 			return -ENODEV;
+ 
+ 		dma_cap_zero(mask);
+diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
+index 3446097a43c0..e77d79c8cd9f 100644
+--- a/drivers/mmc/host/mmc_spi.c
++++ b/drivers/mmc/host/mmc_spi.c
+@@ -1442,6 +1442,12 @@ static int mmc_spi_probe(struct spi_device *spi)
+ 					     host->pdata->cd_debounce);
+ 		if (status != 0)
+ 			goto fail_add_host;
++
++		/* The platform has a CD GPIO signal that may support
++		 * interrupts, so let mmc_gpiod_request_cd_irq() decide
++		 * if polling is needed or not.
++		 */
++		mmc->caps &= ~MMC_CAP_NEEDS_POLL;
+ 		mmc_gpiod_request_cd_irq(mmc);
+ 	}
+ 
+diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
+index f5edf9d3a18a..c7f27fe4805a 100644
+--- a/drivers/mmc/host/sdhci-pxav3.c
++++ b/drivers/mmc/host/sdhci-pxav3.c
+@@ -137,6 +137,10 @@ static int armada_38x_quirks(struct platform_device *pdev,
+ 
+ 	host->quirks &= ~SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
+ 	host->quirks |= SDHCI_QUIRK_MISSING_CAPS;
++
++	host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
++	host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
++
+ 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ 					   "conf-sdio3");
+ 	if (res) {
+@@ -150,7 +154,6 @@ static int armada_38x_quirks(struct platform_device *pdev,
+ 		 * Configuration register, if the adjustment is not done,
+ 		 * remove them from the capabilities.
+ 		 */
+-		host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
+ 		host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50);
+ 
+ 		dev_warn(&pdev->dev, "conf-sdio3 register not found: disabling SDR50 and DDR50 modes.\nConsider updating your dtb\n");
+@@ -161,7 +164,6 @@ static int armada_38x_quirks(struct platform_device *pdev,
+ 	 * controller has different capabilities than the ones shown
+ 	 * in its registers
+ 	 */
+-	host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
+ 	if (of_property_read_bool(np, "no-1-8-v")) {
+ 		host->caps &= ~SDHCI_CAN_VDD_180;
+ 		host->mmc->caps &= ~MMC_CAP_1_8V_DDR;
+diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
+index 83c4bf7bc16c..0004721cd213 100644
+--- a/drivers/mmc/host/sdhci-tegra.c
++++ b/drivers/mmc/host/sdhci-tegra.c
+@@ -147,10 +147,16 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
+ 	/* Advertise UHS modes as supported by host */
+ 	if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
+ 		misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
++	else
++		misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR50;
+ 	if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
+ 		misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
++	else
++		misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_DDR50;
+ 	if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
+ 		misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
++	else
++		misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR104;
+ 	sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
+ 
+ 	clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
+@@ -188,7 +194,7 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
+ 	unsigned long host_clk;
+ 
+ 	if (!clock)
+-		return;
++		return sdhci_set_clock(host, clock);
+ 
+ 	host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
+ 	clk_set_rate(pltfm_host->clk, host_clk);
+@@ -335,6 +341,10 @@ static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
+ 
+ static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
+ 	.pdata = &sdhci_tegra114_pdata,
++};
++
++static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
++	.pdata = &sdhci_tegra114_pdata,
+ 	.nvquirks = NVQUIRK_ENABLE_SDR50 |
+ 		    NVQUIRK_ENABLE_DDR50 |
+ 		    NVQUIRK_ENABLE_SDR104,
+@@ -357,7 +367,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
+ 
+ static const struct of_device_id sdhci_tegra_dt_match[] = {
+ 	{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
+-	{ .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra114 },
++	{ .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
+ 	{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
+ 	{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
+ 	{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index add9fdfd1d8f..8059d7248fff 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -465,8 +465,6 @@ static void sdhci_adma_mark_end(void *desc)
+ static int sdhci_adma_table_pre(struct sdhci_host *host,
+ 	struct mmc_data *data)
+ {
+-	int direction;
+-
+ 	void *desc;
+ 	void *align;
+ 	dma_addr_t addr;
+@@ -483,20 +481,9 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
+ 	 * We currently guess that it is LE.
+ 	 */
+ 
+-	if (data->flags & MMC_DATA_READ)
+-		direction = DMA_FROM_DEVICE;
+-	else
+-		direction = DMA_TO_DEVICE;
+-
+-	host->align_addr = dma_map_single(mmc_dev(host->mmc),
+-		host->align_buffer, host->align_buffer_sz, direction);
+-	if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
+-		goto fail;
+-	BUG_ON(host->align_addr & SDHCI_ADMA2_MASK);
+-
+ 	host->sg_count = sdhci_pre_dma_transfer(host, data);
+ 	if (host->sg_count < 0)
+-		goto unmap_align;
++		return -EINVAL;
+ 
+ 	desc = host->adma_table;
+ 	align = host->align_buffer;
+@@ -570,22 +557,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
+ 		/* nop, end, valid */
+ 		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
+ 	}
+-
+-	/*
+-	 * Resync align buffer as we might have changed it.
+-	 */
+-	if (data->flags & MMC_DATA_WRITE) {
+-		dma_sync_single_for_device(mmc_dev(host->mmc),
+-			host->align_addr, host->align_buffer_sz, direction);
+-	}
+-
+ 	return 0;
+-
+-unmap_align:
+-	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
+-		host->align_buffer_sz, direction);
+-fail:
+-	return -EINVAL;
+ }
+ 
+ static void sdhci_adma_table_post(struct sdhci_host *host,
+@@ -605,9 +577,6 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
+ 	else
+ 		direction = DMA_TO_DEVICE;
+ 
+-	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
+-		host->align_buffer_sz, direction);
+-
+ 	/* Do a quick scan of the SG list for any unaligned mappings */
+ 	has_unaligned = false;
+ 	for_each_sg(data->sg, sg, host->sg_count, i)
+@@ -666,9 +635,20 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+ 	if (!data)
+ 		target_timeout = cmd->busy_timeout * 1000;
+ 	else {
+-		target_timeout = data->timeout_ns / 1000;
+-		if (host->clock)
+-			target_timeout += data->timeout_clks / host->clock;
++		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
++		if (host->clock && data->timeout_clks) {
++			unsigned long long val;
++
++			/*
++			 * data->timeout_clks is in units of clock cycles.
++			 * host->clock is in Hz.  target_timeout is in us.
++			 * Hence, us = 1000000 * cycles / Hz.  Round up.
++			 */
++			val = 1000000 * data->timeout_clks;
++			if (do_div(val, host->clock))
++				target_timeout++;
++			target_timeout += val;
++		}
+ 	}
+ 
+ 	/*
+@@ -1003,6 +983,9 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
+ 
+ 	WARN_ON(host->cmd);
+ 
++	/* Initially, a command has no error */
++	cmd->error = 0;
++
+ 	/* Wait max 10 ms */
+ 	timeout = 10;
+ 
+@@ -1097,8 +1080,6 @@ static void sdhci_finish_command(struct sdhci_host *host)
+ 		}
+ 	}
+ 
+-	host->cmd->error = 0;
+-
+ 	/* Finished CMD23, now send actual command. */
+ 	if (host->cmd == host->mrq->sbc) {
+ 		host->cmd = NULL;
+@@ -2114,14 +2095,13 @@ static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
+ 	struct sdhci_host *host = mmc_priv(mmc);
+ 	struct mmc_data *data = mrq->data;
+ 
+-	if (host->flags & SDHCI_REQ_USE_DMA) {
+-		if (data->host_cookie == COOKIE_GIVEN ||
+-				data->host_cookie == COOKIE_MAPPED)
+-			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+-					 data->flags & MMC_DATA_WRITE ?
+-					 DMA_TO_DEVICE : DMA_FROM_DEVICE);
+-		data->host_cookie = COOKIE_UNMAPPED;
+-	}
++	if (data->host_cookie == COOKIE_GIVEN ||
++	    data->host_cookie == COOKIE_MAPPED)
++		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
++			     data->flags & MMC_DATA_WRITE ?
++			       DMA_TO_DEVICE : DMA_FROM_DEVICE);
++
++	data->host_cookie = COOKIE_UNMAPPED;
+ }
+ 
+ static int sdhci_pre_dma_transfer(struct sdhci_host *host,
+@@ -2238,6 +2218,22 @@ static void sdhci_tasklet_finish(unsigned long param)
+ 	mrq = host->mrq;
+ 
+ 	/*
++	 * Always unmap the data buffers if they were mapped by
++	 * sdhci_prepare_data() whenever we finish with a request.
++	 * This avoids leaking DMA mappings on error.
++	 */
++	if (host->flags & SDHCI_REQ_USE_DMA) {
++		struct mmc_data *data = mrq->data;
++
++		if (data && data->host_cookie == COOKIE_MAPPED) {
++			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
++				     (data->flags & MMC_DATA_READ) ?
++				     DMA_FROM_DEVICE : DMA_TO_DEVICE);
++			data->host_cookie = COOKIE_UNMAPPED;
++		}
++	}
++
++	/*
+ 	 * The controller needs a reset of internal state machines
+ 	 * upon error conditions.
+ 	 */
+@@ -2322,13 +2318,30 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
+ 		return;
+ 	}
+ 
+-	if (intmask & SDHCI_INT_TIMEOUT)
+-		host->cmd->error = -ETIMEDOUT;
+-	else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
+-			SDHCI_INT_INDEX))
+-		host->cmd->error = -EILSEQ;
++	if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
++		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
++		if (intmask & SDHCI_INT_TIMEOUT)
++			host->cmd->error = -ETIMEDOUT;
++		else
++			host->cmd->error = -EILSEQ;
++
++		/*
++		 * If this command initiates a data phase and a response
++		 * CRC error is signalled, the card can start transferring
++		 * data - the card may have received the command without
++		 * error.  We must not terminate the mmc_request early.
++		 *
++		 * If the card did not receive the command or returned an
++		 * error which prevented it sending data, the data phase
++		 * will time out.
++		 */
++		if (host->cmd->data &&
++		    (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
++		     SDHCI_INT_CRC) {
++			host->cmd = NULL;
++			return;
++		}
+ 
+-	if (host->cmd->error) {
+ 		tasklet_schedule(&host->finish_tasklet);
+ 		return;
+ 	}
+@@ -2967,14 +2980,21 @@ int sdhci_add_host(struct sdhci_host *host)
+ 						      &host->adma_addr,
+ 						      GFP_KERNEL);
+ 		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
+-		host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
++		host->align_buffer = dma_alloc_coherent(mmc_dev(mmc),
++							host->align_buffer_sz,
++							&host->align_addr,
++							GFP_KERNEL);
+ 		if (!host->adma_table || !host->align_buffer) {
+ 			if (host->adma_table)
+ 				dma_free_coherent(mmc_dev(mmc),
+ 						  host->adma_table_sz,
+ 						  host->adma_table,
+ 						  host->adma_addr);
+-			kfree(host->align_buffer);
++			if (host->align_buffer)
++				dma_free_coherent(mmc_dev(mmc),
++						  host->align_buffer_sz,
++						  host->align_buffer,
++						  host->align_addr);
+ 			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
+ 				mmc_hostname(mmc));
+ 			host->flags &= ~SDHCI_USE_ADMA;
+@@ -2986,10 +3006,14 @@ int sdhci_add_host(struct sdhci_host *host)
+ 			host->flags &= ~SDHCI_USE_ADMA;
+ 			dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
+ 					  host->adma_table, host->adma_addr);
+-			kfree(host->align_buffer);
++			dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz,
++					  host->align_buffer, host->align_addr);
+ 			host->adma_table = NULL;
+ 			host->align_buffer = NULL;
+ 		}
++
++		/* dma_alloc_coherent returns page aligned and sized buffers */
++		BUG_ON(host->align_addr & SDHCI_ADMA2_MASK);
+ 	}
+ 
+ 	/*
+@@ -3072,14 +3096,14 @@ int sdhci_add_host(struct sdhci_host *host)
+ 		if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
+ 			host->timeout_clk *= 1000;
+ 
++		if (override_timeout_clk)
++			host->timeout_clk = override_timeout_clk;
++
+ 		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
+ 			host->ops->get_max_timeout_count(host) : 1 << 27;
+ 		mmc->max_busy_timeout /= host->timeout_clk;
+ 	}
+ 
+-	if (override_timeout_clk)
+-		host->timeout_clk = override_timeout_clk;
+-
+ 	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
+ 	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
+ 
+@@ -3452,7 +3476,9 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
+ 	if (host->adma_table)
+ 		dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
+ 				  host->adma_table, host->adma_addr);
+-	kfree(host->align_buffer);
++	if (host->align_buffer)
++		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz,
++				  host->align_buffer, host->align_addr);
+ 
+ 	host->adma_table = NULL;
+ 	host->align_buffer = NULL;
+diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
+index 43b3392ffee7..652d01832873 100644
+--- a/drivers/mtd/onenand/onenand_base.c
++++ b/drivers/mtd/onenand/onenand_base.c
+@@ -2599,6 +2599,7 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
+  */
+ static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
+ {
++	struct onenand_chip *this = mtd->priv;
+ 	int ret;
+ 
+ 	ret = onenand_block_isbad(mtd, ofs);
+@@ -2610,7 +2611,7 @@ static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
+ 	}
+ 
+ 	onenand_get_device(mtd, FL_WRITING);
+-	ret = mtd_block_markbad(mtd, ofs);
++	ret = this->block_markbad(mtd, ofs);
+ 	onenand_release_device(mtd);
+ 	return ret;
+ }
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index b0ae69f84493..acb1c5b2bad3 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -3720,7 +3720,7 @@ static int mvneta_probe(struct platform_device *pdev)
+ 	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+ 	dev->hw_features |= dev->features;
+ 	dev->vlan_features |= dev->features;
+-	dev->priv_flags |= IFF_UNICAST_FLT;
++	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
+ 	dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
+ 
+ 	err = register_netdev(dev);
+diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
+index 696852eb23c3..7a3f990c1935 100644
+--- a/drivers/net/irda/irtty-sir.c
++++ b/drivers/net/irda/irtty-sir.c
+@@ -430,16 +430,6 @@ static int irtty_open(struct tty_struct *tty)
+ 
+ 	/* Module stuff handled via irda_ldisc.owner - Jean II */
+ 
+-	/* First make sure we're not already connected. */
+-	if (tty->disc_data != NULL) {
+-		priv = tty->disc_data;
+-		if (priv && priv->magic == IRTTY_MAGIC) {
+-			ret = -EEXIST;
+-			goto out;
+-		}
+-		tty->disc_data = NULL;		/* ### */
+-	}
+-
+ 	/* stop the underlying  driver */
+ 	irtty_stop_receiver(tty, TRUE);
+ 	if (tty->ops->stop)
+diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
+index 01f08a7751f7..e7034c55e796 100644
+--- a/drivers/net/rionet.c
++++ b/drivers/net/rionet.c
+@@ -280,7 +280,7 @@ static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbo
+ 	struct net_device *ndev = dev_id;
+ 	struct rionet_private *rnet = netdev_priv(ndev);
+ 
+-	spin_lock(&rnet->lock);
++	spin_lock(&rnet->tx_lock);
+ 
+ 	if (netif_msg_intr(rnet))
+ 		printk(KERN_INFO
+@@ -299,7 +299,7 @@ static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbo
+ 	if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
+ 		netif_wake_queue(ndev);
+ 
+-	spin_unlock(&rnet->lock);
++	spin_unlock(&rnet->tx_lock);
+ }
+ 
+ static int rionet_open(struct net_device *ndev)
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
+index 2ca783fa50cf..7e269f9aa607 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
+@@ -32,7 +32,7 @@
+ #define BRCMF_FLOWRING_LOW		(BRCMF_FLOWRING_HIGH - 256)
+ #define BRCMF_FLOWRING_INVALID_IFIDX	0xff
+ 
+-#define BRCMF_FLOWRING_HASH_AP(da, fifo, ifidx) (da[5] + fifo + ifidx * 16)
++#define BRCMF_FLOWRING_HASH_AP(da, fifo, ifidx) (da[5] * 2 + fifo + ifidx * 16)
+ #define BRCMF_FLOWRING_HASH_STA(fifo, ifidx) (fifo + ifidx * 16)
+ 
+ static const u8 brcmf_flowring_prio2fifo[] = {
+@@ -68,7 +68,7 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
+ 			  u8 prio, u8 ifidx)
+ {
+ 	struct brcmf_flowring_hash *hash;
+-	u8 hash_idx;
++	u16 hash_idx;
+ 	u32 i;
+ 	bool found;
+ 	bool sta;
+@@ -88,6 +88,7 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
+ 	}
+ 	hash_idx =  sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
+ 			  BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
++	hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
+ 	found = false;
+ 	hash = flow->hash;
+ 	for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
+@@ -98,6 +99,7 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
+ 			break;
+ 		}
+ 		hash_idx++;
++		hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
+ 	}
+ 	if (found)
+ 		return hash[hash_idx].flowid;
+@@ -111,7 +113,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
+ {
+ 	struct brcmf_flowring_ring *ring;
+ 	struct brcmf_flowring_hash *hash;
+-	u8 hash_idx;
++	u16 hash_idx;
+ 	u32 i;
+ 	bool found;
+ 	u8 fifo;
+@@ -131,6 +133,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
+ 	}
+ 	hash_idx =  sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
+ 			  BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
++	hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
+ 	found = false;
+ 	hash = flow->hash;
+ 	for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
+@@ -140,6 +143,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
+ 			break;
+ 		}
+ 		hash_idx++;
++		hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
+ 	}
+ 	if (found) {
+ 		for (i = 0; i < flow->nrofrings; i++) {
+@@ -169,7 +173,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
+ }
+ 
+ 
+-u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid)
++u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u16 flowid)
+ {
+ 	struct brcmf_flowring_ring *ring;
+ 
+@@ -179,7 +183,7 @@ u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid)
+ }
+ 
+ 
+-static void brcmf_flowring_block(struct brcmf_flowring *flow, u8 flowid,
++static void brcmf_flowring_block(struct brcmf_flowring *flow, u16 flowid,
+ 				 bool blocked)
+ {
+ 	struct brcmf_flowring_ring *ring;
+@@ -228,10 +232,10 @@ static void brcmf_flowring_block(struct brcmf_flowring *flow, u8 flowid,
+ }
+ 
+ 
+-void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid)
++void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid)
+ {
+ 	struct brcmf_flowring_ring *ring;
+-	u8 hash_idx;
++	u16 hash_idx;
+ 	struct sk_buff *skb;
+ 
+ 	ring = flow->rings[flowid];
+@@ -253,7 +257,7 @@ void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid)
+ }
+ 
+ 
+-u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
++u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u16 flowid,
+ 			   struct sk_buff *skb)
+ {
+ 	struct brcmf_flowring_ring *ring;
+@@ -279,7 +283,7 @@ u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
+ }
+ 
+ 
+-struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid)
++struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u16 flowid)
+ {
+ 	struct brcmf_flowring_ring *ring;
+ 	struct sk_buff *skb;
+@@ -300,7 +304,7 @@ struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid)
+ }
+ 
+ 
+-void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
++void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u16 flowid,
+ 			     struct sk_buff *skb)
+ {
+ 	struct brcmf_flowring_ring *ring;
+@@ -311,7 +315,7 @@ void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
+ }
+ 
+ 
+-u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid)
++u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u16 flowid)
+ {
+ 	struct brcmf_flowring_ring *ring;
+ 
+@@ -326,7 +330,7 @@ u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid)
+ }
+ 
+ 
+-void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid)
++void brcmf_flowring_open(struct brcmf_flowring *flow, u16 flowid)
+ {
+ 	struct brcmf_flowring_ring *ring;
+ 
+@@ -340,10 +344,10 @@ void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid)
+ }
+ 
+ 
+-u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u8 flowid)
++u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u16 flowid)
+ {
+ 	struct brcmf_flowring_ring *ring;
+-	u8 hash_idx;
++	u16 hash_idx;
+ 
+ 	ring = flow->rings[flowid];
+ 	hash_idx = ring->hash_id;
+@@ -384,7 +388,7 @@ void brcmf_flowring_detach(struct brcmf_flowring *flow)
+ 	struct brcmf_pub *drvr = bus_if->drvr;
+ 	struct brcmf_flowring_tdls_entry *search;
+ 	struct brcmf_flowring_tdls_entry *remove;
+-	u8 flowid;
++	u16 flowid;
+ 
+ 	for (flowid = 0; flowid < flow->nrofrings; flowid++) {
+ 		if (flow->rings[flowid])
+@@ -408,7 +412,7 @@ void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx,
+ 	struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
+ 	struct brcmf_pub *drvr = bus_if->drvr;
+ 	u32 i;
+-	u8 flowid;
++	u16 flowid;
+ 
+ 	if (flow->addr_mode[ifidx] != addr_mode) {
+ 		for (i = 0; i < ARRAY_SIZE(flow->hash); i++) {
+@@ -434,7 +438,7 @@ void brcmf_flowring_delete_peer(struct brcmf_flowring *flow, int ifidx,
+ 	struct brcmf_flowring_tdls_entry *prev;
+ 	struct brcmf_flowring_tdls_entry *search;
+ 	u32 i;
+-	u8 flowid;
++	u16 flowid;
+ 	bool sta;
+ 
+ 	sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h
+index 95fd1c9675d1..068e68d94999 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h
+@@ -16,7 +16,7 @@
+ #define BRCMFMAC_FLOWRING_H
+ 
+ 
+-#define BRCMF_FLOWRING_HASHSIZE		256
++#define BRCMF_FLOWRING_HASHSIZE		512		/* has to be 2^x */
+ #define BRCMF_FLOWRING_INVALID_ID	0xFFFFFFFF
+ 
+ 
+@@ -24,7 +24,7 @@ struct brcmf_flowring_hash {
+ 	u8 mac[ETH_ALEN];
+ 	u8 fifo;
+ 	u8 ifidx;
+-	u8 flowid;
++	u16 flowid;
+ };
+ 
+ enum ring_status {
+@@ -61,16 +61,16 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
+ 			  u8 prio, u8 ifidx);
+ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
+ 			  u8 prio, u8 ifidx);
+-void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid);
+-void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid);
+-u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid);
+-u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
++void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid);
++void brcmf_flowring_open(struct brcmf_flowring *flow, u16 flowid);
++u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u16 flowid);
++u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u16 flowid,
+ 			   struct sk_buff *skb);
+-struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid);
+-void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
++struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u16 flowid);
++void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u16 flowid,
+ 			     struct sk_buff *skb);
+-u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid);
+-u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u8 flowid);
++u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u16 flowid);
++u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u16 flowid);
+ struct brcmf_flowring *brcmf_flowring_attach(struct device *dev, u16 nrofrings);
+ void brcmf_flowring_detach(struct brcmf_flowring *flow);
+ void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx,
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+index c2bdb91746cf..922966734a7f 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+@@ -677,7 +677,7 @@ static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx,
+ }
+ 
+ 
+-static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u8 flowid)
++static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u16 flowid)
+ {
+ 	struct brcmf_flowring *flow = msgbuf->flow;
+ 	struct brcmf_commonring *commonring;
+@@ -1310,7 +1310,7 @@ int brcmf_proto_msgbuf_rx_trigger(struct device *dev)
+ }
+ 
+ 
+-void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid)
++void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid)
+ {
+ 	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
+ 	struct msgbuf_tx_flowring_delete_req *delete;
+@@ -1415,6 +1415,13 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
+ 	u32 count;
+ 
+ 	if_msgbuf = drvr->bus_if->msgbuf;
++
++	if (if_msgbuf->nrof_flowrings >= BRCMF_FLOWRING_HASHSIZE) {
++		brcmf_err("driver not configured for this many flowrings %d\n",
++			  if_msgbuf->nrof_flowrings);
++		if_msgbuf->nrof_flowrings = BRCMF_FLOWRING_HASHSIZE - 1;
++	}
++
+ 	msgbuf = kzalloc(sizeof(*msgbuf), GFP_KERNEL);
+ 	if (!msgbuf)
+ 		goto fail;
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
+index 3d513e407e3d..ee6906a3c3f6 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
+@@ -33,7 +33,7 @@
+ 
+ 
+ int brcmf_proto_msgbuf_rx_trigger(struct device *dev);
+-void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid);
++void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid);
+ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr);
+ void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr);
+ #else
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+index bf9afbf46c1b..4b0bb6b4f6f1 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
++++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+@@ -1026,6 +1026,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ 	{ USB_DEVICE(0x0411, 0x01a2) },
+ 	{ USB_DEVICE(0x0411, 0x01ee) },
+ 	{ USB_DEVICE(0x0411, 0x01a8) },
++	{ USB_DEVICE(0x0411, 0x01fd) },
+ 	/* Corega */
+ 	{ USB_DEVICE(0x07aa, 0x002f) },
+ 	{ USB_DEVICE(0x07aa, 0x003c) },
+diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
+index 5d28e9405f32..576eb7013792 100644
+--- a/drivers/nvdimm/bus.c
++++ b/drivers/nvdimm/bus.c
+@@ -513,10 +513,10 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
+ 
+ 	/* fail write commands (when read-only) */
+ 	if (read_only)
+-		switch (ioctl_cmd) {
+-		case ND_IOCTL_VENDOR:
+-		case ND_IOCTL_SET_CONFIG_DATA:
+-		case ND_IOCTL_ARS_START:
++		switch (cmd) {
++		case ND_CMD_VENDOR:
++		case ND_CMD_SET_CONFIG_DATA:
++		case ND_CMD_ARS_START:
+ 			dev_dbg(&nvdimm_bus->dev, "'%s' command while read-only.\n",
+ 					nvdimm ? nvdimm_cmd_name(cmd)
+ 					: nvdimm_bus_cmd_name(cmd));
+diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
+index 8d0b54670184..544b802a594c 100644
+--- a/drivers/nvdimm/pmem.c
++++ b/drivers/nvdimm/pmem.c
+@@ -66,22 +66,25 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
+ 			unsigned int len, unsigned int off, int rw,
+ 			sector_t sector)
+ {
++	int rc = 0;
+ 	void *mem = kmap_atomic(page);
+ 	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
+ 	void __pmem *pmem_addr = pmem->virt_addr + pmem_off;
+ 
+ 	if (rw == READ) {
+ 		if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
+-			return -EIO;
+-		memcpy_from_pmem(mem + off, pmem_addr, len);
+-		flush_dcache_page(page);
++			rc = -EIO;
++		else {
++			memcpy_from_pmem(mem + off, pmem_addr, len);
++			flush_dcache_page(page);
++		}
+ 	} else {
+ 		flush_dcache_page(page);
+ 		memcpy_to_pmem(pmem_addr, mem + off, len);
+ 	}
+ 
+ 	kunmap_atomic(mem);
+-	return 0;
++	return rc;
+ }
+ 
+ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
+diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
+index 1a3556a9e9ea..ed01c0172e4a 100644
+--- a/drivers/of/of_reserved_mem.c
++++ b/drivers/of/of_reserved_mem.c
+@@ -32,11 +32,13 @@ int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
+ 	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
+ 	phys_addr_t *res_base)
+ {
++	phys_addr_t base;
+ 	/*
+ 	 * We use __memblock_alloc_base() because memblock_alloc_base()
+ 	 * panic()s on allocation failure.
+ 	 */
+-	phys_addr_t base = __memblock_alloc_base(size, align, end);
++	end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
++	base = __memblock_alloc_base(size, align, end);
+ 	if (!base)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 6d7ab9bb0d5a..6b0056e9c33e 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -179,6 +179,9 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ 	u16 orig_cmd;
+ 	struct pci_bus_region region, inverted_region;
+ 
++	if (dev->non_compliant_bars)
++		return 0;
++
+ 	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
+ 
+ 	/* No printks while decoding is disabled! */
+@@ -1171,6 +1174,7 @@ static void pci_msi_setup_pci_dev(struct pci_dev *dev)
+ int pci_setup_device(struct pci_dev *dev)
+ {
+ 	u32 class;
++	u16 cmd;
+ 	u8 hdr_type;
+ 	int pos = 0;
+ 	struct pci_bus_region region;
+@@ -1214,6 +1218,16 @@ int pci_setup_device(struct pci_dev *dev)
+ 	/* device class may be changed after fixup */
+ 	class = dev->class >> 8;
+ 
++	if (dev->non_compliant_bars) {
++		pci_read_config_word(dev, PCI_COMMAND, &cmd);
++		if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
++			dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
++			cmd &= ~PCI_COMMAND_IO;
++			cmd &= ~PCI_COMMAND_MEMORY;
++			pci_write_config_word(dev, PCI_COMMAND, cmd);
++		}
++	}
++
+ 	switch (dev->hdr_type) {		    /* header type */
+ 	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
+ 		if (class == PCI_CLASS_BRIDGE_PCI)
+diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+index 0f5997ceb494..08b1d93da9fe 100644
+--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
++++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+@@ -779,7 +779,7 @@ static int bcm2835_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
+ 		}
+ 		if (num_pulls) {
+ 			err = of_property_read_u32_index(np, "brcm,pull",
+-					(num_funcs > 1) ? i : 0, &pull);
++					(num_pulls > 1) ? i : 0, &pull);
+ 			if (err)
+ 				goto out;
+ 			err = bcm2835_pctl_dt_node_to_map_pull(pc, np, pin,
+diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
+index d78ee151c9e4..be3bc2f4edd4 100644
+--- a/drivers/platform/x86/ideapad-laptop.c
++++ b/drivers/platform/x86/ideapad-laptop.c
+@@ -865,6 +865,20 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
+ 		},
+ 	},
+ 	{
++		.ident = "Lenovo ideapad Y700-15ISK",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-15ISK"),
++		},
++	},
++	{
++		.ident = "Lenovo ideapad Y700 Touch-15ISK",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700 Touch-15ISK"),
++		},
++	},
++	{
+ 		.ident = "Lenovo ideapad Y700-17ISK",
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
+index d72867257346..3eff2a69fe08 100644
+--- a/drivers/scsi/NCR5380.c
++++ b/drivers/scsi/NCR5380.c
+@@ -760,7 +760,7 @@ static struct scsi_cmnd *dequeue_next_cmd(struct Scsi_Host *instance)
+ 	struct NCR5380_cmd *ncmd;
+ 	struct scsi_cmnd *cmd;
+ 
+-	if (list_empty(&hostdata->autosense)) {
++	if (hostdata->sensing || list_empty(&hostdata->autosense)) {
+ 		list_for_each_entry(ncmd, &hostdata->unissued, list) {
+ 			cmd = NCR5380_to_scmd(ncmd);
+ 			dsprintk(NDEBUG_QUEUES, instance, "dequeue: cmd=%p target=%d busy=0x%02x lun=%llu\n",
+@@ -793,7 +793,7 @@ static void requeue_cmd(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
+ 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ 	struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
+ 
+-	if (hostdata->sensing) {
++	if (hostdata->sensing == cmd) {
+ 		scsi_eh_restore_cmnd(cmd, &hostdata->ses);
+ 		list_add(&ncmd->list, &hostdata->autosense);
+ 		hostdata->sensing = NULL;
+@@ -815,15 +815,17 @@ static void NCR5380_main(struct work_struct *work)
+ 	struct NCR5380_hostdata *hostdata =
+ 		container_of(work, struct NCR5380_hostdata, main_task);
+ 	struct Scsi_Host *instance = hostdata->host;
+-	struct scsi_cmnd *cmd;
+ 	int done;
+ 
+ 	do {
+ 		done = 1;
+ 
+ 		spin_lock_irq(&hostdata->lock);
+-		while (!hostdata->connected &&
+-		       (cmd = dequeue_next_cmd(instance))) {
++		while (!hostdata->connected && !hostdata->selecting) {
++			struct scsi_cmnd *cmd = dequeue_next_cmd(instance);
++
++			if (!cmd)
++				break;
+ 
+ 			dsprintk(NDEBUG_MAIN, instance, "main: dequeued %p\n", cmd);
+ 
+@@ -840,8 +842,7 @@ static void NCR5380_main(struct work_struct *work)
+ 			 * entire unit.
+ 			 */
+ 
+-			cmd = NCR5380_select(instance, cmd);
+-			if (!cmd) {
++			if (!NCR5380_select(instance, cmd)) {
+ 				dsprintk(NDEBUG_MAIN, instance, "main: select complete\n");
+ 			} else {
+ 				dsprintk(NDEBUG_MAIN | NDEBUG_QUEUES, instance,
+@@ -1056,6 +1057,11 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
+ 		/* Reselection interrupt */
+ 		goto out;
+ 	}
++	if (!hostdata->selecting) {
++		/* Command was aborted */
++		NCR5380_write(MODE_REG, MR_BASE);
++		goto out;
++	}
+ 	if (err < 0) {
+ 		NCR5380_write(MODE_REG, MR_BASE);
+ 		shost_printk(KERN_ERR, instance,
+@@ -1759,9 +1765,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
+ 	unsigned char msgout = NOP;
+ 	int sink = 0;
+ 	int len;
+-#if defined(PSEUDO_DMA) || defined(REAL_DMA_POLL)
+ 	int transfersize;
+-#endif
+ 	unsigned char *data;
+ 	unsigned char phase, tmp, extended_msg[10], old_phase = 0xff;
+ 	struct scsi_cmnd *cmd;
+@@ -1798,6 +1802,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
+ 				do_abort(instance);
+ 				cmd->result = DID_ERROR << 16;
+ 				complete_cmd(instance, cmd);
++				hostdata->connected = NULL;
+ 				return;
+ #endif
+ 			case PHASE_DATAIN:
+@@ -1847,20 +1852,23 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
+ 						sink = 1;
+ 						do_abort(instance);
+ 						cmd->result = DID_ERROR << 16;
+-						complete_cmd(instance, cmd);
+ 						/* XXX - need to source or sink data here, as appropriate */
+ 					} else
+ 						cmd->SCp.this_residual -= transfersize - len;
+ 				} else
+ #endif				/* defined(PSEUDO_DMA) || defined(REAL_DMA_POLL) */
+ 				{
+-					spin_unlock_irq(&hostdata->lock);
+-					NCR5380_transfer_pio(instance, &phase,
+-					                     (int *)&cmd->SCp.this_residual,
++					/* Break up transfer into 3 ms chunks,
++					 * presuming 6 accesses per handshake.
++					 */
++					transfersize = min((unsigned long)cmd->SCp.this_residual,
++					                   hostdata->accesses_per_ms / 2);
++					len = transfersize;
++					NCR5380_transfer_pio(instance, &phase, &len,
+ 					                     (unsigned char **)&cmd->SCp.ptr);
+-					spin_lock_irq(&hostdata->lock);
++					cmd->SCp.this_residual -= transfersize - len;
+ 				}
+-				break;
++				return;
+ 			case PHASE_MSGIN:
+ 				len = 1;
+ 				data = &tmp;
+@@ -2292,14 +2300,17 @@ static bool list_del_cmd(struct list_head *haystack,
+  * [disconnected -> connected ->]...
+  * [autosense -> connected ->] done
+  *
+- * If cmd is unissued then just remove it.
+- * If cmd is disconnected, try to select the target.
+- * If cmd is connected, try to send an abort message.
+- * If cmd is waiting for autosense, give it a chance to complete but check
+- * that it isn't left connected.
+  * If cmd was not found at all then presumably it has already been completed,
+  * in which case return SUCCESS to try to avoid further EH measures.
++ *
+  * If the command has not completed yet, we must not fail to find it.
++ * We have no option but to forget the aborted command (even if it still
++ * lacks sense data). The mid-layer may re-issue a command that is in error
++ * recovery (see scsi_send_eh_cmnd), but the logic and data structures in
++ * this driver are such that a command can appear on one queue only.
++ *
++ * The lock protects driver data structures, but EH handlers also use it
++ * to serialize their own execution and prevent their own re-entry.
+  */
+ 
+ static int NCR5380_abort(struct scsi_cmnd *cmd)
+@@ -2322,6 +2333,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
+ 		         "abort: removed %p from issue queue\n", cmd);
+ 		cmd->result = DID_ABORT << 16;
+ 		cmd->scsi_done(cmd); /* No tag or busy flag to worry about */
++		goto out;
+ 	}
+ 
+ 	if (hostdata->selecting == cmd) {
+@@ -2336,58 +2348,21 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
+ 	if (list_del_cmd(&hostdata->disconnected, cmd)) {
+ 		dsprintk(NDEBUG_ABORT, instance,
+ 		         "abort: removed %p from disconnected list\n", cmd);
+-		cmd->result = DID_ERROR << 16;
+-		if (!hostdata->connected)
+-			NCR5380_select(instance, cmd);
+-		if (hostdata->connected != cmd) {
+-			complete_cmd(instance, cmd);
+-			result = FAILED;
+-			goto out;
+-		}
++		/* Can't call NCR5380_select() and send ABORT because that
++		 * means releasing the lock. Need a bus reset.
++		 */
++		set_host_byte(cmd, DID_ERROR);
++		complete_cmd(instance, cmd);
++		result = FAILED;
++		goto out;
+ 	}
+ 
+ 	if (hostdata->connected == cmd) {
+ 		dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
+ 		hostdata->connected = NULL;
+-		if (do_abort(instance)) {
+-			set_host_byte(cmd, DID_ERROR);
+-			complete_cmd(instance, cmd);
+-			result = FAILED;
+-			goto out;
+-		}
+-		set_host_byte(cmd, DID_ABORT);
+ #ifdef REAL_DMA
+ 		hostdata->dma_len = 0;
+ #endif
+-		if (cmd->cmnd[0] == REQUEST_SENSE)
+-			complete_cmd(instance, cmd);
+-		else {
+-			struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
+-
+-			/* Perform autosense for this command */
+-			list_add(&ncmd->list, &hostdata->autosense);
+-		}
+-	}
+-
+-	if (list_find_cmd(&hostdata->autosense, cmd)) {
+-		dsprintk(NDEBUG_ABORT, instance,
+-		         "abort: found %p on sense queue\n", cmd);
+-		spin_unlock_irqrestore(&hostdata->lock, flags);
+-		queue_work(hostdata->work_q, &hostdata->main_task);
+-		msleep(1000);
+-		spin_lock_irqsave(&hostdata->lock, flags);
+-		if (list_del_cmd(&hostdata->autosense, cmd)) {
+-			dsprintk(NDEBUG_ABORT, instance,
+-			         "abort: removed %p from sense queue\n", cmd);
+-			set_host_byte(cmd, DID_ABORT);
+-			complete_cmd(instance, cmd);
+-			goto out;
+-		}
+-	}
+-
+-	if (hostdata->connected == cmd) {
+-		dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
+-		hostdata->connected = NULL;
+ 		if (do_abort(instance)) {
+ 			set_host_byte(cmd, DID_ERROR);
+ 			complete_cmd(instance, cmd);
+@@ -2395,9 +2370,14 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
+ 			goto out;
+ 		}
+ 		set_host_byte(cmd, DID_ABORT);
+-#ifdef REAL_DMA
+-		hostdata->dma_len = 0;
+-#endif
++		complete_cmd(instance, cmd);
++		goto out;
++	}
++
++	if (list_del_cmd(&hostdata->autosense, cmd)) {
++		dsprintk(NDEBUG_ABORT, instance,
++		         "abort: removed %p from sense queue\n", cmd);
++		set_host_byte(cmd, DID_ERROR);
+ 		complete_cmd(instance, cmd);
+ 	}
+ 
+@@ -2450,7 +2430,16 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
+ 	 * commands!
+ 	 */
+ 
+-	hostdata->selecting = NULL;
++	if (list_del_cmd(&hostdata->unissued, cmd)) {
++		cmd->result = DID_RESET << 16;
++		cmd->scsi_done(cmd);
++	}
++
++	if (hostdata->selecting) {
++		hostdata->selecting->result = DID_RESET << 16;
++		complete_cmd(instance, hostdata->selecting);
++		hostdata->selecting = NULL;
++	}
+ 
+ 	list_for_each_entry(ncmd, &hostdata->disconnected, list) {
+ 		struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
+@@ -2458,6 +2447,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
+ 		set_host_byte(cmd, DID_RESET);
+ 		cmd->scsi_done(cmd);
+ 	}
++	INIT_LIST_HEAD(&hostdata->disconnected);
+ 
+ 	list_for_each_entry(ncmd, &hostdata->autosense, list) {
+ 		struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
+@@ -2465,6 +2455,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
+ 		set_host_byte(cmd, DID_RESET);
+ 		cmd->scsi_done(cmd);
+ 	}
++	INIT_LIST_HEAD(&hostdata->autosense);
+ 
+ 	if (hostdata->connected) {
+ 		set_host_byte(hostdata->connected, DID_RESET);
+@@ -2472,12 +2463,6 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
+ 		hostdata->connected = NULL;
+ 	}
+ 
+-	if (hostdata->sensing) {
+-		set_host_byte(hostdata->connected, DID_RESET);
+-		complete_cmd(instance, hostdata->sensing);
+-		hostdata->sensing = NULL;
+-	}
+-
+ 	for (i = 0; i < 8; ++i)
+ 		hostdata->busy[i] = 0;
+ #ifdef REAL_DMA
+diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
+index 074878b55a0b..d044f3f273be 100644
+--- a/drivers/scsi/aacraid/aacraid.h
++++ b/drivers/scsi/aacraid/aacraid.h
+@@ -944,6 +944,7 @@ struct fib {
+ 	 */
+ 	struct list_head	fiblink;
+ 	void			*data;
++	u32			vector_no;
+ 	struct hw_fib		*hw_fib_va;		/* Actual shared object */
+ 	dma_addr_t		hw_fib_pa;		/* physical address of hw_fib*/
+ };
+@@ -2113,6 +2114,7 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
+ int aac_acquire_irq(struct aac_dev *dev);
+ void aac_free_irq(struct aac_dev *dev);
+ const char *aac_driverinfo(struct Scsi_Host *);
++void aac_fib_vector_assign(struct aac_dev *dev);
+ struct fib *aac_fib_alloc(struct aac_dev *dev);
+ int aac_fib_setup(struct aac_dev *dev);
+ void aac_fib_map_free(struct aac_dev *dev);
+diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
+index a1f90fe849c9..4cbf54928640 100644
+--- a/drivers/scsi/aacraid/commsup.c
++++ b/drivers/scsi/aacraid/commsup.c
+@@ -83,13 +83,38 @@ static int fib_map_alloc(struct aac_dev *dev)
+ 
+ void aac_fib_map_free(struct aac_dev *dev)
+ {
+-	pci_free_consistent(dev->pdev,
+-	  dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
+-	  dev->hw_fib_va, dev->hw_fib_pa);
++	if (dev->hw_fib_va && dev->max_fib_size) {
++		pci_free_consistent(dev->pdev,
++		(dev->max_fib_size *
++		(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)),
++		dev->hw_fib_va, dev->hw_fib_pa);
++	}
+ 	dev->hw_fib_va = NULL;
+ 	dev->hw_fib_pa = 0;
+ }
+ 
++void aac_fib_vector_assign(struct aac_dev *dev)
++{
++	u32 i = 0;
++	u32 vector = 1;
++	struct fib *fibptr = NULL;
++
++	for (i = 0, fibptr = &dev->fibs[i];
++		i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
++		i++, fibptr++) {
++		if ((dev->max_msix == 1) ||
++		  (i > ((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1)
++			- dev->vector_cap))) {
++			fibptr->vector_no = 0;
++		} else {
++			fibptr->vector_no = vector;
++			vector++;
++			if (vector == dev->max_msix)
++				vector = 1;
++		}
++	}
++}
++
+ /**
+  *	aac_fib_setup	-	setup the fibs
+  *	@dev: Adapter to set up
+@@ -151,6 +176,12 @@ int aac_fib_setup(struct aac_dev * dev)
+ 		hw_fib_pa = hw_fib_pa +
+ 			dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
+ 	}
++
++	/*
++	 *Assign vector numbers to fibs
++	 */
++	aac_fib_vector_assign(dev);
++
+ 	/*
+ 	 *	Add the fib chain to the free list
+ 	 */
+diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
+index 76eaa38ffd6e..8a8e84548d64 100644
+--- a/drivers/scsi/aacraid/linit.c
++++ b/drivers/scsi/aacraid/linit.c
+@@ -1404,8 +1404,18 @@ static int aac_acquire_resources(struct aac_dev *dev)
+ 
+ 	aac_adapter_enable_int(dev);
+ 
+-	if (!dev->sync_mode)
++	/*max msix may change  after EEH
++	 * Re-assign vectors to fibs
++	 */
++	aac_fib_vector_assign(dev);
++
++	if (!dev->sync_mode) {
++		/* After EEH recovery or suspend resume, max_msix count
++		 * may change, therfore updating in init as well.
++		 */
+ 		aac_adapter_start(dev);
++		dev->init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix);
++	}
+ 	return 0;
+ 
+ error_iounmap:
+diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
+index 2aa34ea8ceb1..bc0203f3d243 100644
+--- a/drivers/scsi/aacraid/src.c
++++ b/drivers/scsi/aacraid/src.c
+@@ -156,8 +156,8 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
+ 				break;
+ 			if (dev->msi_enabled && dev->max_msix > 1)
+ 				atomic_dec(&dev->rrq_outstanding[vector_no]);
+-			aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
+ 			dev->host_rrq[index++] = 0;
++			aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
+ 			if (index == (vector_no + 1) * dev->vector_cap)
+ 				index = vector_no * dev->vector_cap;
+ 			dev->host_rrq_idx[vector_no] = index;
+@@ -452,36 +452,20 @@ static int aac_src_deliver_message(struct fib *fib)
+ #endif
+ 
+ 	u16 hdr_size = le16_to_cpu(fib->hw_fib_va->header.Size);
++	u16 vector_no;
+ 
+ 	atomic_inc(&q->numpending);
+ 
+ 	if (dev->msi_enabled && fib->hw_fib_va->header.Command != AifRequest &&
+ 	    dev->max_msix > 1) {
+-		u_int16_t vector_no, first_choice = 0xffff;
+-
+-		vector_no = dev->fibs_pushed_no % dev->max_msix;
+-		do {
+-			vector_no += 1;
+-			if (vector_no == dev->max_msix)
+-				vector_no = 1;
+-			if (atomic_read(&dev->rrq_outstanding[vector_no]) <
+-			    dev->vector_cap)
+-				break;
+-			if (0xffff == first_choice)
+-				first_choice = vector_no;
+-			else if (vector_no == first_choice)
+-				break;
+-		} while (1);
+-		if (vector_no == first_choice)
+-			vector_no = 0;
+-		atomic_inc(&dev->rrq_outstanding[vector_no]);
+-		if (dev->fibs_pushed_no == 0xffffffff)
+-			dev->fibs_pushed_no = 0;
+-		else
+-			dev->fibs_pushed_no++;
++		vector_no = fib->vector_no;
+ 		fib->hw_fib_va->header.Handle += (vector_no << 16);
++	} else {
++		vector_no = 0;
+ 	}
+ 
++	atomic_inc(&dev->rrq_outstanding[vector_no]);
++
+ 	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
+ 		/* Calculate the amount to the fibsize bits */
+ 		fibsize = (hdr_size + 127) / 128 - 1;
+diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
+index b846a4683562..fc6a83188c1e 100644
+--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
++++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
+@@ -1336,6 +1336,7 @@ ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev,
+ 	case AHC_DEV_Q_TAGGED:
+ 		scsi_change_queue_depth(sdev,
+ 				dev->openings + dev->active);
++		break;
+ 	default:
+ 		/*
+ 		 * We allow the OS to queue 2 untagged transactions to
+diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
+index e65478651ca9..389825ba5d96 100644
+--- a/drivers/scsi/atari_NCR5380.c
++++ b/drivers/scsi/atari_NCR5380.c
+@@ -862,7 +862,7 @@ static struct scsi_cmnd *dequeue_next_cmd(struct Scsi_Host *instance)
+ 	struct NCR5380_cmd *ncmd;
+ 	struct scsi_cmnd *cmd;
+ 
+-	if (list_empty(&hostdata->autosense)) {
++	if (hostdata->sensing || list_empty(&hostdata->autosense)) {
+ 		list_for_each_entry(ncmd, &hostdata->unissued, list) {
+ 			cmd = NCR5380_to_scmd(ncmd);
+ 			dsprintk(NDEBUG_QUEUES, instance, "dequeue: cmd=%p target=%d busy=0x%02x lun=%llu\n",
+@@ -901,7 +901,7 @@ static void requeue_cmd(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
+ 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ 	struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
+ 
+-	if (hostdata->sensing) {
++	if (hostdata->sensing == cmd) {
+ 		scsi_eh_restore_cmnd(cmd, &hostdata->ses);
+ 		list_add(&ncmd->list, &hostdata->autosense);
+ 		hostdata->sensing = NULL;
+@@ -923,7 +923,6 @@ static void NCR5380_main(struct work_struct *work)
+ 	struct NCR5380_hostdata *hostdata =
+ 		container_of(work, struct NCR5380_hostdata, main_task);
+ 	struct Scsi_Host *instance = hostdata->host;
+-	struct scsi_cmnd *cmd;
+ 	int done;
+ 
+ 	/*
+@@ -936,8 +935,11 @@ static void NCR5380_main(struct work_struct *work)
+ 		done = 1;
+ 
+ 		spin_lock_irq(&hostdata->lock);
+-		while (!hostdata->connected &&
+-		       (cmd = dequeue_next_cmd(instance))) {
++		while (!hostdata->connected && !hostdata->selecting) {
++			struct scsi_cmnd *cmd = dequeue_next_cmd(instance);
++
++			if (!cmd)
++				break;
+ 
+ 			dsprintk(NDEBUG_MAIN, instance, "main: dequeued %p\n", cmd);
+ 
+@@ -960,8 +962,7 @@ static void NCR5380_main(struct work_struct *work)
+ #ifdef SUPPORT_TAGS
+ 			cmd_get_tag(cmd, cmd->cmnd[0] != REQUEST_SENSE);
+ #endif
+-			cmd = NCR5380_select(instance, cmd);
+-			if (!cmd) {
++			if (!NCR5380_select(instance, cmd)) {
+ 				dsprintk(NDEBUG_MAIN, instance, "main: select complete\n");
+ 				maybe_release_dma_irq(instance);
+ 			} else {
+@@ -1257,6 +1258,11 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
+ 		/* Reselection interrupt */
+ 		goto out;
+ 	}
++	if (!hostdata->selecting) {
++		/* Command was aborted */
++		NCR5380_write(MODE_REG, MR_BASE);
++		goto out;
++	}
+ 	if (err < 0) {
+ 		NCR5380_write(MODE_REG, MR_BASE);
+ 		shost_printk(KERN_ERR, instance,
+@@ -1838,9 +1844,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
+ 	unsigned char msgout = NOP;
+ 	int sink = 0;
+ 	int len;
+-#if defined(REAL_DMA)
+ 	int transfersize;
+-#endif
+ 	unsigned char *data;
+ 	unsigned char phase, tmp, extended_msg[10], old_phase = 0xff;
+ 	struct scsi_cmnd *cmd;
+@@ -1909,6 +1913,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
+ 				do_abort(instance);
+ 				cmd->result = DID_ERROR << 16;
+ 				complete_cmd(instance, cmd);
++				hostdata->connected = NULL;
+ 				return;
+ #endif
+ 			case PHASE_DATAIN:
+@@ -1966,7 +1971,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
+ 						sink = 1;
+ 						do_abort(instance);
+ 						cmd->result = DID_ERROR << 16;
+-						complete_cmd(instance, cmd);
+ 						/* XXX - need to source or sink data here, as appropriate */
+ 					} else {
+ #ifdef REAL_DMA
+@@ -1983,18 +1987,22 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
+ 				} else
+ #endif /* defined(REAL_DMA) */
+ 				{
+-					spin_unlock_irq(&hostdata->lock);
+-					NCR5380_transfer_pio(instance, &phase,
+-					                     (int *)&cmd->SCp.this_residual,
++					/* Break up transfer into 3 ms chunks,
++					 * presuming 6 accesses per handshake.
++					 */
++					transfersize = min((unsigned long)cmd->SCp.this_residual,
++					                   hostdata->accesses_per_ms / 2);
++					len = transfersize;
++					NCR5380_transfer_pio(instance, &phase, &len,
+ 					                     (unsigned char **)&cmd->SCp.ptr);
+-					spin_lock_irq(&hostdata->lock);
++					cmd->SCp.this_residual -= transfersize - len;
+ 				}
+ #if defined(CONFIG_SUN3) && defined(REAL_DMA)
+ 				/* if we had intended to dma that command clear it */
+ 				if (sun3_dma_setup_done == cmd)
+ 					sun3_dma_setup_done = NULL;
+ #endif
+-				break;
++				return;
+ 			case PHASE_MSGIN:
+ 				len = 1;
+ 				data = &tmp;
+@@ -2487,14 +2495,17 @@ static bool list_del_cmd(struct list_head *haystack,
+  * [disconnected -> connected ->]...
+  * [autosense -> connected ->] done
+  *
+- * If cmd is unissued then just remove it.
+- * If cmd is disconnected, try to select the target.
+- * If cmd is connected, try to send an abort message.
+- * If cmd is waiting for autosense, give it a chance to complete but check
+- * that it isn't left connected.
+  * If cmd was not found at all then presumably it has already been completed,
+  * in which case return SUCCESS to try to avoid further EH measures.
++ *
+  * If the command has not completed yet, we must not fail to find it.
++ * We have no option but to forget the aborted command (even if it still
++ * lacks sense data). The mid-layer may re-issue a command that is in error
++ * recovery (see scsi_send_eh_cmnd), but the logic and data structures in
++ * this driver are such that a command can appear on one queue only.
++ *
++ * The lock protects driver data structures, but EH handlers also use it
++ * to serialize their own execution and prevent their own re-entry.
+  */
+ 
+ static int NCR5380_abort(struct scsi_cmnd *cmd)
+@@ -2517,6 +2528,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
+ 		         "abort: removed %p from issue queue\n", cmd);
+ 		cmd->result = DID_ABORT << 16;
+ 		cmd->scsi_done(cmd); /* No tag or busy flag to worry about */
++		goto out;
+ 	}
+ 
+ 	if (hostdata->selecting == cmd) {
+@@ -2531,58 +2543,21 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
+ 	if (list_del_cmd(&hostdata->disconnected, cmd)) {
+ 		dsprintk(NDEBUG_ABORT, instance,
+ 		         "abort: removed %p from disconnected list\n", cmd);
+-		cmd->result = DID_ERROR << 16;
+-		if (!hostdata->connected)
+-			NCR5380_select(instance, cmd);
+-		if (hostdata->connected != cmd) {
+-			complete_cmd(instance, cmd);
+-			result = FAILED;
+-			goto out;
+-		}
++		/* Can't call NCR5380_select() and send ABORT because that
++		 * means releasing the lock. Need a bus reset.
++		 */
++		set_host_byte(cmd, DID_ERROR);
++		complete_cmd(instance, cmd);
++		result = FAILED;
++		goto out;
+ 	}
+ 
+ 	if (hostdata->connected == cmd) {
+ 		dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
+ 		hostdata->connected = NULL;
+-		if (do_abort(instance)) {
+-			set_host_byte(cmd, DID_ERROR);
+-			complete_cmd(instance, cmd);
+-			result = FAILED;
+-			goto out;
+-		}
+-		set_host_byte(cmd, DID_ABORT);
+ #ifdef REAL_DMA
+ 		hostdata->dma_len = 0;
+ #endif
+-		if (cmd->cmnd[0] == REQUEST_SENSE)
+-			complete_cmd(instance, cmd);
+-		else {
+-			struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
+-
+-			/* Perform autosense for this command */
+-			list_add(&ncmd->list, &hostdata->autosense);
+-		}
+-	}
+-
+-	if (list_find_cmd(&hostdata->autosense, cmd)) {
+-		dsprintk(NDEBUG_ABORT, instance,
+-		         "abort: found %p on sense queue\n", cmd);
+-		spin_unlock_irqrestore(&hostdata->lock, flags);
+-		queue_work(hostdata->work_q, &hostdata->main_task);
+-		msleep(1000);
+-		spin_lock_irqsave(&hostdata->lock, flags);
+-		if (list_del_cmd(&hostdata->autosense, cmd)) {
+-			dsprintk(NDEBUG_ABORT, instance,
+-			         "abort: removed %p from sense queue\n", cmd);
+-			set_host_byte(cmd, DID_ABORT);
+-			complete_cmd(instance, cmd);
+-			goto out;
+-		}
+-	}
+-
+-	if (hostdata->connected == cmd) {
+-		dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
+-		hostdata->connected = NULL;
+ 		if (do_abort(instance)) {
+ 			set_host_byte(cmd, DID_ERROR);
+ 			complete_cmd(instance, cmd);
+@@ -2590,9 +2565,14 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
+ 			goto out;
+ 		}
+ 		set_host_byte(cmd, DID_ABORT);
+-#ifdef REAL_DMA
+-		hostdata->dma_len = 0;
+-#endif
++		complete_cmd(instance, cmd);
++		goto out;
++	}
++
++	if (list_del_cmd(&hostdata->autosense, cmd)) {
++		dsprintk(NDEBUG_ABORT, instance,
++		         "abort: removed %p from sense queue\n", cmd);
++		set_host_byte(cmd, DID_ERROR);
+ 		complete_cmd(instance, cmd);
+ 	}
+ 
+@@ -2646,7 +2626,16 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
+ 	 * commands!
+ 	 */
+ 
+-	hostdata->selecting = NULL;
++	if (list_del_cmd(&hostdata->unissued, cmd)) {
++		cmd->result = DID_RESET << 16;
++		cmd->scsi_done(cmd);
++	}
++
++	if (hostdata->selecting) {
++		hostdata->selecting->result = DID_RESET << 16;
++		complete_cmd(instance, hostdata->selecting);
++		hostdata->selecting = NULL;
++	}
+ 
+ 	list_for_each_entry(ncmd, &hostdata->disconnected, list) {
+ 		struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
+@@ -2654,6 +2643,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
+ 		set_host_byte(cmd, DID_RESET);
+ 		cmd->scsi_done(cmd);
+ 	}
++	INIT_LIST_HEAD(&hostdata->disconnected);
+ 
+ 	list_for_each_entry(ncmd, &hostdata->autosense, list) {
+ 		struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
+@@ -2661,6 +2651,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
+ 		set_host_byte(cmd, DID_RESET);
+ 		cmd->scsi_done(cmd);
+ 	}
++	INIT_LIST_HEAD(&hostdata->autosense);
+ 
+ 	if (hostdata->connected) {
+ 		set_host_byte(hostdata->connected, DID_RESET);
+@@ -2668,12 +2659,6 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
+ 		hostdata->connected = NULL;
+ 	}
+ 
+-	if (hostdata->sensing) {
+-		set_host_byte(hostdata->connected, DID_RESET);
+-		complete_cmd(instance, hostdata->sensing);
+-		hostdata->sensing = NULL;
+-	}
+-
+ #ifdef SUPPORT_TAGS
+ 	free_all_tags(hostdata);
+ #endif
+diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
+index cb9072a841be..069e5c50abd0 100644
+--- a/drivers/scsi/be2iscsi/be_main.c
++++ b/drivers/scsi/be2iscsi/be_main.c
+@@ -4468,6 +4468,7 @@ put_shost:
+ 	scsi_host_put(phba->shost);
+ free_kset:
+ 	iscsi_boot_destroy_kset(phba->boot_kset);
++	phba->boot_kset = NULL;
+ 	return -ENOMEM;
+ }
+ 
+diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c
+index c126966130ab..ce79de822e46 100644
+--- a/drivers/scsi/scsi_common.c
++++ b/drivers/scsi/scsi_common.c
+@@ -278,8 +278,16 @@ int scsi_set_sense_information(u8 *buf, int buf_len, u64 info)
+ 		ucp[3] = 0;
+ 		put_unaligned_be64(info, &ucp[4]);
+ 	} else if ((buf[0] & 0x7f) == 0x70) {
+-		buf[0] |= 0x80;
+-		put_unaligned_be64(info, &buf[3]);
++		/*
++		 * Only set the 'VALID' bit if we can represent the value
++		 * correctly; otherwise just fill out the lower bytes and
++		 * clear the 'VALID' flag.
++		 */
++		if (info <= 0xffffffffUL)
++			buf[0] |= 0x80;
++		else
++			buf[0] &= 0x7f;
++		put_unaligned_be32((u32)info, &buf[3]);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index d749da765df1..5a5457ac9cdb 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -648,7 +648,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
+ 	 */
+ 	if (sdkp->lbprz) {
+ 		q->limits.discard_alignment = 0;
+-		q->limits.discard_granularity = 1;
++		q->limits.discard_granularity = logical_block_size;
+ 	} else {
+ 		q->limits.discard_alignment = sdkp->unmap_alignment *
+ 			logical_block_size;
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index 5e820674432c..ae7d9bdf409c 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -652,7 +652,8 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
+ 	else
+ 		hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
+ 	hp->dxfer_len = mxsize;
+-	if (hp->dxfer_direction == SG_DXFER_TO_DEV)
++	if ((hp->dxfer_direction == SG_DXFER_TO_DEV) ||
++	    (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV))
+ 		hp->dxferp = (char __user *)buf + cmd_size;
+ 	else
+ 		hp->dxferp = NULL;
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 292c04eec9ad..3ddcabb790a8 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -914,8 +914,9 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
+ 		do_work = true;
+ 		process_err_fn = storvsc_remove_lun;
+ 		break;
+-	case (SRB_STATUS_ABORTED | SRB_STATUS_AUTOSENSE_VALID):
+-		if ((asc == 0x2a) && (ascq == 0x9)) {
++	case SRB_STATUS_ABORTED:
++		if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID &&
++		    (asc == 0x2a) && (ascq == 0x9)) {
+ 			do_work = true;
+ 			process_err_fn = storvsc_device_scan;
+ 			/*
+diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c
+index b8dcf5a26cc4..58d46893e5ff 100644
+--- a/drivers/staging/android/ion/ion_test.c
++++ b/drivers/staging/android/ion/ion_test.c
+@@ -285,8 +285,8 @@ static int __init ion_test_init(void)
+ {
+ 	ion_test_pdev = platform_device_register_simple("ion-test",
+ 							-1, NULL, 0);
+-	if (!ion_test_pdev)
+-		return -ENODEV;
++	if (IS_ERR(ion_test_pdev))
++		return PTR_ERR(ion_test_pdev);
+ 
+ 	return platform_driver_probe(&ion_test_platform_driver, ion_test_probe);
+ }
+diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
+index 5e8130a7d670..0e9f77924e26 100644
+--- a/drivers/staging/comedi/drivers/ni_mio_common.c
++++ b/drivers/staging/comedi/drivers/ni_mio_common.c
+@@ -246,24 +246,24 @@ static void ni_writel(struct comedi_device *dev, uint32_t data, int reg)
+ {
+ 	if (dev->mmio)
+ 		writel(data, dev->mmio + reg);
+-
+-	outl(data, dev->iobase + reg);
++	else
++		outl(data, dev->iobase + reg);
+ }
+ 
+ static void ni_writew(struct comedi_device *dev, uint16_t data, int reg)
+ {
+ 	if (dev->mmio)
+ 		writew(data, dev->mmio + reg);
+-
+-	outw(data, dev->iobase + reg);
++	else
++		outw(data, dev->iobase + reg);
+ }
+ 
+ static void ni_writeb(struct comedi_device *dev, uint8_t data, int reg)
+ {
+ 	if (dev->mmio)
+ 		writeb(data, dev->mmio + reg);
+-
+-	outb(data, dev->iobase + reg);
++	else
++		outb(data, dev->iobase + reg);
+ }
+ 
+ static uint32_t ni_readl(struct comedi_device *dev, int reg)
+diff --git a/drivers/staging/comedi/drivers/ni_tiocmd.c b/drivers/staging/comedi/drivers/ni_tiocmd.c
+index 437f723bb34d..823e47910004 100644
+--- a/drivers/staging/comedi/drivers/ni_tiocmd.c
++++ b/drivers/staging/comedi/drivers/ni_tiocmd.c
+@@ -92,7 +92,7 @@ static int ni_tio_input_inttrig(struct comedi_device *dev,
+ 	unsigned long flags;
+ 	int ret = 0;
+ 
+-	if (trig_num != cmd->start_src)
++	if (trig_num != cmd->start_arg)
+ 		return -EINVAL;
+ 
+ 	spin_lock_irqsave(&counter->lock, flags);
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 867bc6d0a68a..43d8b42c0f22 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -2596,8 +2596,6 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
+ 
+ 	list_for_each_entry_safe(se_cmd, tmp_cmd,
+ 				&se_sess->sess_wait_list, se_cmd_list) {
+-		list_del_init(&se_cmd->se_cmd_list);
+-
+ 		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
+ 			" %d\n", se_cmd, se_cmd->t_state,
+ 			se_cmd->se_tfo->get_cmd_state(se_cmd));
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index a0a8fd1235e2..d4b54653ecf8 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -454,6 +454,10 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip)
+ {
+ 	enum thermal_trip_type type;
+ 
++	/* Ignore disabled trip points */
++	if (test_bit(trip, &tz->trips_disabled))
++		return;
++
+ 	tz->ops->get_trip_type(tz, trip, &type);
+ 
+ 	if (type == THERMAL_TRIP_CRITICAL || type == THERMAL_TRIP_HOT)
+@@ -1800,6 +1804,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
+ {
+ 	struct thermal_zone_device *tz;
+ 	enum thermal_trip_type trip_type;
++	int trip_temp;
+ 	int result;
+ 	int count;
+ 	int passive = 0;
+@@ -1871,9 +1876,15 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
+ 		goto unregister;
+ 
+ 	for (count = 0; count < trips; count++) {
+-		tz->ops->get_trip_type(tz, count, &trip_type);
++		if (tz->ops->get_trip_type(tz, count, &trip_type))
++			set_bit(count, &tz->trips_disabled);
+ 		if (trip_type == THERMAL_TRIP_PASSIVE)
+ 			passive = 1;
++		if (tz->ops->get_trip_temp(tz, count, &trip_temp))
++			set_bit(count, &tz->trips_disabled);
++		/* Check for bogus trip points */
++		if (trip_temp == 0)
++			set_bit(count, &tz->trips_disabled);
+ 	}
+ 
+ 	if (!passive) {
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index 8d262bce97e4..720b9465b12e 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -731,22 +731,16 @@ static int size_fifo(struct uart_8250_port *up)
+  */
+ static unsigned int autoconfig_read_divisor_id(struct uart_8250_port *p)
+ {
+-	unsigned char old_dll, old_dlm, old_lcr;
+-	unsigned int id;
++	unsigned char old_lcr;
++	unsigned int id, old_dl;
+ 
+ 	old_lcr = serial_in(p, UART_LCR);
+ 	serial_out(p, UART_LCR, UART_LCR_CONF_MODE_A);
++	old_dl = serial_dl_read(p);
++	serial_dl_write(p, 0);
++	id = serial_dl_read(p);
++	serial_dl_write(p, old_dl);
+ 
+-	old_dll = serial_in(p, UART_DLL);
+-	old_dlm = serial_in(p, UART_DLM);
+-
+-	serial_out(p, UART_DLL, 0);
+-	serial_out(p, UART_DLM, 0);
+-
+-	id = serial_in(p, UART_DLL) | serial_in(p, UART_DLM) << 8;
+-
+-	serial_out(p, UART_DLL, old_dll);
+-	serial_out(p, UART_DLM, old_dlm);
+ 	serial_out(p, UART_LCR, old_lcr);
+ 
+ 	return id;
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index fa4e23930614..d37fdcc3143c 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1114,6 +1114,9 @@ static int acm_probe(struct usb_interface *intf,
+ 	if (quirks == NO_UNION_NORMAL) {
+ 		data_interface = usb_ifnum_to_if(usb_dev, 1);
+ 		control_interface = usb_ifnum_to_if(usb_dev, 0);
++		/* we would crash */
++		if (!data_interface || !control_interface)
++			return -ENODEV;
+ 		goto skip_normal_probe;
+ 	}
+ 
+diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
+index 56593a9a8726..2057d91d8336 100644
+--- a/drivers/usb/core/driver.c
++++ b/drivers/usb/core/driver.c
+@@ -502,11 +502,15 @@ static int usb_unbind_interface(struct device *dev)
+ int usb_driver_claim_interface(struct usb_driver *driver,
+ 				struct usb_interface *iface, void *priv)
+ {
+-	struct device *dev = &iface->dev;
++	struct device *dev;
+ 	struct usb_device *udev;
+ 	int retval = 0;
+ 	int lpm_disable_error;
+ 
++	if (!iface)
++		return -ENODEV;
++
++	dev = &iface->dev;
+ 	if (dev->driver)
+ 		return -EBUSY;
+ 
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 51b436918f78..84f65743f29a 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -4292,7 +4292,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+ {
+ 	struct usb_device	*hdev = hub->hdev;
+ 	struct usb_hcd		*hcd = bus_to_hcd(hdev->bus);
+-	int			i, j, retval;
++	int			retries, operations, retval, i;
+ 	unsigned		delay = HUB_SHORT_RESET_TIME;
+ 	enum usb_device_speed	oldspeed = udev->speed;
+ 	const char		*speed;
+@@ -4394,7 +4394,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+ 	 * first 8 bytes of the device descriptor to get the ep0 maxpacket
+ 	 * value.
+ 	 */
+-	for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) {
++	for (retries = 0; retries < GET_DESCRIPTOR_TRIES; (++retries, msleep(100))) {
+ 		bool did_new_scheme = false;
+ 
+ 		if (use_new_scheme(udev, retry_counter)) {
+@@ -4421,7 +4421,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+ 			 * 255 is for WUSB devices, we actually need to use
+ 			 * 512 (WUSB1.0[4.8.1]).
+ 			 */
+-			for (j = 0; j < 3; ++j) {
++			for (operations = 0; operations < 3; ++operations) {
+ 				buf->bMaxPacketSize0 = 0;
+ 				r = usb_control_msg(udev, usb_rcvaddr0pipe(),
+ 					USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
+@@ -4441,7 +4441,13 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+ 						r = -EPROTO;
+ 					break;
+ 				}
+-				if (r == 0)
++				/*
++				 * Some devices time out if they are powered on
++				 * when already connected. They need a second
++				 * reset. But only on the first attempt,
++				 * lest we get into a time out/reset loop
++				 */
++				if (r == 0  || (r == -ETIMEDOUT && retries == 0))
+ 					break;
+ 			}
+ 			udev->descriptor.bMaxPacketSize0 =
+@@ -4473,7 +4479,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+ 		 * authorization will assign the final address.
+ 		 */
+ 		if (udev->wusb == 0) {
+-			for (j = 0; j < SET_ADDRESS_TRIES; ++j) {
++			for (operations = 0; operations < SET_ADDRESS_TRIES; ++operations) {
+ 				retval = hub_set_address(udev, devnum);
+ 				if (retval >= 0)
+ 					break;
+diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
+index c6bfd13f6c92..1950e87b4219 100644
+--- a/drivers/usb/misc/iowarrior.c
++++ b/drivers/usb/misc/iowarrior.c
+@@ -787,6 +787,12 @@ static int iowarrior_probe(struct usb_interface *interface,
+ 	iface_desc = interface->cur_altsetting;
+ 	dev->product_id = le16_to_cpu(udev->descriptor.idProduct);
+ 
++	if (iface_desc->desc.bNumEndpoints < 1) {
++		dev_err(&interface->dev, "Invalid number of endpoints\n");
++		retval = -EINVAL;
++		goto error;
++	}
++
+ 	/* set up the endpoint information */
+ 	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+ 		endpoint = &iface_desc->endpoint[i].desc;
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 73a366de5102..9bc0e090b881 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -165,6 +165,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
+ 	{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
+ 	{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
++	{ USB_DEVICE(0x1901, 0x0194) },	/* GE Healthcare Remote Alarm Box */
+ 	{ USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
+ 	{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+ 	{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
+diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
+index 01bf53392819..244acb1299a9 100644
+--- a/drivers/usb/serial/cypress_m8.c
++++ b/drivers/usb/serial/cypress_m8.c
+@@ -447,6 +447,11 @@ static int cypress_generic_port_probe(struct usb_serial_port *port)
+ 	struct usb_serial *serial = port->serial;
+ 	struct cypress_private *priv;
+ 
++	if (!port->interrupt_out_urb || !port->interrupt_in_urb) {
++		dev_err(&port->dev, "required endpoint is missing\n");
++		return -ENODEV;
++	}
++
+ 	priv = kzalloc(sizeof(struct cypress_private), GFP_KERNEL);
+ 	if (!priv)
+ 		return -ENOMEM;
+@@ -606,12 +611,6 @@ static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port)
+ 		cypress_set_termios(tty, port, &priv->tmp_termios);
+ 
+ 	/* setup the port and start reading from the device */
+-	if (!port->interrupt_in_urb) {
+-		dev_err(&port->dev, "%s - interrupt_in_urb is empty!\n",
+-			__func__);
+-		return -1;
+-	}
+-
+ 	usb_fill_int_urb(port->interrupt_in_urb, serial->dev,
+ 		usb_rcvintpipe(serial->dev, port->interrupt_in_endpointAddress),
+ 		port->interrupt_in_urb->transfer_buffer,
+diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
+index 12b0e67473ba..3df7b7ec178e 100644
+--- a/drivers/usb/serial/digi_acceleport.c
++++ b/drivers/usb/serial/digi_acceleport.c
+@@ -1251,8 +1251,27 @@ static int digi_port_init(struct usb_serial_port *port, unsigned port_num)
+ 
+ static int digi_startup(struct usb_serial *serial)
+ {
++	struct device *dev = &serial->interface->dev;
+ 	struct digi_serial *serial_priv;
+ 	int ret;
++	int i;
++
++	/* check whether the device has the expected number of endpoints */
++	if (serial->num_port_pointers < serial->type->num_ports + 1) {
++		dev_err(dev, "OOB endpoints missing\n");
++		return -ENODEV;
++	}
++
++	for (i = 0; i < serial->type->num_ports + 1 ; i++) {
++		if (!serial->port[i]->read_urb) {
++			dev_err(dev, "bulk-in endpoint missing\n");
++			return -ENODEV;
++		}
++		if (!serial->port[i]->write_urb) {
++			dev_err(dev, "bulk-out endpoint missing\n");
++			return -ENODEV;
++		}
++	}
+ 
+ 	serial_priv = kzalloc(sizeof(*serial_priv), GFP_KERNEL);
+ 	if (!serial_priv)
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 8c660ae401d8..b61f12160d37 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1004,6 +1004,10 @@ static const struct usb_device_id id_table_combined[] = {
+ 	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) },
+ 	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) },
+ 	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) },
++	/* ICP DAS I-756xU devices */
++	{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) },
++	{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
++	{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
+ 	{ }					/* Terminating entry */
+ };
+ 
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index a84df2513994..c5d6c1e73e8e 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -872,6 +872,14 @@
+ #define NOVITUS_BONO_E_PID		0x6010
+ 
+ /*
++ * ICPDAS I-756*U devices
++ */
++#define ICPDAS_VID			0x1b5c
++#define ICPDAS_I7560U_PID		0x0103
++#define ICPDAS_I7561U_PID		0x0104
++#define ICPDAS_I7563U_PID		0x0105
++
++/*
+  * RT Systems programming cables for various ham radios
+  */
+ #define RTSYSTEMS_VID		0x2100	/* Vendor ID */
+diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
+index fd707d6a10e2..89726f702202 100644
+--- a/drivers/usb/serial/mct_u232.c
++++ b/drivers/usb/serial/mct_u232.c
+@@ -376,14 +376,21 @@ static void mct_u232_msr_to_state(struct usb_serial_port *port,
+ 
+ static int mct_u232_port_probe(struct usb_serial_port *port)
+ {
++	struct usb_serial *serial = port->serial;
+ 	struct mct_u232_private *priv;
+ 
++	/* check first to simplify error handling */
++	if (!serial->port[1] || !serial->port[1]->interrupt_in_urb) {
++		dev_err(&port->dev, "expected endpoint missing\n");
++		return -ENODEV;
++	}
++
+ 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ 	if (!priv)
+ 		return -ENOMEM;
+ 
+ 	/* Use second interrupt-in endpoint for reading. */
+-	priv->read_urb = port->serial->port[1]->interrupt_in_urb;
++	priv->read_urb = serial->port[1]->interrupt_in_urb;
+ 	priv->read_urb->context = port;
+ 
+ 	spin_lock_init(&priv->lock);
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 348e19834b83..c6f497f16526 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1818,6 +1818,8 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
++	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff),			/* D-Link DWM-221 B1 */
++	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },                /* OLICARD300 - MT6225 */
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index 9ff9404f99d7..c90a7e46cc7b 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -812,7 +812,7 @@ static struct scsi_host_template uas_host_template = {
+ 	.slave_configure = uas_slave_configure,
+ 	.eh_abort_handler = uas_eh_abort_handler,
+ 	.eh_bus_reset_handler = uas_eh_bus_reset_handler,
+-	.can_queue = 65536,	/* Is there a limit on the _host_ ? */
++	.can_queue = MAX_CMNDS,
+ 	.this_id = -1,
+ 	.sg_tablesize = SG_NONE,
+ 	.skip_settle_delay = 1,
+diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
+index 71e78ef4b736..3a75f3b53452 100644
+--- a/drivers/watchdog/rc32434_wdt.c
++++ b/drivers/watchdog/rc32434_wdt.c
+@@ -237,7 +237,7 @@ static long rc32434_wdt_ioctl(struct file *file, unsigned int cmd,
+ 			return -EINVAL;
+ 		/* Fall through */
+ 	case WDIOC_GETTIMEOUT:
+-		return copy_to_user(argp, &timeout, sizeof(int));
++		return copy_to_user(argp, &timeout, sizeof(int)) ? -EFAULT : 0;
+ 	default:
+ 		return -ENOTTY;
+ 	}
+diff --git a/fs/coredump.c b/fs/coredump.c
+index 9ea87e9fdccf..47c32c3bfa1d 100644
+--- a/fs/coredump.c
++++ b/fs/coredump.c
+@@ -32,6 +32,9 @@
+ #include <linux/pipe_fs_i.h>
+ #include <linux/oom.h>
+ #include <linux/compat.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/path.h>
+ #include <linux/timekeeping.h>
+ 
+ #include <asm/uaccess.h>
+@@ -649,6 +652,8 @@ void do_coredump(const siginfo_t *siginfo)
+ 		}
+ 	} else {
+ 		struct inode *inode;
++		int open_flags = O_CREAT | O_RDWR | O_NOFOLLOW |
++				 O_LARGEFILE | O_EXCL;
+ 
+ 		if (cprm.limit < binfmt->min_coredump)
+ 			goto fail_unlock;
+@@ -687,10 +692,27 @@ void do_coredump(const siginfo_t *siginfo)
+ 		 * what matters is that at least one of the two processes
+ 		 * writes its coredump successfully, not which one.
+ 		 */
+-		cprm.file = filp_open(cn.corename,
+-				 O_CREAT | 2 | O_NOFOLLOW |
+-				 O_LARGEFILE | O_EXCL,
+-				 0600);
++		if (need_suid_safe) {
++			/*
++			 * Using user namespaces, normal user tasks can change
++			 * their current->fs->root to point to arbitrary
++			 * directories. Since the intention of the "only dump
++			 * with a fully qualified path" rule is to control where
++			 * coredumps may be placed using root privileges,
++			 * current->fs->root must not be used. Instead, use the
++			 * root directory of init_task.
++			 */
++			struct path root;
++
++			task_lock(&init_task);
++			get_fs_root(init_task.fs, &root);
++			task_unlock(&init_task);
++			cprm.file = file_open_root(root.dentry, root.mnt,
++				cn.corename, open_flags, 0600);
++			path_put(&root);
++		} else {
++			cprm.file = filp_open(cn.corename, open_flags, 0600);
++		}
+ 		if (IS_ERR(cprm.file))
+ 			goto fail_unlock;
+ 
+diff --git a/fs/fhandle.c b/fs/fhandle.c
+index d59712dfa3e7..ca3c3dd01789 100644
+--- a/fs/fhandle.c
++++ b/fs/fhandle.c
+@@ -228,7 +228,7 @@ long do_handle_open(int mountdirfd,
+ 		path_put(&path);
+ 		return fd;
+ 	}
+-	file = file_open_root(path.dentry, path.mnt, "", open_flag);
++	file = file_open_root(path.dentry, path.mnt, "", open_flag, 0);
+ 	if (IS_ERR(file)) {
+ 		put_unused_fd(fd);
+ 		retval =  PTR_ERR(file);
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 5c46ed9f3e14..fee81e8768c9 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -281,13 +281,15 @@ locked_inode_to_wb_and_lock_list(struct inode *inode)
+ 		wb_get(wb);
+ 		spin_unlock(&inode->i_lock);
+ 		spin_lock(&wb->list_lock);
+-		wb_put(wb);		/* not gonna deref it anymore */
+ 
+ 		/* i_wb may have changed inbetween, can't use inode_to_wb() */
+-		if (likely(wb == inode->i_wb))
+-			return wb;	/* @inode already has ref */
++		if (likely(wb == inode->i_wb)) {
++			wb_put(wb);	/* @inode already has ref */
++			return wb;
++		}
+ 
+ 		spin_unlock(&wb->list_lock);
++		wb_put(wb);
+ 		cpu_relax();
+ 		spin_lock(&inode->i_lock);
+ 	}
+@@ -1337,10 +1339,10 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
+  * we go e.g. from filesystem. Flusher thread uses __writeback_single_inode()
+  * and does more profound writeback list handling in writeback_sb_inodes().
+  */
+-static int
+-writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
+-		       struct writeback_control *wbc)
++static int writeback_single_inode(struct inode *inode,
++				  struct writeback_control *wbc)
+ {
++	struct bdi_writeback *wb;
+ 	int ret = 0;
+ 
+ 	spin_lock(&inode->i_lock);
+@@ -1378,7 +1380,8 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
+ 	ret = __writeback_single_inode(inode, wbc);
+ 
+ 	wbc_detach_inode(wbc);
+-	spin_lock(&wb->list_lock);
++
++	wb = inode_to_wb_and_lock_list(inode);
+ 	spin_lock(&inode->i_lock);
+ 	/*
+ 	 * If inode is clean, remove it from writeback lists. Otherwise don't
+@@ -1453,6 +1456,7 @@ static long writeback_sb_inodes(struct super_block *sb,
+ 
+ 	while (!list_empty(&wb->b_io)) {
+ 		struct inode *inode = wb_inode(wb->b_io.prev);
++		struct bdi_writeback *tmp_wb;
+ 
+ 		if (inode->i_sb != sb) {
+ 			if (work->sb) {
+@@ -1543,15 +1547,23 @@ static long writeback_sb_inodes(struct super_block *sb,
+ 			cond_resched();
+ 		}
+ 
+-
+-		spin_lock(&wb->list_lock);
++		/*
++		 * Requeue @inode if still dirty.  Be careful as @inode may
++		 * have been switched to another wb in the meantime.
++		 */
++		tmp_wb = inode_to_wb_and_lock_list(inode);
+ 		spin_lock(&inode->i_lock);
+ 		if (!(inode->i_state & I_DIRTY_ALL))
+ 			wrote++;
+-		requeue_inode(inode, wb, &wbc);
++		requeue_inode(inode, tmp_wb, &wbc);
+ 		inode_sync_complete(inode);
+ 		spin_unlock(&inode->i_lock);
+ 
++		if (unlikely(tmp_wb != wb)) {
++			spin_unlock(&tmp_wb->list_lock);
++			spin_lock(&wb->list_lock);
++		}
++
+ 		/*
+ 		 * bail out to wb_writeback() often enough to check
+ 		 * background threshold and other termination conditions.
+@@ -2338,7 +2350,6 @@ EXPORT_SYMBOL(sync_inodes_sb);
+  */
+ int write_inode_now(struct inode *inode, int sync)
+ {
+-	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
+ 	struct writeback_control wbc = {
+ 		.nr_to_write = LONG_MAX,
+ 		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
+@@ -2350,7 +2361,7 @@ int write_inode_now(struct inode *inode, int sync)
+ 		wbc.nr_to_write = 0;
+ 
+ 	might_sleep();
+-	return writeback_single_inode(inode, wb, &wbc);
++	return writeback_single_inode(inode, &wbc);
+ }
+ EXPORT_SYMBOL(write_inode_now);
+ 
+@@ -2367,7 +2378,7 @@ EXPORT_SYMBOL(write_inode_now);
+  */
+ int sync_inode(struct inode *inode, struct writeback_control *wbc)
+ {
+-	return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
++	return writeback_single_inode(inode, wbc);
+ }
+ EXPORT_SYMBOL(sync_inode);
+ 
+diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
+index 8e3ee1936c7e..c5b6b7165489 100644
+--- a/fs/fuse/cuse.c
++++ b/fs/fuse/cuse.c
+@@ -90,7 +90,7 @@ static struct list_head *cuse_conntbl_head(dev_t devt)
+ 
+ static ssize_t cuse_read_iter(struct kiocb *kiocb, struct iov_iter *to)
+ {
+-	struct fuse_io_priv io = { .async = 0, .file = kiocb->ki_filp };
++	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(kiocb->ki_filp);
+ 	loff_t pos = 0;
+ 
+ 	return fuse_direct_io(&io, to, &pos, FUSE_DIO_CUSE);
+@@ -98,7 +98,7 @@ static ssize_t cuse_read_iter(struct kiocb *kiocb, struct iov_iter *to)
+ 
+ static ssize_t cuse_write_iter(struct kiocb *kiocb, struct iov_iter *from)
+ {
+-	struct fuse_io_priv io = { .async = 0, .file = kiocb->ki_filp };
++	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(kiocb->ki_filp);
+ 	loff_t pos = 0;
+ 	/*
+ 	 * No locking or generic_write_checks(), the server is
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index b03d253ece15..416108b42412 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -528,6 +528,11 @@ static void fuse_release_user_pages(struct fuse_req *req, int write)
+ 	}
+ }
+ 
++static void fuse_io_release(struct kref *kref)
++{
++	kfree(container_of(kref, struct fuse_io_priv, refcnt));
++}
++
+ static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
+ {
+ 	if (io->err)
+@@ -585,8 +590,9 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
+ 		}
+ 
+ 		io->iocb->ki_complete(io->iocb, res, 0);
+-		kfree(io);
+ 	}
++
++	kref_put(&io->refcnt, fuse_io_release);
+ }
+ 
+ static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
+@@ -613,6 +619,7 @@ static size_t fuse_async_req_send(struct fuse_conn *fc, struct fuse_req *req,
+ 		size_t num_bytes, struct fuse_io_priv *io)
+ {
+ 	spin_lock(&io->lock);
++	kref_get(&io->refcnt);
+ 	io->size += num_bytes;
+ 	io->reqs++;
+ 	spin_unlock(&io->lock);
+@@ -691,7 +698,7 @@ static void fuse_short_read(struct fuse_req *req, struct inode *inode,
+ 
+ static int fuse_do_readpage(struct file *file, struct page *page)
+ {
+-	struct fuse_io_priv io = { .async = 0, .file = file };
++	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
+ 	struct inode *inode = page->mapping->host;
+ 	struct fuse_conn *fc = get_fuse_conn(inode);
+ 	struct fuse_req *req;
+@@ -984,7 +991,7 @@ static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
+ 	size_t res;
+ 	unsigned offset;
+ 	unsigned i;
+-	struct fuse_io_priv io = { .async = 0, .file = file };
++	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
+ 
+ 	for (i = 0; i < req->num_pages; i++)
+ 		fuse_wait_on_page_writeback(inode, req->pages[i]->index);
+@@ -1398,7 +1405,7 @@ static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
+ 
+ static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
+ {
+-	struct fuse_io_priv io = { .async = 0, .file = iocb->ki_filp };
++	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb->ki_filp);
+ 	return __fuse_direct_read(&io, to, &iocb->ki_pos);
+ }
+ 
+@@ -1406,7 +1413,7 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ {
+ 	struct file *file = iocb->ki_filp;
+ 	struct inode *inode = file_inode(file);
+-	struct fuse_io_priv io = { .async = 0, .file = file };
++	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
+ 	ssize_t res;
+ 
+ 	if (is_bad_inode(inode))
+@@ -2843,6 +2850,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
+ 	loff_t i_size;
+ 	size_t count = iov_iter_count(iter);
+ 	struct fuse_io_priv *io;
++	bool is_sync = is_sync_kiocb(iocb);
+ 
+ 	pos = offset;
+ 	inode = file->f_mapping->host;
+@@ -2863,6 +2871,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
+ 	if (!io)
+ 		return -ENOMEM;
+ 	spin_lock_init(&io->lock);
++	kref_init(&io->refcnt);
+ 	io->reqs = 1;
+ 	io->bytes = -1;
+ 	io->size = 0;
+@@ -2882,12 +2891,18 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
+ 	 * to wait on real async I/O requests, so we must submit this request
+ 	 * synchronously.
+ 	 */
+-	if (!is_sync_kiocb(iocb) && (offset + count > i_size) &&
++	if (!is_sync && (offset + count > i_size) &&
+ 	    iov_iter_rw(iter) == WRITE)
+ 		io->async = false;
+ 
+-	if (io->async && is_sync_kiocb(iocb))
++	if (io->async && is_sync) {
++		/*
++		 * Additional reference to keep io around after
++		 * calling fuse_aio_complete()
++		 */
++		kref_get(&io->refcnt);
+ 		io->done = &wait;
++	}
+ 
+ 	if (iov_iter_rw(iter) == WRITE) {
+ 		ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
+@@ -2900,14 +2915,14 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
+ 		fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
+ 
+ 		/* we have a non-extending, async request, so return */
+-		if (!is_sync_kiocb(iocb))
++		if (!is_sync)
+ 			return -EIOCBQUEUED;
+ 
+ 		wait_for_completion(&wait);
+ 		ret = fuse_get_res_by_io(io);
+ 	}
+ 
+-	kfree(io);
++	kref_put(&io->refcnt, fuse_io_release);
+ 
+ 	if (iov_iter_rw(iter) == WRITE) {
+ 		if (ret > 0)
+diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
+index ce394b5fe6b4..eddbe02c4028 100644
+--- a/fs/fuse/fuse_i.h
++++ b/fs/fuse/fuse_i.h
+@@ -22,6 +22,7 @@
+ #include <linux/rbtree.h>
+ #include <linux/poll.h>
+ #include <linux/workqueue.h>
++#include <linux/kref.h>
+ 
+ /** Max number of pages that can be used in a single read request */
+ #define FUSE_MAX_PAGES_PER_REQ 32
+@@ -243,6 +244,7 @@ struct fuse_args {
+ 
+ /** The request IO state (for asynchronous processing) */
+ struct fuse_io_priv {
++	struct kref refcnt;
+ 	int async;
+ 	spinlock_t lock;
+ 	unsigned reqs;
+@@ -256,6 +258,13 @@ struct fuse_io_priv {
+ 	struct completion *done;
+ };
+ 
++#define FUSE_IO_PRIV_SYNC(f) \
++{					\
++	.refcnt = { ATOMIC_INIT(1) },	\
++	.async = 0,			\
++	.file = f,			\
++}
++
+ /**
+  * Request flags
+  *
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 81e622681c82..624a57a9c4aa 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -1408,11 +1408,12 @@ out:
+ /**
+  * jbd2_mark_journal_empty() - Mark on disk journal as empty.
+  * @journal: The journal to update.
++ * @write_op: With which operation should we write the journal sb
+  *
+  * Update a journal's dynamic superblock fields to show that journal is empty.
+  * Write updated superblock to disk waiting for IO to complete.
+  */
+-static void jbd2_mark_journal_empty(journal_t *journal)
++static void jbd2_mark_journal_empty(journal_t *journal, int write_op)
+ {
+ 	journal_superblock_t *sb = journal->j_superblock;
+ 
+@@ -1430,7 +1431,7 @@ static void jbd2_mark_journal_empty(journal_t *journal)
+ 	sb->s_start    = cpu_to_be32(0);
+ 	read_unlock(&journal->j_state_lock);
+ 
+-	jbd2_write_superblock(journal, WRITE_FUA);
++	jbd2_write_superblock(journal, write_op);
+ 
+ 	/* Log is no longer empty */
+ 	write_lock(&journal->j_state_lock);
+@@ -1716,7 +1717,13 @@ int jbd2_journal_destroy(journal_t *journal)
+ 	if (journal->j_sb_buffer) {
+ 		if (!is_journal_aborted(journal)) {
+ 			mutex_lock(&journal->j_checkpoint_mutex);
+-			jbd2_mark_journal_empty(journal);
++
++			write_lock(&journal->j_state_lock);
++			journal->j_tail_sequence =
++				++journal->j_transaction_sequence;
++			write_unlock(&journal->j_state_lock);
++
++			jbd2_mark_journal_empty(journal, WRITE_FLUSH_FUA);
+ 			mutex_unlock(&journal->j_checkpoint_mutex);
+ 		} else
+ 			err = -EIO;
+@@ -1975,7 +1982,7 @@ int jbd2_journal_flush(journal_t *journal)
+ 	 * the magic code for a fully-recovered superblock.  Any future
+ 	 * commits of data to the journal will restore the current
+ 	 * s_start value. */
+-	jbd2_mark_journal_empty(journal);
++	jbd2_mark_journal_empty(journal, WRITE_FUA);
+ 	mutex_unlock(&journal->j_checkpoint_mutex);
+ 	write_lock(&journal->j_state_lock);
+ 	J_ASSERT(!journal->j_running_transaction);
+@@ -2021,7 +2028,7 @@ int jbd2_journal_wipe(journal_t *journal, int write)
+ 	if (write) {
+ 		/* Lock to make assertions happy... */
+ 		mutex_lock(&journal->j_checkpoint_mutex);
+-		jbd2_mark_journal_empty(journal);
++		jbd2_mark_journal_empty(journal, WRITE_FUA);
+ 		mutex_unlock(&journal->j_checkpoint_mutex);
+ 	}
+ 
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 4cba7865f496..f8082c7cde8b 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -878,6 +878,7 @@ nfsd4_secinfo(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 				    &exp, &dentry);
+ 	if (err)
+ 		return err;
++	fh_unlock(&cstate->current_fh);
+ 	if (d_really_is_negative(dentry)) {
+ 		exp_put(exp);
+ 		err = nfserr_noent;
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index d6ef0955a979..1600ec470ce7 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -1072,8 +1072,9 @@ nfsd4_decode_rename(struct nfsd4_compoundargs *argp, struct nfsd4_rename *rename
+ 
+ 	READ_BUF(4);
+ 	rename->rn_snamelen = be32_to_cpup(p++);
+-	READ_BUF(rename->rn_snamelen + 4);
++	READ_BUF(rename->rn_snamelen);
+ 	SAVEMEM(rename->rn_sname, rename->rn_snamelen);
++	READ_BUF(4);
+ 	rename->rn_tnamelen = be32_to_cpup(p++);
+ 	READ_BUF(rename->rn_tnamelen);
+ 	SAVEMEM(rename->rn_tname, rename->rn_tnamelen);
+@@ -1155,13 +1156,14 @@ nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclient
+ 	READ_BUF(8);
+ 	setclientid->se_callback_prog = be32_to_cpup(p++);
+ 	setclientid->se_callback_netid_len = be32_to_cpup(p++);
+-
+-	READ_BUF(setclientid->se_callback_netid_len + 4);
++	READ_BUF(setclientid->se_callback_netid_len);
+ 	SAVEMEM(setclientid->se_callback_netid_val, setclientid->se_callback_netid_len);
++	READ_BUF(4);
+ 	setclientid->se_callback_addr_len = be32_to_cpup(p++);
+ 
+-	READ_BUF(setclientid->se_callback_addr_len + 4);
++	READ_BUF(setclientid->se_callback_addr_len);
+ 	SAVEMEM(setclientid->se_callback_addr_val, setclientid->se_callback_addr_len);
++	READ_BUF(4);
+ 	setclientid->se_callback_ident = be32_to_cpup(p++);
+ 
+ 	DECODE_TAIL;
+@@ -1835,8 +1837,9 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
+ 
+ 	READ_BUF(4);
+ 	argp->taglen = be32_to_cpup(p++);
+-	READ_BUF(argp->taglen + 8);
++	READ_BUF(argp->taglen);
+ 	SAVEMEM(argp->tag, argp->taglen);
++	READ_BUF(8);
+ 	argp->minorversion = be32_to_cpup(p++);
+ 	argp->opcnt = be32_to_cpup(p++);
+ 	max_reply += 4 + (XDR_QUADLEN(argp->taglen) << 2);
+diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
+index a76b9ea7722e..a2370e2c7295 100644
+--- a/fs/ocfs2/cluster/heartbeat.c
++++ b/fs/ocfs2/cluster/heartbeat.c
+@@ -1445,8 +1445,8 @@ static void o2hb_region_release(struct config_item *item)
+ 	debugfs_remove(reg->hr_debug_dir);
+ 	kfree(reg->hr_db_livenodes);
+ 	kfree(reg->hr_db_regnum);
+-	kfree(reg->hr_debug_elapsed_time);
+-	kfree(reg->hr_debug_pinned);
++	kfree(reg->hr_db_elapsed_time);
++	kfree(reg->hr_db_pinned);
+ 
+ 	spin_lock(&o2hb_live_lock);
+ 	list_del(&reg->hr_all_item);
+diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
+index e36d63ff1783..f90931335c6b 100644
+--- a/fs/ocfs2/dlm/dlmconvert.c
++++ b/fs/ocfs2/dlm/dlmconvert.c
+@@ -262,6 +262,7 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
+ 				  struct dlm_lock *lock, int flags, int type)
+ {
+ 	enum dlm_status status;
++	u8 old_owner = res->owner;
+ 
+ 	mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
+ 	     lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);
+@@ -287,6 +288,19 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
+ 		status = DLM_DENIED;
+ 		goto bail;
+ 	}
++
++	if (lock->ml.type == type && lock->ml.convert_type == LKM_IVMODE) {
++		mlog(0, "last convert request returned DLM_RECOVERING, but "
++		     "owner has already queued and sent ast to me. res %.*s, "
++		     "(cookie=%u:%llu, type=%d, conv=%d)\n",
++		     res->lockname.len, res->lockname.name,
++		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
++		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
++		     lock->ml.type, lock->ml.convert_type);
++		status = DLM_NORMAL;
++		goto bail;
++	}
++
+ 	res->state |= DLM_LOCK_RES_IN_PROGRESS;
+ 	/* move lock to local convert queue */
+ 	/* do not alter lock refcount.  switching lists. */
+@@ -316,11 +330,19 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
+ 	spin_lock(&res->spinlock);
+ 	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
+ 	lock->convert_pending = 0;
+-	/* if it failed, move it back to granted queue */
++	/* if it failed, move it back to granted queue.
++	 * if master returns DLM_NORMAL and then down before sending ast,
++	 * it may have already been moved to granted queue, reset to
++	 * DLM_RECOVERING and retry convert */
+ 	if (status != DLM_NORMAL) {
+ 		if (status != DLM_NOTQUEUED)
+ 			dlm_error(status);
+ 		dlm_revert_pending_convert(res, lock);
++	} else if ((res->state & DLM_LOCK_RES_RECOVERING) ||
++			(old_owner != res->owner)) {
++		mlog(0, "res %.*s is in recovering or has been recovered.\n",
++				res->lockname.len, res->lockname.name);
++		status = DLM_RECOVERING;
+ 	}
+ bail:
+ 	spin_unlock(&res->spinlock);
+diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
+index b94a425f0175..23d0ab881f6e 100644
+--- a/fs/ocfs2/dlm/dlmrecovery.c
++++ b/fs/ocfs2/dlm/dlmrecovery.c
+@@ -2071,7 +2071,6 @@ void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
+ 			dlm_lock_get(lock);
+ 			if (lock->convert_pending) {
+ 				/* move converting lock back to granted */
+-				BUG_ON(i != DLM_CONVERTING_LIST);
+ 				mlog(0, "node died with convert pending "
+ 				     "on %.*s. move back to granted list.\n",
+ 				     res->lockname.len, res->lockname.name);
+diff --git a/fs/open.c b/fs/open.c
+index 55bdc75e2172..17cb6b1dab75 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -992,14 +992,12 @@ struct file *filp_open(const char *filename, int flags, umode_t mode)
+ EXPORT_SYMBOL(filp_open);
+ 
+ struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt,
+-			    const char *filename, int flags)
++			    const char *filename, int flags, umode_t mode)
+ {
+ 	struct open_flags op;
+-	int err = build_open_flags(flags, 0, &op);
++	int err = build_open_flags(flags, mode, &op);
+ 	if (err)
+ 		return ERR_PTR(err);
+-	if (flags & O_CREAT)
+-		return ERR_PTR(-EINVAL);
+ 	return do_file_open_root(dentry, mnt, filename, &op);
+ }
+ EXPORT_SYMBOL(file_open_root);
+diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
+index 2256e7e23e67..3f1190d18991 100644
+--- a/fs/proc_namespace.c
++++ b/fs/proc_namespace.c
+@@ -199,6 +199,8 @@ static int show_vfsstat(struct seq_file *m, struct vfsmount *mnt)
+ 	if (sb->s_op->show_devname) {
+ 		seq_puts(m, "device ");
+ 		err = sb->s_op->show_devname(m, mnt_path.dentry);
++		if (err)
++			goto out;
+ 	} else {
+ 		if (r->mnt_devname) {
+ 			seq_puts(m, "device ");
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index 3c3b81bb6dfe..850d17fa0aa3 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -1398,7 +1398,7 @@ static int dquot_active(const struct inode *inode)
+ static int __dquot_initialize(struct inode *inode, int type)
+ {
+ 	int cnt, init_needed = 0;
+-	struct dquot **dquots, *got[MAXQUOTAS];
++	struct dquot **dquots, *got[MAXQUOTAS] = {};
+ 	struct super_block *sb = inode->i_sb;
+ 	qsize_t rsv;
+ 	int ret = 0;
+@@ -1415,7 +1415,6 @@ static int __dquot_initialize(struct inode *inode, int type)
+ 		int rc;
+ 		struct dquot *dquot;
+ 
+-		got[cnt] = NULL;
+ 		if (type != -1 && cnt != type)
+ 			continue;
+ 		/*
+diff --git a/fs/splice.c b/fs/splice.c
+index 82bc0d64fc38..19e0b103d253 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -185,6 +185,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
+ 	unsigned int spd_pages = spd->nr_pages;
+ 	int ret, do_wakeup, page_nr;
+ 
++	if (!spd_pages)
++		return 0;
++
+ 	ret = 0;
+ 	do_wakeup = 0;
+ 	page_nr = 0;
+diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
+index 0ef7c2ed3f8a..4fa14820e2e2 100644
+--- a/fs/xfs/xfs_attr_list.c
++++ b/fs/xfs/xfs_attr_list.c
+@@ -202,8 +202,10 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
+ 					sbp->namelen,
+ 					sbp->valuelen,
+ 					&sbp->name[sbp->namelen]);
+-		if (error)
++		if (error) {
++			kmem_free(sbuf);
+ 			return error;
++		}
+ 		if (context->seen_enough)
+ 			break;
+ 		cursor->offset++;
+@@ -454,14 +456,13 @@ xfs_attr3_leaf_list_int(
+ 				args.rmtblkcnt = xfs_attr3_rmt_blocks(
+ 							args.dp->i_mount, valuelen);
+ 				retval = xfs_attr_rmtval_get(&args);
+-				if (retval)
+-					return retval;
+-				retval = context->put_listent(context,
+-						entry->flags,
+-						name_rmt->name,
+-						(int)name_rmt->namelen,
+-						valuelen,
+-						args.value);
++				if (!retval)
++					retval = context->put_listent(context,
++							entry->flags,
++							name_rmt->name,
++							(int)name_rmt->namelen,
++							valuelen,
++							args.value);
+ 				kmem_free(args.value);
+ 			} else {
+ 				retval = context->put_listent(context,
+diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
+index c30266e94806..8ef0ccbf8167 100644
+--- a/include/asm-generic/bitops/lock.h
++++ b/include/asm-generic/bitops/lock.h
+@@ -29,16 +29,16 @@ do {					\
+  * @nr: the bit to set
+  * @addr: the address to start counting from
+  *
+- * This operation is like clear_bit_unlock, however it is not atomic.
+- * It does provide release barrier semantics so it can be used to unlock
+- * a bit lock, however it would only be used if no other CPU can modify
+- * any bits in the memory until the lock is released (a good example is
+- * if the bit lock itself protects access to the other bits in the word).
++ * A weaker form of clear_bit_unlock() as used by __bit_lock_unlock(). If all
++ * the bits in the word are protected by this lock some archs can use weaker
++ * ops to safely unlock.
++ *
++ * See for example x86's implementation.
+  */
+ #define __clear_bit_unlock(nr, addr)	\
+ do {					\
+-	smp_mb();			\
+-	__clear_bit(nr, addr);		\
++	smp_mb__before_atomic();	\
++	clear_bit(nr, addr);		\
+ } while (0)
+ 
+ #endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
+diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
+index 789471dba6fb..89d944b25d87 100644
+--- a/include/linux/cgroup-defs.h
++++ b/include/linux/cgroup-defs.h
+@@ -210,6 +210,9 @@ struct css_set {
+ 	/* all css_task_iters currently walking this cset */
+ 	struct list_head task_iters;
+ 
++	/* dead and being drained, ignore for migration */
++	bool dead;
++
+ 	/* For RCU-protected deletion */
+ 	struct rcu_head rcu_head;
+ };
+diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
+index ec1c61c87d89..899ab9f8549e 100644
+--- a/include/linux/device-mapper.h
++++ b/include/linux/device-mapper.h
+@@ -124,6 +124,8 @@ struct dm_dev {
+ 	char name[16];
+ };
+ 
++dev_t dm_get_dev_t(const char *path);
++
+ /*
+  * Constructors should call these functions to ensure destination devices
+  * are opened/closed correctly.
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index ae681002100a..2c7f8d9c3c70 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2259,7 +2259,7 @@ extern long do_sys_open(int dfd, const char __user *filename, int flags,
+ extern struct file *file_open_name(struct filename *, int, umode_t);
+ extern struct file *filp_open(const char *, int, umode_t);
+ extern struct file *file_open_root(struct dentry *, struct vfsmount *,
+-				   const char *, int);
++				   const char *, int, umode_t);
+ extern struct file * dentry_open(const struct path *, int, const struct cred *);
+ extern int filp_close(struct file *, fl_owner_t id);
+ 
+diff --git a/include/linux/kernel.h b/include/linux/kernel.h
+index f31638c6e873..95452f72349a 100644
+--- a/include/linux/kernel.h
++++ b/include/linux/kernel.h
+@@ -635,7 +635,7 @@ do {							\
+ 
+ #define do_trace_printk(fmt, args...)					\
+ do {									\
+-	static const char *trace_printk_fmt				\
++	static const char *trace_printk_fmt __used			\
+ 		__attribute__((section("__trace_printk_fmt"))) =	\
+ 		__builtin_constant_p(fmt) ? fmt : NULL;			\
+ 									\
+@@ -679,7 +679,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
+  */
+ 
+ #define trace_puts(str) ({						\
+-	static const char *trace_printk_fmt				\
++	static const char *trace_printk_fmt __used			\
+ 		__attribute__((section("__trace_printk_fmt"))) =	\
+ 		__builtin_constant_p(str) ? str : NULL;			\
+ 									\
+@@ -701,7 +701,7 @@ extern void trace_dump_stack(int skip);
+ #define ftrace_vprintk(fmt, vargs)					\
+ do {									\
+ 	if (__builtin_constant_p(fmt)) {				\
+-		static const char *trace_printk_fmt			\
++		static const char *trace_printk_fmt __used		\
+ 		  __attribute__((section("__trace_printk_fmt"))) =	\
+ 			__builtin_constant_p(fmt) ? fmt : NULL;		\
+ 									\
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 27716254dcc5..60042ab5d7bd 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -359,6 +359,7 @@ struct pci_dev {
+ 	unsigned int	io_window_1k:1;	/* Intel P2P bridge 1K I/O windows */
+ 	unsigned int	irq_managed:1;
+ 	unsigned int	has_secondary_link:1;
++	unsigned int	non_compliant_bars:1;	/* broken BARs; ignore them */
+ 	pci_dev_flags_t dev_flags;
+ 	atomic_t	enable_cnt;	/* pci_enable_device has been called */
+ 
+diff --git a/include/linux/thermal.h b/include/linux/thermal.h
+index e13a1ace50e9..4a849f19e6c9 100644
+--- a/include/linux/thermal.h
++++ b/include/linux/thermal.h
+@@ -156,6 +156,7 @@ struct thermal_attr {
+  * @trip_hyst_attrs:	attributes for trip points for sysfs: trip hysteresis
+  * @devdata:	private pointer for device private data
+  * @trips:	number of trip points the thermal zone supports
++ * @trips_disabled;	bitmap for disabled trips
+  * @passive_delay:	number of milliseconds to wait between polls when
+  *			performing passive cooling.
+  * @polling_delay:	number of milliseconds to wait between polls when
+@@ -191,6 +192,7 @@ struct thermal_zone_device {
+ 	struct thermal_attr *trip_hyst_attrs;
+ 	void *devdata;
+ 	int trips;
++	unsigned long trips_disabled;	/* bitmap for disabled trips */
+ 	int passive_delay;
+ 	int polling_delay;
+ 	int temperature;
+diff --git a/include/linux/tty.h b/include/linux/tty.h
+index d9fb4b043f56..19199c26783f 100644
+--- a/include/linux/tty.h
++++ b/include/linux/tty.h
+@@ -589,7 +589,7 @@ static inline int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
+ 		count = ld->ops->receive_buf2(ld->tty, p, f, count);
+ 	else {
+ 		count = min_t(int, count, ld->tty->receive_room);
+-		if (count)
++		if (count && ld->ops->receive_buf)
+ 			ld->ops->receive_buf(ld->tty, p, f, count);
+ 	}
+ 	return count;
+diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
+index c21c38ce7450..93e63c56f48f 100644
+--- a/include/sound/hdaudio.h
++++ b/include/sound/hdaudio.h
+@@ -168,11 +168,13 @@ int snd_hdac_power_up(struct hdac_device *codec);
+ int snd_hdac_power_down(struct hdac_device *codec);
+ int snd_hdac_power_up_pm(struct hdac_device *codec);
+ int snd_hdac_power_down_pm(struct hdac_device *codec);
++int snd_hdac_keep_power_up(struct hdac_device *codec);
+ #else
+ static inline int snd_hdac_power_up(struct hdac_device *codec) { return 0; }
+ static inline int snd_hdac_power_down(struct hdac_device *codec) { return 0; }
+ static inline int snd_hdac_power_up_pm(struct hdac_device *codec) { return 0; }
+ static inline int snd_hdac_power_down_pm(struct hdac_device *codec) { return 0; }
++static inline int snd_hdac_keep_power_up(struct hdac_device *codec) { return 0; }
+ #endif
+ 
+ /*
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index d27904c193da..6a498daf2eec 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -2474,6 +2474,14 @@ static void cgroup_migrate_add_src(struct css_set *src_cset,
+ 	lockdep_assert_held(&cgroup_mutex);
+ 	lockdep_assert_held(&css_set_lock);
+ 
++	/*
++	 * If ->dead, @src_set is associated with one or more dead cgroups
++	 * and doesn't contain any migratable tasks.  Ignore it early so
++	 * that the rest of migration path doesn't get confused by it.
++	 */
++	if (src_cset->dead)
++		return;
++
+ 	src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);
+ 
+ 	if (!list_empty(&src_cset->mg_preload_node))
+@@ -5114,6 +5122,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
+ 	__releases(&cgroup_mutex) __acquires(&cgroup_mutex)
+ {
+ 	struct cgroup_subsys_state *css;
++	struct cgrp_cset_link *link;
+ 	int ssid;
+ 
+ 	lockdep_assert_held(&cgroup_mutex);
+@@ -5134,11 +5143,18 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
+ 		return -EBUSY;
+ 
+ 	/*
+-	 * Mark @cgrp dead.  This prevents further task migration and child
+-	 * creation by disabling cgroup_lock_live_group().
++	 * Mark @cgrp and the associated csets dead.  The former prevents
++	 * further task migration and child creation by disabling
++	 * cgroup_lock_live_group().  The latter makes the csets ignored by
++	 * the migration path.
+ 	 */
+ 	cgrp->self.flags &= ~CSS_ONLINE;
+ 
++	spin_lock_bh(&css_set_lock);
++	list_for_each_entry(link, &cgrp->cset_links, cset_link)
++		link->cset->dead = true;
++	spin_unlock_bh(&css_set_lock);
++
+ 	/* initiate massacre of all css's */
+ 	for_each_css(css, ssid, cgrp)
+ 		kill_css(css);
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 614614821f00..f0b4b328d8f5 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -8001,6 +8001,9 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+ 		}
+ 	}
+ 
++	/* symmetric to unaccount_event() in _free_event() */
++	account_event(event);
++
+ 	return event;
+ 
+ err_per_task:
+@@ -8364,8 +8367,6 @@ SYSCALL_DEFINE5(perf_event_open,
+ 		}
+ 	}
+ 
+-	account_event(event);
+-
+ 	/*
+ 	 * Special case software events and allow them to be part of
+ 	 * any hardware group.
+@@ -8662,8 +8663,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
+ 	/* Mark owner so we could distinguish it from user events. */
+ 	event->owner = TASK_TOMBSTONE;
+ 
+-	account_event(event);
+-
+ 	ctx = find_get_context(event->pmu, task, event);
+ 	if (IS_ERR(ctx)) {
+ 		err = PTR_ERR(ctx);
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index b7342a24f559..b7dd5718836e 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -339,6 +339,7 @@ int hibernation_snapshot(int platform_mode)
+ 	pm_message_t msg;
+ 	int error;
+ 
++	pm_suspend_clear_flags();
+ 	error = platform_begin(platform_mode);
+ 	if (error)
+ 		goto Close;
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 41f6b2215aa8..a74073f8c08c 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -5630,6 +5630,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ 
+ 	case CPU_UP_PREPARE:
+ 		rq->calc_load_update = calc_load_update;
++		account_reset_rq(rq);
+ 		break;
+ 
+ 	case CPU_ONLINE:
+diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
+index b2ab2ffb1adc..ab2b5fb9821d 100644
+--- a/kernel/sched/cputime.c
++++ b/kernel/sched/cputime.c
+@@ -262,21 +262,21 @@ static __always_inline bool steal_account_process_tick(void)
+ #ifdef CONFIG_PARAVIRT
+ 	if (static_key_false(&paravirt_steal_enabled)) {
+ 		u64 steal;
+-		cputime_t steal_ct;
++		unsigned long steal_jiffies;
+ 
+ 		steal = paravirt_steal_clock(smp_processor_id());
+ 		steal -= this_rq()->prev_steal_time;
+ 
+ 		/*
+-		 * cputime_t may be less precise than nsecs (eg: if it's
+-		 * based on jiffies). Lets cast the result to cputime
++		 * steal is in nsecs but our caller is expecting steal
++		 * time in jiffies. Lets cast the result to jiffies
+ 		 * granularity and account the rest on the next rounds.
+ 		 */
+-		steal_ct = nsecs_to_cputime(steal);
+-		this_rq()->prev_steal_time += cputime_to_nsecs(steal_ct);
++		steal_jiffies = nsecs_to_jiffies(steal);
++		this_rq()->prev_steal_time += jiffies_to_nsecs(steal_jiffies);
+ 
+-		account_steal_time(steal_ct);
+-		return steal_ct;
++		account_steal_time(jiffies_to_cputime(steal_jiffies));
++		return steal_jiffies;
+ 	}
+ #endif
+ 	return false;
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 56b7d4b83947..adff850e5d42 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -4459,9 +4459,17 @@ static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
+ 
+ 		/* scale is effectively 1 << i now, and >> i divides by scale */
+ 
+-		old_load = this_rq->cpu_load[i] - tickless_load;
++		old_load = this_rq->cpu_load[i];
+ 		old_load = decay_load_missed(old_load, pending_updates - 1, i);
+-		old_load += tickless_load;
++		if (tickless_load) {
++			old_load -= decay_load_missed(tickless_load, pending_updates - 1, i);
++			/*
++			 * old_load can never be a negative value because a
++			 * decayed tickless_load cannot be greater than the
++			 * original tickless_load.
++			 */
++			old_load += tickless_load;
++		}
+ 		new_load = this_load;
+ 		/*
+ 		 * Round up the averaging division if load is increasing. This
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 10f16374df7f..ff87d887ff62 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1738,3 +1738,16 @@ static inline u64 irq_time_read(int cpu)
+ }
+ #endif /* CONFIG_64BIT */
+ #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
++
++static inline void account_reset_rq(struct rq *rq)
++{
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++	rq->prev_irq_time = 0;
++#endif
++#ifdef CONFIG_PARAVIRT
++	rq->prev_steal_time = 0;
++#endif
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++	rq->prev_steal_time_rq = 0;
++#endif
++}
+diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
+index 7e7746a42a62..10a1d7dc9313 100644
+--- a/kernel/sysctl_binary.c
++++ b/kernel/sysctl_binary.c
+@@ -1321,7 +1321,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
+ 	}
+ 
+ 	mnt = task_active_pid_ns(current)->proc_mnt;
+-	file = file_open_root(mnt->mnt_root, mnt, pathname, flags);
++	file = file_open_root(mnt->mnt_root, mnt, pathname, flags, 0);
+ 	result = PTR_ERR(file);
+ 	if (IS_ERR(file))
+ 		goto out_putname;
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index d9293402ee68..8305cbb2d5a2 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4949,7 +4949,10 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
+ 
+ 	spd.nr_pages = i;
+ 
+-	ret = splice_to_pipe(pipe, &spd);
++	if (i)
++		ret = splice_to_pipe(pipe, &spd);
++	else
++		ret = 0;
+ out:
+ 	splice_shrink_spd(&spd);
+ 	return ret;
+diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
+index e4e56589ec1d..be3222b7d72e 100644
+--- a/kernel/trace/trace_irqsoff.c
++++ b/kernel/trace/trace_irqsoff.c
+@@ -109,8 +109,12 @@ static int func_prolog_dec(struct trace_array *tr,
+ 		return 0;
+ 
+ 	local_save_flags(*flags);
+-	/* slight chance to get a false positive on tracing_cpu */
+-	if (!irqs_disabled_flags(*flags))
++	/*
++	 * Slight chance to get a false positive on tracing_cpu,
++	 * although I'm starting to think there isn't a chance.
++	 * Leave this for now just to be paranoid.
++	 */
++	if (!irqs_disabled_flags(*flags) && !preempt_count())
+ 		return 0;
+ 
+ 	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
+index 060df67dbdd1..f96f0383f6c6 100644
+--- a/kernel/trace/trace_printk.c
++++ b/kernel/trace/trace_printk.c
+@@ -296,6 +296,9 @@ static int t_show(struct seq_file *m, void *v)
+ 	const char *str = *fmt;
+ 	int i;
+ 
++	if (!*fmt)
++		return 0;
++
+ 	seq_printf(m, "0x%lx : \"", *(unsigned long *)fmt);
+ 
+ 	/*
+diff --git a/kernel/watchdog.c b/kernel/watchdog.c
+index b3ace6ebbba3..9acb29f280ec 100644
+--- a/kernel/watchdog.c
++++ b/kernel/watchdog.c
+@@ -923,6 +923,9 @@ static int proc_watchdog_common(int which, struct ctl_table *table, int write,
+ 		 * both lockup detectors are disabled if proc_watchdog_update()
+ 		 * returns an error.
+ 		 */
++		if (old == new)
++			goto out;
++
+ 		err = proc_watchdog_update();
+ 	}
+ out:
+@@ -967,7 +970,7 @@ int proc_soft_watchdog(struct ctl_table *table, int write,
+ int proc_watchdog_thresh(struct ctl_table *table, int write,
+ 			 void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+-	int err, old;
++	int err, old, new;
+ 
+ 	get_online_cpus();
+ 	mutex_lock(&watchdog_proc_mutex);
+@@ -987,6 +990,10 @@ int proc_watchdog_thresh(struct ctl_table *table, int write,
+ 	/*
+ 	 * Update the sample period. Restore on failure.
+ 	 */
++	new = ACCESS_ONCE(watchdog_thresh);
++	if (old == new)
++		goto out;
++
+ 	set_sample_period();
+ 	err = proc_watchdog_update();
+ 	if (err) {
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index d06cae2de783..caf3bf73b533 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -1262,7 +1262,7 @@ static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
+ 	return limit;
+ }
+ 
+-static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
++static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
+ 				     int order)
+ {
+ 	struct oom_control oc = {
+@@ -1340,6 +1340,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
+ 	}
+ unlock:
+ 	mutex_unlock(&oom_lock);
++	return chosen;
+ }
+ 
+ #if MAX_NUMNODES > 1
+@@ -5051,6 +5052,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
+ 				 char *buf, size_t nbytes, loff_t off)
+ {
+ 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
++	unsigned long nr_pages;
+ 	unsigned long high;
+ 	int err;
+ 
+@@ -5061,6 +5063,11 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
+ 
+ 	memcg->high = high;
+ 
++	nr_pages = page_counter_read(&memcg->memory);
++	if (nr_pages > high)
++		try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
++					     GFP_KERNEL, true);
++
+ 	memcg_wb_domain_size_changed(memcg);
+ 	return nbytes;
+ }
+@@ -5082,6 +5089,8 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
+ 				char *buf, size_t nbytes, loff_t off)
+ {
+ 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
++	unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES;
++	bool drained = false;
+ 	unsigned long max;
+ 	int err;
+ 
+@@ -5090,9 +5099,36 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
+ 	if (err)
+ 		return err;
+ 
+-	err = mem_cgroup_resize_limit(memcg, max);
+-	if (err)
+-		return err;
++	xchg(&memcg->memory.limit, max);
++
++	for (;;) {
++		unsigned long nr_pages = page_counter_read(&memcg->memory);
++
++		if (nr_pages <= max)
++			break;
++
++		if (signal_pending(current)) {
++			err = -EINTR;
++			break;
++		}
++
++		if (!drained) {
++			drain_all_stock(memcg);
++			drained = true;
++			continue;
++		}
++
++		if (nr_reclaims) {
++			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
++							  GFP_KERNEL, true))
++				nr_reclaims--;
++			continue;
++		}
++
++		mem_cgroup_events(memcg, MEMCG_OOM, 1);
++		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
++			break;
++	}
+ 
+ 	memcg_wb_domain_size_changed(memcg);
+ 	return nbytes;
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 838ca8bb64f7..9d9044e91ac5 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -660,34 +660,28 @@ static inline void __free_one_page(struct page *page,
+ 	unsigned long combined_idx;
+ 	unsigned long uninitialized_var(buddy_idx);
+ 	struct page *buddy;
+-	unsigned int max_order = MAX_ORDER;
++	unsigned int max_order;
++
++	max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
+ 
+ 	VM_BUG_ON(!zone_is_initialized(zone));
+ 	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
+ 
+ 	VM_BUG_ON(migratetype == -1);
+-	if (is_migrate_isolate(migratetype)) {
+-		/*
+-		 * We restrict max order of merging to prevent merge
+-		 * between freepages on isolate pageblock and normal
+-		 * pageblock. Without this, pageblock isolation
+-		 * could cause incorrect freepage accounting.
+-		 */
+-		max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
+-	} else {
++	if (likely(!is_migrate_isolate(migratetype)))
+ 		__mod_zone_freepage_state(zone, 1 << order, migratetype);
+-	}
+ 
+-	page_idx = pfn & ((1 << max_order) - 1);
++	page_idx = pfn & ((1 << MAX_ORDER) - 1);
+ 
+ 	VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
+ 	VM_BUG_ON_PAGE(bad_range(zone, page), page);
+ 
++continue_merging:
+ 	while (order < max_order - 1) {
+ 		buddy_idx = __find_buddy_index(page_idx, order);
+ 		buddy = page + (buddy_idx - page_idx);
+ 		if (!page_is_buddy(page, buddy, order))
+-			break;
++			goto done_merging;
+ 		/*
+ 		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
+ 		 * merge with it and move up one order.
+@@ -704,6 +698,32 @@ static inline void __free_one_page(struct page *page,
+ 		page_idx = combined_idx;
+ 		order++;
+ 	}
++	if (max_order < MAX_ORDER) {
++		/* If we are here, it means order is >= pageblock_order.
++		 * We want to prevent merge between freepages on isolate
++		 * pageblock and normal pageblock. Without this, pageblock
++		 * isolation could cause incorrect freepage or CMA accounting.
++		 *
++		 * We don't want to hit this code for the more frequent
++		 * low-order merging.
++		 */
++		if (unlikely(has_isolate_pageblock(zone))) {
++			int buddy_mt;
++
++			buddy_idx = __find_buddy_index(page_idx, order);
++			buddy = page + (buddy_idx - page_idx);
++			buddy_mt = get_pageblock_migratetype(buddy);
++
++			if (migratetype != buddy_mt
++					&& (is_migrate_isolate(migratetype) ||
++						is_migrate_isolate(buddy_mt)))
++				goto done_merging;
++		}
++		max_order++;
++		goto continue_merging;
++	}
++
++done_merging:
+ 	set_page_order(page, order);
+ 
+ 	/*
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index 5a5089cb6570..1363b8ffd89c 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -5979,6 +5979,10 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
+ 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
+ 				       MGMT_STATUS_INVALID_PARAMS);
+ 
++	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
++		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
++				       MGMT_STATUS_INVALID_PARAMS);
++
+ 	flags = __le32_to_cpu(cp->flags);
+ 	timeout = __le16_to_cpu(cp->timeout);
+ 	duration = __le16_to_cpu(cp->duration);
+diff --git a/scripts/coccinelle/iterators/use_after_iter.cocci b/scripts/coccinelle/iterators/use_after_iter.cocci
+index f085f5968c52..ce8cc9c006e5 100644
+--- a/scripts/coccinelle/iterators/use_after_iter.cocci
++++ b/scripts/coccinelle/iterators/use_after_iter.cocci
+@@ -123,7 +123,7 @@ list_remove_head(x,c,...)
+ |
+ sizeof(<+...c...+>)
+ |
+-&c->member
++ &c->member
+ |
+ c = E
+ |
+diff --git a/scripts/gdb/linux/modules.py b/scripts/gdb/linux/modules.py
+index 25db8cff44a2..0a35d6dbfb80 100644
+--- a/scripts/gdb/linux/modules.py
++++ b/scripts/gdb/linux/modules.py
+@@ -73,10 +73,11 @@ class LxLsmod(gdb.Command):
+                 "        " if utils.get_long_type().sizeof == 8 else ""))
+ 
+         for module in module_list():
++            layout = module['core_layout']
+             gdb.write("{address} {name:<19} {size:>8}  {ref}".format(
+-                address=str(module['module_core']).split()[0],
++                address=str(layout['base']).split()[0],
+                 name=module['name'].string(),
+-                size=str(module['core_size']),
++                size=str(layout['size']),
+                 ref=str(module['refcnt']['counter'])))
+ 
+             source_list = module['source_list']
+diff --git a/scripts/gdb/linux/symbols.py b/scripts/gdb/linux/symbols.py
+index 627750cb420d..9a0f8923f67c 100644
+--- a/scripts/gdb/linux/symbols.py
++++ b/scripts/gdb/linux/symbols.py
+@@ -108,7 +108,7 @@ lx-symbols command."""
+ 
+     def load_module_symbols(self, module):
+         module_name = module['name'].string()
+-        module_addr = str(module['module_core']).split()[0]
++        module_addr = str(module['core_layout']['base']).split()[0]
+ 
+         module_file = self._get_module_file(module_name)
+         if not module_file and not self.module_files_updated:
+diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
+index d79cba4ce3eb..ebced77deb9c 100644
+--- a/scripts/kconfig/Makefile
++++ b/scripts/kconfig/Makefile
+@@ -96,13 +96,15 @@ savedefconfig: $(obj)/conf
+ defconfig: $(obj)/conf
+ ifeq ($(KBUILD_DEFCONFIG),)
+ 	$< $(silent) --defconfig $(Kconfig)
+-else ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG)),)
++else
++ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG)),)
+ 	@$(kecho) "*** Default configuration is based on '$(KBUILD_DEFCONFIG)'"
+ 	$(Q)$< $(silent) --defconfig=arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG) $(Kconfig)
+ else
+ 	@$(kecho) "*** Default configuration is based on target '$(KBUILD_DEFCONFIG)'"
+ 	$(Q)$(MAKE) -f $(srctree)/Makefile $(KBUILD_DEFCONFIG)
+ endif
++endif
+ 
+ %_defconfig: $(obj)/conf
+ 	$(Q)$< $(silent) --defconfig=arch/$(SRCARCH)/configs/$@ $(Kconfig)
+diff --git a/scripts/package/mkspec b/scripts/package/mkspec
+index 71004daefe31..fe44d68e9344 100755
+--- a/scripts/package/mkspec
++++ b/scripts/package/mkspec
+@@ -131,11 +131,11 @@ echo 'rm -rf $RPM_BUILD_ROOT'
+ echo ""
+ echo "%post"
+ echo "if [ -x /sbin/installkernel -a -r /boot/vmlinuz-$KERNELRELEASE -a -r /boot/System.map-$KERNELRELEASE ]; then"
+-echo "cp /boot/vmlinuz-$KERNELRELEASE /boot/vmlinuz-$KERNELRELEASE-rpm"
+-echo "cp /boot/System.map-$KERNELRELEASE /boot/System.map-$KERNELRELEASE-rpm"
++echo "cp /boot/vmlinuz-$KERNELRELEASE /boot/.vmlinuz-$KERNELRELEASE-rpm"
++echo "cp /boot/System.map-$KERNELRELEASE /boot/.System.map-$KERNELRELEASE-rpm"
+ echo "rm -f /boot/vmlinuz-$KERNELRELEASE /boot/System.map-$KERNELRELEASE"
+-echo "/sbin/installkernel $KERNELRELEASE /boot/vmlinuz-$KERNELRELEASE-rpm /boot/System.map-$KERNELRELEASE-rpm"
+-echo "rm -f /boot/vmlinuz-$KERNELRELEASE-rpm /boot/System.map-$KERNELRELEASE-rpm"
++echo "/sbin/installkernel $KERNELRELEASE /boot/.vmlinuz-$KERNELRELEASE-rpm /boot/.System.map-$KERNELRELEASE-rpm"
++echo "rm -f /boot/.vmlinuz-$KERNELRELEASE-rpm /boot/.System.map-$KERNELRELEASE-rpm"
+ echo "fi"
+ echo ""
+ echo "%files"
+diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
+index 6b5a811e01a5..3a9b66c6e09c 100644
+--- a/sound/core/pcm_lib.c
++++ b/sound/core/pcm_lib.c
+@@ -322,7 +322,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
+ 			char name[16];
+ 			snd_pcm_debug_name(substream, name, sizeof(name));
+ 			pcm_err(substream->pcm,
+-				"BUG: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
++				"invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
+ 				name, pos, runtime->buffer_size,
+ 				runtime->period_size);
+ 		}
+diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c
+index e361024eabb6..d1a4d6973330 100644
+--- a/sound/hda/hdac_device.c
++++ b/sound/hda/hdac_device.c
+@@ -611,6 +611,22 @@ int snd_hdac_power_up_pm(struct hdac_device *codec)
+ }
+ EXPORT_SYMBOL_GPL(snd_hdac_power_up_pm);
+ 
++/* like snd_hdac_power_up_pm(), but only increment the pm count when
++ * already powered up.  Returns -1 if not powered up, 1 if incremented
++ * or 0 if unchanged.  Only used in hdac_regmap.c
++ */
++int snd_hdac_keep_power_up(struct hdac_device *codec)
++{
++	if (!atomic_inc_not_zero(&codec->in_pm)) {
++		int ret = pm_runtime_get_if_in_use(&codec->dev);
++		if (!ret)
++			return -1;
++		if (ret < 0)
++			return 0;
++	}
++	return 1;
++}
++
+ /**
+  * snd_hdac_power_down_pm - power down the codec
+  * @codec: the codec object
+diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
+index eb8f7c30cb09..bdbcd6b75ff6 100644
+--- a/sound/hda/hdac_regmap.c
++++ b/sound/hda/hdac_regmap.c
+@@ -21,13 +21,16 @@
+ #include <sound/hdaudio.h>
+ #include <sound/hda_regmap.h>
+ 
+-#ifdef CONFIG_PM
+-#define codec_is_running(codec)				\
+-	(atomic_read(&(codec)->in_pm) ||		\
+-	 !pm_runtime_suspended(&(codec)->dev))
+-#else
+-#define codec_is_running(codec)		true
+-#endif
++static int codec_pm_lock(struct hdac_device *codec)
++{
++	return snd_hdac_keep_power_up(codec);
++}
++
++static void codec_pm_unlock(struct hdac_device *codec, int lock)
++{
++	if (lock == 1)
++		snd_hdac_power_down_pm(codec);
++}
+ 
+ #define get_verb(reg)	(((reg) >> 8) & 0xfff)
+ 
+@@ -238,20 +241,28 @@ static int hda_reg_read(void *context, unsigned int reg, unsigned int *val)
+ 	struct hdac_device *codec = context;
+ 	int verb = get_verb(reg);
+ 	int err;
++	int pm_lock = 0;
+ 
+-	if (!codec_is_running(codec) && verb != AC_VERB_GET_POWER_STATE)
+-		return -EAGAIN;
++	if (verb != AC_VERB_GET_POWER_STATE) {
++		pm_lock = codec_pm_lock(codec);
++		if (pm_lock < 0)
++			return -EAGAIN;
++	}
+ 	reg |= (codec->addr << 28);
+-	if (is_stereo_amp_verb(reg))
+-		return hda_reg_read_stereo_amp(codec, reg, val);
+-	if (verb == AC_VERB_GET_PROC_COEF)
+-		return hda_reg_read_coef(codec, reg, val);
++	if (is_stereo_amp_verb(reg)) {
++		err = hda_reg_read_stereo_amp(codec, reg, val);
++		goto out;
++	}
++	if (verb == AC_VERB_GET_PROC_COEF) {
++		err = hda_reg_read_coef(codec, reg, val);
++		goto out;
++	}
+ 	if ((verb & 0x700) == AC_VERB_SET_AMP_GAIN_MUTE)
+ 		reg &= ~AC_AMP_FAKE_MUTE;
+ 
+ 	err = snd_hdac_exec_verb(codec, reg, 0, val);
+ 	if (err < 0)
+-		return err;
++		goto out;
+ 	/* special handling for asymmetric reads */
+ 	if (verb == AC_VERB_GET_POWER_STATE) {
+ 		if (*val & AC_PWRST_ERROR)
+@@ -259,7 +270,9 @@ static int hda_reg_read(void *context, unsigned int reg, unsigned int *val)
+ 		else /* take only the actual state */
+ 			*val = (*val >> 4) & 0x0f;
+ 	}
+-	return 0;
++ out:
++	codec_pm_unlock(codec, pm_lock);
++	return err;
+ }
+ 
+ static int hda_reg_write(void *context, unsigned int reg, unsigned int val)
+@@ -267,6 +280,7 @@ static int hda_reg_write(void *context, unsigned int reg, unsigned int val)
+ 	struct hdac_device *codec = context;
+ 	unsigned int verb;
+ 	int i, bytes, err;
++	int pm_lock = 0;
+ 
+ 	if (codec->caps_overwriting)
+ 		return 0;
+@@ -275,14 +289,21 @@ static int hda_reg_write(void *context, unsigned int reg, unsigned int val)
+ 	reg |= (codec->addr << 28);
+ 	verb = get_verb(reg);
+ 
+-	if (!codec_is_running(codec) && verb != AC_VERB_SET_POWER_STATE)
+-		return codec->lazy_cache ? 0 : -EAGAIN;
++	if (verb != AC_VERB_SET_POWER_STATE) {
++		pm_lock = codec_pm_lock(codec);
++		if (pm_lock < 0)
++			return codec->lazy_cache ? 0 : -EAGAIN;
++	}
+ 
+-	if (is_stereo_amp_verb(reg))
+-		return hda_reg_write_stereo_amp(codec, reg, val);
++	if (is_stereo_amp_verb(reg)) {
++		err = hda_reg_write_stereo_amp(codec, reg, val);
++		goto out;
++	}
+ 
+-	if (verb == AC_VERB_SET_PROC_COEF)
+-		return hda_reg_write_coef(codec, reg, val);
++	if (verb == AC_VERB_SET_PROC_COEF) {
++		err = hda_reg_write_coef(codec, reg, val);
++		goto out;
++	}
+ 
+ 	switch (verb & 0xf00) {
+ 	case AC_VERB_SET_AMP_GAIN_MUTE:
+@@ -319,10 +340,12 @@ static int hda_reg_write(void *context, unsigned int reg, unsigned int val)
+ 		reg |= (verb + i) << 8 | ((val >> (8 * i)) & 0xff);
+ 		err = snd_hdac_exec_verb(codec, reg, 0, NULL);
+ 		if (err < 0)
+-			return err;
++			goto out;
+ 	}
+ 
+-	return 0;
++ out:
++	codec_pm_unlock(codec, pm_lock);
++	return err;
+ }
+ 
+ static const struct regmap_config hda_regmap_cfg = {
+diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
+index c1c855a6c0af..a47e8ae0eb30 100644
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -174,8 +174,12 @@ static void cs_automute(struct hda_codec *codec)
+ 	snd_hda_gen_update_outputs(codec);
+ 
+ 	if (spec->gpio_eapd_hp || spec->gpio_eapd_speaker) {
+-		spec->gpio_data = spec->gen.hp_jack_present ?
+-			spec->gpio_eapd_hp : spec->gpio_eapd_speaker;
++		if (spec->gen.automute_speaker)
++			spec->gpio_data = spec->gen.hp_jack_present ?
++				spec->gpio_eapd_hp : spec->gpio_eapd_speaker;
++		else
++			spec->gpio_data =
++				spec->gpio_eapd_hp | spec->gpio_eapd_speaker;
+ 		snd_hda_codec_write(codec, 0x01, 0,
+ 				    AC_VERB_SET_GPIO_DATA, spec->gpio_data);
+ 	}
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 6122b8ca872f..56fefbd85782 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -204,8 +204,13 @@ static void cx_auto_reboot_notify(struct hda_codec *codec)
+ {
+ 	struct conexant_spec *spec = codec->spec;
+ 
+-	if (codec->core.vendor_id != 0x14f150f2)
++	switch (codec->core.vendor_id) {
++	case 0x14f150f2: /* CX20722 */
++	case 0x14f150f4: /* CX20724 */
++		break;
++	default:
+ 		return;
++	}
+ 
+ 	/* Turn the CX20722 codec into D3 to avoid spurious noises
+ 	   from the internal speaker during (and after) reboot */
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index bcbc4ee10130..e68fa449ebef 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -152,13 +152,17 @@ struct hdmi_spec {
+ 	struct hda_pcm_stream pcm_playback;
+ 
+ 	/* i915/powerwell (Haswell+/Valleyview+) specific */
++	bool use_acomp_notifier; /* use i915 eld_notify callback for hotplug */
+ 	struct i915_audio_component_audio_ops i915_audio_ops;
+ 	bool i915_bound; /* was i915 bound in this driver? */
+ };
+ 
+ #ifdef CONFIG_SND_HDA_I915
+-#define codec_has_acomp(codec) \
+-	((codec)->bus->core.audio_component != NULL)
++static inline bool codec_has_acomp(struct hda_codec *codec)
++{
++	struct hdmi_spec *spec = codec->spec;
++	return spec->use_acomp_notifier;
++}
+ #else
+ #define codec_has_acomp(codec)	false
+ #endif
+@@ -1562,6 +1566,7 @@ static void update_eld(struct hda_codec *codec,
+ 			   eld->eld_size) != 0)
+ 			eld_changed = true;
+ 
++	pin_eld->monitor_present = eld->monitor_present;
+ 	pin_eld->eld_valid = eld->eld_valid;
+ 	pin_eld->eld_size = eld->eld_size;
+ 	if (eld->eld_valid)
+@@ -1665,11 +1670,10 @@ static void sync_eld_via_acomp(struct hda_codec *codec,
+ 	int size;
+ 
+ 	mutex_lock(&per_pin->lock);
++	eld->monitor_present = false;
+ 	size = snd_hdac_acomp_get_eld(&codec->bus->core, per_pin->pin_nid,
+ 				      &eld->monitor_present, eld->eld_buffer,
+ 				      ELD_MAX_SIZE);
+-	if (size < 0)
+-		goto unlock;
+ 	if (size > 0) {
+ 		size = min(size, ELD_MAX_SIZE);
+ 		if (snd_hdmi_parse_eld(codec, &eld->info,
+@@ -1873,7 +1877,8 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
+ 
+ 	/* Call sync_audio_rate to set the N/CTS/M manually if necessary */
+ 	/* Todo: add DP1.2 MST audio support later */
+-	snd_hdac_sync_audio_rate(&codec->bus->core, pin_nid, runtime->rate);
++	if (codec_has_acomp(codec))
++		snd_hdac_sync_audio_rate(&codec->bus->core, pin_nid, runtime->rate);
+ 
+ 	non_pcm = check_non_pcm_per_cvt(codec, cvt_nid);
+ 	mutex_lock(&per_pin->lock);
+@@ -2432,6 +2437,10 @@ static void intel_pin_eld_notify(void *audio_ptr, int port)
+ 	struct hda_codec *codec = audio_ptr;
+ 	int pin_nid = port + 0x04;
+ 
++	/* we assume only from port-B to port-D */
++	if (port < 1 || port > 3)
++		return;
++
+ 	/* skip notification during system suspend (but not in runtime PM);
+ 	 * the state will be updated at resume
+ 	 */
+@@ -2456,11 +2465,24 @@ static int patch_generic_hdmi(struct hda_codec *codec)
+ 	codec->spec = spec;
+ 	hdmi_array_init(spec, 4);
+ 
+-	/* Try to bind with i915 for any Intel codecs (if not done yet) */
+-	if (!codec_has_acomp(codec) &&
+-	    (codec->core.vendor_id >> 16) == 0x8086)
+-		if (!snd_hdac_i915_init(&codec->bus->core))
+-			spec->i915_bound = true;
++#ifdef CONFIG_SND_HDA_I915
++	/* Try to bind with i915 for Intel HSW+ codecs (if not done yet) */
++	if ((codec->core.vendor_id >> 16) == 0x8086 &&
++	    is_haswell_plus(codec)) {
++#if 0
++		/* on-demand binding leads to an unbalanced refcount when
++		 * both i915 and hda drivers are probed concurrently;
++		 * disabled temporarily for now
++		 */
++		if (!codec->bus->core.audio_component)
++			if (!snd_hdac_i915_init(&codec->bus->core))
++				spec->i915_bound = true;
++#endif
++		/* use i915 audio component notifier for hotplug */
++		if (codec->bus->core.audio_component)
++			spec->use_acomp_notifier = true;
++	}
++#endif
+ 
+ 	if (is_haswell_plus(codec)) {
+ 		intel_haswell_enable_all_pins(codec, true);
+@@ -3659,6 +3681,7 @@ HDA_CODEC_ENTRY(0x10de0070, "GPU 70 HDMI/DP",	patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0071, "GPU 71 HDMI/DP",	patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0072, "GPU 72 HDMI/DP",	patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de007d, "GPU 7d HDMI/DP",	patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0082, "GPU 82 HDMI/DP",	patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0083, "GPU 83 HDMI/DP",	patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI",	patch_nvhdmi_2ch),
+ HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP",	patch_via_hdmi),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 93d2156b6241..4f5ca0b9ce27 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5556,6 +5556,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
+ 	SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
++	SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+ 	SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
+diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
+index 42bcbac801a3..ccdab29a8b66 100644
+--- a/sound/pci/intel8x0.c
++++ b/sound/pci/intel8x0.c
+@@ -2879,6 +2879,7 @@ static void intel8x0_measure_ac97_clock(struct intel8x0 *chip)
+ 
+ static struct snd_pci_quirk intel8x0_clock_list[] = {
+ 	SND_PCI_QUIRK(0x0e11, 0x008a, "AD1885", 41000),
++	SND_PCI_QUIRK(0x1014, 0x0581, "AD1981B", 48000),
+ 	SND_PCI_QUIRK(0x1028, 0x00be, "AD1885", 44100),
+ 	SND_PCI_QUIRK(0x1028, 0x0177, "AD1980", 48000),
+ 	SND_PCI_QUIRK(0x1028, 0x01ad, "AD1981B", 48000),
+diff --git a/sound/usb/clock.c b/sound/usb/clock.c
+index 2ed260b10f6d..7ccbcaf6a147 100644
+--- a/sound/usb/clock.c
++++ b/sound/usb/clock.c
+@@ -285,6 +285,8 @@ static int set_sample_rate_v1(struct snd_usb_audio *chip, int iface,
+ 	unsigned char data[3];
+ 	int err, crate;
+ 
++	if (get_iface_desc(alts)->bNumEndpoints < 1)
++		return -EINVAL;
+ 	ep = get_endpoint(alts, 0)->bEndpointAddress;
+ 
+ 	/* if endpoint doesn't have sampling rate control, bail out */
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 7b1cb365ffab..c07a7eda42a2 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -438,6 +438,9 @@ exit_clear:
+  *
+  * New endpoints will be added to chip->ep_list and must be freed by
+  * calling snd_usb_endpoint_free().
++ *
++ * For SND_USB_ENDPOINT_TYPE_SYNC, the caller needs to guarantee that
++ * bNumEndpoints > 1 beforehand.
+  */
+ struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
+ 					      struct usb_host_interface *alts,
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index 279025650568..f6c3bf79af9a 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -1519,7 +1519,11 @@ static int snd_microii_spdif_default_get(struct snd_kcontrol *kcontrol,
+ 
+ 	/* use known values for that card: interface#1 altsetting#1 */
+ 	iface = usb_ifnum_to_if(chip->dev, 1);
++	if (!iface || iface->num_altsetting < 2)
++		return -EINVAL;
+ 	alts = &iface->altsetting[1];
++	if (get_iface_desc(alts)->bNumEndpoints < 1)
++		return -EINVAL;
+ 	ep = get_endpoint(alts, 0)->bEndpointAddress;
+ 
+ 	err = snd_usb_ctl_msg(chip->dev,
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index 9245f52d43bd..44d178ee9177 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -159,6 +159,8 @@ static int init_pitch_v1(struct snd_usb_audio *chip, int iface,
+ 	unsigned char data[1];
+ 	int err;
+ 
++	if (get_iface_desc(alts)->bNumEndpoints < 1)
++		return -EINVAL;
+ 	ep = get_endpoint(alts, 0)->bEndpointAddress;
+ 
+ 	data[0] = 1;
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index c458d60d5030..cd7eac28edee 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -150,6 +150,7 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
+ 		usb_audio_err(chip, "cannot memdup\n");
+ 		return -ENOMEM;
+ 	}
++	INIT_LIST_HEAD(&fp->list);
+ 	if (fp->nr_rates > MAX_NR_RATES) {
+ 		kfree(fp);
+ 		return -EINVAL;
+@@ -167,19 +168,20 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
+ 	stream = (fp->endpoint & USB_DIR_IN)
+ 		? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
+ 	err = snd_usb_add_audio_stream(chip, stream, fp);
+-	if (err < 0) {
+-		kfree(fp);
+-		kfree(rate_table);
+-		return err;
+-	}
++	if (err < 0)
++		goto error;
+ 	if (fp->iface != get_iface_desc(&iface->altsetting[0])->bInterfaceNumber ||
+ 	    fp->altset_idx >= iface->num_altsetting) {
+-		kfree(fp);
+-		kfree(rate_table);
+-		return -EINVAL;
++		err = -EINVAL;
++		goto error;
+ 	}
+ 	alts = &iface->altsetting[fp->altset_idx];
+ 	altsd = get_iface_desc(alts);
++	if (altsd->bNumEndpoints < 1) {
++		err = -EINVAL;
++		goto error;
++	}
++
+ 	fp->protocol = altsd->bInterfaceProtocol;
+ 
+ 	if (fp->datainterval == 0)
+@@ -190,6 +192,12 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
+ 	snd_usb_init_pitch(chip, fp->iface, alts, fp);
+ 	snd_usb_init_sample_rate(chip, fp->iface, alts, fp, fp->rate_max);
+ 	return 0;
++
++ error:
++	list_del(&fp->list); /* unlink for avoiding double-free */
++	kfree(fp);
++	kfree(rate_table);
++	return err;
+ }
+ 
+ static int create_auto_pcm_quirk(struct snd_usb_audio *chip,
+@@ -462,6 +470,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
+ 	fp->ep_attr = get_endpoint(alts, 0)->bmAttributes;
+ 	fp->datainterval = 0;
+ 	fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize);
++	INIT_LIST_HEAD(&fp->list);
+ 
+ 	switch (fp->maxpacksize) {
+ 	case 0x120:
+@@ -485,6 +494,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
+ 		? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
+ 	err = snd_usb_add_audio_stream(chip, stream, fp);
+ 	if (err < 0) {
++		list_del(&fp->list); /* unlink for avoiding double-free */
+ 		kfree(fp);
+ 		return err;
+ 	}
+@@ -1121,6 +1131,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+ 	switch (chip->usb_id) {
+ 	case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema  */
+ 	case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
++	case USB_ID(0x045E, 0x076E): /* MS Lifecam HD-5001 */
+ 	case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
+ 	case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
+ 	case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index c4dc577ab1bd..8e9548bc1f1a 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -314,7 +314,9 @@ static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits,
+ /*
+  * add this endpoint to the chip instance.
+  * if a stream with the same endpoint already exists, append to it.
+- * if not, create a new pcm stream.
++ * if not, create a new pcm stream. note, fp is added to the substream
++ * fmt_list and will be freed on the chip instance release. do not free
++ * fp or do remove it from the substream fmt_list to avoid double-free.
+  */
+ int snd_usb_add_audio_stream(struct snd_usb_audio *chip,
+ 			     int stream,
+@@ -675,6 +677,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
+ 					* (fp->maxpacksize & 0x7ff);
+ 		fp->attributes = parse_uac_endpoint_attributes(chip, alts, protocol, iface_no);
+ 		fp->clock = clock;
++		INIT_LIST_HEAD(&fp->list);
+ 
+ 		/* some quirks for attributes here */
+ 
+@@ -723,6 +726,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
+ 		dev_dbg(&dev->dev, "%u:%d: add audio endpoint %#x\n", iface_no, altno, fp->endpoint);
+ 		err = snd_usb_add_audio_stream(chip, stream, fp);
+ 		if (err < 0) {
++			list_del(&fp->list); /* unlink for avoiding double-free */
+ 			kfree(fp->rate_table);
+ 			kfree(fp->chmap);
+ 			kfree(fp);
+diff --git a/tools/hv/Makefile b/tools/hv/Makefile
+index a8ab79556926..a8c4644022a6 100644
+--- a/tools/hv/Makefile
++++ b/tools/hv/Makefile
+@@ -5,6 +5,8 @@ PTHREAD_LIBS = -lpthread
+ WARNINGS = -Wall -Wextra
+ CFLAGS = $(WARNINGS) -g $(PTHREAD_LIBS) $(shell getconf LFS_CFLAGS)
+ 
++CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
++
+ all: hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon
+ %: %.c
+ 	$(CC) $(CFLAGS) -o $@ $^
+diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
+index 813d9b272c81..48a1c5e7dc0d 100644
+--- a/tools/perf/util/parse-events.c
++++ b/tools/perf/util/parse-events.c
+@@ -2101,11 +2101,11 @@ char *parse_events_formats_error_string(char *additional_terms)
+ 
+ 	/* valid terms */
+ 	if (additional_terms) {
+-		if (!asprintf(&str, "valid terms: %s,%s",
+-			      additional_terms, static_terms))
++		if (asprintf(&str, "valid terms: %s,%s",
++			     additional_terms, static_terms) < 0)
+ 			goto fail;
+ 	} else {
+-		if (!asprintf(&str, "valid terms: %s", static_terms))
++		if (asprintf(&str, "valid terms: %s", static_terms) < 0)
+ 			goto fail;
+ 	}
+ 	return str;
+diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
+index b597bcc8fc78..37b4f5070353 100644
+--- a/tools/perf/util/pmu.c
++++ b/tools/perf/util/pmu.c
+@@ -284,13 +284,12 @@ static int pmu_aliases_parse(char *dir, struct list_head *head)
+ {
+ 	struct dirent *evt_ent;
+ 	DIR *event_dir;
+-	int ret = 0;
+ 
+ 	event_dir = opendir(dir);
+ 	if (!event_dir)
+ 		return -EINVAL;
+ 
+-	while (!ret && (evt_ent = readdir(event_dir))) {
++	while ((evt_ent = readdir(event_dir))) {
+ 		char path[PATH_MAX];
+ 		char *name = evt_ent->d_name;
+ 		FILE *file;
+@@ -306,17 +305,19 @@ static int pmu_aliases_parse(char *dir, struct list_head *head)
+ 
+ 		snprintf(path, PATH_MAX, "%s/%s", dir, name);
+ 
+-		ret = -EINVAL;
+ 		file = fopen(path, "r");
+-		if (!file)
+-			break;
++		if (!file) {
++			pr_debug("Cannot open %s\n", path);
++			continue;
++		}
+ 
+-		ret = perf_pmu__new_alias(head, dir, name, file);
++		if (perf_pmu__new_alias(head, dir, name, file) < 0)
++			pr_debug("Cannot set up %s\n", name);
+ 		fclose(file);
+ 	}
+ 
+ 	closedir(event_dir);
+-	return ret;
++	return 0;
+ }
+ 
+ /*
+diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
+index 1833103768cb..c8680984d2d6 100644
+--- a/tools/perf/util/setup.py
++++ b/tools/perf/util/setup.py
+@@ -22,6 +22,7 @@ cflags = getenv('CFLAGS', '').split()
+ # switch off several checks (need to be at the end of cflags list)
+ cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]
+ 
++src_perf  = getenv('srctree') + '/tools/perf'
+ build_lib = getenv('PYTHON_EXTBUILD_LIB')
+ build_tmp = getenv('PYTHON_EXTBUILD_TMP')
+ libtraceevent = getenv('LIBTRACEEVENT')
+@@ -30,6 +31,9 @@ libapikfs = getenv('LIBAPI')
+ ext_sources = [f.strip() for f in file('util/python-ext-sources')
+ 				if len(f.strip()) > 0 and f[0] != '#']
+ 
++# use full paths with source files
++ext_sources = map(lambda x: '%s/%s' % (src_perf, x) , ext_sources)
++
+ perf = Extension('perf',
+ 		  sources = ext_sources,
+ 		  include_dirs = ['util/include'],
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 9102ae172d2a..298473707f17 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -537,6 +537,16 @@ static struct kvm *kvm_create_vm(unsigned long type)
+ 	if (!kvm)
+ 		return ERR_PTR(-ENOMEM);
+ 
++	spin_lock_init(&kvm->mmu_lock);
++	atomic_inc(&current->mm->mm_count);
++	kvm->mm = current->mm;
++	kvm_eventfd_init(kvm);
++	mutex_init(&kvm->lock);
++	mutex_init(&kvm->irq_lock);
++	mutex_init(&kvm->slots_lock);
++	atomic_set(&kvm->users_count, 1);
++	INIT_LIST_HEAD(&kvm->devices);
++
+ 	r = kvm_arch_init_vm(kvm, type);
+ 	if (r)
+ 		goto out_err_no_disable;
+@@ -569,16 +579,6 @@ static struct kvm *kvm_create_vm(unsigned long type)
+ 			goto out_err;
+ 	}
+ 
+-	spin_lock_init(&kvm->mmu_lock);
+-	kvm->mm = current->mm;
+-	atomic_inc(&kvm->mm->mm_count);
+-	kvm_eventfd_init(kvm);
+-	mutex_init(&kvm->lock);
+-	mutex_init(&kvm->irq_lock);
+-	mutex_init(&kvm->slots_lock);
+-	atomic_set(&kvm->users_count, 1);
+-	INIT_LIST_HEAD(&kvm->devices);
+-
+ 	r = kvm_init_mmu_notifier(kvm);
+ 	if (r)
+ 		goto out_err;
+@@ -603,6 +603,7 @@ out_err_no_disable:
+ 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
+ 		kvm_free_memslots(kvm, kvm->memslots[i]);
+ 	kvm_arch_free_vm(kvm);
++	mmdrop(current->mm);
+ 	return ERR_PTR(r);
+ }
+ 


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [gentoo-commits] proj/linux-patches:4.5 commit in: /
@ 2016-04-20 11:28 Mike Pagano
  0 siblings, 0 replies; 12+ messages in thread
From: Mike Pagano @ 2016-04-20 11:28 UTC (permalink / raw
  To: gentoo-commits

commit:     0137fe197d8cd84a208c55b1e9951f5ba339cd66
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Apr 20 11:28:54 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Apr 20 11:28:54 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0137fe19

Linux patch 4.5.2

 0000_README            |    4 +
 1001_linux-4.5.2.patch | 5011 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5015 insertions(+)

diff --git a/0000_README b/0000_README
index 40d6c2b..0fa777f 100644
--- a/0000_README
+++ b/0000_README
@@ -47,6 +47,10 @@ Patch:  1000_linux-4.5.1.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.5.1
 
+Patch:  1001_linux-4.5.2.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.5.2
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1001_linux-4.5.2.patch b/1001_linux-4.5.2.patch
new file mode 100644
index 0000000..6220fd5
--- /dev/null
+++ b/1001_linux-4.5.2.patch
@@ -0,0 +1,5011 @@
+diff --git a/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt
+index 08a4a32c8eb0..0326154c7925 100644
+--- a/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt
++++ b/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt
+@@ -134,12 +134,12 @@ mfio80		ddr_debug, mips_trace_data, mips_debug
+ mfio81		dreq0, mips_trace_data, eth_debug
+ mfio82		dreq1, mips_trace_data, eth_debug
+ mfio83		mips_pll_lock, mips_trace_data, usb_debug
+-mfio84		sys_pll_lock, mips_trace_data, usb_debug
+-mfio85		wifi_pll_lock, mips_trace_data, sdhost_debug
+-mfio86		bt_pll_lock, mips_trace_data, sdhost_debug
+-mfio87		rpu_v_pll_lock, dreq2, socif_debug
+-mfio88		rpu_l_pll_lock, dreq3, socif_debug
+-mfio89		audio_pll_lock, dreq4, dreq5
++mfio84		audio_pll_lock, mips_trace_data, usb_debug
++mfio85		rpu_v_pll_lock, mips_trace_data, sdhost_debug
++mfio86		rpu_l_pll_lock, mips_trace_data, sdhost_debug
++mfio87		sys_pll_lock, dreq2, socif_debug
++mfio88		wifi_pll_lock, dreq3, socif_debug
++mfio89		bt_pll_lock, dreq4, dreq5
+ tck
+ trstn
+ tdi
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 9a53c929f017..21e4b4861331 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -4016,6 +4016,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ 					sector if the number is odd);
+ 				i = IGNORE_DEVICE (don't bind to this
+ 					device);
++				j = NO_REPORT_LUNS (don't use report luns
++					command, uas only);
+ 				l = NOT_LOCKABLE (don't try to lock and
+ 					unlock ejectable media);
+ 				m = MAX_SECTORS_64 (don't transfer more
+diff --git a/Makefile b/Makefile
+index c621889b8827..1ecaaeb7791d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 5
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+ 
+diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
+index 7d0cba6f1cc5..c86ea8aac203 100644
+--- a/arch/arm/kernel/setup.c
++++ b/arch/arm/kernel/setup.c
+@@ -430,11 +430,13 @@ static void __init patch_aeabi_idiv(void)
+ 	pr_info("CPU: div instructions available: patching division code\n");
+ 
+ 	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
++	asm ("" : "+g" (fn_addr));
+ 	((u32 *)fn_addr)[0] = udiv_instruction();
+ 	((u32 *)fn_addr)[1] = bx_lr_instruction();
+ 	flush_icache_range(fn_addr, fn_addr + 8);
+ 
+ 	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
++	asm ("" : "+g" (fn_addr));
+ 	((u32 *)fn_addr)[0] = sdiv_instruction();
+ 	((u32 *)fn_addr)[1] = bx_lr_instruction();
+ 	flush_icache_range(fn_addr, fn_addr + 8);
+diff --git a/arch/arm64/include/asm/opcodes.h b/arch/arm64/include/asm/opcodes.h
+index 4e603ea36ad3..123f45d92cd1 100644
+--- a/arch/arm64/include/asm/opcodes.h
++++ b/arch/arm64/include/asm/opcodes.h
+@@ -1 +1,5 @@
++#ifdef CONFIG_CPU_BIG_ENDIAN
++#define CONFIG_CPU_ENDIAN_BE8 CONFIG_CPU_BIG_ENDIAN
++#endif
++
+ #include <../../arm/include/asm/opcodes.h>
+diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
+index c536c9e307b9..0931155f6406 100644
+--- a/arch/arm64/kernel/debug-monitors.c
++++ b/arch/arm64/kernel/debug-monitors.c
+@@ -186,20 +186,21 @@ static void clear_regs_spsr_ss(struct pt_regs *regs)
+ 
+ /* EL1 Single Step Handler hooks */
+ static LIST_HEAD(step_hook);
+-static DEFINE_RWLOCK(step_hook_lock);
++static DEFINE_SPINLOCK(step_hook_lock);
+ 
+ void register_step_hook(struct step_hook *hook)
+ {
+-	write_lock(&step_hook_lock);
+-	list_add(&hook->node, &step_hook);
+-	write_unlock(&step_hook_lock);
++	spin_lock(&step_hook_lock);
++	list_add_rcu(&hook->node, &step_hook);
++	spin_unlock(&step_hook_lock);
+ }
+ 
+ void unregister_step_hook(struct step_hook *hook)
+ {
+-	write_lock(&step_hook_lock);
+-	list_del(&hook->node);
+-	write_unlock(&step_hook_lock);
++	spin_lock(&step_hook_lock);
++	list_del_rcu(&hook->node);
++	spin_unlock(&step_hook_lock);
++	synchronize_rcu();
+ }
+ 
+ /*
+@@ -213,15 +214,15 @@ static int call_step_hook(struct pt_regs *regs, unsigned int esr)
+ 	struct step_hook *hook;
+ 	int retval = DBG_HOOK_ERROR;
+ 
+-	read_lock(&step_hook_lock);
++	rcu_read_lock();
+ 
+-	list_for_each_entry(hook, &step_hook, node)	{
++	list_for_each_entry_rcu(hook, &step_hook, node)	{
+ 		retval = hook->fn(regs, esr);
+ 		if (retval == DBG_HOOK_HANDLED)
+ 			break;
+ 	}
+ 
+-	read_unlock(&step_hook_lock);
++	rcu_read_unlock();
+ 
+ 	return retval;
+ }
+diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c
+index bdeed9d13c6f..433c4b9a9f0a 100644
+--- a/arch/mips/alchemy/devboards/db1000.c
++++ b/arch/mips/alchemy/devboards/db1000.c
+@@ -503,15 +503,15 @@ int __init db1000_dev_setup(void)
+ 	if (board == BCSR_WHOAMI_DB1500) {
+ 		c0 = AU1500_GPIO2_INT;
+ 		c1 = AU1500_GPIO5_INT;
+-		d0 = AU1500_GPIO0_INT;
+-		d1 = AU1500_GPIO3_INT;
++		d0 = 0;	/* GPIO number, NOT irq! */
++		d1 = 3; /* GPIO number, NOT irq! */
+ 		s0 = AU1500_GPIO1_INT;
+ 		s1 = AU1500_GPIO4_INT;
+ 	} else if (board == BCSR_WHOAMI_DB1100) {
+ 		c0 = AU1100_GPIO2_INT;
+ 		c1 = AU1100_GPIO5_INT;
+-		d0 = AU1100_GPIO0_INT;
+-		d1 = AU1100_GPIO3_INT;
++		d0 = 0; /* GPIO number, NOT irq! */
++		d1 = 3; /* GPIO number, NOT irq! */
+ 		s0 = AU1100_GPIO1_INT;
+ 		s1 = AU1100_GPIO4_INT;
+ 
+@@ -545,15 +545,15 @@ int __init db1000_dev_setup(void)
+ 	} else if (board == BCSR_WHOAMI_DB1000) {
+ 		c0 = AU1000_GPIO2_INT;
+ 		c1 = AU1000_GPIO5_INT;
+-		d0 = AU1000_GPIO0_INT;
+-		d1 = AU1000_GPIO3_INT;
++		d0 = 0; /* GPIO number, NOT irq! */
++		d1 = 3; /* GPIO number, NOT irq! */
+ 		s0 = AU1000_GPIO1_INT;
+ 		s1 = AU1000_GPIO4_INT;
+ 		platform_add_devices(db1000_devs, ARRAY_SIZE(db1000_devs));
+ 	} else if ((board == BCSR_WHOAMI_PB1500) ||
+ 		   (board == BCSR_WHOAMI_PB1500R2)) {
+ 		c0 = AU1500_GPIO203_INT;
+-		d0 = AU1500_GPIO201_INT;
++		d0 = 1; /* GPIO number, NOT irq! */
+ 		s0 = AU1500_GPIO202_INT;
+ 		twosocks = 0;
+ 		flashsize = 64;
+@@ -566,7 +566,7 @@ int __init db1000_dev_setup(void)
+ 		 */
+ 	} else if (board == BCSR_WHOAMI_PB1100) {
+ 		c0 = AU1100_GPIO11_INT;
+-		d0 = AU1100_GPIO9_INT;
++		d0 = 9; /* GPIO number, NOT irq! */
+ 		s0 = AU1100_GPIO10_INT;
+ 		twosocks = 0;
+ 		flashsize = 64;
+@@ -583,7 +583,6 @@ int __init db1000_dev_setup(void)
+ 	} else
+ 		return 0; /* unknown board, no further dev setup to do */
+ 
+-	irq_set_irq_type(d0, IRQ_TYPE_EDGE_BOTH);
+ 	irq_set_irq_type(c0, IRQ_TYPE_LEVEL_LOW);
+ 	irq_set_irq_type(s0, IRQ_TYPE_LEVEL_LOW);
+ 
+@@ -597,7 +596,6 @@ int __init db1000_dev_setup(void)
+ 		c0, d0, /*s0*/0, 0, 0);
+ 
+ 	if (twosocks) {
+-		irq_set_irq_type(d1, IRQ_TYPE_EDGE_BOTH);
+ 		irq_set_irq_type(c1, IRQ_TYPE_LEVEL_LOW);
+ 		irq_set_irq_type(s1, IRQ_TYPE_LEVEL_LOW);
+ 
+diff --git a/arch/mips/alchemy/devboards/db1550.c b/arch/mips/alchemy/devboards/db1550.c
+index b518f029f5e7..1c01d6eadb08 100644
+--- a/arch/mips/alchemy/devboards/db1550.c
++++ b/arch/mips/alchemy/devboards/db1550.c
+@@ -514,7 +514,7 @@ static void __init db1550_devices(void)
+ 		AU1000_PCMCIA_MEM_PHYS_ADDR  + 0x000400000 - 1,
+ 		AU1000_PCMCIA_IO_PHYS_ADDR,
+ 		AU1000_PCMCIA_IO_PHYS_ADDR   + 0x000010000 - 1,
+-		AU1550_GPIO3_INT, AU1550_GPIO0_INT,
++		AU1550_GPIO3_INT, 0,
+ 		/*AU1550_GPIO21_INT*/0, 0, 0);
+ 
+ 	db1x_register_pcmcia_socket(
+@@ -524,7 +524,7 @@ static void __init db1550_devices(void)
+ 		AU1000_PCMCIA_MEM_PHYS_ADDR  + 0x004400000 - 1,
+ 		AU1000_PCMCIA_IO_PHYS_ADDR   + 0x004000000,
+ 		AU1000_PCMCIA_IO_PHYS_ADDR   + 0x004010000 - 1,
+-		AU1550_GPIO5_INT, AU1550_GPIO1_INT,
++		AU1550_GPIO5_INT, 1,
+ 		/*AU1550_GPIO22_INT*/0, 0, 1);
+ 
+ 	platform_device_register(&db1550_nand_dev);
+diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
+index 490cea569d57..5c62065cbf22 100644
+--- a/arch/mips/kernel/unaligned.c
++++ b/arch/mips/kernel/unaligned.c
+@@ -885,7 +885,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
+ {
+ 	union mips_instruction insn;
+ 	unsigned long value;
+-	unsigned int res;
++	unsigned int res, preempted;
+ 	unsigned long origpc;
+ 	unsigned long orig31;
+ 	void __user *fault_addr = NULL;
+@@ -1226,27 +1226,36 @@ static void emulate_load_store_insn(struct pt_regs *regs,
+ 			if (!access_ok(VERIFY_READ, addr, sizeof(*fpr)))
+ 				goto sigbus;
+ 
+-			/*
+-			 * Disable preemption to avoid a race between copying
+-			 * state from userland, migrating to another CPU and
+-			 * updating the hardware vector register below.
+-			 */
+-			preempt_disable();
+-
+-			res = __copy_from_user_inatomic(fpr, addr,
+-							sizeof(*fpr));
+-			if (res)
+-				goto fault;
+-
+-			/*
+-			 * Update the hardware register if it is in use by the
+-			 * task in this quantum, in order to avoid having to
+-			 * save & restore the whole vector context.
+-			 */
+-			if (test_thread_flag(TIF_USEDMSA))
+-				write_msa_wr(wd, fpr, df);
++			do {
++				/*
++				 * If we have live MSA context keep track of
++				 * whether we get preempted in order to avoid
++				 * the register context we load being clobbered
++				 * by the live context as it's saved during
++				 * preemption. If we don't have live context
++				 * then it can't be saved to clobber the value
++				 * we load.
++				 */
++				preempted = test_thread_flag(TIF_USEDMSA);
++
++				res = __copy_from_user_inatomic(fpr, addr,
++								sizeof(*fpr));
++				if (res)
++					goto fault;
+ 
+-			preempt_enable();
++				/*
++				 * Update the hardware register if it is in use
++				 * by the task in this quantum, in order to
++				 * avoid having to save & restore the whole
++				 * vector context.
++				 */
++				preempt_disable();
++				if (test_thread_flag(TIF_USEDMSA)) {
++					write_msa_wr(wd, fpr, df);
++					preempted = 0;
++				}
++				preempt_enable();
++			} while (preempted);
+ 			break;
+ 
+ 		case msa_st_op:
+diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
+index 14f655cf542e..86ed37671ef5 100644
+--- a/arch/parisc/Kconfig
++++ b/arch/parisc/Kconfig
+@@ -29,6 +29,7 @@ config PARISC
+ 	select TTY # Needed for pdc_cons.c
+ 	select HAVE_DEBUG_STACKOVERFLOW
+ 	select HAVE_ARCH_AUDITSYSCALL
++	select HAVE_ARCH_SECCOMP_FILTER
+ 	select ARCH_NO_COHERENT_DMA_MMAP
+ 
+ 	help
+diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h
+index 0448a2c8eafb..3387307cc33e 100644
+--- a/arch/parisc/include/asm/compat.h
++++ b/arch/parisc/include/asm/compat.h
+@@ -183,6 +183,13 @@ typedef struct compat_siginfo {
+ 			int _band;      /* POLL_IN, POLL_OUT, POLL_MSG */
+ 			int _fd;
+ 		} _sigpoll;
++
++		/* SIGSYS */
++		struct {
++			compat_uptr_t _call_addr; /* calling user insn */
++			int _syscall;	/* triggering system call number */
++			compat_uint_t _arch;	/* AUDIT_ARCH_* of syscall */
++		} _sigsys;
+ 	} _sifields;
+ } compat_siginfo_t;
+ 
+diff --git a/arch/parisc/include/asm/syscall.h b/arch/parisc/include/asm/syscall.h
+index a5eba95d87fe..637ce8d6f375 100644
+--- a/arch/parisc/include/asm/syscall.h
++++ b/arch/parisc/include/asm/syscall.h
+@@ -39,6 +39,19 @@ static inline void syscall_get_arguments(struct task_struct *tsk,
+ 	}
+ }
+ 
++static inline void syscall_set_return_value(struct task_struct *task,
++					    struct pt_regs *regs,
++					    int error, long val)
++{
++	regs->gr[28] = error ? error : val;
++}
++
++static inline void syscall_rollback(struct task_struct *task,
++				    struct pt_regs *regs)
++{
++	/* do nothing */
++}
++
+ static inline int syscall_get_arch(void)
+ {
+ 	int arch = AUDIT_ARCH_PARISC;
+diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
+index 0abdd4c607ed..1960b87c1c8b 100644
+--- a/arch/parisc/include/asm/uaccess.h
++++ b/arch/parisc/include/asm/uaccess.h
+@@ -76,6 +76,7 @@ struct exception_table_entry {
+  */
+ struct exception_data {
+ 	unsigned long fault_ip;
++	unsigned long fault_gp;
+ 	unsigned long fault_space;
+ 	unsigned long fault_addr;
+ };
+diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
+index d2f62570a7b1..78d30d2ea2d8 100644
+--- a/arch/parisc/kernel/asm-offsets.c
++++ b/arch/parisc/kernel/asm-offsets.c
+@@ -299,6 +299,7 @@ int main(void)
+ #endif
+ 	BLANK();
+ 	DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
++	DEFINE(EXCDATA_GP, offsetof(struct exception_data, fault_gp));
+ 	DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
+ 	DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr));
+ 	BLANK();
+diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
+index 568b2c61ea02..3cad8aadc69e 100644
+--- a/arch/parisc/kernel/parisc_ksyms.c
++++ b/arch/parisc/kernel/parisc_ksyms.c
+@@ -47,11 +47,11 @@ EXPORT_SYMBOL(__cmpxchg_u64);
+ EXPORT_SYMBOL(lclear_user);
+ EXPORT_SYMBOL(lstrnlen_user);
+ 
+-/* Global fixups */
+-extern void fixup_get_user_skip_1(void);
+-extern void fixup_get_user_skip_2(void);
+-extern void fixup_put_user_skip_1(void);
+-extern void fixup_put_user_skip_2(void);
++/* Global fixups - defined as int to avoid creation of function pointers */
++extern int fixup_get_user_skip_1;
++extern int fixup_get_user_skip_2;
++extern int fixup_put_user_skip_1;
++extern int fixup_put_user_skip_2;
+ EXPORT_SYMBOL(fixup_get_user_skip_1);
+ EXPORT_SYMBOL(fixup_get_user_skip_2);
+ EXPORT_SYMBOL(fixup_put_user_skip_1);
+diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
+index ce0b2b4075c7..8fb81a391599 100644
+--- a/arch/parisc/kernel/ptrace.c
++++ b/arch/parisc/kernel/ptrace.c
+@@ -270,7 +270,8 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+ long do_syscall_trace_enter(struct pt_regs *regs)
+ {
+ 	/* Do the secure computing check first. */
+-	secure_computing_strict(regs->gr[20]);
++	if (secure_computing() == -1)
++		return -1;
+ 
+ 	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+ 	    tracehook_report_syscall_entry(regs)) {
+@@ -296,7 +297,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
+ 			regs->gr[23] & 0xffffffff);
+ 
+ out:
+-	return regs->gr[20];
++	/*
++	 * Sign extend the syscall number to 64bit since it may have been
++	 * modified by a compat ptrace call
++	 */
++	return (int) ((u32) regs->gr[20]);
+ }
+ 
+ void do_syscall_trace_exit(struct pt_regs *regs)
+diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c
+index 984abbee71ca..c342b2e17492 100644
+--- a/arch/parisc/kernel/signal32.c
++++ b/arch/parisc/kernel/signal32.c
+@@ -371,6 +371,11 @@ copy_siginfo_to_user32 (compat_siginfo_t __user *to, const siginfo_t *from)
+ 			val = (compat_int_t)from->si_int;
+ 			err |= __put_user(val, &to->si_int);
+ 			break;
++		case __SI_SYS >> 16:
++			err |= __put_user(ptr_to_compat(from->si_call_addr), &to->si_call_addr);
++			err |= __put_user(from->si_syscall, &to->si_syscall);
++			err |= __put_user(from->si_arch, &to->si_arch);
++			break;
+ 		}
+ 	}
+ 	return err;
+diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
+index fbafa0d0e2bf..c976ebfe2269 100644
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -329,6 +329,7 @@ tracesys_next:
+ 
+ 	ldo     -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1      /* get task ptr */
+ 	LDREG	TI_TASK(%r1), %r1
++	LDREG   TASK_PT_GR28(%r1), %r28		/* Restore return value */
+ 	LDREG   TASK_PT_GR26(%r1), %r26		/* Restore the users args */
+ 	LDREG   TASK_PT_GR25(%r1), %r25
+ 	LDREG   TASK_PT_GR24(%r1), %r24
+@@ -342,6 +343,7 @@ tracesys_next:
+ 	stw     %r21, -56(%r30)                 /* 6th argument */
+ #endif
+ 
++	cmpib,COND(=),n -1,%r20,tracesys_exit /* seccomp may have returned -1 */
+ 	comiclr,>>=	__NR_Linux_syscalls, %r20, %r0
+ 	b,n	.Ltracesys_nosys
+ 
+diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
+index 553b09855cfd..77e2262c97f6 100644
+--- a/arch/parisc/kernel/traps.c
++++ b/arch/parisc/kernel/traps.c
+@@ -798,6 +798,9 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
+ 
+ 	    if (fault_space == 0 && !faulthandler_disabled())
+ 	    {
++		/* Clean up and return if in exception table. */
++		if (fixup_exception(regs))
++			return;
+ 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
+ 		parisc_terminate("Kernel Fault", regs, code, fault_address);
+ 	    }
+diff --git a/arch/parisc/lib/fixup.S b/arch/parisc/lib/fixup.S
+index 536ef66bb94b..1052b747e011 100644
+--- a/arch/parisc/lib/fixup.S
++++ b/arch/parisc/lib/fixup.S
+@@ -26,6 +26,7 @@
+ 
+ #ifdef CONFIG_SMP
+ 	.macro  get_fault_ip t1 t2
++	loadgp
+ 	addil LT%__per_cpu_offset,%r27
+ 	LDREG RT%__per_cpu_offset(%r1),\t1
+ 	/* t2 = smp_processor_id() */
+@@ -40,14 +41,19 @@
+ 	LDREG RT%exception_data(%r1),\t1
+ 	/* t1 = this_cpu_ptr(&exception_data) */
+ 	add,l \t1,\t2,\t1
++	/* %r27 = t1->fault_gp - restore gp */
++	LDREG EXCDATA_GP(\t1), %r27
+ 	/* t1 = t1->fault_ip */
+ 	LDREG EXCDATA_IP(\t1), \t1
+ 	.endm
+ #else
+ 	.macro  get_fault_ip t1 t2
++	loadgp
+ 	/* t1 = this_cpu_ptr(&exception_data) */
+ 	addil LT%exception_data,%r27
+ 	LDREG RT%exception_data(%r1),\t2
++	/* %r27 = t2->fault_gp - restore gp */
++	LDREG EXCDATA_GP(\t2), %r27
+ 	/* t1 = t2->fault_ip */
+ 	LDREG EXCDATA_IP(\t2), \t1
+ 	.endm
+diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
+index a762864ec92e..f9064449908a 100644
+--- a/arch/parisc/mm/fault.c
++++ b/arch/parisc/mm/fault.c
+@@ -151,6 +151,7 @@ int fixup_exception(struct pt_regs *regs)
+ 		struct exception_data *d;
+ 		d = this_cpu_ptr(&exception_data);
+ 		d->fault_ip = regs->iaoq[0];
++		d->fault_gp = regs->gr[27];
+ 		d->fault_space = regs->isr;
+ 		d->fault_addr = regs->ior;
+ 
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index 3c5736e52a14..54ed9c7663e6 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -854,7 +854,7 @@ void restore_tm_state(struct pt_regs *regs)
+ static inline void save_sprs(struct thread_struct *t)
+ {
+ #ifdef CONFIG_ALTIVEC
+-	if (cpu_has_feature(cpu_has_feature(CPU_FTR_ALTIVEC)))
++	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+ 		t->vrsave = mfspr(SPRN_VRSAVE);
+ #endif
+ #ifdef CONFIG_PPC_BOOK3S_64
+diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
+index 744e24bcb85c..4a811ca7ac9d 100644
+--- a/arch/powerpc/mm/hugetlbpage.c
++++ b/arch/powerpc/mm/hugetlbpage.c
+@@ -414,13 +414,13 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
+ {
+ 	struct hugepd_freelist **batchp;
+ 
+-	batchp = this_cpu_ptr(&hugepd_freelist_cur);
++	batchp = &get_cpu_var(hugepd_freelist_cur);
+ 
+ 	if (atomic_read(&tlb->mm->mm_users) < 2 ||
+ 	    cpumask_equal(mm_cpumask(tlb->mm),
+ 			  cpumask_of(smp_processor_id()))) {
+ 		kmem_cache_free(hugepte_cache, hugepte);
+-        put_cpu_var(hugepd_freelist_cur);
++		put_cpu_var(hugepd_freelist_cur);
+ 		return;
+ 	}
+ 
+diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
+index 13dab0c1645c..3776aca22082 100644
+--- a/arch/s390/mm/gup.c
++++ b/arch/s390/mm/gup.c
+@@ -20,9 +20,9 @@
+ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
+ 		unsigned long end, int write, struct page **pages, int *nr)
+ {
++	struct page *head, *page;
+ 	unsigned long mask;
+ 	pte_t *ptep, pte;
+-	struct page *page;
+ 
+ 	mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
+ 
+@@ -37,12 +37,14 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
+ 			return 0;
+ 		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+ 		page = pte_page(pte);
+-		if (!page_cache_get_speculative(page))
++		head = compound_head(page);
++		if (!page_cache_get_speculative(head))
+ 			return 0;
+ 		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
+-			put_page(page);
++			put_page(head);
+ 			return 0;
+ 		}
++		VM_BUG_ON_PAGE(compound_head(page) != head, page);
+ 		pages[*nr] = page;
+ 		(*nr)++;
+ 
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 44adbb819041..f8dba2097c40 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -42,7 +42,7 @@
+ 
+ #define KVM_PIO_PAGE_OFFSET 1
+ #define KVM_COALESCED_MMIO_PAGE_OFFSET 2
+-#define KVM_HALT_POLL_NS_DEFAULT 500000
++#define KVM_HALT_POLL_NS_DEFAULT 400000
+ 
+ #define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS
+ 
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index d47d231e0d4b..eca5bd9f0e47 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -6074,12 +6074,10 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
+ 	}
+ 
+ 	/* try to inject new event if pending */
+-	if (vcpu->arch.nmi_pending) {
+-		if (kvm_x86_ops->nmi_allowed(vcpu)) {
+-			--vcpu->arch.nmi_pending;
+-			vcpu->arch.nmi_injected = true;
+-			kvm_x86_ops->set_nmi(vcpu);
+-		}
++	if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
++		--vcpu->arch.nmi_pending;
++		vcpu->arch.nmi_injected = true;
++		kvm_x86_ops->set_nmi(vcpu);
+ 	} else if (kvm_cpu_has_injectable_intr(vcpu)) {
+ 		/*
+ 		 * Because interrupts can be injected asynchronously, we are
+@@ -6548,10 +6546,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ 		if (inject_pending_event(vcpu, req_int_win) != 0)
+ 			req_immediate_exit = true;
+ 		/* enable NMI/IRQ window open exits if needed */
+-		else if (vcpu->arch.nmi_pending)
+-			kvm_x86_ops->enable_nmi_window(vcpu);
+-		else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
+-			kvm_x86_ops->enable_irq_window(vcpu);
++		else {
++			if (vcpu->arch.nmi_pending)
++				kvm_x86_ops->enable_nmi_window(vcpu);
++			if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
++				kvm_x86_ops->enable_irq_window(vcpu);
++		}
+ 
+ 		if (kvm_lapic_enabled(vcpu)) {
+ 			update_cr8_intercept(vcpu);
+diff --git a/crypto/asymmetric_keys/pkcs7_trust.c b/crypto/asymmetric_keys/pkcs7_trust.c
+index 90d6d47965b0..ecdb5a2ce085 100644
+--- a/crypto/asymmetric_keys/pkcs7_trust.c
++++ b/crypto/asymmetric_keys/pkcs7_trust.c
+@@ -178,6 +178,8 @@ int pkcs7_validate_trust(struct pkcs7_message *pkcs7,
+ 	int cached_ret = -ENOKEY;
+ 	int ret;
+ 
++	*_trusted = false;
++
+ 	for (p = pkcs7->certs; p; p = p->next)
+ 		p->seen = false;
+ 
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 4a876785b68c..9745cf9fcd3c 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -1955,7 +1955,7 @@ static struct ceph_osd_request *rbd_osd_req_create(
+ 
+ 	osdc = &rbd_dev->rbd_client->client->osdc;
+ 	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
+-					  GFP_ATOMIC);
++					  GFP_NOIO);
+ 	if (!osd_req)
+ 		return NULL;	/* ENOMEM */
+ 
+@@ -2004,7 +2004,7 @@ rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
+ 	rbd_dev = img_request->rbd_dev;
+ 	osdc = &rbd_dev->rbd_client->client->osdc;
+ 	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
+-						false, GFP_ATOMIC);
++						false, GFP_NOIO);
+ 	if (!osd_req)
+ 		return NULL;	/* ENOMEM */
+ 
+@@ -2506,7 +2506,7 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
+ 					bio_chain_clone_range(&bio_list,
+ 								&bio_offset,
+ 								clone_size,
+-								GFP_ATOMIC);
++								GFP_NOIO);
+ 			if (!obj_request->bio_list)
+ 				goto out_unwind;
+ 		} else if (type == OBJ_REQUEST_PAGES) {
+diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
+index 99b375c95998..90c1511d731f 100644
+--- a/drivers/gpio/gpio-pca953x.c
++++ b/drivers/gpio/gpio-pca953x.c
+@@ -18,6 +18,7 @@
+ #include <linux/i2c.h>
+ #include <linux/platform_data/pca953x.h>
+ #include <linux/slab.h>
++#include <asm/unaligned.h>
+ #include <linux/of_platform.h>
+ #include <linux/acpi.h>
+ 
+@@ -159,7 +160,7 @@ static int pca953x_write_regs(struct pca953x_chip *chip, int reg, u8 *val)
+ 		switch (chip->chip_type) {
+ 		case PCA953X_TYPE:
+ 			ret = i2c_smbus_write_word_data(chip->client,
+-							reg << 1, (u16) *val);
++			    reg << 1, cpu_to_le16(get_unaligned((u16 *)val)));
+ 			break;
+ 		case PCA957X_TYPE:
+ 			ret = i2c_smbus_write_byte_data(chip->client, reg << 1,
+diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
+index b2b7b78664b8..76ac906b4d78 100644
+--- a/drivers/gpio/gpio-pxa.c
++++ b/drivers/gpio/gpio-pxa.c
+@@ -283,8 +283,8 @@ static int pxa_gpio_direction_output(struct gpio_chip *chip,
+ 	writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));
+ 
+ 	ret = pinctrl_gpio_direction_output(chip->base + offset);
+-	if (!ret)
+-		return 0;
++	if (ret)
++		return ret;
+ 
+ 	spin_lock_irqsave(&gpio_lock, flags);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+index 7a4b101e10c6..75cb5b9b88f8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+@@ -816,10 +816,13 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
+ 	struct drm_device *ddev = adev->ddev;
+ 	struct drm_crtc *crtc;
+ 	uint32_t line_time_us, vblank_lines;
++	struct cgs_mode_info *mode_info;
+ 
+ 	if (info == NULL)
+ 		return -EINVAL;
+ 
++	mode_info = info->mode_info;
++
+ 	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
+ 		list_for_each_entry(crtc,
+ 				&ddev->mode_config.crtc_list, head) {
+@@ -828,7 +831,7 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
+ 				info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
+ 				info->display_count++;
+ 			}
+-			if (info->mode_info != NULL &&
++			if (mode_info != NULL &&
+ 				crtc->enabled && amdgpu_crtc->enabled &&
+ 				amdgpu_crtc->hw_mode.clock) {
+ 				line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
+@@ -836,10 +839,10 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
+ 				vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
+ 							amdgpu_crtc->hw_mode.crtc_vdisplay +
+ 							(amdgpu_crtc->v_border * 2);
+-				info->mode_info->vblank_time_us = vblank_lines * line_time_us;
+-				info->mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
+-				info->mode_info->ref_clock = adev->clock.spll.reference_freq;
+-				info->mode_info++;
++				mode_info->vblank_time_us = vblank_lines * line_time_us;
++				mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
++				mode_info->ref_clock = adev->clock.spll.reference_freq;
++				mode_info = NULL;
+ 			}
+ 		}
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+index b8060795b27b..53964b14ff48 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+@@ -902,14 +902,6 @@ static int gmc_v7_0_early_init(void *handle)
+ 	gmc_v7_0_set_gart_funcs(adev);
+ 	gmc_v7_0_set_irq_funcs(adev);
+ 
+-	if (adev->flags & AMD_IS_APU) {
+-		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+-	} else {
+-		u32 tmp = RREG32(mmMC_SEQ_MISC0);
+-		tmp &= MC_SEQ_MISC0__MT__MASK;
+-		adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
+-	}
+-
+ 	return 0;
+ }
+ 
+@@ -930,6 +922,14 @@ static int gmc_v7_0_sw_init(void *handle)
+ 	if (r)
+ 		return r;
+ 
++	if (adev->flags & AMD_IS_APU) {
++		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
++	} else {
++		u32 tmp = RREG32(mmMC_SEQ_MISC0);
++		tmp &= MC_SEQ_MISC0__MT__MASK;
++		adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
++	}
++
+ 	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
+ 	if (r)
+ 		return r;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+index 3efd45546241..e59251f4a85d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+@@ -856,14 +856,6 @@ static int gmc_v8_0_early_init(void *handle)
+ 	gmc_v8_0_set_gart_funcs(adev);
+ 	gmc_v8_0_set_irq_funcs(adev);
+ 
+-	if (adev->flags & AMD_IS_APU) {
+-		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+-	} else {
+-		u32 tmp = RREG32(mmMC_SEQ_MISC0);
+-		tmp &= MC_SEQ_MISC0__MT__MASK;
+-		adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
+-	}
+-
+ 	return 0;
+ }
+ 
+@@ -874,6 +866,8 @@ static int gmc_v8_0_late_init(void *handle)
+ 	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+ }
+ 
++#define mmMC_SEQ_MISC0_FIJI 0xA71
++
+ static int gmc_v8_0_sw_init(void *handle)
+ {
+ 	int r;
+@@ -884,6 +878,19 @@ static int gmc_v8_0_sw_init(void *handle)
+ 	if (r)
+ 		return r;
+ 
++	if (adev->flags & AMD_IS_APU) {
++		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
++	} else {
++		u32 tmp;
++
++		if (adev->asic_type == CHIP_FIJI)
++			tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
++		else
++			tmp = RREG32(mmMC_SEQ_MISC0);
++		tmp &= MC_SEQ_MISC0__MT__MASK;
++		adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
++	}
++
+ 	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
+ 	if (r)
+ 		return r;
+diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
+index 9535c5b60387..7e5a97204051 100644
+--- a/drivers/gpu/drm/drm_dp_helper.c
++++ b/drivers/gpu/drm/drm_dp_helper.c
+@@ -178,7 +178,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
+ {
+ 	struct drm_dp_aux_msg msg;
+ 	unsigned int retry;
+-	int err;
++	int err = 0;
+ 
+ 	memset(&msg, 0, sizeof(msg));
+ 	msg.address = offset;
+@@ -186,6 +186,8 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
+ 	msg.buffer = buffer;
+ 	msg.size = size;
+ 
++	mutex_lock(&aux->hw_mutex);
++
+ 	/*
+ 	 * The specification doesn't give any recommendation on how often to
+ 	 * retry native transactions. We used to retry 7 times like for
+@@ -194,25 +196,24 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
+ 	 */
+ 	for (retry = 0; retry < 32; retry++) {
+ 
+-		mutex_lock(&aux->hw_mutex);
+ 		err = aux->transfer(aux, &msg);
+-		mutex_unlock(&aux->hw_mutex);
+ 		if (err < 0) {
+ 			if (err == -EBUSY)
+ 				continue;
+ 
+-			return err;
++			goto unlock;
+ 		}
+ 
+ 
+ 		switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) {
+ 		case DP_AUX_NATIVE_REPLY_ACK:
+ 			if (err < size)
+-				return -EPROTO;
+-			return err;
++				err = -EPROTO;
++			goto unlock;
+ 
+ 		case DP_AUX_NATIVE_REPLY_NACK:
+-			return -EIO;
++			err = -EIO;
++			goto unlock;
+ 
+ 		case DP_AUX_NATIVE_REPLY_DEFER:
+ 			usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
+@@ -221,7 +222,11 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
+ 	}
+ 
+ 	DRM_DEBUG_KMS("too many retries, giving up\n");
+-	return -EIO;
++	err = -EIO;
++
++unlock:
++	mutex_unlock(&aux->hw_mutex);
++	return err;
+ }
+ 
+ /**
+@@ -543,9 +548,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+ 	int max_retries = max(7, drm_dp_i2c_retry_count(msg, dp_aux_i2c_speed_khz));
+ 
+ 	for (retry = 0, defer_i2c = 0; retry < (max_retries + defer_i2c); retry++) {
+-		mutex_lock(&aux->hw_mutex);
+ 		ret = aux->transfer(aux, msg);
+-		mutex_unlock(&aux->hw_mutex);
+ 		if (ret < 0) {
+ 			if (ret == -EBUSY)
+ 				continue;
+@@ -684,6 +687,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
+ 
+ 	memset(&msg, 0, sizeof(msg));
+ 
++	mutex_lock(&aux->hw_mutex);
++
+ 	for (i = 0; i < num; i++) {
+ 		msg.address = msgs[i].addr;
+ 		drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
+@@ -738,6 +743,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
+ 	msg.size = 0;
+ 	(void)drm_dp_i2c_do_msg(aux, &msg);
+ 
++	mutex_unlock(&aux->hw_mutex);
++
+ 	return err;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index a82b891ae1fe..7285adb27099 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -2926,9 +2926,11 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
+ 	/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
+ 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
+ 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
++	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 },
+ 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
+ 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
+ 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
++	{ PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
+ 	{ 0, 0, 0, 0 },
+ };
+ 
+@@ -3008,6 +3010,10 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
+ 		}
+ 		++p;
+ 	}
++	/* limit mclk on all R7 370 parts for stability */
++	if (rdev->pdev->device == 0x6811 &&
++	    rdev->pdev->revision == 0x81)
++		max_mclk = 120000;
+ 
+ 	if (rps->vce_active) {
+ 		rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
+diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
+index 200419d4d43c..18a2acbccb7d 100644
+--- a/drivers/gpu/drm/udl/udl_fb.c
++++ b/drivers/gpu/drm/udl/udl_fb.c
+@@ -538,7 +538,7 @@ static int udlfb_create(struct drm_fb_helper *helper,
+ out_destroy_fbi:
+ 	drm_fb_helper_release_fbi(helper);
+ out_gfree:
+-	drm_gem_object_unreference(&ufbdev->ufb.obj->base);
++	drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
+ out:
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
+index 2a0a784ab6ee..d7528e0d8442 100644
+--- a/drivers/gpu/drm/udl/udl_gem.c
++++ b/drivers/gpu/drm/udl/udl_gem.c
+@@ -52,7 +52,7 @@ udl_gem_create(struct drm_file *file,
+ 		return ret;
+ 	}
+ 
+-	drm_gem_object_unreference(&obj->base);
++	drm_gem_object_unreference_unlocked(&obj->base);
+ 	*handle_p = handle;
+ 	return 0;
+ }
+diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
+index ad71160b9ea4..ae83af649a60 100644
+--- a/drivers/hid/usbhid/hid-core.c
++++ b/drivers/hid/usbhid/hid-core.c
+@@ -951,14 +951,6 @@ static int usbhid_output_report(struct hid_device *hid, __u8 *buf, size_t count)
+ 	return ret;
+ }
+ 
+-static void usbhid_restart_queues(struct usbhid_device *usbhid)
+-{
+-	if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
+-		usbhid_restart_out_queue(usbhid);
+-	if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
+-		usbhid_restart_ctrl_queue(usbhid);
+-}
+-
+ static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid)
+ {
+ 	struct usbhid_device *usbhid = hid->driver_data;
+@@ -1404,6 +1396,37 @@ static void hid_cease_io(struct usbhid_device *usbhid)
+ 	usb_kill_urb(usbhid->urbout);
+ }
+ 
++static void hid_restart_io(struct hid_device *hid)
++{
++	struct usbhid_device *usbhid = hid->driver_data;
++	int clear_halt = test_bit(HID_CLEAR_HALT, &usbhid->iofl);
++	int reset_pending = test_bit(HID_RESET_PENDING, &usbhid->iofl);
++
++	spin_lock_irq(&usbhid->lock);
++	clear_bit(HID_SUSPENDED, &usbhid->iofl);
++	usbhid_mark_busy(usbhid);
++
++	if (clear_halt || reset_pending)
++		schedule_work(&usbhid->reset_work);
++	usbhid->retry_delay = 0;
++	spin_unlock_irq(&usbhid->lock);
++
++	if (reset_pending || !test_bit(HID_STARTED, &usbhid->iofl))
++		return;
++
++	if (!clear_halt) {
++		if (hid_start_in(hid) < 0)
++			hid_io_error(hid);
++	}
++
++	spin_lock_irq(&usbhid->lock);
++	if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
++		usbhid_restart_out_queue(usbhid);
++	if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
++		usbhid_restart_ctrl_queue(usbhid);
++	spin_unlock_irq(&usbhid->lock);
++}
++
+ /* Treat USB reset pretty much the same as suspend/resume */
+ static int hid_pre_reset(struct usb_interface *intf)
+ {
+@@ -1453,14 +1476,14 @@ static int hid_post_reset(struct usb_interface *intf)
+ 		return 1;
+ 	}
+ 
++	/* No need to do another reset or clear a halted endpoint */
+ 	spin_lock_irq(&usbhid->lock);
+ 	clear_bit(HID_RESET_PENDING, &usbhid->iofl);
++	clear_bit(HID_CLEAR_HALT, &usbhid->iofl);
+ 	spin_unlock_irq(&usbhid->lock);
+ 	hid_set_idle(dev, intf->cur_altsetting->desc.bInterfaceNumber, 0, 0);
+-	status = hid_start_in(hid);
+-	if (status < 0)
+-		hid_io_error(hid);
+-	usbhid_restart_queues(usbhid);
++
++	hid_restart_io(hid);
+ 
+ 	return 0;
+ }
+@@ -1483,25 +1506,9 @@ void usbhid_put_power(struct hid_device *hid)
+ #ifdef CONFIG_PM
+ static int hid_resume_common(struct hid_device *hid, bool driver_suspended)
+ {
+-	struct usbhid_device *usbhid = hid->driver_data;
+-	int status;
+-
+-	spin_lock_irq(&usbhid->lock);
+-	clear_bit(HID_SUSPENDED, &usbhid->iofl);
+-	usbhid_mark_busy(usbhid);
+-
+-	if (test_bit(HID_CLEAR_HALT, &usbhid->iofl) ||
+-			test_bit(HID_RESET_PENDING, &usbhid->iofl))
+-		schedule_work(&usbhid->reset_work);
+-	usbhid->retry_delay = 0;
+-
+-	usbhid_restart_queues(usbhid);
+-	spin_unlock_irq(&usbhid->lock);
+-
+-	status = hid_start_in(hid);
+-	if (status < 0)
+-		hid_io_error(hid);
++	int status = 0;
+ 
++	hid_restart_io(hid);
+ 	if (driver_suspended && hid->driver && hid->driver->resume)
+ 		status = hid->driver->resume(hid);
+ 	return status;
+@@ -1570,12 +1577,8 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
+ static int hid_resume(struct usb_interface *intf)
+ {
+ 	struct hid_device *hid = usb_get_intfdata (intf);
+-	struct usbhid_device *usbhid = hid->driver_data;
+ 	int status;
+ 
+-	if (!test_bit(HID_STARTED, &usbhid->iofl))
+-		return 0;
+-
+ 	status = hid_resume_common(hid, true);
+ 	dev_dbg(&intf->dev, "resume status %d\n", status);
+ 	return 0;
+@@ -1584,10 +1587,8 @@ static int hid_resume(struct usb_interface *intf)
+ static int hid_reset_resume(struct usb_interface *intf)
+ {
+ 	struct hid_device *hid = usb_get_intfdata(intf);
+-	struct usbhid_device *usbhid = hid->driver_data;
+ 	int status;
+ 
+-	clear_bit(HID_SUSPENDED, &usbhid->iofl);
+ 	status = hid_post_reset(intf);
+ 	if (status >= 0 && hid->driver && hid->driver->reset_resume) {
+ 		int ret = hid->driver->reset_resume(hid);
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 99ef77fcfb80..94a8875b38fb 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -2409,6 +2409,17 @@ void wacom_setup_device_quirks(struct wacom *wacom)
+ 	}
+ 
+ 	/*
++	 * Hack for the Bamboo One:
++	 * the device presents a PAD/Touch interface as most Bamboos and even
++	 * sends ghosts PAD data on it. However, later, we must disable this
++	 * ghost interface, and we can not detect it unless we set it here
++	 * to WACOM_DEVICETYPE_PAD or WACOM_DEVICETYPE_TOUCH.
++	 */
++	if (features->type == BAMBOO_PEN &&
++	    features->pktlen == WACOM_PKGLEN_BBTOUCH3)
++		features->device_type |= WACOM_DEVICETYPE_PAD;
++
++	/*
+ 	 * Raw Wacom-mode pen and touch events both come from interface
+ 	 * 0, whose HID descriptor has an application usage of 0xFF0D
+ 	 * (i.e., WACOM_VENDORDEFINED_PEN). We route pen packets back
+diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c
+index 36544c4f653c..303d0c9df907 100644
+--- a/drivers/hwmon/max1111.c
++++ b/drivers/hwmon/max1111.c
+@@ -85,6 +85,9 @@ static struct max1111_data *the_max1111;
+ 
+ int max1111_read_channel(int channel)
+ {
++	if (!the_max1111 || !the_max1111->spi)
++		return -ENODEV;
++
+ 	return max1111_read(&the_max1111->spi->dev, channel);
+ }
+ EXPORT_SYMBOL(max1111_read_channel);
+@@ -258,6 +261,9 @@ static int max1111_remove(struct spi_device *spi)
+ {
+ 	struct max1111_data *data = spi_get_drvdata(spi);
+ 
++#ifdef CONFIG_SHARPSL_PM
++	the_max1111 = NULL;
++#endif
+ 	hwmon_device_unregister(data->hwmon_dev);
+ 	sysfs_remove_group(&spi->dev.kobj, &max1110_attr_group);
+ 	sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
+diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
+index c73331f7782b..2072a31e813b 100644
+--- a/drivers/iio/accel/bmc150-accel-core.c
++++ b/drivers/iio/accel/bmc150-accel-core.c
+@@ -547,7 +547,7 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
+ {
+ 	int ret;
+ 	int axis = chan->scan_index;
+-	unsigned int raw_val;
++	__le16 raw_val;
+ 
+ 	mutex_lock(&data->mutex);
+ 	ret = bmc150_accel_set_power_state(data, true);
+@@ -557,14 +557,14 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
+ 	}
+ 
+ 	ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_AXIS_TO_REG(axis),
+-			       &raw_val, 2);
++			       &raw_val, sizeof(raw_val));
+ 	if (ret < 0) {
+ 		dev_err(data->dev, "Error reading axis %d\n", axis);
+ 		bmc150_accel_set_power_state(data, false);
+ 		mutex_unlock(&data->mutex);
+ 		return ret;
+ 	}
+-	*val = sign_extend32(raw_val >> chan->scan_type.shift,
++	*val = sign_extend32(le16_to_cpu(raw_val) >> chan->scan_type.shift,
+ 			     chan->scan_type.realbits - 1);
+ 	ret = bmc150_accel_set_power_state(data, false);
+ 	mutex_unlock(&data->mutex);
+@@ -988,6 +988,7 @@ static const struct iio_event_spec bmc150_accel_event = {
+ 		.realbits = (bits),					\
+ 		.storagebits = 16,					\
+ 		.shift = 16 - (bits),					\
++		.endianness = IIO_LE,					\
+ 	},								\
+ 	.event_spec = &bmc150_accel_event,				\
+ 	.num_event_specs = 1						\
+diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
+index bbce3b09ac45..4dac567e75b4 100644
+--- a/drivers/iio/gyro/bmg160_core.c
++++ b/drivers/iio/gyro/bmg160_core.c
+@@ -452,7 +452,7 @@ static int bmg160_get_temp(struct bmg160_data *data, int *val)
+ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
+ {
+ 	int ret;
+-	unsigned int raw_val;
++	__le16 raw_val;
+ 
+ 	mutex_lock(&data->mutex);
+ 	ret = bmg160_set_power_state(data, true);
+@@ -462,7 +462,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
+ 	}
+ 
+ 	ret = regmap_bulk_read(data->regmap, BMG160_AXIS_TO_REG(axis), &raw_val,
+-			       2);
++			       sizeof(raw_val));
+ 	if (ret < 0) {
+ 		dev_err(data->dev, "Error reading axis %d\n", axis);
+ 		bmg160_set_power_state(data, false);
+@@ -470,7 +470,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
+ 		return ret;
+ 	}
+ 
+-	*val = sign_extend32(raw_val, 15);
++	*val = sign_extend32(le16_to_cpu(raw_val), 15);
+ 	ret = bmg160_set_power_state(data, false);
+ 	mutex_unlock(&data->mutex);
+ 	if (ret < 0)
+@@ -733,6 +733,7 @@ static const struct iio_event_spec bmg160_event = {
+ 		.sign = 's',						\
+ 		.realbits = 16,					\
+ 		.storagebits = 16,					\
++		.endianness = IIO_LE,					\
+ 	},								\
+ 	.event_spec = &bmg160_event,					\
+ 	.num_event_specs = 1						\
+@@ -780,7 +781,7 @@ static irqreturn_t bmg160_trigger_handler(int irq, void *p)
+ 			mutex_unlock(&data->mutex);
+ 			goto err;
+ 		}
+-		data->buffer[i++] = ret;
++		data->buffer[i++] = val;
+ 	}
+ 	mutex_unlock(&data->mutex);
+ 
+diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
+index 139ae916225f..5b6abc541e8c 100644
+--- a/drivers/iio/industrialio-buffer.c
++++ b/drivers/iio/industrialio-buffer.c
+@@ -645,6 +645,7 @@ static int iio_verify_update(struct iio_dev *indio_dev,
+ 	unsigned int modes;
+ 
+ 	memset(config, 0, sizeof(*config));
++	config->watermark = ~0;
+ 
+ 	/*
+ 	 * If there is just one buffer and we are removing it there is nothing
+diff --git a/drivers/iio/magnetometer/st_magn.h b/drivers/iio/magnetometer/st_magn.h
+index 06a4d9c35581..9daca4681922 100644
+--- a/drivers/iio/magnetometer/st_magn.h
++++ b/drivers/iio/magnetometer/st_magn.h
+@@ -44,6 +44,7 @@ static inline int st_magn_allocate_ring(struct iio_dev *indio_dev)
+ static inline void st_magn_deallocate_ring(struct iio_dev *indio_dev)
+ {
+ }
++#define ST_MAGN_TRIGGER_SET_STATE NULL
+ #endif /* CONFIG_IIO_BUFFER */
+ 
+ #endif /* ST_MAGN_H */
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index 0e3b0092ec92..515bb8b80952 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -848,7 +848,8 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
+ 	if (!group->default_domain) {
+ 		group->default_domain = __iommu_domain_alloc(dev->bus,
+ 							     IOMMU_DOMAIN_DMA);
+-		group->domain = group->default_domain;
++		if (!group->domain)
++			group->domain = group->default_domain;
+ 	}
+ 
+ 	ret = iommu_group_add_device(group, dev);
+diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
+index 2d782ce94a67..7ae89c684783 100644
+--- a/drivers/media/platform/coda/coda-common.c
++++ b/drivers/media/platform/coda/coda-common.c
+@@ -2118,14 +2118,12 @@ static int coda_probe(struct platform_device *pdev)
+ 
+ 	pdev_id = of_id ? of_id->data : platform_get_device_id(pdev);
+ 
+-	if (of_id) {
++	if (of_id)
+ 		dev->devtype = of_id->data;
+-	} else if (pdev_id) {
++	else if (pdev_id)
+ 		dev->devtype = &coda_devdata[pdev_id->driver_data];
+-	} else {
+-		ret = -EINVAL;
+-		goto err_v4l2_register;
+-	}
++	else
++		return -EINVAL;
+ 
+ 	spin_lock_init(&dev->irqlock);
+ 	INIT_LIST_HEAD(&dev->instances);
+diff --git a/drivers/media/platform/vsp1/vsp1_sru.c b/drivers/media/platform/vsp1/vsp1_sru.c
+index 6310acab60e7..d41ae950d1a1 100644
+--- a/drivers/media/platform/vsp1/vsp1_sru.c
++++ b/drivers/media/platform/vsp1/vsp1_sru.c
+@@ -154,6 +154,7 @@ static int sru_s_stream(struct v4l2_subdev *subdev, int enable)
+ 	mutex_lock(sru->ctrls.lock);
+ 	ctrl0 |= vsp1_sru_read(sru, VI6_SRU_CTRL0)
+ 	       & (VI6_SRU_CTRL0_PARAM0_MASK | VI6_SRU_CTRL0_PARAM1_MASK);
++	vsp1_sru_write(sru, VI6_SRU_CTRL0, ctrl0);
+ 	mutex_unlock(sru->ctrls.lock);
+ 
+ 	vsp1_sru_write(sru, VI6_SRU_CTRL1, VI6_SRU_CTRL1_PARAM5);
+diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
+index 9e29e70a78d7..d8988801dde0 100644
+--- a/drivers/media/usb/au0828/au0828-core.c
++++ b/drivers/media/usb/au0828/au0828-core.c
+@@ -192,7 +192,7 @@ static void au0828_usb_disconnect(struct usb_interface *interface)
+ 	   Set the status so poll routines can check and avoid
+ 	   access after disconnect.
+ 	*/
+-	dev->dev_state = DEV_DISCONNECTED;
++	set_bit(DEV_DISCONNECTED, &dev->dev_state);
+ 
+ 	au0828_rc_unregister(dev);
+ 	/* Digital TV */
+diff --git a/drivers/media/usb/au0828/au0828-input.c b/drivers/media/usb/au0828/au0828-input.c
+index b0f067971979..3d6687f0407d 100644
+--- a/drivers/media/usb/au0828/au0828-input.c
++++ b/drivers/media/usb/au0828/au0828-input.c
+@@ -130,7 +130,7 @@ static int au0828_get_key_au8522(struct au0828_rc *ir)
+ 	bool first = true;
+ 
+ 	/* do nothing if device is disconnected */
+-	if (ir->dev->dev_state == DEV_DISCONNECTED)
++	if (test_bit(DEV_DISCONNECTED, &ir->dev->dev_state))
+ 		return 0;
+ 
+ 	/* Check IR int */
+@@ -260,7 +260,7 @@ static void au0828_rc_stop(struct rc_dev *rc)
+ 	cancel_delayed_work_sync(&ir->work);
+ 
+ 	/* do nothing if device is disconnected */
+-	if (ir->dev->dev_state != DEV_DISCONNECTED) {
++	if (!test_bit(DEV_DISCONNECTED, &ir->dev->dev_state)) {
+ 		/* Disable IR */
+ 		au8522_rc_clear(ir, 0xe0, 1 << 4);
+ 	}
+diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
+index a13625722848..8bc69af874a0 100644
+--- a/drivers/media/usb/au0828/au0828-video.c
++++ b/drivers/media/usb/au0828/au0828-video.c
+@@ -104,14 +104,13 @@ static inline void print_err_status(struct au0828_dev *dev,
+ 
+ static int check_dev(struct au0828_dev *dev)
+ {
+-	if (dev->dev_state & DEV_DISCONNECTED) {
++	if (test_bit(DEV_DISCONNECTED, &dev->dev_state)) {
+ 		pr_info("v4l2 ioctl: device not present\n");
+ 		return -ENODEV;
+ 	}
+ 
+-	if (dev->dev_state & DEV_MISCONFIGURED) {
+-		pr_info("v4l2 ioctl: device is misconfigured; "
+-		       "close and open it again\n");
++	if (test_bit(DEV_MISCONFIGURED, &dev->dev_state)) {
++		pr_info("v4l2 ioctl: device is misconfigured; close and open it again\n");
+ 		return -EIO;
+ 	}
+ 	return 0;
+@@ -519,8 +518,8 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb)
+ 	if (!dev)
+ 		return 0;
+ 
+-	if ((dev->dev_state & DEV_DISCONNECTED) ||
+-	    (dev->dev_state & DEV_MISCONFIGURED))
++	if (test_bit(DEV_DISCONNECTED, &dev->dev_state) ||
++	    test_bit(DEV_MISCONFIGURED, &dev->dev_state))
+ 		return 0;
+ 
+ 	if (urb->status < 0) {
+@@ -822,10 +821,10 @@ static int au0828_stream_interrupt(struct au0828_dev *dev)
+ 	int ret = 0;
+ 
+ 	dev->stream_state = STREAM_INTERRUPT;
+-	if (dev->dev_state == DEV_DISCONNECTED)
++	if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
+ 		return -ENODEV;
+ 	else if (ret) {
+-		dev->dev_state = DEV_MISCONFIGURED;
++		set_bit(DEV_MISCONFIGURED, &dev->dev_state);
+ 		dprintk(1, "%s device is misconfigured!\n", __func__);
+ 		return ret;
+ 	}
+@@ -1014,7 +1013,7 @@ static int au0828_v4l2_open(struct file *filp)
+ 	int ret;
+ 
+ 	dprintk(1,
+-		"%s called std_set %d dev_state %d stream users %d users %d\n",
++		"%s called std_set %d dev_state %ld stream users %d users %d\n",
+ 		__func__, dev->std_set_in_tuner_core, dev->dev_state,
+ 		dev->streaming_users, dev->users);
+ 
+@@ -1033,7 +1032,7 @@ static int au0828_v4l2_open(struct file *filp)
+ 		au0828_analog_stream_enable(dev);
+ 		au0828_analog_stream_reset(dev);
+ 		dev->stream_state = STREAM_OFF;
+-		dev->dev_state |= DEV_INITIALIZED;
++		set_bit(DEV_INITIALIZED, &dev->dev_state);
+ 	}
+ 	dev->users++;
+ 	mutex_unlock(&dev->lock);
+@@ -1047,7 +1046,7 @@ static int au0828_v4l2_close(struct file *filp)
+ 	struct video_device *vdev = video_devdata(filp);
+ 
+ 	dprintk(1,
+-		"%s called std_set %d dev_state %d stream users %d users %d\n",
++		"%s called std_set %d dev_state %ld stream users %d users %d\n",
+ 		__func__, dev->std_set_in_tuner_core, dev->dev_state,
+ 		dev->streaming_users, dev->users);
+ 
+@@ -1063,7 +1062,7 @@ static int au0828_v4l2_close(struct file *filp)
+ 		del_timer_sync(&dev->vbi_timeout);
+ 	}
+ 
+-	if (dev->dev_state == DEV_DISCONNECTED)
++	if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
+ 		goto end;
+ 
+ 	if (dev->users == 1) {
+@@ -1092,7 +1091,7 @@ static void au0828_init_tuner(struct au0828_dev *dev)
+ 		.type = V4L2_TUNER_ANALOG_TV,
+ 	};
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	if (dev->std_set_in_tuner_core)
+@@ -1164,7 +1163,7 @@ static int vidioc_querycap(struct file *file, void  *priv,
+ 	struct video_device *vdev = video_devdata(file);
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	strlcpy(cap->driver, "au0828", sizeof(cap->driver));
+@@ -1207,7 +1206,7 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ {
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	f->fmt.pix.width = dev->width;
+@@ -1226,7 +1225,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ {
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	return au0828_set_format(dev, VIDIOC_TRY_FMT, f);
+@@ -1238,7 +1237,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 	int rc;
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	rc = check_dev(dev);
+@@ -1260,7 +1259,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id norm)
+ {
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	if (norm == dev->std)
+@@ -1292,7 +1291,7 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
+ {
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	*norm = dev->std;
+@@ -1315,7 +1314,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
+ 		[AU0828_VMUX_DEBUG] = "tv debug"
+ 	};
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	tmp = input->index;
+@@ -1345,7 +1344,7 @@ static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+ {
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	*i = dev->ctrl_input;
+@@ -1356,7 +1355,7 @@ static void au0828_s_input(struct au0828_dev *dev, int index)
+ {
+ 	int i;
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	switch (AUVI_INPUT(index).type) {
+@@ -1441,7 +1440,7 @@ static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a)
+ {
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	a->index = dev->ctrl_ainput;
+@@ -1461,7 +1460,7 @@ static int vidioc_s_audio(struct file *file, void *priv, const struct v4l2_audio
+ 	if (a->index != dev->ctrl_ainput)
+ 		return -EINVAL;
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 	return 0;
+ }
+@@ -1473,7 +1472,7 @@ static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
+ 	if (t->index != 0)
+ 		return -EINVAL;
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	strcpy(t->name, "Auvitek tuner");
+@@ -1493,7 +1492,7 @@ static int vidioc_s_tuner(struct file *file, void *priv,
+ 	if (t->index != 0)
+ 		return -EINVAL;
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	au0828_init_tuner(dev);
+@@ -1515,7 +1514,7 @@ static int vidioc_g_frequency(struct file *file, void *priv,
+ 
+ 	if (freq->tuner != 0)
+ 		return -EINVAL;
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 	freq->frequency = dev->ctrl_freq;
+ 	return 0;
+@@ -1530,7 +1529,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
+ 	if (freq->tuner != 0)
+ 		return -EINVAL;
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	au0828_init_tuner(dev);
+@@ -1556,7 +1555,7 @@ static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
+ {
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	format->fmt.vbi.samples_per_line = dev->vbi_width;
+@@ -1582,7 +1581,7 @@ static int vidioc_cropcap(struct file *file, void *priv,
+ 	if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ 		return -EINVAL;
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	cc->bounds.left = 0;
+@@ -1604,7 +1603,7 @@ static int vidioc_g_register(struct file *file, void *priv,
+ {
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	reg->val = au0828_read(dev, reg->reg);
+@@ -1617,7 +1616,7 @@ static int vidioc_s_register(struct file *file, void *priv,
+ {
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	return au0828_writereg(dev, reg->reg, reg->val);
+diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
+index 8276072bc55a..b28a05d76618 100644
+--- a/drivers/media/usb/au0828/au0828.h
++++ b/drivers/media/usb/au0828/au0828.h
+@@ -21,6 +21,7 @@
+ 
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
++#include <linux/bitops.h>
+ #include <linux/usb.h>
+ #include <linux/i2c.h>
+ #include <linux/i2c-algo-bit.h>
+@@ -122,9 +123,9 @@ enum au0828_stream_state {
+ 
+ /* device state */
+ enum au0828_dev_state {
+-	DEV_INITIALIZED = 0x01,
+-	DEV_DISCONNECTED = 0x02,
+-	DEV_MISCONFIGURED = 0x04
++	DEV_INITIALIZED = 0,
++	DEV_DISCONNECTED = 1,
++	DEV_MISCONFIGURED = 2
+ };
+ 
+ struct au0828_dev;
+@@ -248,7 +249,7 @@ struct au0828_dev {
+ 	int input_type;
+ 	int std_set_in_tuner_core;
+ 	unsigned int ctrl_input;
+-	enum au0828_dev_state dev_state;
++	long unsigned int dev_state; /* defined at enum au0828_dev_state */;
+ 	enum au0828_stream_state stream_state;
+ 	wait_queue_head_t open;
+ 
+diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
+index df3b8eced8c4..a04d0f7ee00e 100644
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -390,6 +390,7 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
+ 	slot->cd_idx = 0;
+ 	slot->cd_override_level = true;
+ 	if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
++	    slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD ||
+ 	    slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD)
+ 		slot->host->mmc_host_ops.get_cd = bxt_get_cd;
+ 
+@@ -1173,6 +1174,30 @@ static const struct pci_device_id pci_ids[] = {
+ 
+ 	{
+ 		.vendor		= PCI_VENDOR_ID_INTEL,
++		.device		= PCI_DEVICE_ID_INTEL_BXTM_EMMC,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_emmc,
++	},
++
++	{
++		.vendor		= PCI_VENDOR_ID_INTEL,
++		.device		= PCI_DEVICE_ID_INTEL_BXTM_SDIO,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_sdio,
++	},
++
++	{
++		.vendor		= PCI_VENDOR_ID_INTEL,
++		.device		= PCI_DEVICE_ID_INTEL_BXTM_SD,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_sd,
++	},
++
++	{
++		.vendor		= PCI_VENDOR_ID_INTEL,
+ 		.device		= PCI_DEVICE_ID_INTEL_APL_EMMC,
+ 		.subvendor	= PCI_ANY_ID,
+ 		.subdevice	= PCI_ANY_ID,
+diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
+index d1a0b4db60db..89e7151684a1 100644
+--- a/drivers/mmc/host/sdhci-pci.h
++++ b/drivers/mmc/host/sdhci-pci.h
+@@ -28,6 +28,9 @@
+ #define PCI_DEVICE_ID_INTEL_BXT_SD	0x0aca
+ #define PCI_DEVICE_ID_INTEL_BXT_EMMC	0x0acc
+ #define PCI_DEVICE_ID_INTEL_BXT_SDIO	0x0ad0
++#define PCI_DEVICE_ID_INTEL_BXTM_SD	0x1aca
++#define PCI_DEVICE_ID_INTEL_BXTM_EMMC	0x1acc
++#define PCI_DEVICE_ID_INTEL_BXTM_SDIO	0x1ad0
+ #define PCI_DEVICE_ID_INTEL_APL_SD	0x5aca
+ #define PCI_DEVICE_ID_INTEL_APL_EMMC	0x5acc
+ #define PCI_DEVICE_ID_INTEL_APL_SDIO	0x5ad0
+diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
+index c7f27fe4805a..452bf500c82e 100644
+--- a/drivers/mmc/host/sdhci-pxav3.c
++++ b/drivers/mmc/host/sdhci-pxav3.c
+@@ -309,8 +309,30 @@ static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
+ 		__func__, uhs, ctrl_2);
+ }
+ 
++static void pxav3_set_power(struct sdhci_host *host, unsigned char mode,
++			    unsigned short vdd)
++{
++	struct mmc_host *mmc = host->mmc;
++	u8 pwr = host->pwr;
++
++	sdhci_set_power(host, mode, vdd);
++
++	if (host->pwr == pwr)
++		return;
++
++	if (host->pwr == 0)
++		vdd = 0;
++
++	if (!IS_ERR(mmc->supply.vmmc)) {
++		spin_unlock_irq(&host->lock);
++		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
++		spin_lock_irq(&host->lock);
++	}
++}
++
+ static const struct sdhci_ops pxav3_sdhci_ops = {
+ 	.set_clock = sdhci_set_clock,
++	.set_power = pxav3_set_power,
+ 	.platform_send_init_74_clocks = pxav3_gen_init_74_clocks,
+ 	.get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ 	.set_bus_width = sdhci_set_bus_width,
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 8059d7248fff..6d485b5fa5ca 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -1250,10 +1250,24 @@ clock_set:
+ }
+ EXPORT_SYMBOL_GPL(sdhci_set_clock);
+ 
+-static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
+-			    unsigned short vdd)
++static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
++				unsigned short vdd)
+ {
+ 	struct mmc_host *mmc = host->mmc;
++
++	spin_unlock_irq(&host->lock);
++	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
++	spin_lock_irq(&host->lock);
++
++	if (mode != MMC_POWER_OFF)
++		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
++	else
++		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
++}
++
++void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
++		     unsigned short vdd)
++{
+ 	u8 pwr = 0;
+ 
+ 	if (mode != MMC_POWER_OFF) {
+@@ -1285,7 +1299,6 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
+ 		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+ 		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
+ 			sdhci_runtime_pm_bus_off(host);
+-		vdd = 0;
+ 	} else {
+ 		/*
+ 		 * Spec says that we should clear the power reg before setting
+@@ -1316,12 +1329,20 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
+ 		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
+ 			mdelay(10);
+ 	}
++}
++EXPORT_SYMBOL_GPL(sdhci_set_power);
+ 
+-	if (!IS_ERR(mmc->supply.vmmc)) {
+-		spin_unlock_irq(&host->lock);
+-		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+-		spin_lock_irq(&host->lock);
+-	}
++static void __sdhci_set_power(struct sdhci_host *host, unsigned char mode,
++			      unsigned short vdd)
++{
++	struct mmc_host *mmc = host->mmc;
++
++	if (host->ops->set_power)
++		host->ops->set_power(host, mode, vdd);
++	else if (!IS_ERR(mmc->supply.vmmc))
++		sdhci_set_power_reg(host, mode, vdd);
++	else
++		sdhci_set_power(host, mode, vdd);
+ }
+ 
+ /*****************************************************************************\
+@@ -1471,7 +1492,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
+ 		}
+ 	}
+ 
+-	sdhci_set_power(host, ios->power_mode, ios->vdd);
++	__sdhci_set_power(host, ios->power_mode, ios->vdd);
+ 
+ 	if (host->ops->platform_send_init_74_clocks)
+ 		host->ops->platform_send_init_74_clocks(host, ios->power_mode);
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 0115e9907bf8..033d72b5bbd5 100644
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -529,6 +529,8 @@ struct sdhci_ops {
+ #endif
+ 
+ 	void	(*set_clock)(struct sdhci_host *host, unsigned int clock);
++	void	(*set_power)(struct sdhci_host *host, unsigned char mode,
++			     unsigned short vdd);
+ 
+ 	int		(*enable_dma)(struct sdhci_host *host);
+ 	unsigned int	(*get_max_clock)(struct sdhci_host *host);
+@@ -660,6 +662,8 @@ static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host)
+ }
+ 
+ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
++void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
++		     unsigned short vdd);
+ void sdhci_set_bus_width(struct sdhci_host *host, int width);
+ void sdhci_reset(struct sdhci_host *host, u8 mask);
+ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index b7f1a9919033..5ec8195b02c0 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3308,6 +3308,30 @@ static int bond_close(struct net_device *bond_dev)
+ 	return 0;
+ }
+ 
++/* fold stats, assuming all rtnl_link_stats64 fields are u64, but
++ * that some drivers can provide 32bit values only.
++ */
++static void bond_fold_stats(struct rtnl_link_stats64 *_res,
++			    const struct rtnl_link_stats64 *_new,
++			    const struct rtnl_link_stats64 *_old)
++{
++	const u64 *new = (const u64 *)_new;
++	const u64 *old = (const u64 *)_old;
++	u64 *res = (u64 *)_res;
++	int i;
++
++	for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
++		u64 nv = new[i];
++		u64 ov = old[i];
++
++		/* detects if this particular field is 32bit only */
++		if (((nv | ov) >> 32) == 0)
++			res[i] += (u32)nv - (u32)ov;
++		else
++			res[i] += nv - ov;
++	}
++}
++
+ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
+ 						struct rtnl_link_stats64 *stats)
+ {
+@@ -3316,43 +3340,23 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
+ 	struct list_head *iter;
+ 	struct slave *slave;
+ 
++	spin_lock(&bond->stats_lock);
+ 	memcpy(stats, &bond->bond_stats, sizeof(*stats));
+ 
+-	bond_for_each_slave(bond, slave, iter) {
+-		const struct rtnl_link_stats64 *sstats =
++	rcu_read_lock();
++	bond_for_each_slave_rcu(bond, slave, iter) {
++		const struct rtnl_link_stats64 *new =
+ 			dev_get_stats(slave->dev, &temp);
+-		struct rtnl_link_stats64 *pstats = &slave->slave_stats;
+-
+-		stats->rx_packets +=  sstats->rx_packets - pstats->rx_packets;
+-		stats->rx_bytes += sstats->rx_bytes - pstats->rx_bytes;
+-		stats->rx_errors += sstats->rx_errors - pstats->rx_errors;
+-		stats->rx_dropped += sstats->rx_dropped - pstats->rx_dropped;
+-
+-		stats->tx_packets += sstats->tx_packets - pstats->tx_packets;;
+-		stats->tx_bytes += sstats->tx_bytes - pstats->tx_bytes;
+-		stats->tx_errors += sstats->tx_errors - pstats->tx_errors;
+-		stats->tx_dropped += sstats->tx_dropped - pstats->tx_dropped;
+-
+-		stats->multicast += sstats->multicast - pstats->multicast;
+-		stats->collisions += sstats->collisions - pstats->collisions;
+-
+-		stats->rx_length_errors += sstats->rx_length_errors - pstats->rx_length_errors;
+-		stats->rx_over_errors += sstats->rx_over_errors - pstats->rx_over_errors;
+-		stats->rx_crc_errors += sstats->rx_crc_errors - pstats->rx_crc_errors;
+-		stats->rx_frame_errors += sstats->rx_frame_errors - pstats->rx_frame_errors;
+-		stats->rx_fifo_errors += sstats->rx_fifo_errors - pstats->rx_fifo_errors;
+-		stats->rx_missed_errors += sstats->rx_missed_errors - pstats->rx_missed_errors;
+-
+-		stats->tx_aborted_errors += sstats->tx_aborted_errors - pstats->tx_aborted_errors;
+-		stats->tx_carrier_errors += sstats->tx_carrier_errors - pstats->tx_carrier_errors;
+-		stats->tx_fifo_errors += sstats->tx_fifo_errors - pstats->tx_fifo_errors;
+-		stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors - pstats->tx_heartbeat_errors;
+-		stats->tx_window_errors += sstats->tx_window_errors - pstats->tx_window_errors;
++
++		bond_fold_stats(stats, new, &slave->slave_stats);
+ 
+ 		/* save off the slave stats for the next run */
+-		memcpy(pstats, sstats, sizeof(*sstats));
++		memcpy(&slave->slave_stats, new, sizeof(*new));
+ 	}
++	rcu_read_unlock();
++
+ 	memcpy(&bond->bond_stats, stats, sizeof(*stats));
++	spin_unlock(&bond->stats_lock);
+ 
+ 	return stats;
+ }
+@@ -4166,6 +4170,7 @@ void bond_setup(struct net_device *bond_dev)
+ 	struct bonding *bond = netdev_priv(bond_dev);
+ 
+ 	spin_lock_init(&bond->mode_lock);
++	spin_lock_init(&bond->stats_lock);
+ 	bond->params = bonding_defaults;
+ 
+ 	/* Initialize pointers */
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+index d7e01a74e927..6746fd03cb3a 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -1197,7 +1197,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
+ 			dev->stats.tx_bytes += tx_cb_ptr->skb->len;
+ 			dma_unmap_single(&dev->dev,
+ 					 dma_unmap_addr(tx_cb_ptr, dma_addr),
+-					 tx_cb_ptr->skb->len,
++					 dma_unmap_len(tx_cb_ptr, dma_len),
+ 					 DMA_TO_DEVICE);
+ 			bcmgenet_free_cb(tx_cb_ptr);
+ 		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
+@@ -1308,7 +1308,7 @@ static int bcmgenet_xmit_single(struct net_device *dev,
+ 	}
+ 
+ 	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
+-	dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
++	dma_unmap_len_set(tx_cb_ptr, dma_len, skb_len);
+ 	length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
+ 			(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
+ 			DMA_TX_APPEND_CRC;
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index acb1c5b2bad3..2ee05cebea75 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -3070,17 +3070,17 @@ static int mvneta_stop(struct net_device *dev)
+ 	struct mvneta_port *pp = netdev_priv(dev);
+ 
+ 	/* Inform that we are stopping so we don't want to setup the
+-	 * driver for new CPUs in the notifiers
++	 * driver for new CPUs in the notifiers. The code of the
++	 * notifier for CPU online is protected by the same spinlock,
++	 * so when we get the lock, the notifer work is done.
+ 	 */
+ 	spin_lock(&pp->lock);
+ 	pp->is_stopped = true;
++	spin_unlock(&pp->lock);
++
+ 	mvneta_stop_dev(pp);
+ 	mvneta_mdio_remove(pp);
+ 	unregister_cpu_notifier(&pp->cpu_notifier);
+-	/* Now that the notifier are unregistered, we can release le
+-	 * lock
+-	 */
+-	spin_unlock(&pp->lock);
+ 	on_each_cpu(mvneta_percpu_disable, pp, true);
+ 	free_percpu_irq(dev->irq, pp->ports);
+ 	mvneta_cleanup_rxqs(pp);
+@@ -3612,6 +3612,7 @@ static int mvneta_probe(struct platform_device *pdev)
+ 	dev->ethtool_ops = &mvneta_eth_tool_ops;
+ 
+ 	pp = netdev_priv(dev);
++	spin_lock_init(&pp->lock);
+ 	pp->phy_node = phy_node;
+ 	pp->phy_interface = phy_mode;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+index 25ce1b030a00..cd9b2b28df88 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
++++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+@@ -3141,7 +3141,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
+ 		case QP_TRANS_RTS2RTS:
+ 		case QP_TRANS_SQD2SQD:
+ 		case QP_TRANS_SQD2RTS:
+-			if (slave != mlx4_master_func_num(dev))
++			if (slave != mlx4_master_func_num(dev)) {
+ 				if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
+ 					port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
+ 					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
+@@ -3160,6 +3160,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
+ 					if (qp_ctx->alt_path.mgid_index >= num_gids)
+ 						return -EINVAL;
+ 				}
++			}
+ 			break;
+ 		default:
+ 			break;
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+index 3b89ed2f3c76..65a115fc0c96 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+@@ -118,6 +118,8 @@ struct mlxsw_sp {
+ #define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
+ 		unsigned int interval; /* ms */
+ 	} fdb_notify;
++#define MLXSW_SP_MIN_AGEING_TIME 10
++#define MLXSW_SP_MAX_AGEING_TIME 1000000
+ #define MLXSW_SP_DEFAULT_AGEING_TIME 300
+ 	u32 ageing_time;
+ 	struct mlxsw_sp_upper master_bridge;
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+index 7b56098acc58..e1c74efff51a 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+@@ -311,8 +311,13 @@ static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ 	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
+ 	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
+ 
+-	if (switchdev_trans_ph_prepare(trans))
+-		return 0;
++	if (switchdev_trans_ph_prepare(trans)) {
++		if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
++		    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
++			return -ERANGE;
++		else
++			return 0;
++	}
+ 
+ 	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
+ }
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+index 46bbea8e023c..55007f1e6bbc 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+@@ -566,6 +566,7 @@ struct qlcnic_adapter_stats {
+ 	u64  tx_dma_map_error;
+ 	u64  spurious_intr;
+ 	u64  mac_filter_limit_overrun;
++	u64  mbx_spurious_intr;
+ };
+ 
+ /*
+@@ -1099,7 +1100,7 @@ struct qlcnic_mailbox {
+ 	unsigned long		status;
+ 	spinlock_t		queue_lock;	/* Mailbox queue lock */
+ 	spinlock_t		aen_lock;	/* Mailbox response/AEN lock */
+-	atomic_t		rsp_status;
++	u32			rsp_status;
+ 	u32			num_cmds;
+ };
+ 
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+index 37a731be7d39..f9640d5ce6ba 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+@@ -491,7 +491,7 @@ irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
+ 
+ static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx)
+ {
+-	atomic_set(&mbx->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
++	mbx->rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
+ 	complete(&mbx->completion);
+ }
+ 
+@@ -510,7 +510,7 @@ static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
+ 	if (event &  QLCNIC_MBX_ASYNC_EVENT) {
+ 		__qlcnic_83xx_process_aen(adapter);
+ 	} else {
+-		if (atomic_read(&mbx->rsp_status) != rsp_status)
++		if (mbx->rsp_status != rsp_status)
+ 			qlcnic_83xx_notify_mbx_response(mbx);
+ 	}
+ out:
+@@ -1023,7 +1023,7 @@ static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
+ 		if (event &  QLCNIC_MBX_ASYNC_EVENT) {
+ 			__qlcnic_83xx_process_aen(adapter);
+ 		} else {
+-			if (atomic_read(&mbx->rsp_status) != rsp_status)
++			if (mbx->rsp_status != rsp_status)
+ 				qlcnic_83xx_notify_mbx_response(mbx);
+ 		}
+ 	}
+@@ -2338,9 +2338,9 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
+ 
+ static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
+ {
++	u32 mask, resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
+ 	struct qlcnic_adapter *adapter = data;
+ 	struct qlcnic_mailbox *mbx;
+-	u32 mask, resp, event;
+ 	unsigned long flags;
+ 
+ 	mbx = adapter->ahw->mailbox;
+@@ -2350,10 +2350,14 @@ static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
+ 		goto out;
+ 
+ 	event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
+-	if (event &  QLCNIC_MBX_ASYNC_EVENT)
++	if (event &  QLCNIC_MBX_ASYNC_EVENT) {
+ 		__qlcnic_83xx_process_aen(adapter);
+-	else
+-		qlcnic_83xx_notify_mbx_response(mbx);
++	} else {
++		if (mbx->rsp_status != rsp_status)
++			qlcnic_83xx_notify_mbx_response(mbx);
++		else
++			adapter->stats.mbx_spurious_intr++;
++	}
+ 
+ out:
+ 	mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
+@@ -4050,10 +4054,10 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
+ 	struct qlcnic_adapter *adapter = mbx->adapter;
+ 	const struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
+ 	struct device *dev = &adapter->pdev->dev;
+-	atomic_t *rsp_status = &mbx->rsp_status;
+ 	struct list_head *head = &mbx->cmd_q;
+ 	struct qlcnic_hardware_context *ahw;
+ 	struct qlcnic_cmd_args *cmd = NULL;
++	unsigned long flags;
+ 
+ 	ahw = adapter->ahw;
+ 
+@@ -4063,7 +4067,9 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
+ 			return;
+ 		}
+ 
+-		atomic_set(rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
++		spin_lock_irqsave(&mbx->aen_lock, flags);
++		mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT;
++		spin_unlock_irqrestore(&mbx->aen_lock, flags);
+ 
+ 		spin_lock(&mbx->queue_lock);
+ 
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+index 494e8105adee..0a2318cad34d 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+@@ -59,7 +59,8 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
+ 	 QLC_OFF(stats.mac_filter_limit_overrun)},
+ 	{"spurious intr", QLC_SIZEOF(stats.spurious_intr),
+ 	 QLC_OFF(stats.spurious_intr)},
+-
++	{"mbx spurious intr", QLC_SIZEOF(stats.mbx_spurious_intr),
++	 QLC_OFF(stats.mbx_spurious_intr)},
+ };
+ 
+ static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
+diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+index 997976426799..b28e73ea2c25 100644
+--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
++++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+@@ -1648,7 +1648,18 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
+ 		return;
+ 	}
+ 	skb_reserve(new_skb, NET_IP_ALIGN);
++
++	pci_dma_sync_single_for_cpu(qdev->pdev,
++				    dma_unmap_addr(sbq_desc, mapaddr),
++				    dma_unmap_len(sbq_desc, maplen),
++				    PCI_DMA_FROMDEVICE);
++
+ 	memcpy(skb_put(new_skb, length), skb->data, length);
++
++	pci_dma_sync_single_for_device(qdev->pdev,
++				       dma_unmap_addr(sbq_desc, mapaddr),
++				       dma_unmap_len(sbq_desc, maplen),
++				       PCI_DMA_FROMDEVICE);
+ 	skb = new_skb;
+ 
+ 	/* Frame error, so drop the packet. */
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index 738449992876..01f6d5bbd420 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -1136,11 +1136,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
+ 			break;
+ 		sh_eth_set_receive_align(skb);
+ 
+-		/* RX descriptor */
+-		rxdesc = &mdp->rx_ring[i];
+ 		/* The size of the buffer is a multiple of 32 bytes. */
+ 		buf_len = ALIGN(mdp->rx_buf_sz, 32);
+-		rxdesc->len = cpu_to_le32(buf_len << 16);
+ 		dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len,
+ 					  DMA_FROM_DEVICE);
+ 		if (dma_mapping_error(&ndev->dev, dma_addr)) {
+@@ -1148,6 +1145,10 @@ static void sh_eth_ring_format(struct net_device *ndev)
+ 			break;
+ 		}
+ 		mdp->rx_skbuff[i] = skb;
++
++		/* RX descriptor */
++		rxdesc = &mdp->rx_ring[i];
++		rxdesc->len = cpu_to_le32(buf_len << 16);
+ 		rxdesc->addr = cpu_to_le32(dma_addr);
+ 		rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP);
+ 
+@@ -1163,7 +1164,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
+ 	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
+ 
+ 	/* Mark the last entry as wrapping the ring. */
+-	rxdesc->status |= cpu_to_le32(RD_RDLE);
++	if (rxdesc)
++		rxdesc->status |= cpu_to_le32(RD_RDLE);
+ 
+ 	memset(mdp->tx_ring, 0, tx_ringsize);
+ 
+diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
+index 166a7fc87e2f..f39e7198e818 100644
+--- a/drivers/net/ethernet/rocker/rocker.c
++++ b/drivers/net/ethernet/rocker/rocker.c
+@@ -239,6 +239,7 @@ struct rocker {
+ 	struct {
+ 		u64 id;
+ 	} hw;
++	unsigned long ageing_time;
+ 	spinlock_t cmd_ring_lock;		/* for cmd ring accesses */
+ 	struct rocker_dma_ring_info cmd_ring;
+ 	struct rocker_dma_ring_info event_ring;
+@@ -3704,7 +3705,7 @@ static void rocker_fdb_cleanup(unsigned long data)
+ 	struct rocker_port *rocker_port;
+ 	struct rocker_fdb_tbl_entry *entry;
+ 	struct hlist_node *tmp;
+-	unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME;
++	unsigned long next_timer = jiffies + rocker->ageing_time;
+ 	unsigned long expires;
+ 	unsigned long lock_flags;
+ 	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE |
+@@ -4367,8 +4368,12 @@ static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port,
+ 					  struct switchdev_trans *trans,
+ 					  u32 ageing_time)
+ {
++	struct rocker *rocker = rocker_port->rocker;
++
+ 	if (!switchdev_trans_ph_prepare(trans)) {
+ 		rocker_port->ageing_time = clock_t_to_jiffies(ageing_time);
++		if (rocker_port->ageing_time < rocker->ageing_time)
++			rocker->ageing_time = rocker_port->ageing_time;
+ 		mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies);
+ 	}
+ 
+@@ -5206,10 +5211,13 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 		goto err_init_tbls;
+ 	}
+ 
++	rocker->ageing_time = BR_DEFAULT_AGEING_TIME;
+ 	setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
+ 		    (unsigned long) rocker);
+ 	mod_timer(&rocker->fdb_cleanup_timer, jiffies);
+ 
++	rocker->ageing_time = BR_DEFAULT_AGEING_TIME;
++
+ 	err = rocker_probe_ports(rocker);
+ 	if (err) {
+ 		dev_err(&pdev->dev, "failed to probe ports\n");
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index d636d051fac8..95394edd1ed5 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -760,6 +760,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ 			macvtap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
+ 		if (copylen > good_linear)
+ 			copylen = good_linear;
++		else if (copylen < ETH_HLEN)
++			copylen = ETH_HLEN;
+ 		linear = copylen;
+ 		i = *from;
+ 		iov_iter_advance(&i, copylen);
+@@ -769,10 +771,11 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ 
+ 	if (!zerocopy) {
+ 		copylen = len;
+-		if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > good_linear)
++		linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
++		if (linear > good_linear)
+ 			linear = good_linear;
+-		else
+-			linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
++		else if (linear < ETH_HLEN)
++			linear = ETH_HLEN;
+ 	}
+ 
+ 	skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index d61da9ece3ba..aafe237b25ac 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -575,7 +575,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
+ 
+ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ {
+-	struct ppp_file *pf = file->private_data;
++	struct ppp_file *pf;
+ 	struct ppp *ppp;
+ 	int err = -EFAULT, val, val2, i;
+ 	struct ppp_idle idle;
+@@ -585,9 +585,14 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 	void __user *argp = (void __user *)arg;
+ 	int __user *p = argp;
+ 
+-	if (!pf)
+-		return ppp_unattached_ioctl(current->nsproxy->net_ns,
+-					pf, file, cmd, arg);
++	mutex_lock(&ppp_mutex);
++
++	pf = file->private_data;
++	if (!pf) {
++		err = ppp_unattached_ioctl(current->nsproxy->net_ns,
++					   pf, file, cmd, arg);
++		goto out;
++	}
+ 
+ 	if (cmd == PPPIOCDETACH) {
+ 		/*
+@@ -602,7 +607,6 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 		 * this fd and reopening /dev/ppp.
+ 		 */
+ 		err = -EINVAL;
+-		mutex_lock(&ppp_mutex);
+ 		if (pf->kind == INTERFACE) {
+ 			ppp = PF_TO_PPP(pf);
+ 			rtnl_lock();
+@@ -616,15 +620,13 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 		} else
+ 			pr_warn("PPPIOCDETACH file->f_count=%ld\n",
+ 				atomic_long_read(&file->f_count));
+-		mutex_unlock(&ppp_mutex);
+-		return err;
++		goto out;
+ 	}
+ 
+ 	if (pf->kind == CHANNEL) {
+ 		struct channel *pch;
+ 		struct ppp_channel *chan;
+ 
+-		mutex_lock(&ppp_mutex);
+ 		pch = PF_TO_CHANNEL(pf);
+ 
+ 		switch (cmd) {
+@@ -646,17 +648,16 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 				err = chan->ops->ioctl(chan, cmd, arg);
+ 			up_read(&pch->chan_sem);
+ 		}
+-		mutex_unlock(&ppp_mutex);
+-		return err;
++		goto out;
+ 	}
+ 
+ 	if (pf->kind != INTERFACE) {
+ 		/* can't happen */
+ 		pr_err("PPP: not interface or channel??\n");
+-		return -EINVAL;
++		err = -EINVAL;
++		goto out;
+ 	}
+ 
+-	mutex_lock(&ppp_mutex);
+ 	ppp = PF_TO_PPP(pf);
+ 	switch (cmd) {
+ 	case PPPIOCSMRU:
+@@ -831,7 +832,10 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 	default:
+ 		err = -ENOTTY;
+ 	}
++
++out:
+ 	mutex_unlock(&ppp_mutex);
++
+ 	return err;
+ }
+ 
+@@ -844,7 +848,6 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
+ 	struct ppp_net *pn;
+ 	int __user *p = (int __user *)arg;
+ 
+-	mutex_lock(&ppp_mutex);
+ 	switch (cmd) {
+ 	case PPPIOCNEWUNIT:
+ 		/* Create a new ppp unit */
+@@ -894,7 +897,7 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
+ 	default:
+ 		err = -ENOTTY;
+ 	}
+-	mutex_unlock(&ppp_mutex);
++
+ 	return err;
+ }
+ 
+@@ -2304,7 +2307,7 @@ int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
+ 
+ 	pch->ppp = NULL;
+ 	pch->chan = chan;
+-	pch->chan_net = net;
++	pch->chan_net = get_net(net);
+ 	chan->ppp = pch;
+ 	init_ppp_file(&pch->file, CHANNEL);
+ 	pch->file.hdrlen = chan->hdrlen;
+@@ -2401,6 +2404,8 @@ ppp_unregister_channel(struct ppp_channel *chan)
+ 	spin_lock_bh(&pn->all_channels_lock);
+ 	list_del(&pch->list);
+ 	spin_unlock_bh(&pn->all_channels_lock);
++	put_net(pch->chan_net);
++	pch->chan_net = NULL;
+ 
+ 	pch->file.dead = 1;
+ 	wake_up_interruptible(&pch->file.rwait);
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 88bb8cc3555b..81ecc2ed8137 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -621,7 +621,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
+ 
+ 	/* Re-attach the filter to persist device */
+ 	if (!skip_filter && (tun->filter_attached == true)) {
+-		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
++		err = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
++					 lockdep_rtnl_is_held());
+ 		if (!err)
+ 			goto out;
+ 	}
+@@ -1000,7 +1001,6 @@ static void tun_net_init(struct net_device *dev)
+ 		/* Zero header length */
+ 		dev->type = ARPHRD_NONE;
+ 		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+-		dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
+ 		break;
+ 
+ 	case IFF_TAP:
+@@ -1012,7 +1012,6 @@ static void tun_net_init(struct net_device *dev)
+ 
+ 		eth_hw_addr_random(dev);
+ 
+-		dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
+ 		break;
+ 	}
+ }
+@@ -1466,6 +1465,8 @@ static void tun_setup(struct net_device *dev)
+ 
+ 	dev->ethtool_ops = &tun_ethtool_ops;
+ 	dev->destructor = tun_free_netdev;
++	/* We prefer our own queue length */
++	dev->tx_queue_len = TUN_READQ_SIZE;
+ }
+ 
+ /* Trivial set of netlink ops to allow deleting tun or tap
+@@ -1807,7 +1808,7 @@ static void tun_detach_filter(struct tun_struct *tun, int n)
+ 
+ 	for (i = 0; i < n; i++) {
+ 		tfile = rtnl_dereference(tun->tfiles[i]);
+-		sk_detach_filter(tfile->socket.sk);
++		__sk_detach_filter(tfile->socket.sk, lockdep_rtnl_is_held());
+ 	}
+ 
+ 	tun->filter_attached = false;
+@@ -1820,7 +1821,8 @@ static int tun_attach_filter(struct tun_struct *tun)
+ 
+ 	for (i = 0; i < tun->numqueues; i++) {
+ 		tfile = rtnl_dereference(tun->tfiles[i]);
+-		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
++		ret = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
++					 lockdep_rtnl_is_held());
+ 		if (ret) {
+ 			tun_detach_filter(tun, i);
+ 			return ret;
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index a3a4ccf7cf52..1232a8c608b4 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -844,6 +844,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x19d2, 0x1426, 2)},	/* ZTE MF91 */
+ 	{QMI_FIXED_INTF(0x19d2, 0x1428, 2)},	/* Telewell TW-LTE 4G v2 */
+ 	{QMI_FIXED_INTF(0x19d2, 0x2002, 4)},	/* ZTE (Vodafone) K3765-Z */
++	{QMI_FIXED_INTF(0x2001, 0x7e19, 4)},	/* D-Link DWM-221 B1 */
+ 	{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
+ 	{QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
+ 	{QMI_FIXED_INTF(0x1199, 0x68a2, 8)},	/* Sierra Wireless MC7710 in QMI mode */
+diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
+index 44541dbc5c28..69b994f3b8c5 100644
+--- a/drivers/net/wan/farsync.c
++++ b/drivers/net/wan/farsync.c
+@@ -2516,7 +2516,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+                 dev->mem_start   = card->phys_mem
+                                  + BUF_OFFSET ( txBuffer[i][0][0]);
+                 dev->mem_end     = card->phys_mem
+-                                 + BUF_OFFSET ( txBuffer[i][NUM_TX_BUFFER][0]);
++                                 + BUF_OFFSET ( txBuffer[i][NUM_TX_BUFFER - 1][LEN_RX_BUFFER - 1]);
+                 dev->base_addr   = card->pci_conf;
+                 dev->irq         = card->irq;
+ 
+diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
+index 73fb4232f9f2..a794157a147d 100644
+--- a/drivers/net/wireless/ath/ath9k/eeprom.c
++++ b/drivers/net/wireless/ath/ath9k/eeprom.c
+@@ -477,10 +477,9 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
+ 
+ 	if (match) {
+ 		if (AR_SREV_9287(ah)) {
+-			/* FIXME: array overrun? */
+ 			for (i = 0; i < numXpdGains; i++) {
+ 				minPwrT4[i] = data_9287[idxL].pwrPdg[i][0];
+-				maxPwrT4[i] = data_9287[idxL].pwrPdg[i][4];
++				maxPwrT4[i] = data_9287[idxL].pwrPdg[i][intercepts - 1];
+ 				ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
+ 						data_9287[idxL].pwrPdg[i],
+ 						data_9287[idxL].vpdPdg[i],
+@@ -490,7 +489,7 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
+ 		} else if (eeprom_4k) {
+ 			for (i = 0; i < numXpdGains; i++) {
+ 				minPwrT4[i] = data_4k[idxL].pwrPdg[i][0];
+-				maxPwrT4[i] = data_4k[idxL].pwrPdg[i][4];
++				maxPwrT4[i] = data_4k[idxL].pwrPdg[i][intercepts - 1];
+ 				ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
+ 						data_4k[idxL].pwrPdg[i],
+ 						data_4k[idxL].vpdPdg[i],
+@@ -500,7 +499,7 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
+ 		} else {
+ 			for (i = 0; i < numXpdGains; i++) {
+ 				minPwrT4[i] = data_def[idxL].pwrPdg[i][0];
+-				maxPwrT4[i] = data_def[idxL].pwrPdg[i][4];
++				maxPwrT4[i] = data_def[idxL].pwrPdg[i][intercepts - 1];
+ 				ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
+ 						data_def[idxL].pwrPdg[i],
+ 						data_def[idxL].vpdPdg[i],
+diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
+index 576eb7013792..cdbab06bf74f 100644
+--- a/drivers/nvdimm/bus.c
++++ b/drivers/nvdimm/bus.c
+@@ -335,7 +335,7 @@ static const struct nd_cmd_desc __nd_cmd_dimm_descs[] = {
+ 	[ND_CMD_IMPLEMENTED] = { },
+ 	[ND_CMD_SMART] = {
+ 		.out_num = 2,
+-		.out_sizes = { 4, 8, },
++		.out_sizes = { 4, 128, },
+ 	},
+ 	[ND_CMD_SMART_THRESHOLD] = {
+ 		.out_num = 2,
+diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
+index ae81a2f1da50..f0b56b3aac4d 100644
+--- a/drivers/nvdimm/pfn_devs.c
++++ b/drivers/nvdimm/pfn_devs.c
+@@ -315,7 +315,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
+ 	} else {
+ 		/* from init we validate */
+ 		if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
+-			return -EINVAL;
++			return -ENODEV;
+ 	}
+ 
+ 	if (nd_pfn->align > nvdimm_namespace_capacity(ndns)) {
+diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
+index 4c2fa05b4589..944674ee3464 100644
+--- a/drivers/pcmcia/db1xxx_ss.c
++++ b/drivers/pcmcia/db1xxx_ss.c
+@@ -56,6 +56,7 @@ struct db1x_pcmcia_sock {
+ 	int	stschg_irq;	/* card-status-change irq */
+ 	int	card_irq;	/* card irq */
+ 	int	eject_irq;	/* db1200/pb1200 have these */
++	int	insert_gpio;	/* db1000 carddetect gpio */
+ 
+ #define BOARD_TYPE_DEFAULT	0	/* most boards */
+ #define BOARD_TYPE_DB1200	1	/* IRQs aren't gpios */
+@@ -83,7 +84,7 @@ static int db1200_card_inserted(struct db1x_pcmcia_sock *sock)
+ /* carddetect gpio: low-active */
+ static int db1000_card_inserted(struct db1x_pcmcia_sock *sock)
+ {
+-	return !gpio_get_value(irq_to_gpio(sock->insert_irq));
++	return !gpio_get_value(sock->insert_gpio);
+ }
+ 
+ static int db1x_card_inserted(struct db1x_pcmcia_sock *sock)
+@@ -457,9 +458,15 @@ static int db1x_pcmcia_socket_probe(struct platform_device *pdev)
+ 	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "card");
+ 	sock->card_irq = r ? r->start : 0;
+ 
+-	/* insert: irq which triggers on card insertion/ejection */
++	/* insert: irq which triggers on card insertion/ejection
++	 * BIG FAT NOTE: on DB1000/1100/1500/1550 we pass a GPIO here!
++	 */
+ 	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "insert");
+ 	sock->insert_irq = r ? r->start : -1;
++	if (sock->board_type == BOARD_TYPE_DEFAULT) {
++		sock->insert_gpio = r ? r->start : -1;
++		sock->insert_irq = r ? gpio_to_irq(r->start) : -1;
++	}
+ 
+ 	/* stschg: irq which trigger on card status change (optional) */
+ 	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "stschg");
+diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
+index a5bb93987378..1029aa7889b5 100644
+--- a/drivers/pinctrl/freescale/pinctrl-imx.c
++++ b/drivers/pinctrl/freescale/pinctrl-imx.c
+@@ -726,19 +726,18 @@ int imx_pinctrl_probe(struct platform_device *pdev,
+ 
+ 	if (of_property_read_bool(dev_np, "fsl,input-sel")) {
+ 		np = of_parse_phandle(dev_np, "fsl,input-sel", 0);
+-		if (np) {
+-			ipctl->input_sel_base = of_iomap(np, 0);
+-			if (IS_ERR(ipctl->input_sel_base)) {
+-				of_node_put(np);
+-				dev_err(&pdev->dev,
+-					"iomuxc input select base address not found\n");
+-				return PTR_ERR(ipctl->input_sel_base);
+-			}
+-		} else {
++		if (!np) {
+ 			dev_err(&pdev->dev, "iomuxc fsl,input-sel property not found\n");
+ 			return -EINVAL;
+ 		}
++
++		ipctl->input_sel_base = of_iomap(np, 0);
+ 		of_node_put(np);
++		if (!ipctl->input_sel_base) {
++			dev_err(&pdev->dev,
++				"iomuxc input select base address not found\n");
++			return -ENOMEM;
++		}
+ 	}
+ 
+ 	imx_pinctrl_desc.name = dev_name(&pdev->dev);
+diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+index 352406108fa0..c8969dd49449 100644
+--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
++++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+@@ -990,7 +990,7 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
+ 		int val;
+ 
+ 		if (pull)
+-			pullidx = data_out ? 1 : 2;
++			pullidx = data_out ? 2 : 1;
+ 
+ 		seq_printf(s, " gpio-%-3d (%-20.20s) in  %s %s",
+ 			   gpio,
+diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
+index 856f736cb1a6..2673cd9d106e 100644
+--- a/drivers/pinctrl/pinctrl-pistachio.c
++++ b/drivers/pinctrl/pinctrl-pistachio.c
+@@ -469,27 +469,27 @@ static const char * const pistachio_mips_pll_lock_groups[] = {
+ 	"mfio83",
+ };
+ 
+-static const char * const pistachio_sys_pll_lock_groups[] = {
++static const char * const pistachio_audio_pll_lock_groups[] = {
+ 	"mfio84",
+ };
+ 
+-static const char * const pistachio_wifi_pll_lock_groups[] = {
++static const char * const pistachio_rpu_v_pll_lock_groups[] = {
+ 	"mfio85",
+ };
+ 
+-static const char * const pistachio_bt_pll_lock_groups[] = {
++static const char * const pistachio_rpu_l_pll_lock_groups[] = {
+ 	"mfio86",
+ };
+ 
+-static const char * const pistachio_rpu_v_pll_lock_groups[] = {
++static const char * const pistachio_sys_pll_lock_groups[] = {
+ 	"mfio87",
+ };
+ 
+-static const char * const pistachio_rpu_l_pll_lock_groups[] = {
++static const char * const pistachio_wifi_pll_lock_groups[] = {
+ 	"mfio88",
+ };
+ 
+-static const char * const pistachio_audio_pll_lock_groups[] = {
++static const char * const pistachio_bt_pll_lock_groups[] = {
+ 	"mfio89",
+ };
+ 
+@@ -559,12 +559,12 @@ enum pistachio_mux_option {
+ 	PISTACHIO_FUNCTION_DREQ4,
+ 	PISTACHIO_FUNCTION_DREQ5,
+ 	PISTACHIO_FUNCTION_MIPS_PLL_LOCK,
++	PISTACHIO_FUNCTION_AUDIO_PLL_LOCK,
++	PISTACHIO_FUNCTION_RPU_V_PLL_LOCK,
++	PISTACHIO_FUNCTION_RPU_L_PLL_LOCK,
+ 	PISTACHIO_FUNCTION_SYS_PLL_LOCK,
+ 	PISTACHIO_FUNCTION_WIFI_PLL_LOCK,
+ 	PISTACHIO_FUNCTION_BT_PLL_LOCK,
+-	PISTACHIO_FUNCTION_RPU_V_PLL_LOCK,
+-	PISTACHIO_FUNCTION_RPU_L_PLL_LOCK,
+-	PISTACHIO_FUNCTION_AUDIO_PLL_LOCK,
+ 	PISTACHIO_FUNCTION_DEBUG_RAW_CCA_IND,
+ 	PISTACHIO_FUNCTION_DEBUG_ED_SEC20_CCA_IND,
+ 	PISTACHIO_FUNCTION_DEBUG_ED_SEC40_CCA_IND,
+@@ -620,12 +620,12 @@ static const struct pistachio_function pistachio_functions[] = {
+ 	FUNCTION(dreq4),
+ 	FUNCTION(dreq5),
+ 	FUNCTION(mips_pll_lock),
++	FUNCTION(audio_pll_lock),
++	FUNCTION(rpu_v_pll_lock),
++	FUNCTION(rpu_l_pll_lock),
+ 	FUNCTION(sys_pll_lock),
+ 	FUNCTION(wifi_pll_lock),
+ 	FUNCTION(bt_pll_lock),
+-	FUNCTION(rpu_v_pll_lock),
+-	FUNCTION(rpu_l_pll_lock),
+-	FUNCTION(audio_pll_lock),
+ 	FUNCTION(debug_raw_cca_ind),
+ 	FUNCTION(debug_ed_sec20_cca_ind),
+ 	FUNCTION(debug_ed_sec40_cca_ind),
+diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
+index 181ea98a63b7..2b0d70217bbd 100644
+--- a/drivers/pinctrl/sh-pfc/core.c
++++ b/drivers/pinctrl/sh-pfc/core.c
+@@ -545,7 +545,9 @@ static int sh_pfc_probe(struct platform_device *pdev)
+ 			return ret;
+ 	}
+ 
+-	pinctrl_provide_dummies();
++	/* Enable dummy states for those platforms without pinctrl support */
++	if (!of_have_populated_dt())
++		pinctrl_provide_dummies();
+ 
+ 	ret = sh_pfc_init_ranges(pfc);
+ 	if (ret < 0)
+diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
+index 00265f0435a7..8b381d69df86 100644
+--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
++++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
+@@ -485,6 +485,7 @@ static const struct sunxi_pinctrl_desc sun8i_a33_pinctrl_data = {
+ 	.pins = sun8i_a33_pins,
+ 	.npins = ARRAY_SIZE(sun8i_a33_pins),
+ 	.irq_banks = 2,
++	.irq_bank_base = 1,
+ };
+ 
+ static int sun8i_a33_pinctrl_probe(struct platform_device *pdev)
+diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+index 7a2465f5e71e..884c2b314567 100644
+--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
++++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+@@ -578,7 +578,7 @@ static void sunxi_pinctrl_irq_release_resources(struct irq_data *d)
+ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
+ {
+ 	struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
+-	u32 reg = sunxi_irq_cfg_reg(d->hwirq);
++	u32 reg = sunxi_irq_cfg_reg(d->hwirq, pctl->desc->irq_bank_base);
+ 	u8 index = sunxi_irq_cfg_offset(d->hwirq);
+ 	unsigned long flags;
+ 	u32 regval;
+@@ -625,7 +625,8 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
+ static void sunxi_pinctrl_irq_ack(struct irq_data *d)
+ {
+ 	struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
+-	u32 status_reg = sunxi_irq_status_reg(d->hwirq);
++	u32 status_reg = sunxi_irq_status_reg(d->hwirq,
++					      pctl->desc->irq_bank_base);
+ 	u8 status_idx = sunxi_irq_status_offset(d->hwirq);
+ 
+ 	/* Clear the IRQ */
+@@ -635,7 +636,7 @@ static void sunxi_pinctrl_irq_ack(struct irq_data *d)
+ static void sunxi_pinctrl_irq_mask(struct irq_data *d)
+ {
+ 	struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
+-	u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
++	u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base);
+ 	u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
+ 	unsigned long flags;
+ 	u32 val;
+@@ -652,7 +653,7 @@ static void sunxi_pinctrl_irq_mask(struct irq_data *d)
+ static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
+ {
+ 	struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
+-	u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
++	u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base);
+ 	u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
+ 	unsigned long flags;
+ 	u32 val;
+@@ -744,7 +745,7 @@ static void sunxi_pinctrl_irq_handler(struct irq_desc *desc)
+ 	if (bank == pctl->desc->irq_banks)
+ 		return;
+ 
+-	reg = sunxi_irq_status_reg_from_bank(bank);
++	reg = sunxi_irq_status_reg_from_bank(bank, pctl->desc->irq_bank_base);
+ 	val = readl(pctl->membase + reg);
+ 
+ 	if (val) {
+@@ -1023,9 +1024,11 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
+ 
+ 	for (i = 0; i < pctl->desc->irq_banks; i++) {
+ 		/* Mask and clear all IRQs before registering a handler */
+-		writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i));
++		writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i,
++						pctl->desc->irq_bank_base));
+ 		writel(0xffffffff,
+-			pctl->membase + sunxi_irq_status_reg_from_bank(i));
++		       pctl->membase + sunxi_irq_status_reg_from_bank(i,
++						pctl->desc->irq_bank_base));
+ 
+ 		irq_set_chained_handler_and_data(pctl->irq[i],
+ 						 sunxi_pinctrl_irq_handler,
+diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+index e248e81a0f9e..0afce1ab12d0 100644
+--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
++++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+@@ -97,6 +97,7 @@ struct sunxi_pinctrl_desc {
+ 	int				npins;
+ 	unsigned			pin_base;
+ 	unsigned			irq_banks;
++	unsigned			irq_bank_base;
+ 	bool				irq_read_needs_mux;
+ };
+ 
+@@ -233,12 +234,12 @@ static inline u32 sunxi_pull_offset(u16 pin)
+ 	return pin_num * PULL_PINS_BITS;
+ }
+ 
+-static inline u32 sunxi_irq_cfg_reg(u16 irq)
++static inline u32 sunxi_irq_cfg_reg(u16 irq, unsigned bank_base)
+ {
+ 	u8 bank = irq / IRQ_PER_BANK;
+ 	u8 reg = (irq % IRQ_PER_BANK) / IRQ_CFG_IRQ_PER_REG * 0x04;
+ 
+-	return IRQ_CFG_REG + bank * IRQ_MEM_SIZE + reg;
++	return IRQ_CFG_REG + (bank_base + bank) * IRQ_MEM_SIZE + reg;
+ }
+ 
+ static inline u32 sunxi_irq_cfg_offset(u16 irq)
+@@ -247,16 +248,16 @@ static inline u32 sunxi_irq_cfg_offset(u16 irq)
+ 	return irq_num * IRQ_CFG_IRQ_BITS;
+ }
+ 
+-static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank)
++static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank, unsigned bank_base)
+ {
+-	return IRQ_CTRL_REG + bank * IRQ_MEM_SIZE;
++	return IRQ_CTRL_REG + (bank_base + bank) * IRQ_MEM_SIZE;
+ }
+ 
+-static inline u32 sunxi_irq_ctrl_reg(u16 irq)
++static inline u32 sunxi_irq_ctrl_reg(u16 irq, unsigned bank_base)
+ {
+ 	u8 bank = irq / IRQ_PER_BANK;
+ 
+-	return sunxi_irq_ctrl_reg_from_bank(bank);
++	return sunxi_irq_ctrl_reg_from_bank(bank, bank_base);
+ }
+ 
+ static inline u32 sunxi_irq_ctrl_offset(u16 irq)
+@@ -265,16 +266,16 @@ static inline u32 sunxi_irq_ctrl_offset(u16 irq)
+ 	return irq_num * IRQ_CTRL_IRQ_BITS;
+ }
+ 
+-static inline u32 sunxi_irq_status_reg_from_bank(u8 bank)
++static inline u32 sunxi_irq_status_reg_from_bank(u8 bank, unsigned bank_base)
+ {
+-	return IRQ_STATUS_REG + bank * IRQ_MEM_SIZE;
++	return IRQ_STATUS_REG + (bank_base + bank) * IRQ_MEM_SIZE;
+ }
+ 
+-static inline u32 sunxi_irq_status_reg(u16 irq)
++static inline u32 sunxi_irq_status_reg(u16 irq, unsigned bank_base)
+ {
+ 	u8 bank = irq / IRQ_PER_BANK;
+ 
+-	return sunxi_irq_status_reg_from_bank(bank);
++	return sunxi_irq_status_reg_from_bank(bank, bank_base);
+ }
+ 
+ static inline u32 sunxi_irq_status_offset(u16 irq)
+diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
+index b1bf42b93fcc..1deb6adc411f 100644
+--- a/drivers/scsi/scsi.c
++++ b/drivers/scsi/scsi.c
+@@ -784,8 +784,9 @@ void scsi_attach_vpd(struct scsi_device *sdev)
+ 	int pg83_supported = 0;
+ 	unsigned char __rcu *vpd_buf, *orig_vpd_buf = NULL;
+ 
+-	if (sdev->skip_vpd_pages)
++	if (!scsi_device_supports_vpd(sdev))
+ 		return;
++
+ retry_pg0:
+ 	vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
+ 	if (!vpd_buf)
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 5a5457ac9cdb..974ca5b45f8d 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -1275,18 +1275,19 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+ 	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
+ 	struct scsi_device *sdp = sdkp->device;
+ 	struct Scsi_Host *host = sdp->host;
++	sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
+ 	int diskinfo[4];
+ 
+ 	/* default to most commonly used values */
+-        diskinfo[0] = 0x40;	/* 1 << 6 */
+-       	diskinfo[1] = 0x20;	/* 1 << 5 */
+-       	diskinfo[2] = sdkp->capacity >> 11;
+-	
++	diskinfo[0] = 0x40;	/* 1 << 6 */
++	diskinfo[1] = 0x20;	/* 1 << 5 */
++	diskinfo[2] = capacity >> 11;
++
+ 	/* override with calculated, extended default, or driver values */
+ 	if (host->hostt->bios_param)
+-		host->hostt->bios_param(sdp, bdev, sdkp->capacity, diskinfo);
++		host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
+ 	else
+-		scsicam_bios_param(bdev, sdkp->capacity, diskinfo);
++		scsicam_bios_param(bdev, capacity, diskinfo);
+ 
+ 	geo->heads = diskinfo[0];
+ 	geo->sectors = diskinfo[1];
+@@ -2337,14 +2338,6 @@ got_data:
+ 	if (sdkp->capacity > 0xffffffff)
+ 		sdp->use_16_for_rw = 1;
+ 
+-	/* Rescale capacity to 512-byte units */
+-	if (sector_size == 4096)
+-		sdkp->capacity <<= 3;
+-	else if (sector_size == 2048)
+-		sdkp->capacity <<= 2;
+-	else if (sector_size == 1024)
+-		sdkp->capacity <<= 1;
+-
+ 	blk_queue_physical_block_size(sdp->request_queue,
+ 				      sdkp->physical_block_size);
+ 	sdkp->device->sector_size = sector_size;
+@@ -2795,28 +2788,6 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
+ 		sdkp->ws10 = 1;
+ }
+ 
+-static int sd_try_extended_inquiry(struct scsi_device *sdp)
+-{
+-	/* Attempt VPD inquiry if the device blacklist explicitly calls
+-	 * for it.
+-	 */
+-	if (sdp->try_vpd_pages)
+-		return 1;
+-	/*
+-	 * Although VPD inquiries can go to SCSI-2 type devices,
+-	 * some USB ones crash on receiving them, and the pages
+-	 * we currently ask for are for SPC-3 and beyond
+-	 */
+-	if (sdp->scsi_level > SCSI_SPC_2 && !sdp->skip_vpd_pages)
+-		return 1;
+-	return 0;
+-}
+-
+-static inline u32 logical_to_sectors(struct scsi_device *sdev, u32 blocks)
+-{
+-	return blocks << (ilog2(sdev->sector_size) - 9);
+-}
+-
+ /**
+  *	sd_revalidate_disk - called the first time a new disk is seen,
+  *	performs disk spin up, read_capacity, etc.
+@@ -2856,7 +2827,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
+ 	if (sdkp->media_present) {
+ 		sd_read_capacity(sdkp, buffer);
+ 
+-		if (sd_try_extended_inquiry(sdp)) {
++		if (scsi_device_supports_vpd(sdp)) {
+ 			sd_read_block_provisioning(sdkp);
+ 			sd_read_block_limits(sdkp);
+ 			sd_read_block_characteristics(sdkp);
+@@ -2900,7 +2871,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
+ 	/* Combine with controller limits */
+ 	q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
+ 
+-	set_capacity(disk, sdkp->capacity);
++	set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
+ 	sd_config_write_same(sdkp);
+ 	kfree(buffer);
+ 
+diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
+index 5f2a84aff29f..654630bb7d0e 100644
+--- a/drivers/scsi/sd.h
++++ b/drivers/scsi/sd.h
+@@ -65,7 +65,7 @@ struct scsi_disk {
+ 	struct device	dev;
+ 	struct gendisk	*disk;
+ 	atomic_t	openers;
+-	sector_t	capacity;	/* size in 512-byte sectors */
++	sector_t	capacity;	/* size in logical blocks */
+ 	u32		max_xfer_blocks;
+ 	u32		opt_xfer_blocks;
+ 	u32		max_ws_blocks;
+@@ -146,6 +146,11 @@ static inline int scsi_medium_access_command(struct scsi_cmnd *scmd)
+ 	return 0;
+ }
+ 
++static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blocks)
++{
++	return blocks << (ilog2(sdev->sector_size) - 9);
++}
++
+ /*
+  * A DIF-capable target device can be formatted with different
+  * protection schemes.  Currently 0 through 3 are defined:
+diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
+index e237e9f3312d..df560216d702 100644
+--- a/drivers/staging/android/ion/ion.c
++++ b/drivers/staging/android/ion/ion.c
+@@ -251,8 +251,10 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
+ 	 * memory coming from the heaps is ready for dma, ie if it has a
+ 	 * cached mapping that mapping has been invalidated
+ 	 */
+-	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
++	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
+ 		sg_dma_address(sg) = sg_phys(sg);
++		sg_dma_len(sg) = sg->length;
++	}
+ 	mutex_lock(&dev->buffer_lock);
+ 	ion_buffer_add(dev, buffer);
+ 	mutex_unlock(&dev->buffer_lock);
+diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
+index c0f5c652d272..f1893e08e51a 100644
+--- a/drivers/usb/renesas_usbhs/fifo.c
++++ b/drivers/usb/renesas_usbhs/fifo.c
+@@ -190,7 +190,8 @@ static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type)
+ 		goto __usbhs_pkt_handler_end;
+ 	}
+ 
+-	ret = func(pkt, &is_done);
++	if (likely(func))
++		ret = func(pkt, &is_done);
+ 
+ 	if (is_done)
+ 		__usbhsf_pkt_del(pkt);
+@@ -889,6 +890,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
+ 
+ 	pkt->trans = len;
+ 
++	usbhsf_tx_irq_ctrl(pipe, 0);
+ 	INIT_WORK(&pkt->work, xfer_work);
+ 	schedule_work(&pkt->work);
+ 
+diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
+index 657f9672ceba..251053551866 100644
+--- a/drivers/usb/renesas_usbhs/mod_gadget.c
++++ b/drivers/usb/renesas_usbhs/mod_gadget.c
+@@ -158,10 +158,14 @@ static void usbhsg_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
+ 	struct usbhs_pipe *pipe = pkt->pipe;
+ 	struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe);
+ 	struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt);
++	unsigned long flags;
+ 
+ 	ureq->req.actual = pkt->actual;
+ 
+-	usbhsg_queue_pop(uep, ureq, 0);
++	usbhs_lock(priv, flags);
++	if (uep)
++		__usbhsg_queue_pop(uep, ureq, 0);
++	usbhs_unlock(priv, flags);
+ }
+ 
+ static void usbhsg_queue_push(struct usbhsg_uep *uep,
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index c90a7e46cc7b..e4ade8d89eb5 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -2,7 +2,7 @@
+  * USB Attached SCSI
+  * Note that this is not the same as the USB Mass Storage driver
+  *
+- * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2014
++ * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2016
+  * Copyright Matthew Wilcox for Intel Corp, 2010
+  * Copyright Sarah Sharp for Intel Corp, 2010
+  *
+@@ -757,6 +757,17 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
+ 	return SUCCESS;
+ }
+ 
++static int uas_target_alloc(struct scsi_target *starget)
++{
++	struct uas_dev_info *devinfo = (struct uas_dev_info *)
++			dev_to_shost(starget->dev.parent)->hostdata;
++
++	if (devinfo->flags & US_FL_NO_REPORT_LUNS)
++		starget->no_report_luns = 1;
++
++	return 0;
++}
++
+ static int uas_slave_alloc(struct scsi_device *sdev)
+ {
+ 	struct uas_dev_info *devinfo =
+@@ -800,7 +811,6 @@ static int uas_slave_configure(struct scsi_device *sdev)
+ 	if (devinfo->flags & US_FL_BROKEN_FUA)
+ 		sdev->broken_fua = 1;
+ 
+-	scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
+ 	return 0;
+ }
+ 
+@@ -808,6 +818,7 @@ static struct scsi_host_template uas_host_template = {
+ 	.module = THIS_MODULE,
+ 	.name = "uas",
+ 	.queuecommand = uas_queuecommand,
++	.target_alloc = uas_target_alloc,
+ 	.slave_alloc = uas_slave_alloc,
+ 	.slave_configure = uas_slave_configure,
+ 	.eh_abort_handler = uas_eh_abort_handler,
+@@ -932,6 +943,12 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ 	if (result)
+ 		goto set_alt0;
+ 
++	/*
++	 * 1 tag is reserved for untagged commands +
++	 * 1 tag to avoid off by one errors in some bridge firmwares
++	 */
++	shost->can_queue = devinfo->qdepth - 2;
++
+ 	usb_set_intfdata(intf, shost);
+ 	result = scsi_add_host(shost, &intf->dev);
+ 	if (result)
+diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
+index ccc113e83d88..53341a77d89f 100644
+--- a/drivers/usb/storage/unusual_uas.h
++++ b/drivers/usb/storage/unusual_uas.h
+@@ -64,6 +64,13 @@ UNUSUAL_DEV(0x0bc2, 0x3312, 0x0000, 0x9999,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_NO_ATA_1X),
+ 
++/* Reported-by: David Webb <djw@noc.ac.uk> */
++UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
++		"Seagate",
++		"Expansion Desk",
++		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++		US_FL_NO_REPORT_LUNS),
++
+ /* Reported-by: Hans de Goede <hdegoede@redhat.com> */
+ UNUSUAL_DEV(0x0bc2, 0x3320, 0x0000, 0x9999,
+ 		"Seagate",
+diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
+index 43576ed31ccd..9de988a0f856 100644
+--- a/drivers/usb/storage/usb.c
++++ b/drivers/usb/storage/usb.c
+@@ -482,7 +482,7 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
+ 			US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
+ 			US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE |
+ 			US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES |
+-			US_FL_MAX_SECTORS_240);
++			US_FL_MAX_SECTORS_240 | US_FL_NO_REPORT_LUNS);
+ 
+ 	p = quirks;
+ 	while (*p) {
+@@ -532,6 +532,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
+ 		case 'i':
+ 			f |= US_FL_IGNORE_DEVICE;
+ 			break;
++		case 'j':
++			f |= US_FL_NO_REPORT_LUNS;
++			break;
+ 		case 'l':
+ 			f |= US_FL_NOT_LOCKABLE;
+ 			break;
+diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
+index 7760fc1a2218..1f413a2f70b6 100644
+--- a/drivers/virtio/virtio_pci_modern.c
++++ b/drivers/virtio/virtio_pci_modern.c
+@@ -17,6 +17,7 @@
+  *
+  */
+ 
++#include <linux/delay.h>
+ #define VIRTIO_PCI_NO_LEGACY
+ #include "virtio_pci_common.h"
+ 
+@@ -271,9 +272,13 @@ static void vp_reset(struct virtio_device *vdev)
+ 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ 	/* 0 status means a reset. */
+ 	vp_iowrite8(0, &vp_dev->common->device_status);
+-	/* Flush out the status write, and flush in device writes,
+-	 * including MSI-X interrupts, if any. */
+-	vp_ioread8(&vp_dev->common->device_status);
++	/* After writing 0 to device_status, the driver MUST wait for a read of
++	 * device_status to return 0 before reinitializing the device.
++	 * This will flush out the status write, and flush in device writes,
++	 * including MSI-X interrupts, if any.
++	 */
++	while (vp_ioread8(&vp_dev->common->device_status))
++		msleep(1);
+ 	/* Flush pending VQ/configuration callbacks. */
+ 	vp_synchronize_vectors(vdev);
+ }
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
+index 524c22146429..44367783f07a 100644
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -484,9 +484,19 @@ static void eoi_pirq(struct irq_data *data)
+ 	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
+ 	int rc = 0;
+ 
+-	irq_move_irq(data);
++	if (!VALID_EVTCHN(evtchn))
++		return;
+ 
+-	if (VALID_EVTCHN(evtchn))
++	if (unlikely(irqd_is_setaffinity_pending(data))) {
++		int masked = test_and_set_mask(evtchn);
++
++		clear_evtchn(evtchn);
++
++		irq_move_masked_irq(data);
++
++		if (!masked)
++			unmask_evtchn(evtchn);
++	} else
+ 		clear_evtchn(evtchn);
+ 
+ 	if (pirq_needs_eoi(data->irq)) {
+@@ -1357,9 +1367,19 @@ static void ack_dynirq(struct irq_data *data)
+ {
+ 	int evtchn = evtchn_from_irq(data->irq);
+ 
+-	irq_move_irq(data);
++	if (!VALID_EVTCHN(evtchn))
++		return;
+ 
+-	if (VALID_EVTCHN(evtchn))
++	if (unlikely(irqd_is_setaffinity_pending(data))) {
++		int masked = test_and_set_mask(evtchn);
++
++		clear_evtchn(evtchn);
++
++		irq_move_masked_irq(data);
++
++		if (!masked)
++			unmask_evtchn(evtchn);
++	} else
+ 		clear_evtchn(evtchn);
+ }
+ 
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 098bb8f690c9..9a30ca64066b 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -1883,7 +1883,7 @@ static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
+  */
+ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ {
+-	struct dentry *dentry = file->f_path.dentry;
++	struct dentry *dentry = file_dentry(file);
+ 	struct inode *inode = d_inode(dentry);
+ 	struct btrfs_root *root = BTRFS_I(inode)->root;
+ 	struct btrfs_trans_handle *trans;
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 978c3a810893..849a30aa117d 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -4414,6 +4414,127 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
+ 	return ret;
+ }
+ 
++/*
++ * When we are logging a new inode X, check if it doesn't have a reference that
++ * matches the reference from some other inode Y created in a past transaction
++ * and that was renamed in the current transaction. If we don't do this, then at
++ * log replay time we can lose inode Y (and all its files if it's a directory):
++ *
++ * mkdir /mnt/x
++ * echo "hello world" > /mnt/x/foobar
++ * sync
++ * mv /mnt/x /mnt/y
++ * mkdir /mnt/x                 # or touch /mnt/x
++ * xfs_io -c fsync /mnt/x
++ * <power fail>
++ * mount fs, trigger log replay
++ *
++ * After the log replay procedure, we would lose the first directory and all its
++ * files (file foobar).
++ * For the case where inode Y is not a directory we simply end up losing it:
++ *
++ * echo "123" > /mnt/foo
++ * sync
++ * mv /mnt/foo /mnt/bar
++ * echo "abc" > /mnt/foo
++ * xfs_io -c fsync /mnt/foo
++ * <power fail>
++ *
++ * We also need this for cases where a snapshot entry is replaced by some other
++ * entry (file or directory) otherwise we end up with an unreplayable log due to
++ * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
++ * if it were a regular entry:
++ *
++ * mkdir /mnt/x
++ * btrfs subvolume snapshot /mnt /mnt/x/snap
++ * btrfs subvolume delete /mnt/x/snap
++ * rmdir /mnt/x
++ * mkdir /mnt/x
++ * fsync /mnt/x or fsync some new file inside it
++ * <power fail>
++ *
++ * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
++ * the same transaction.
++ */
++static int btrfs_check_ref_name_override(struct extent_buffer *eb,
++					 const int slot,
++					 const struct btrfs_key *key,
++					 struct inode *inode)
++{
++	int ret;
++	struct btrfs_path *search_path;
++	char *name = NULL;
++	u32 name_len = 0;
++	u32 item_size = btrfs_item_size_nr(eb, slot);
++	u32 cur_offset = 0;
++	unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
++
++	search_path = btrfs_alloc_path();
++	if (!search_path)
++		return -ENOMEM;
++	search_path->search_commit_root = 1;
++	search_path->skip_locking = 1;
++
++	while (cur_offset < item_size) {
++		u64 parent;
++		u32 this_name_len;
++		u32 this_len;
++		unsigned long name_ptr;
++		struct btrfs_dir_item *di;
++
++		if (key->type == BTRFS_INODE_REF_KEY) {
++			struct btrfs_inode_ref *iref;
++
++			iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
++			parent = key->offset;
++			this_name_len = btrfs_inode_ref_name_len(eb, iref);
++			name_ptr = (unsigned long)(iref + 1);
++			this_len = sizeof(*iref) + this_name_len;
++		} else {
++			struct btrfs_inode_extref *extref;
++
++			extref = (struct btrfs_inode_extref *)(ptr +
++							       cur_offset);
++			parent = btrfs_inode_extref_parent(eb, extref);
++			this_name_len = btrfs_inode_extref_name_len(eb, extref);
++			name_ptr = (unsigned long)&extref->name;
++			this_len = sizeof(*extref) + this_name_len;
++		}
++
++		if (this_name_len > name_len) {
++			char *new_name;
++
++			new_name = krealloc(name, this_name_len, GFP_NOFS);
++			if (!new_name) {
++				ret = -ENOMEM;
++				goto out;
++			}
++			name_len = this_name_len;
++			name = new_name;
++		}
++
++		read_extent_buffer(eb, name, name_ptr, this_name_len);
++		di = btrfs_lookup_dir_item(NULL, BTRFS_I(inode)->root,
++					   search_path, parent,
++					   name, this_name_len, 0);
++		if (di && !IS_ERR(di)) {
++			ret = 1;
++			goto out;
++		} else if (IS_ERR(di)) {
++			ret = PTR_ERR(di);
++			goto out;
++		}
++		btrfs_release_path(search_path);
++
++		cur_offset += this_len;
++	}
++	ret = 0;
++out:
++	btrfs_free_path(search_path);
++	kfree(name);
++	return ret;
++}
++
+ /* log a single inode in the tree log.
+  * At least one parent directory for this inode must exist in the tree
+  * or be logged already.
+@@ -4586,6 +4707,22 @@ again:
+ 		if (min_key.type == BTRFS_INODE_ITEM_KEY)
+ 			need_log_inode_item = false;
+ 
++		if ((min_key.type == BTRFS_INODE_REF_KEY ||
++		     min_key.type == BTRFS_INODE_EXTREF_KEY) &&
++		    BTRFS_I(inode)->generation == trans->transid) {
++			ret = btrfs_check_ref_name_override(path->nodes[0],
++							    path->slots[0],
++							    &min_key, inode);
++			if (ret < 0) {
++				err = ret;
++				goto out_unlock;
++			} else if (ret > 0) {
++				err = 1;
++				btrfs_set_log_full_commit(root->fs_info, trans);
++				goto out_unlock;
++			}
++		}
++
+ 		/* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
+ 		if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
+ 			if (ins_nr == 0)
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 2398f9f94337..7566b2689609 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -1667,7 +1667,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
+ 				DCACHE_OP_REVALIDATE	|
+ 				DCACHE_OP_WEAK_REVALIDATE	|
+ 				DCACHE_OP_DELETE	|
+-				DCACHE_OP_SELECT_INODE));
++				DCACHE_OP_SELECT_INODE	|
++				DCACHE_OP_REAL));
+ 	dentry->d_op = op;
+ 	if (!op)
+ 		return;
+@@ -1685,6 +1686,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
+ 		dentry->d_flags |= DCACHE_OP_PRUNE;
+ 	if (op->d_select_inode)
+ 		dentry->d_flags |= DCACHE_OP_SELECT_INODE;
++	if (op->d_real)
++		dentry->d_flags |= DCACHE_OP_REAL;
+ 
+ }
+ EXPORT_SYMBOL(d_set_d_op);
+diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
+index 38f7562489bb..ecb54394492a 100644
+--- a/fs/ext4/crypto.c
++++ b/fs/ext4/crypto.c
+@@ -475,13 +475,16 @@ uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
+  */
+ static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
+ {
+-	struct inode *dir = d_inode(dentry->d_parent);
+-	struct ext4_crypt_info *ci = EXT4_I(dir)->i_crypt_info;
++	struct dentry *dir;
++	struct ext4_crypt_info *ci;
+ 	int dir_has_key, cached_with_key;
+ 
+-	if (!ext4_encrypted_inode(dir))
++	dir = dget_parent(dentry);
++	if (!ext4_encrypted_inode(d_inode(dir))) {
++		dput(dir);
+ 		return 0;
+-
++	}
++	ci = EXT4_I(d_inode(dir))->i_crypt_info;
+ 	if (ci && ci->ci_keyring_key &&
+ 	    (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
+ 					  (1 << KEY_FLAG_REVOKED) |
+@@ -491,6 +494,7 @@ static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
+ 	/* this should eventually be an flag in d_flags */
+ 	cached_with_key = dentry->d_fsdata != NULL;
+ 	dir_has_key = (ci != NULL);
++	dput(dir);
+ 
+ 	/*
+ 	 * If the dentry was cached without the key, and it is a
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 157b458a69d4..b213449a5d1b 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -900,6 +900,29 @@ do {									       \
+ #include "extents_status.h"
+ 
+ /*
++ * Lock subclasses for i_data_sem in the ext4_inode_info structure.
++ *
++ * These are needed to avoid lockdep false positives when we need to
++ * allocate blocks to the quota inode during ext4_map_blocks(), while
++ * holding i_data_sem for a normal (non-quota) inode.  Since we don't
++ * do quota tracking for the quota inode, this avoids deadlock (as
++ * well as infinite recursion, since it isn't turtles all the way
++ * down...)
++ *
++ *  I_DATA_SEM_NORMAL - Used for most inodes
++ *  I_DATA_SEM_OTHER  - Used by move_inode.c for the second normal inode
++ *			  where the second inode has larger inode number
++ *			  than the first
++ *  I_DATA_SEM_QUOTA  - Used for quota inodes only
++ */
++enum {
++	I_DATA_SEM_NORMAL = 0,
++	I_DATA_SEM_OTHER,
++	I_DATA_SEM_QUOTA,
++};
++
++
++/*
+  * fourth extended file system inode data in memory
+  */
+ struct ext4_inode_info {
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index 4cd318f31cbe..38847f38b34a 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -335,7 +335,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
+ 	struct super_block *sb = inode->i_sb;
+ 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ 	struct vfsmount *mnt = filp->f_path.mnt;
+-	struct inode *dir = filp->f_path.dentry->d_parent->d_inode;
++	struct dentry *dir;
+ 	struct path path;
+ 	char buf[64], *cp;
+ 	int ret;
+@@ -379,14 +379,18 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
+ 		if (ext4_encryption_info(inode) == NULL)
+ 			return -ENOKEY;
+ 	}
+-	if (ext4_encrypted_inode(dir) &&
+-	    !ext4_is_child_context_consistent_with_parent(dir, inode)) {
++
++	dir = dget_parent(file_dentry(filp));
++	if (ext4_encrypted_inode(d_inode(dir)) &&
++	    !ext4_is_child_context_consistent_with_parent(d_inode(dir), inode)) {
+ 		ext4_warning(inode->i_sb,
+ 			     "Inconsistent encryption contexts: %lu/%lu\n",
+-			     (unsigned long) dir->i_ino,
++			     (unsigned long) d_inode(dir)->i_ino,
+ 			     (unsigned long) inode->i_ino);
++		dput(dir);
+ 		return -EPERM;
+ 	}
++	dput(dir);
+ 	/*
+ 	 * Set up the jbd2_inode if we are opening the inode for
+ 	 * writing and the journal is present
+diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
+index 4098acc701c3..796ff0eafd3c 100644
+--- a/fs/ext4/move_extent.c
++++ b/fs/ext4/move_extent.c
+@@ -60,10 +60,10 @@ ext4_double_down_write_data_sem(struct inode *first, struct inode *second)
+ {
+ 	if (first < second) {
+ 		down_write(&EXT4_I(first)->i_data_sem);
+-		down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING);
++		down_write_nested(&EXT4_I(second)->i_data_sem, I_DATA_SEM_OTHER);
+ 	} else {
+ 		down_write(&EXT4_I(second)->i_data_sem);
+-		down_write_nested(&EXT4_I(first)->i_data_sem, SINGLE_DEPTH_NESTING);
++		down_write_nested(&EXT4_I(first)->i_data_sem, I_DATA_SEM_OTHER);
+ 
+ 	}
+ }
+@@ -484,6 +484,13 @@ mext_check_arguments(struct inode *orig_inode,
+ 		return -EBUSY;
+ 	}
+ 
++	if (IS_NOQUOTA(orig_inode) || IS_NOQUOTA(donor_inode)) {
++		ext4_debug("ext4 move extent: The argument files should "
++			"not be quota files [ino:orig %lu, donor %lu]\n",
++			orig_inode->i_ino, donor_inode->i_ino);
++		return -EBUSY;
++	}
++
+ 	/* Ext4 move extent supports only extent based file */
+ 	if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
+ 		ext4_debug("ext4 move extent: orig file is not extents "
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 3ed01ec011d7..a76ca677fd1a 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1324,9 +1324,9 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
+ 		return -1;
+ 	}
+ 	if (ext4_has_feature_quota(sb)) {
+-		ext4_msg(sb, KERN_ERR, "Cannot set journaled quota options "
+-			 "when QUOTA feature is enabled");
+-		return -1;
++		ext4_msg(sb, KERN_INFO, "Journaled quota options "
++			 "ignored when QUOTA feature is enabled");
++		return 1;
+ 	}
+ 	qname = match_strdup(args);
+ 	if (!qname) {
+@@ -1689,10 +1689,10 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
+ 			return -1;
+ 		}
+ 		if (ext4_has_feature_quota(sb)) {
+-			ext4_msg(sb, KERN_ERR,
+-				 "Cannot set journaled quota options "
++			ext4_msg(sb, KERN_INFO,
++				 "Quota format mount options ignored "
+ 				 "when QUOTA feature is enabled");
+-			return -1;
++			return 1;
+ 		}
+ 		sbi->s_jquota_fmt = m->mount_opt;
+ #endif
+@@ -1753,11 +1753,11 @@ static int parse_options(char *options, struct super_block *sb,
+ #ifdef CONFIG_QUOTA
+ 	if (ext4_has_feature_quota(sb) &&
+ 	    (test_opt(sb, USRQUOTA) || test_opt(sb, GRPQUOTA))) {
+-		ext4_msg(sb, KERN_ERR, "Cannot set quota options when QUOTA "
+-			 "feature is enabled");
+-		return 0;
+-	}
+-	if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
++		ext4_msg(sb, KERN_INFO, "Quota feature enabled, usrquota and grpquota "
++			 "mount options ignored.");
++		clear_opt(sb, USRQUOTA);
++		clear_opt(sb, GRPQUOTA);
++	} else if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
+ 		if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
+ 			clear_opt(sb, USRQUOTA);
+ 
+@@ -5021,6 +5021,20 @@ static int ext4_quota_on_mount(struct super_block *sb, int type)
+ 					EXT4_SB(sb)->s_jquota_fmt, type);
+ }
+ 
++static void lockdep_set_quota_inode(struct inode *inode, int subclass)
++{
++	struct ext4_inode_info *ei = EXT4_I(inode);
++
++	/* The first argument of lockdep_set_subclass has to be
++	 * *exactly* the same as the argument to init_rwsem() --- in
++	 * this case, in init_once() --- or lockdep gets unhappy
++	 * because the name of the lock is set using the
++	 * stringification of the argument to init_rwsem().
++	 */
++	(void) ei;	/* shut up clang warning if !CONFIG_LOCKDEP */
++	lockdep_set_subclass(&ei->i_data_sem, subclass);
++}
++
+ /*
+  * Standard function to be called on quota_on
+  */
+@@ -5060,8 +5074,12 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
+ 		if (err)
+ 			return err;
+ 	}
+-
+-	return dquot_quota_on(sb, type, format_id, path);
++	lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
++	err = dquot_quota_on(sb, type, format_id, path);
++	if (err)
++		lockdep_set_quota_inode(path->dentry->d_inode,
++					     I_DATA_SEM_NORMAL);
++	return err;
+ }
+ 
+ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
+@@ -5088,8 +5106,11 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
+ 
+ 	/* Don't account quota for quota files to avoid recursion */
+ 	qf_inode->i_flags |= S_NOQUOTA;
++	lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
+ 	err = dquot_enable(qf_inode, type, format_id, flags);
+ 	iput(qf_inode);
++	if (err)
++		lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
+ 
+ 	return err;
+ }
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index 9cce67043f92..7ded17764754 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -377,7 +377,7 @@ int nfs_readdir_xdr_filler(struct page **pages, nfs_readdir_descriptor_t *desc,
+  again:
+ 	timestamp = jiffies;
+ 	gencount = nfs_inc_attr_generation_counter();
+-	error = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred, entry->cookie, pages,
++	error = NFS_PROTO(inode)->readdir(file_dentry(file), cred, entry->cookie, pages,
+ 					  NFS_SERVER(inode)->dtsize, desc->plus);
+ 	if (error < 0) {
+ 		/* We requested READDIRPLUS, but the server doesn't grok it */
+@@ -560,7 +560,7 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
+ 		count++;
+ 
+ 		if (desc->plus != 0)
+-			nfs_prime_dcache(desc->file->f_path.dentry, entry);
++			nfs_prime_dcache(file_dentry(desc->file), entry);
+ 
+ 		status = nfs_readdir_add_to_array(entry, page);
+ 		if (status != 0)
+@@ -864,7 +864,7 @@ static bool nfs_dir_mapping_need_revalidate(struct inode *dir)
+  */
+ static int nfs_readdir(struct file *file, struct dir_context *ctx)
+ {
+-	struct dentry	*dentry = file->f_path.dentry;
++	struct dentry	*dentry = file_dentry(file);
+ 	struct inode	*inode = d_inode(dentry);
+ 	nfs_readdir_descriptor_t my_desc,
+ 			*desc = &my_desc;
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 86faecf8f328..847b678af4f0 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -940,7 +940,7 @@ int nfs_open(struct inode *inode, struct file *filp)
+ {
+ 	struct nfs_open_context *ctx;
+ 
+-	ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
++	ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode);
+ 	if (IS_ERR(ctx))
+ 		return PTR_ERR(ctx);
+ 	nfs_file_set_open_context(filp, ctx);
+diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
+index 57ca1c8039c1..2a9ff14cfb3b 100644
+--- a/fs/nfs/nfs4file.c
++++ b/fs/nfs/nfs4file.c
+@@ -26,7 +26,7 @@ static int
+ nfs4_file_open(struct inode *inode, struct file *filp)
+ {
+ 	struct nfs_open_context *ctx;
+-	struct dentry *dentry = filp->f_path.dentry;
++	struct dentry *dentry = file_dentry(filp);
+ 	struct dentry *parent = NULL;
+ 	struct inode *dir;
+ 	unsigned openflags = filp->f_flags;
+@@ -57,7 +57,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
+ 	parent = dget_parent(dentry);
+ 	dir = d_inode(parent);
+ 
+-	ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
++	ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode);
+ 	err = PTR_ERR(ctx);
+ 	if (IS_ERR(ctx))
+ 		goto out;
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index 619ad4b016d2..4399ea804447 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -295,6 +295,37 @@ static void ovl_dentry_release(struct dentry *dentry)
+ 	}
+ }
+ 
++static struct dentry *ovl_d_real(struct dentry *dentry, struct inode *inode)
++{
++	struct dentry *real;
++
++	if (d_is_dir(dentry)) {
++		if (!inode || inode == d_inode(dentry))
++			return dentry;
++		goto bug;
++	}
++
++	real = ovl_dentry_upper(dentry);
++	if (real && (!inode || inode == d_inode(real)))
++		return real;
++
++	real = ovl_dentry_lower(dentry);
++	if (!real)
++		goto bug;
++
++	if (!inode || inode == d_inode(real))
++		return real;
++
++	/* Handle recursion */
++	if (real->d_flags & DCACHE_OP_REAL)
++		return real->d_op->d_real(real, inode);
++
++bug:
++	WARN(1, "ovl_d_real(%pd4, %s:%lu\n): real dentry not found\n", dentry,
++	     inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0);
++	return dentry;
++}
++
+ static int ovl_dentry_revalidate(struct dentry *dentry, unsigned int flags)
+ {
+ 	struct ovl_entry *oe = dentry->d_fsdata;
+@@ -339,11 +370,13 @@ static int ovl_dentry_weak_revalidate(struct dentry *dentry, unsigned int flags)
+ static const struct dentry_operations ovl_dentry_operations = {
+ 	.d_release = ovl_dentry_release,
+ 	.d_select_inode = ovl_d_select_inode,
++	.d_real = ovl_d_real,
+ };
+ 
+ static const struct dentry_operations ovl_reval_dentry_operations = {
+ 	.d_release = ovl_dentry_release,
+ 	.d_select_inode = ovl_d_select_inode,
++	.d_real = ovl_d_real,
+ 	.d_revalidate = ovl_dentry_revalidate,
+ 	.d_weak_revalidate = ovl_dentry_weak_revalidate,
+ };
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index 22ab246feed3..eeae401a2412 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -199,7 +199,7 @@
+ #define unreachable() __builtin_unreachable()
+ 
+ /* Mark a function definition as prohibited from being cloned. */
+-#define __noclone	__attribute__((__noclone__))
++#define __noclone	__attribute__((__noclone__, __optimize__("no-tracer")))
+ 
+ #endif /* GCC_VERSION >= 40500 */
+ 
+diff --git a/include/linux/dcache.h b/include/linux/dcache.h
+index c4b5f4b3f8f8..03dda7ba73ac 100644
+--- a/include/linux/dcache.h
++++ b/include/linux/dcache.h
+@@ -161,6 +161,7 @@ struct dentry_operations {
+ 	struct vfsmount *(*d_automount)(struct path *);
+ 	int (*d_manage)(struct dentry *, bool);
+ 	struct inode *(*d_select_inode)(struct dentry *, unsigned);
++	struct dentry *(*d_real)(struct dentry *, struct inode *);
+ } ____cacheline_aligned;
+ 
+ /*
+@@ -227,6 +228,7 @@ struct dentry_operations {
+ #define DCACHE_MAY_FREE			0x00800000
+ #define DCACHE_FALLTHRU			0x01000000 /* Fall through to lower layer */
+ #define DCACHE_OP_SELECT_INODE		0x02000000 /* Unioned entry: dcache op selects inode */
++#define DCACHE_OP_REAL			0x08000000
+ 
+ extern seqlock_t rename_lock;
+ 
+@@ -582,4 +584,12 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper)
+ 	return upper;
+ }
+ 
++static inline struct dentry *d_real(struct dentry *dentry)
++{
++	if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
++		return dentry->d_op->d_real(dentry, NULL);
++	else
++		return dentry;
++}
++
+ #endif	/* __LINUX_DCACHE_H */
+diff --git a/include/linux/filter.h b/include/linux/filter.h
+index 43aa1f8855c7..a51a5361695f 100644
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -465,10 +465,14 @@ int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
+ void bpf_prog_destroy(struct bpf_prog *fp);
+ 
+ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
++int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk,
++		       bool locked);
+ int sk_attach_bpf(u32 ufd, struct sock *sk);
+ int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
+ int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
+ int sk_detach_filter(struct sock *sk);
++int __sk_detach_filter(struct sock *sk, bool locked);
++
+ int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
+ 		  unsigned int len);
+ 
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 2c7f8d9c3c70..83c77b093ce2 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1234,6 +1234,16 @@ static inline struct inode *file_inode(const struct file *f)
+ 	return f->f_inode;
+ }
+ 
++static inline struct dentry *file_dentry(const struct file *file)
++{
++	struct dentry *dentry = file->f_path.dentry;
++
++	if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
++		return dentry->d_op->d_real(dentry, file_inode(file));
++	else
++		return dentry;
++}
++
+ static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
+ {
+ 	return locks_lock_inode_wait(file_inode(filp), fl);
+diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
+index a338a688ee4a..dcb89e3515db 100644
+--- a/include/linux/if_bridge.h
++++ b/include/linux/if_bridge.h
+@@ -46,10 +46,6 @@ struct br_ip_list {
+ #define BR_LEARNING_SYNC	BIT(9)
+ #define BR_PROXYARP_WIFI	BIT(10)
+ 
+-/* values as per ieee8021QBridgeFdbAgingTime */
+-#define BR_MIN_AGEING_TIME	(10 * HZ)
+-#define BR_MAX_AGEING_TIME	(1000000 * HZ)
+-
+ #define BR_DEFAULT_AGEING_TIME	(300 * HZ)
+ 
+ extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 5440b7b705eb..6d1d8f4f759b 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -267,6 +267,7 @@ struct header_ops {
+ 	void	(*cache_update)(struct hh_cache *hh,
+ 				const struct net_device *dev,
+ 				const unsigned char *haddr);
++	bool	(*validate)(const char *ll_header, unsigned int len);
+ };
+ 
+ /* These flag bits are private to the generic network queueing
+@@ -1420,8 +1421,7 @@ enum netdev_priv_flags {
+  *	@dma:		DMA channel
+  *	@mtu:		Interface MTU value
+  *	@type:		Interface hardware type
+- *	@hard_header_len: Hardware header length, which means that this is the
+- *			  minimum size of a packet.
++ *	@hard_header_len: Maximum hardware header length.
+  *
+  *	@needed_headroom: Extra headroom the hardware may need, but not in all
+  *			  cases can this be guaranteed
+@@ -2627,6 +2627,24 @@ static inline int dev_parse_header(const struct sk_buff *skb,
+ 	return dev->header_ops->parse(skb, haddr);
+ }
+ 
++/* ll_header must have at least hard_header_len allocated */
++static inline bool dev_validate_header(const struct net_device *dev,
++				       char *ll_header, int len)
++{
++	if (likely(len >= dev->hard_header_len))
++		return true;
++
++	if (capable(CAP_SYS_RAWIO)) {
++		memset(ll_header + len, 0, dev->hard_header_len - len);
++		return true;
++	}
++
++	if (dev->header_ops && dev->header_ops->validate)
++		return dev->header_ops->validate(ll_header, len);
++
++	return false;
++}
++
+ typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
+ int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
+ static inline int unregister_gifconf(unsigned int family)
+diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
+index 7f5f78bd15ad..245f57dbbb61 100644
+--- a/include/linux/usb_usual.h
++++ b/include/linux/usb_usual.h
+@@ -79,6 +79,8 @@
+ 		/* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */	\
+ 	US_FLAG(MAX_SECTORS_240,	0x08000000)		\
+ 		/* Sets max_sectors to 240 */			\
++	US_FLAG(NO_REPORT_LUNS,	0x10000000)			\
++		/* Cannot handle REPORT_LUNS */			\
+ 
+ #define US_FLAG(name, value)	US_FL_##name = value ,
+ enum { US_DO_ALL_FLAGS };
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index ee6c52053aa3..791800ddd6d9 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -215,6 +215,7 @@ struct bonding {
+ 	 * ALB mode (6) - to sync the use and modifications of its hash table
+ 	 */
+ 	spinlock_t mode_lock;
++	spinlock_t stats_lock;
+ 	u8	 send_peer_notif;
+ 	u8       igmp_retrans;
+ #ifdef CONFIG_PROC_FS
+diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
+index f63a16760ae9..ba93c0f69870 100644
+--- a/include/scsi/scsi_device.h
++++ b/include/scsi/scsi_device.h
+@@ -513,6 +513,31 @@ static inline int scsi_device_tpgs(struct scsi_device *sdev)
+ 	return sdev->inquiry ? (sdev->inquiry[5] >> 4) & 0x3 : 0;
+ }
+ 
++/**
++ * scsi_device_supports_vpd - test if a device supports VPD pages
++ * @sdev: the &struct scsi_device to test
++ *
++ * If the 'try_vpd_pages' flag is set it takes precedence.
++ * Otherwise we will assume VPD pages are supported if the
++ * SCSI level is at least SPC-3 and 'skip_vpd_pages' is not set.
++ */
++static inline int scsi_device_supports_vpd(struct scsi_device *sdev)
++{
++	/* Attempt VPD inquiry if the device blacklist explicitly calls
++	 * for it.
++	 */
++	if (sdev->try_vpd_pages)
++		return 1;
++	/*
++	 * Although VPD inquiries can go to SCSI-2 type devices,
++	 * some USB ones crash on receiving them, and the pages
++	 * we currently ask for are for SPC-3 and beyond
++	 */
++	if (sdev->scsi_level > SCSI_SPC_2 && !sdev->skip_vpd_pages)
++		return 1;
++	return 0;
++}
++
+ #define MODULE_ALIAS_SCSI_DEVICE(type) \
+ 	MODULE_ALIAS("scsi:t-" __stringify(type) "*")
+ #define SCSI_DEVICE_MODALIAS_FMT "scsi:t-0x%02x"
+diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
+index 4504ca66118d..50da680c479f 100644
+--- a/kernel/bpf/helpers.c
++++ b/kernel/bpf/helpers.c
+@@ -166,7 +166,7 @@ static u64 bpf_get_current_comm(u64 r1, u64 size, u64 r3, u64 r4, u64 r5)
+ 	if (!task)
+ 		return -EINVAL;
+ 
+-	memcpy(buf, task->comm, min_t(size_t, size, sizeof(task->comm)));
++	strlcpy(buf, task->comm, min_t(size_t, size, sizeof(task->comm)));
+ 	return 0;
+ }
+ 
+diff --git a/mm/page_isolation.c b/mm/page_isolation.c
+index 92c4c36501e7..31555b689eb9 100644
+--- a/mm/page_isolation.c
++++ b/mm/page_isolation.c
+@@ -289,11 +289,11 @@ struct page *alloc_migrate_target(struct page *page, unsigned long private,
+ 	 * now as a simple work-around, we use the next node for destination.
+ 	 */
+ 	if (PageHuge(page)) {
+-		nodemask_t src = nodemask_of_node(page_to_nid(page));
+-		nodemask_t dst;
+-		nodes_complement(dst, src);
++		int node = next_online_node(page_to_nid(page));
++		if (node == MAX_NUMNODES)
++			node = first_online_node;
+ 		return alloc_huge_page_node(page_hstate(compound_head(page)),
+-					    next_node(page_to_nid(page), dst));
++					    node);
+ 	}
+ 
+ 	if (PageHighMem(page))
+diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
+index b563a3f5f2a8..2fa3be965101 100644
+--- a/net/ax25/ax25_ip.c
++++ b/net/ax25/ax25_ip.c
+@@ -228,8 +228,23 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
+ }
+ #endif
+ 
++static bool ax25_validate_header(const char *header, unsigned int len)
++{
++	ax25_digi digi;
++
++	if (!len)
++		return false;
++
++	if (header[0])
++		return true;
++
++	return ax25_addr_parse(header + 1, len - 1, NULL, NULL, &digi, NULL,
++			       NULL);
++}
++
+ const struct header_ops ax25_header_ops = {
+ 	.create = ax25_hard_header,
++	.validate = ax25_validate_header,
+ };
+ 
+ EXPORT_SYMBOL(ax25_header_ops);
+diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
+index b3cca126b103..e2670c5baafd 100644
+--- a/net/bridge/br_stp.c
++++ b/net/bridge/br_stp.c
+@@ -568,6 +568,14 @@ int br_set_max_age(struct net_bridge *br, unsigned long val)
+ 
+ }
+ 
++/* Set time interval that dynamic forwarding entries live
++ * For pure software bridge, allow values outside the 802.1
++ * standard specification for special cases:
++ *  0 - entry never ages (all permanant)
++ *  1 - entry disappears (no persistance)
++ *
++ * Offloaded switch entries maybe more restrictive
++ */
+ int br_set_ageing_time(struct net_bridge *br, u32 ageing_time)
+ {
+ 	struct switchdev_attr attr = {
+@@ -579,11 +587,8 @@ int br_set_ageing_time(struct net_bridge *br, u32 ageing_time)
+ 	unsigned long t = clock_t_to_jiffies(ageing_time);
+ 	int err;
+ 
+-	if (t < BR_MIN_AGEING_TIME || t > BR_MAX_AGEING_TIME)
+-		return -ERANGE;
+-
+ 	err = switchdev_port_attr_set(br->dev, &attr);
+-	if (err)
++	if (err && err != -EOPNOTSUPP)
+ 		return err;
+ 
+ 	br->ageing_time = t;
+diff --git a/net/core/filter.c b/net/core/filter.c
+index bba502f7cd57..fb2951c3532d 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -1147,7 +1147,8 @@ void bpf_prog_destroy(struct bpf_prog *fp)
+ }
+ EXPORT_SYMBOL_GPL(bpf_prog_destroy);
+ 
+-static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
++static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk,
++			    bool locked)
+ {
+ 	struct sk_filter *fp, *old_fp;
+ 
+@@ -1163,10 +1164,8 @@ static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
+ 		return -ENOMEM;
+ 	}
+ 
+-	old_fp = rcu_dereference_protected(sk->sk_filter,
+-					   sock_owned_by_user(sk));
++	old_fp = rcu_dereference_protected(sk->sk_filter, locked);
+ 	rcu_assign_pointer(sk->sk_filter, fp);
+-
+ 	if (old_fp)
+ 		sk_filter_uncharge(sk, old_fp);
+ 
+@@ -1245,7 +1244,8 @@ struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
+  * occurs or there is insufficient memory for the filter a negative
+  * errno code is returned. On success the return is zero.
+  */
+-int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
++int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk,
++		       bool locked)
+ {
+ 	struct bpf_prog *prog = __get_filter(fprog, sk);
+ 	int err;
+@@ -1253,7 +1253,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
+ 	if (IS_ERR(prog))
+ 		return PTR_ERR(prog);
+ 
+-	err = __sk_attach_prog(prog, sk);
++	err = __sk_attach_prog(prog, sk, locked);
+ 	if (err < 0) {
+ 		__bpf_prog_release(prog);
+ 		return err;
+@@ -1261,7 +1261,12 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
+ 
+ 	return 0;
+ }
+-EXPORT_SYMBOL_GPL(sk_attach_filter);
++EXPORT_SYMBOL_GPL(__sk_attach_filter);
++
++int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
++{
++	return __sk_attach_filter(fprog, sk, sock_owned_by_user(sk));
++}
+ 
+ int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
+ {
+@@ -1307,7 +1312,7 @@ int sk_attach_bpf(u32 ufd, struct sock *sk)
+ 	if (IS_ERR(prog))
+ 		return PTR_ERR(prog);
+ 
+-	err = __sk_attach_prog(prog, sk);
++	err = __sk_attach_prog(prog, sk, sock_owned_by_user(sk));
+ 	if (err < 0) {
+ 		bpf_prog_put(prog);
+ 		return err;
+@@ -2105,7 +2110,7 @@ static int __init register_sk_filter_ops(void)
+ }
+ late_initcall(register_sk_filter_ops);
+ 
+-int sk_detach_filter(struct sock *sk)
++int __sk_detach_filter(struct sock *sk, bool locked)
+ {
+ 	int ret = -ENOENT;
+ 	struct sk_filter *filter;
+@@ -2113,8 +2118,7 @@ int sk_detach_filter(struct sock *sk)
+ 	if (sock_flag(sk, SOCK_FILTER_LOCKED))
+ 		return -EPERM;
+ 
+-	filter = rcu_dereference_protected(sk->sk_filter,
+-					   sock_owned_by_user(sk));
++	filter = rcu_dereference_protected(sk->sk_filter, locked);
+ 	if (filter) {
+ 		RCU_INIT_POINTER(sk->sk_filter, NULL);
+ 		sk_filter_uncharge(sk, filter);
+@@ -2123,7 +2127,12 @@ int sk_detach_filter(struct sock *sk)
+ 
+ 	return ret;
+ }
+-EXPORT_SYMBOL_GPL(sk_detach_filter);
++EXPORT_SYMBOL_GPL(__sk_detach_filter);
++
++int sk_detach_filter(struct sock *sk)
++{
++	return __sk_detach_filter(sk, sock_owned_by_user(sk));
++}
+ 
+ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
+ 		  unsigned int len)
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 8261d95dd846..215e6137f6ff 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -905,6 +905,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ 	       + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
+ 	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
+ 	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
++	       + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
+ 	       + nla_total_size(1); /* IFLA_PROTO_DOWN */
+ 
+ }
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index 902d606324a0..8be8f27bfacc 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -204,8 +204,6 @@ void dccp_req_err(struct sock *sk, u64 seq)
+ 	 * ICMPs are not backlogged, hence we cannot get an established
+ 	 * socket here.
+ 	 */
+-	WARN_ON(req->sk);
+-
+ 	if (!between48(seq, dccp_rsk(req)->dreq_iss, dccp_rsk(req)->dreq_gss)) {
+ 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+ 	} else {
+diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
+index fa4daba8db55..d8fb47fcad05 100644
+--- a/net/dsa/dsa.c
++++ b/net/dsa/dsa.c
+@@ -935,6 +935,14 @@ static void dsa_remove_dst(struct dsa_switch_tree *dst)
+ {
+ 	int i;
+ 
++	dst->master_netdev->dsa_ptr = NULL;
++
++	/* If we used a tagging format that doesn't have an ethertype
++	 * field, make sure that all packets from this point get sent
++	 * without the tag and go through the regular receive path.
++	 */
++	wmb();
++
+ 	for (i = 0; i < dst->pd->nr_chips; i++) {
+ 		struct dsa_switch *ds = dst->ds[i];
+ 
+@@ -988,14 +996,6 @@ static int dsa_suspend(struct device *d)
+ 	struct dsa_switch_tree *dst = platform_get_drvdata(pdev);
+ 	int i, ret = 0;
+ 
+-	dst->master_netdev->dsa_ptr = NULL;
+-
+-	/* If we used a tagging format that doesn't have an ethertype
+-	 * field, make sure that all packets from this point get sent
+-	 * without the tag and go through the regular receive path.
+-	 */
+-	wmb();
+-
+ 	for (i = 0; i < dst->pd->nr_chips; i++) {
+ 		struct dsa_switch *ds = dst->ds[i];
+ 
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index f6303b17546b..0212591b0077 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -334,6 +334,9 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
+ 
+ 	ASSERT_RTNL();
+ 
++	if (in_dev->dead)
++		goto no_promotions;
++
+ 	/* 1. Deleting primary ifaddr forces deletion all secondaries
+ 	 * unless alias promotion is set
+ 	 **/
+@@ -380,6 +383,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
+ 			fib_del_ifaddr(ifa, ifa1);
+ 	}
+ 
++no_promotions:
+ 	/* 2. Unlink it */
+ 
+ 	*ifap = ifa1->ifa_next;
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index 473447593060..8a9246deccfe 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -280,7 +280,6 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
+ 	struct in_device *in_dev;
+ 	struct fib_result res;
+ 	struct rtable *rt;
+-	struct flowi4 fl4;
+ 	struct net *net;
+ 	int scope;
+ 
+@@ -296,14 +295,13 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
+ 
+ 	scope = RT_SCOPE_UNIVERSE;
+ 	if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
+-		fl4.flowi4_oif = 0;
+-		fl4.flowi4_iif = LOOPBACK_IFINDEX;
+-		fl4.daddr = ip_hdr(skb)->saddr;
+-		fl4.saddr = 0;
+-		fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
+-		fl4.flowi4_scope = scope;
+-		fl4.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0;
+-		fl4.flowi4_tun_key.tun_id = 0;
++		struct flowi4 fl4 = {
++			.flowi4_iif = LOOPBACK_IFINDEX,
++			.daddr = ip_hdr(skb)->saddr,
++			.flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
++			.flowi4_scope = scope,
++			.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0,
++		};
+ 		if (!fib_lookup(net, &fl4, &res, 0))
+ 			return FIB_RES_PREFSRC(net, res);
+ 	} else {
+@@ -922,6 +920,9 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
+ 		subnet = 1;
+ 	}
+ 
++	if (in_dev->dead)
++		goto no_promotions;
++
+ 	/* Deletion is more complicated than add.
+ 	 * We should take care of not to delete too much :-)
+ 	 *
+@@ -997,6 +998,7 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
+ 		}
+ 	}
+ 
++no_promotions:
+ 	if (!(ok & BRD_OK))
+ 		fib_magic(RTM_DELROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim);
+ 	if (subnet && ifa->ifa_prefixlen < 31) {
+diff --git a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
+index c6eb42100e9a..ea91058b5f6f 100644
+--- a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
++++ b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
+@@ -108,10 +108,18 @@ static int masq_inet_event(struct notifier_block *this,
+ 			   unsigned long event,
+ 			   void *ptr)
+ {
+-	struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev;
++	struct in_device *idev = ((struct in_ifaddr *)ptr)->ifa_dev;
+ 	struct netdev_notifier_info info;
+ 
+-	netdev_notifier_info_init(&info, dev);
++	/* The masq_dev_notifier will catch the case of the device going
++	 * down.  So if the inetdev is dead and being destroyed we have
++	 * no work to do.  Otherwise this is an individual address removal
++	 * and we have to perform the flush.
++	 */
++	if (idev->dead)
++		return NOTIFY_DONE;
++
++	netdev_notifier_info_init(&info, idev->dev);
+ 	return masq_device_event(this, event, &info);
+ }
+ 
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 487ac67059e2..a7b1a905580b 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -319,8 +319,6 @@ void tcp_req_err(struct sock *sk, u32 seq, bool abort)
+ 	/* ICMPs are not backlogged, hence we cannot get
+ 	 * an established socket here.
+ 	 */
+-	WARN_ON(req->sk);
+-
+ 	if (seq != tcp_rsk(req)->snt_isn) {
+ 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+ 	} else if (abort) {
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 95d2f198017e..eb8933bc0e6e 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -2082,10 +2082,14 @@ void udp_v4_early_demux(struct sk_buff *skb)
+ 		if (!in_dev)
+ 			return;
+ 
+-		ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
+-				       iph->protocol);
+-		if (!ours)
+-			return;
++		/* we are supposed to accept bcast packets */
++		if (skb->pkt_type == PACKET_MULTICAST) {
++			ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
++					       iph->protocol);
++			if (!ours)
++				return;
++		}
++
+ 		sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
+ 						   uh->source, iph->saddr, dif);
+ 	} else if (skb->pkt_type == PACKET_HOST) {
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index a163102f1803..2a6606c935cc 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1091,8 +1091,8 @@ static inline int ip6_ufo_append_data(struct sock *sk,
+ 			int getfrag(void *from, char *to, int offset, int len,
+ 			int odd, struct sk_buff *skb),
+ 			void *from, int length, int hh_len, int fragheaderlen,
+-			int transhdrlen, int mtu, unsigned int flags,
+-			const struct flowi6 *fl6)
++			int exthdrlen, int transhdrlen, int mtu,
++			unsigned int flags, const struct flowi6 *fl6)
+ 
+ {
+ 	struct sk_buff *skb;
+@@ -1117,7 +1117,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
+ 		skb_put(skb, fragheaderlen + transhdrlen);
+ 
+ 		/* initialize network header pointer */
+-		skb_reset_network_header(skb);
++		skb_set_network_header(skb, exthdrlen);
+ 
+ 		/* initialize protocol header pointer */
+ 		skb->transport_header = skb->network_header + fragheaderlen;
+@@ -1359,7 +1359,7 @@ emsgsize:
+ 	    (rt->dst.dev->features & NETIF_F_UFO) &&
+ 	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
+ 		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
+-					  hh_len, fragheaderlen,
++					  hh_len, fragheaderlen, exthdrlen,
+ 					  transhdrlen, mtu, flags, fl6);
+ 		if (err)
+ 			goto error;
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 6c5dfec7a377..3991b21e24ad 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -343,12 +343,12 @@ static int ip6_tnl_create2(struct net_device *dev)
+ 
+ 	t = netdev_priv(dev);
+ 
++	dev->rtnl_link_ops = &ip6_link_ops;
+ 	err = register_netdevice(dev);
+ 	if (err < 0)
+ 		goto out;
+ 
+ 	strcpy(t->parms.name, dev->name);
+-	dev->rtnl_link_ops = &ip6_link_ops;
+ 
+ 	dev_hold(dev);
+ 	ip6_tnl_link(ip6n, t);
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 422dd014aa2c..6794120f53b8 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -883,8 +883,8 @@ start_lookup:
+ 		flush_stack(stack, count, skb, count - 1);
+ 	} else {
+ 		if (!inner_flushed)
+-			UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
+-					 proto == IPPROTO_UDPLITE);
++			UDP6_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
++					  proto == IPPROTO_UDPLITE);
+ 		consume_skb(skb);
+ 	}
+ 	return 0;
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index ec22078b0914..42de4ccd159f 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -123,12 +123,11 @@ static int l2tp_ip_recv(struct sk_buff *skb)
+ 	struct l2tp_tunnel *tunnel = NULL;
+ 	int length;
+ 
+-	/* Point to L2TP header */
+-	optr = ptr = skb->data;
+-
+ 	if (!pskb_may_pull(skb, 4))
+ 		goto discard;
+ 
++	/* Point to L2TP header */
++	optr = ptr = skb->data;
+ 	session_id = ntohl(*((__be32 *) ptr));
+ 	ptr += 4;
+ 
+@@ -156,6 +155,9 @@ static int l2tp_ip_recv(struct sk_buff *skb)
+ 		if (!pskb_may_pull(skb, length))
+ 			goto discard;
+ 
++		/* Point to L2TP header */
++		optr = ptr = skb->data;
++		ptr += 4;
+ 		pr_debug("%s: ip recv\n", tunnel->name);
+ 		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
+ 	}
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index a2c8747d2936..9ee4ddb6b397 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -135,12 +135,11 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
+ 	struct l2tp_tunnel *tunnel = NULL;
+ 	int length;
+ 
+-	/* Point to L2TP header */
+-	optr = ptr = skb->data;
+-
+ 	if (!pskb_may_pull(skb, 4))
+ 		goto discard;
+ 
++	/* Point to L2TP header */
++	optr = ptr = skb->data;
+ 	session_id = ntohl(*((__be32 *) ptr));
+ 	ptr += 4;
+ 
+@@ -168,6 +167,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
+ 		if (!pskb_may_pull(skb, length))
+ 			goto discard;
+ 
++		/* Point to L2TP header */
++		optr = ptr = skb->data;
++		ptr += 4;
+ 		pr_debug("%s: ip recv\n", tunnel->name);
+ 		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
+ 	}
+diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
+index 978d3bc31df7..1b33d89906e1 100644
+--- a/net/mac80211/ibss.c
++++ b/net/mac80211/ibss.c
+@@ -7,6 +7,7 @@
+  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
+  * Copyright 2009, Johannes Berg <johannes@sipsolutions.net>
+  * Copyright 2013-2014  Intel Mobile Communications GmbH
++ * Copyright(c) 2016 Intel Deutschland GmbH
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License version 2 as
+@@ -1485,14 +1486,21 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
+ 
+ 		sdata_info(sdata, "Trigger new scan to find an IBSS to join\n");
+ 
+-		num = ieee80211_ibss_setup_scan_channels(local->hw.wiphy,
+-							 &ifibss->chandef,
+-							 channels,
+-							 ARRAY_SIZE(channels));
+ 		scan_width = cfg80211_chandef_to_scan_width(&ifibss->chandef);
+-		ieee80211_request_ibss_scan(sdata, ifibss->ssid,
+-					    ifibss->ssid_len, channels, num,
+-					    scan_width);
++
++		if (ifibss->fixed_channel) {
++			num = ieee80211_ibss_setup_scan_channels(local->hw.wiphy,
++								 &ifibss->chandef,
++								 channels,
++								 ARRAY_SIZE(channels));
++			ieee80211_request_ibss_scan(sdata, ifibss->ssid,
++						    ifibss->ssid_len, channels,
++						    num, scan_width);
++		} else {
++			ieee80211_request_ibss_scan(sdata, ifibss->ssid,
++						    ifibss->ssid_len, NULL,
++						    0, scan_width);
++		}
+ 	} else {
+ 		int interval = IEEE80211_SCAN_INTERVAL;
+ 
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index c9e325d2e120..7a2b7915093b 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -977,7 +977,10 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
+ 	if (sdata->vif.txq) {
+ 		struct txq_info *txqi = to_txq_info(sdata->vif.txq);
+ 
++		spin_lock_bh(&txqi->queue.lock);
+ 		ieee80211_purge_tx_queue(&local->hw, &txqi->queue);
++		spin_unlock_bh(&txqi->queue.lock);
++
+ 		atomic_set(&sdata->txqs_len[txqi->txq.ac], 0);
+ 	}
+ 
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 60d093f40f1d..261df74eaf38 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -2249,7 +2249,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
+ 	struct ieee80211_local *local = rx->local;
+ 	struct ieee80211_sub_if_data *sdata = rx->sdata;
+ 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+-	u16 q, hdrlen;
++	u16 ac, q, hdrlen;
+ 
+ 	hdr = (struct ieee80211_hdr *) skb->data;
+ 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
+@@ -2318,7 +2318,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
+ 	    ether_addr_equal(sdata->vif.addr, hdr->addr3))
+ 		return RX_CONTINUE;
+ 
+-	q = ieee80211_select_queue_80211(sdata, skb, hdr);
++	ac = ieee80211_select_queue_80211(sdata, skb, hdr);
++	q = sdata->vif.hw_queue[ac];
+ 	if (ieee80211_queue_stopped(&local->hw, q)) {
+ 		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
+ 		return RX_DROP_MONITOR;
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index a4a4f89d3ba0..23ed038cf7f9 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -257,11 +257,11 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
+ }
+ 
+ /* Caller must hold local->sta_mtx */
+-static void sta_info_hash_add(struct ieee80211_local *local,
+-			      struct sta_info *sta)
++static int sta_info_hash_add(struct ieee80211_local *local,
++			     struct sta_info *sta)
+ {
+-	rhashtable_insert_fast(&local->sta_hash, &sta->hash_node,
+-			       sta_rht_params);
++	return rhashtable_insert_fast(&local->sta_hash, &sta->hash_node,
++				      sta_rht_params);
+ }
+ 
+ static void sta_deliver_ps_frames(struct work_struct *wk)
+@@ -498,11 +498,17 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
+ {
+ 	struct ieee80211_local *local = sta->local;
+ 	struct ieee80211_sub_if_data *sdata = sta->sdata;
+-	struct station_info sinfo;
++	struct station_info *sinfo;
+ 	int err = 0;
+ 
+ 	lockdep_assert_held(&local->sta_mtx);
+ 
++	sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL);
++	if (!sinfo) {
++		err = -ENOMEM;
++		goto out_err;
++	}
++
+ 	/* check if STA exists already */
+ 	if (sta_info_get_bss(sdata, sta->sta.addr)) {
+ 		err = -EEXIST;
+@@ -517,7 +523,9 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
+ 	set_sta_flag(sta, WLAN_STA_BLOCK_BA);
+ 
+ 	/* make the station visible */
+-	sta_info_hash_add(local, sta);
++	err = sta_info_hash_add(local, sta);
++	if (err)
++		goto out_drop_sta;
+ 
+ 	list_add_tail_rcu(&sta->list, &local->sta_list);
+ 
+@@ -534,10 +542,9 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
+ 	ieee80211_sta_debugfs_add(sta);
+ 	rate_control_add_sta_debugfs(sta);
+ 
+-	memset(&sinfo, 0, sizeof(sinfo));
+-	sinfo.filled = 0;
+-	sinfo.generation = local->sta_generation;
+-	cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
++	sinfo->generation = local->sta_generation;
++	cfg80211_new_sta(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
++	kfree(sinfo);
+ 
+ 	sta_dbg(sdata, "Inserted STA %pM\n", sta->sta.addr);
+ 
+@@ -552,6 +559,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
+  out_remove:
+ 	sta_info_hash_del(local, sta);
+ 	list_del_rcu(&sta->list);
++ out_drop_sta:
+ 	local->num_sta--;
+ 	synchronize_net();
+ 	__cleanup_single_sta(sta);
+@@ -898,7 +906,7 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
+ {
+ 	struct ieee80211_local *local = sta->local;
+ 	struct ieee80211_sub_if_data *sdata = sta->sdata;
+-	struct station_info sinfo = {};
++	struct station_info *sinfo;
+ 	int ret;
+ 
+ 	/*
+@@ -936,8 +944,11 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
+ 
+ 	sta_dbg(sdata, "Removed STA %pM\n", sta->sta.addr);
+ 
+-	sta_set_sinfo(sta, &sinfo);
+-	cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
++	sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
++	if (sinfo)
++		sta_set_sinfo(sta, sinfo);
++	cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
++	kfree(sinfo);
+ 
+ 	rate_control_remove_sta_debugfs(sta);
+ 	ieee80211_sta_debugfs_remove(sta);
+diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
+index b18c5ed42d95..0b80a7140cc4 100644
+--- a/net/mpls/af_mpls.c
++++ b/net/mpls/af_mpls.c
+@@ -543,6 +543,9 @@ static struct net_device *find_outdev(struct net *net,
+ 	if (!dev)
+ 		return ERR_PTR(-ENODEV);
+ 
++	if (IS_ERR(dev))
++		return dev;
++
+ 	/* The caller is holding rtnl anyways, so release the dev reference */
+ 	dev_put(dev);
+ 
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 992396aa635c..da1ae0e13cb5 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1916,6 +1916,10 @@ retry:
+ 		goto retry;
+ 	}
+ 
++	if (!dev_validate_header(dev, skb->data, len)) {
++		err = -EINVAL;
++		goto out_unlock;
++	}
+ 	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
+ 	    !packet_extra_vlan_len_allowed(dev, skb)) {
+ 		err = -EMSGSIZE;
+@@ -2326,18 +2330,6 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
+ 	sock_wfree(skb);
+ }
+ 
+-static bool ll_header_truncated(const struct net_device *dev, int len)
+-{
+-	/* net device doesn't like empty head */
+-	if (unlikely(len < dev->hard_header_len)) {
+-		net_warn_ratelimited("%s: packet size is too short (%d < %d)\n",
+-				     current->comm, len, dev->hard_header_len);
+-		return true;
+-	}
+-
+-	return false;
+-}
+-
+ static void tpacket_set_protocol(const struct net_device *dev,
+ 				 struct sk_buff *skb)
+ {
+@@ -2420,19 +2412,19 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
+ 		if (unlikely(err < 0))
+ 			return -EINVAL;
+ 	} else if (dev->hard_header_len) {
+-		if (ll_header_truncated(dev, tp_len))
+-			return -EINVAL;
++		int hdrlen = min_t(int, dev->hard_header_len, tp_len);
+ 
+ 		skb_push(skb, dev->hard_header_len);
+-		err = skb_store_bits(skb, 0, data,
+-				dev->hard_header_len);
++		err = skb_store_bits(skb, 0, data, hdrlen);
+ 		if (unlikely(err))
+ 			return err;
++		if (!dev_validate_header(dev, skb->data, hdrlen))
++			return -EINVAL;
+ 		if (!skb->protocol)
+ 			tpacket_set_protocol(dev, skb);
+ 
+-		data += dev->hard_header_len;
+-		to_write -= dev->hard_header_len;
++		data += hdrlen;
++		to_write -= hdrlen;
+ 	}
+ 
+ 	offset = offset_in_page(data);
+@@ -2763,9 +2755,6 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ 		offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
+ 		if (unlikely(offset < 0))
+ 			goto out_free;
+-	} else {
+-		if (ll_header_truncated(dev, len))
+-			goto out_free;
+ 	}
+ 
+ 	/* Returns -EFAULT on error */
+@@ -2773,6 +2762,12 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ 	if (err)
+ 		goto out_free;
+ 
++	if (sock->type == SOCK_RAW &&
++	    !dev_validate_header(dev, skb->data, len)) {
++		err = -EINVAL;
++		goto out_free;
++	}
++
+ 	sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
+ 
+ 	if (!gso_type && (len > dev->mtu + reserve + extra_len) &&
+diff --git a/net/socket.c b/net/socket.c
+index c044d1e8508c..db13ae893dce 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -2240,31 +2240,31 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
+ 		cond_resched();
+ 	}
+ 
+-out_put:
+-	fput_light(sock->file, fput_needed);
+-
+ 	if (err == 0)
+-		return datagrams;
++		goto out_put;
+ 
+-	if (datagrams != 0) {
++	if (datagrams == 0) {
++		datagrams = err;
++		goto out_put;
++	}
++
++	/*
++	 * We may return less entries than requested (vlen) if the
++	 * sock is non block and there aren't enough datagrams...
++	 */
++	if (err != -EAGAIN) {
+ 		/*
+-		 * We may return less entries than requested (vlen) if the
+-		 * sock is non block and there aren't enough datagrams...
++		 * ... or  if recvmsg returns an error after we
++		 * received some datagrams, where we record the
++		 * error to return on the next call or if the
++		 * app asks about it using getsockopt(SO_ERROR).
+ 		 */
+-		if (err != -EAGAIN) {
+-			/*
+-			 * ... or  if recvmsg returns an error after we
+-			 * received some datagrams, where we record the
+-			 * error to return on the next call or if the
+-			 * app asks about it using getsockopt(SO_ERROR).
+-			 */
+-			sock->sk->sk_err = -err;
+-		}
+-
+-		return datagrams;
++		sock->sk->sk_err = -err;
+ 	}
++out_put:
++	fput_light(sock->file, fput_needed);
+ 
+-	return err;
++	return datagrams;
+ }
+ 
+ SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg,
+diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
+index ad7f5b3f9b61..1c4ad477ce93 100644
+--- a/net/xfrm/xfrm_input.c
++++ b/net/xfrm/xfrm_input.c
+@@ -292,12 +292,15 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
+ 		XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
+ 
+ 		skb_dst_force(skb);
++		dev_hold(skb->dev);
+ 
+ 		nexthdr = x->type->input(x, skb);
+ 
+ 		if (nexthdr == -EINPROGRESS)
+ 			return 0;
+ resume:
++		dev_put(skb->dev);
++
+ 		spin_lock(&x->lock);
+ 		if (nexthdr <= 0) {
+ 			if (nexthdr == -EBADMSG) {
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index dca817fc7894..e5e7e4368996 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -1041,8 +1041,8 @@ static int snd_timer_s_start(struct snd_timer * timer)
+ 		njiff += timer->sticks - priv->correction;
+ 		priv->correction = 0;
+ 	}
+-	priv->last_expires = priv->tlist.expires = njiff;
+-	add_timer(&priv->tlist);
++	priv->last_expires = njiff;
++	mod_timer(&priv->tlist, njiff);
+ 	return 0;
+ }
+ 
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index e68fa449ebef..0c9585602bf3 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -1623,6 +1623,8 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
+ 
+ 	mutex_lock(&per_pin->lock);
+ 	pin_eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE);
++	eld->monitor_present = pin_eld->monitor_present;
++
+ 	if (pin_eld->monitor_present)
+ 		eld->eld_valid  = !!(present & AC_PINSENSE_ELDV);
+ 	else
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 4f5ca0b9ce27..1402ba954b3d 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4759,6 +4759,8 @@ enum {
+ 	ALC255_FIXUP_DELL_SPK_NOISE,
+ 	ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 	ALC280_FIXUP_HP_HEADSET_MIC,
++	ALC221_FIXUP_HP_FRONT_MIC,
++	ALC292_FIXUP_TPT460,
+ };
+ 
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -5401,6 +5403,19 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC269_FIXUP_HEADSET_MIC,
+ 	},
++	[ALC221_FIXUP_HP_FRONT_MIC] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x19, 0x02a19020 }, /* Front Mic */
++			{ }
++		},
++	},
++	[ALC292_FIXUP_TPT460] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc_fixup_tpt440_dock,
++		.chained = true,
++		.chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE,
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -5506,6 +5521,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
++	SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+ 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+@@ -5554,7 +5570,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
+-	SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
++	SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
+ 	SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+@@ -5649,6 +5665,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ 	{.id = ALC283_FIXUP_SENSE_COMBO_JACK, .name = "alc283-sense-combo"},
+ 	{.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"},
+ 	{.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
++	{.id = ALC292_FIXUP_TPT460, .name = "tpt460"},
+ 	{}
+ };
+ #define ALC225_STANDARD_PINS \
+@@ -6406,6 +6423,7 @@ enum {
+ 	ALC668_FIXUP_AUTO_MUTE,
+ 	ALC668_FIXUP_DELL_DISABLE_AAMIX,
+ 	ALC668_FIXUP_DELL_XPS13,
++	ALC662_FIXUP_ASUS_Nx50,
+ };
+ 
+ static const struct hda_fixup alc662_fixups[] = {
+@@ -6646,6 +6664,12 @@ static const struct hda_fixup alc662_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc_fixup_bass_chmap,
+ 	},
++	[ALC662_FIXUP_ASUS_Nx50] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc_fixup_auto_mute_via_amp,
++		.chained = true,
++		.chain_id = ALC662_FIXUP_BASS_1A
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+@@ -6668,8 +6692,9 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
+-	SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A),
++	SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
+ 	SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
++	SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50),
+ 	SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
+ 	SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
+ 	SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16),
+diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
+index ddca6547399b..1f8fb0d904e0 100644
+--- a/sound/usb/mixer_maps.c
++++ b/sound/usb/mixer_maps.c
+@@ -349,6 +349,16 @@ static struct usbmix_name_map bose_companion5_map[] = {
+ };
+ 
+ /*
++ * Dell usb dock with ALC4020 codec had a firmware problem where it got
++ * screwed up when zero volume is passed; just skip it as a workaround
++ */
++static const struct usbmix_name_map dell_alc4020_map[] = {
++	{ 16, NULL },
++	{ 19, NULL },
++	{ 0 }
++};
++
++/*
+  * Control map entries
+  */
+ 
+@@ -431,6 +441,10 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
+ 		.map = aureon_51_2_map,
+ 	},
+ 	{
++		.id = USB_ID(0x0bda, 0x4014),
++		.map = dell_alc4020_map,
++	},
++	{
+ 		.id = USB_ID(0x0dba, 0x1000),
+ 		.map = mbox1_map,
+ 	},
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index cd7eac28edee..001fb4dc0722 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1135,9 +1135,11 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+ 	case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
+ 	case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
+ 	case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
++	case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
+ 	case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
+ 	case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
+ 	case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
++	case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
+ 	case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
+ 		return true;
+ 	}


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [gentoo-commits] proj/linux-patches:4.5 commit in: /
@ 2016-05-04 23:56 Mike Pagano
  0 siblings, 0 replies; 12+ messages in thread
From: Mike Pagano @ 2016-05-04 23:56 UTC (permalink / raw
  To: gentoo-commits

commit:     aced38b03d4a5900cd1aadb745ab0d02ff4151db
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed May  4 23:56:12 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed May  4 23:56:12 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=aced38b0

Linux patch 4.5.3

 0000_README            |    4 +
 1002_linux-4.5.3.patch | 7456 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 7460 insertions(+)

diff --git a/0000_README b/0000_README
index 0fa777f..0147ad9 100644
--- a/0000_README
+++ b/0000_README
@@ -51,6 +51,10 @@ Patch:  1001_linux-4.5.2.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.5.2
 
+Patch:  1002_linux-4.5.3.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.5.3
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1002_linux-4.5.3.patch b/1002_linux-4.5.3.patch
new file mode 100644
index 0000000..6401e8f
--- /dev/null
+++ b/1002_linux-4.5.3.patch
@@ -0,0 +1,7456 @@
+diff --git a/Makefile b/Makefile
+index 1ecaaeb7791d..9b56a6c5e36f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 5
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+ 
+diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
+index 1fafaad516ba..97471d62d5e4 100644
+--- a/arch/arm/boot/dts/am33xx.dtsi
++++ b/arch/arm/boot/dts/am33xx.dtsi
+@@ -860,7 +860,7 @@
+ 			ti,no-idle-on-init;
+ 			reg = <0x50000000 0x2000>;
+ 			interrupts = <100>;
+-			dmas = <&edma 52>;
++			dmas = <&edma 52 0>;
+ 			dma-names = "rxtx";
+ 			gpmc,num-cs = <7>;
+ 			gpmc,num-waitpins = <2>;
+diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
+index 92068fbf8b57..6bd38a28e26c 100644
+--- a/arch/arm/boot/dts/am4372.dtsi
++++ b/arch/arm/boot/dts/am4372.dtsi
+@@ -207,7 +207,7 @@
+ 			ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 5>,
+ 				   <&edma_tptc2 0>;
+ 
+-			ti,edma-memcpy-channels = <32 33>;
++			ti,edma-memcpy-channels = <58 59>;
+ 		};
+ 
+ 		edma_tptc0: tptc@49800000 {
+@@ -884,7 +884,7 @@
+ 		gpmc: gpmc@50000000 {
+ 			compatible = "ti,am3352-gpmc";
+ 			ti,hwmods = "gpmc";
+-			dmas = <&edma 52>;
++			dmas = <&edma 52 0>;
+ 			dma-names = "rxtx";
+ 			clocks = <&l3s_gclk>;
+ 			clock-names = "fck";
+diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
+index d580e2b70f9a..637dc5dbc8ac 100644
+--- a/arch/arm/boot/dts/am43x-epos-evm.dts
++++ b/arch/arm/boot/dts/am43x-epos-evm.dts
+@@ -792,3 +792,8 @@
+ 	tx-num-evt = <32>;
+ 	rx-num-evt = <32>;
+ };
++
++&synctimer_32kclk {
++	assigned-clocks = <&mux_synctimer32k_ck>;
++	assigned-clock-parents = <&clkdiv32k_ick>;
++};
+diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi
+index 7ccce7529b0c..cc952cf8ec30 100644
+--- a/arch/arm/boot/dts/armada-375.dtsi
++++ b/arch/arm/boot/dts/armada-375.dtsi
+@@ -529,7 +529,7 @@
+ 			};
+ 
+ 			sata@a0000 {
+-				compatible = "marvell,orion-sata";
++				compatible = "marvell,armada-370-sata";
+ 				reg = <0xa0000 0x5000>;
+ 				interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ 				clocks = <&gateclk 14>, <&gateclk 20>;
+diff --git a/arch/arm/boot/dts/armada-385-linksys.dtsi b/arch/arm/boot/dts/armada-385-linksys.dtsi
+index 3710755c6d76..85d2c377c332 100644
+--- a/arch/arm/boot/dts/armada-385-linksys.dtsi
++++ b/arch/arm/boot/dts/armada-385-linksys.dtsi
+@@ -117,7 +117,7 @@
+ 			};
+ 
+ 			/* USB part of the eSATA/USB 2.0 port */
+-			usb@50000 {
++			usb@58000 {
+ 				status = "okay";
+ 			};
+ 
+diff --git a/arch/arm/boot/dts/pxa3xx.dtsi b/arch/arm/boot/dts/pxa3xx.dtsi
+index cf6998a0804d..564341af7e97 100644
+--- a/arch/arm/boot/dts/pxa3xx.dtsi
++++ b/arch/arm/boot/dts/pxa3xx.dtsi
+@@ -30,7 +30,7 @@
+ 			reg = <0x43100000 90>;
+ 			interrupts = <45>;
+ 			clocks = <&clks CLK_NAND>;
+-			dmas = <&pdma 97>;
++			dmas = <&pdma 97 3>;
+ 			dma-names = "data";
+ 			#address-cells = <1>;
+ 			#size-cells = <1>;	
+diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
+index 652a0bb11578..5189bcecad12 100644
+--- a/arch/arm/mach-exynos/Kconfig
++++ b/arch/arm/mach-exynos/Kconfig
+@@ -27,6 +27,7 @@ menuconfig ARCH_EXYNOS
+ 	select S5P_DEV_MFC
+ 	select SRAM
+ 	select THERMAL
++	select THERMAL_OF
+ 	select MFD_SYSCON
+ 	select CLKSRC_EXYNOS_MCT
+ 	select POWER_RESET
+diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
+index aa7b379e2661..2a3db0bd9e15 100644
+--- a/arch/arm/mach-omap2/cpuidle34xx.c
++++ b/arch/arm/mach-omap2/cpuidle34xx.c
+@@ -34,6 +34,7 @@
+ #include "pm.h"
+ #include "control.h"
+ #include "common.h"
++#include "soc.h"
+ 
+ /* Mach specific information to be recorded in the C-state driver_data */
+ struct omap3_idle_statedata {
+@@ -315,6 +316,69 @@ static struct cpuidle_driver omap3_idle_driver = {
+ 	.safe_state_index = 0,
+ };
+ 
++/*
++ * Numbers based on measurements made in October 2009 for PM optimized kernel
++ * with CPU freq enabled on device Nokia N900. Assumes OPP2 (main idle OPP,
++ * and worst case latencies).
++ */
++static struct cpuidle_driver omap3430_idle_driver = {
++	.name             = "omap3430_idle",
++	.owner            = THIS_MODULE,
++	.states = {
++		{
++			.enter		  = omap3_enter_idle_bm,
++			.exit_latency	  = 110 + 162,
++			.target_residency = 5,
++			.name		  = "C1",
++			.desc		  = "MPU ON + CORE ON",
++		},
++		{
++			.enter		  = omap3_enter_idle_bm,
++			.exit_latency	  = 106 + 180,
++			.target_residency = 309,
++			.name		  = "C2",
++			.desc		  = "MPU ON + CORE ON",
++		},
++		{
++			.enter		  = omap3_enter_idle_bm,
++			.exit_latency	  = 107 + 410,
++			.target_residency = 46057,
++			.name		  = "C3",
++			.desc		  = "MPU RET + CORE ON",
++		},
++		{
++			.enter		  = omap3_enter_idle_bm,
++			.exit_latency	  = 121 + 3374,
++			.target_residency = 46057,
++			.name		  = "C4",
++			.desc		  = "MPU OFF + CORE ON",
++		},
++		{
++			.enter		  = omap3_enter_idle_bm,
++			.exit_latency	  = 855 + 1146,
++			.target_residency = 46057,
++			.name		  = "C5",
++			.desc		  = "MPU RET + CORE RET",
++		},
++		{
++			.enter		  = omap3_enter_idle_bm,
++			.exit_latency	  = 7580 + 4134,
++			.target_residency = 484329,
++			.name		  = "C6",
++			.desc		  = "MPU OFF + CORE RET",
++		},
++		{
++			.enter		  = omap3_enter_idle_bm,
++			.exit_latency	  = 7505 + 15274,
++			.target_residency = 484329,
++			.name		  = "C7",
++			.desc		  = "MPU OFF + CORE OFF",
++		},
++	},
++	.state_count = ARRAY_SIZE(omap3_idle_data),
++	.safe_state_index = 0,
++};
++
+ /* Public functions */
+ 
+ /**
+@@ -333,5 +397,8 @@ int __init omap3_idle_init(void)
+ 	if (!mpu_pd || !core_pd || !per_pd || !cam_pd)
+ 		return -ENODEV;
+ 
+-	return cpuidle_register(&omap3_idle_driver, NULL);
++	if (cpu_is_omap3430())
++		return cpuidle_register(&omap3430_idle_driver, NULL);
++	else
++		return cpuidle_register(&omap3_idle_driver, NULL);
+ }
+diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
+index 3c87e40650cf..9821be6dfd5e 100644
+--- a/arch/arm/mach-omap2/io.c
++++ b/arch/arm/mach-omap2/io.c
+@@ -368,6 +368,7 @@ void __init omap5_map_io(void)
+ void __init dra7xx_map_io(void)
+ {
+ 	iotable_init(dra7xx_io_desc, ARRAY_SIZE(dra7xx_io_desc));
++	omap_barriers_init();
+ }
+ #endif
+ /*
+diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
+index b6d62e4cdfdd..2af6ff63e3b4 100644
+--- a/arch/arm/mach-omap2/omap_hwmod.c
++++ b/arch/arm/mach-omap2/omap_hwmod.c
+@@ -1416,9 +1416,7 @@ static void _enable_sysc(struct omap_hwmod *oh)
+ 	    (sf & SYSC_HAS_CLOCKACTIVITY))
+ 		_set_clockactivity(oh, oh->class->sysc->clockact, &v);
+ 
+-	/* If the cached value is the same as the new value, skip the write */
+-	if (oh->_sysc_cache != v)
+-		_write_sysconfig(v, oh);
++	_write_sysconfig(v, oh);
+ 
+ 	/*
+ 	 * Set the autoidle bit only after setting the smartidle bit
+@@ -1481,7 +1479,9 @@ static void _idle_sysc(struct omap_hwmod *oh)
+ 		_set_master_standbymode(oh, idlemode, &v);
+ 	}
+ 
+-	_write_sysconfig(v, oh);
++	/* If the cached value is the same as the new value, skip the write */
++	if (oh->_sysc_cache != v)
++		_write_sysconfig(v, oh);
+ }
+ 
+ /**
+diff --git a/arch/arm/mach-prima2/Kconfig b/arch/arm/mach-prima2/Kconfig
+index f998eb1c698e..0cf4426183cf 100644
+--- a/arch/arm/mach-prima2/Kconfig
++++ b/arch/arm/mach-prima2/Kconfig
+@@ -2,6 +2,7 @@ menuconfig ARCH_SIRF
+ 	bool "CSR SiRF"
+ 	depends on ARCH_MULTI_V7
+ 	select ARCH_HAS_RESET_CONTROLLER
++	select RESET_CONTROLLER
+ 	select ARCH_REQUIRE_GPIOLIB
+ 	select GENERIC_IRQ_CHIP
+ 	select NO_IOPORT_MAP
+diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
+index 8dde19962a5b..f63c96cd3608 100644
+--- a/arch/powerpc/include/uapi/asm/cputable.h
++++ b/arch/powerpc/include/uapi/asm/cputable.h
+@@ -31,6 +31,7 @@
+ #define PPC_FEATURE_PSERIES_PERFMON_COMPAT \
+ 					0x00000040
+ 
++/* Reserved - do not use		0x00000004 */
+ #define PPC_FEATURE_TRUE_LE		0x00000002
+ #define PPC_FEATURE_PPC_LE		0x00000001
+ 
+diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
+index 7030b035905d..a15fe1d4e84a 100644
+--- a/arch/powerpc/kernel/prom.c
++++ b/arch/powerpc/kernel/prom.c
+@@ -148,23 +148,25 @@ static struct ibm_pa_feature {
+ 	unsigned long	cpu_features;	/* CPU_FTR_xxx bit */
+ 	unsigned long	mmu_features;	/* MMU_FTR_xxx bit */
+ 	unsigned int	cpu_user_ftrs;	/* PPC_FEATURE_xxx bit */
++	unsigned int	cpu_user_ftrs2;	/* PPC_FEATURE2_xxx bit */
+ 	unsigned char	pabyte;		/* byte number in ibm,pa-features */
+ 	unsigned char	pabit;		/* bit number (big-endian) */
+ 	unsigned char	invert;		/* if 1, pa bit set => clear feature */
+ } ibm_pa_features[] __initdata = {
+-	{0, 0, PPC_FEATURE_HAS_MMU,	0, 0, 0},
+-	{0, 0, PPC_FEATURE_HAS_FPU,	0, 1, 0},
+-	{CPU_FTR_CTRL, 0, 0,		0, 3, 0},
+-	{CPU_FTR_NOEXECUTE, 0, 0,	0, 6, 0},
+-	{CPU_FTR_NODSISRALIGN, 0, 0,	1, 1, 1},
+-	{0, MMU_FTR_CI_LARGE_PAGE, 0,	1, 2, 0},
+-	{CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
++	{0, 0, PPC_FEATURE_HAS_MMU, 0,		0, 0, 0},
++	{0, 0, PPC_FEATURE_HAS_FPU, 0,		0, 1, 0},
++	{CPU_FTR_CTRL, 0, 0, 0,			0, 3, 0},
++	{CPU_FTR_NOEXECUTE, 0, 0, 0,		0, 6, 0},
++	{CPU_FTR_NODSISRALIGN, 0, 0, 0,		1, 1, 1},
++	{0, MMU_FTR_CI_LARGE_PAGE, 0, 0,		1, 2, 0},
++	{CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 0, 5, 0, 0},
+ 	/*
+-	 * If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n),
+-	 * we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP
+-	 * which is 0 if the kernel doesn't support TM.
++	 * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
++	 * we don't want to turn on TM here, so we use the *_COMP versions
++	 * which are 0 if the kernel doesn't support TM.
+ 	 */
+-	{CPU_FTR_TM_COMP, 0, 0,		22, 0, 0},
++	{CPU_FTR_TM_COMP, 0, 0,
++	 PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
+ };
+ 
+ static void __init scan_features(unsigned long node, const unsigned char *ftrs,
+@@ -195,10 +197,12 @@ static void __init scan_features(unsigned long node, const unsigned char *ftrs,
+ 		if (bit ^ fp->invert) {
+ 			cur_cpu_spec->cpu_features |= fp->cpu_features;
+ 			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
++			cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
+ 			cur_cpu_spec->mmu_features |= fp->mmu_features;
+ 		} else {
+ 			cur_cpu_spec->cpu_features &= ~fp->cpu_features;
+ 			cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
++			cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
+ 			cur_cpu_spec->mmu_features &= ~fp->mmu_features;
+ 		}
+ 	}
+diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
+index 2b2ced9dc00a..6dafabb6ae1a 100644
+--- a/arch/s390/include/asm/pci.h
++++ b/arch/s390/include/asm/pci.h
+@@ -45,7 +45,8 @@ struct zpci_fmb {
+ 	u64 rpcit_ops;
+ 	u64 dma_rbytes;
+ 	u64 dma_wbytes;
+-} __packed __aligned(64);
++	u64 pad[2];
++} __packed __aligned(128);
+ 
+ enum zpci_state {
+ 	ZPCI_FN_STATE_RESERVED,
+diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c
+index a841e9765bd6..8381c09d2870 100644
+--- a/arch/x86/crypto/sha-mb/sha1_mb.c
++++ b/arch/x86/crypto/sha-mb/sha1_mb.c
+@@ -453,10 +453,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
+ 
+ 			req = cast_mcryptd_ctx_to_req(req_ctx);
+ 			if (irqs_disabled())
+-				rctx->complete(&req->base, ret);
++				req_ctx->complete(&req->base, ret);
+ 			else {
+ 				local_bh_disable();
+-				rctx->complete(&req->base, ret);
++				req_ctx->complete(&req->base, ret);
+ 				local_bh_enable();
+ 			}
+ 		}
+diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
+index f8a29d2c97b0..e6a8613fbfb0 100644
+--- a/arch/x86/include/asm/hugetlb.h
++++ b/arch/x86/include/asm/hugetlb.h
+@@ -4,6 +4,7 @@
+ #include <asm/page.h>
+ #include <asm-generic/hugetlb.h>
+ 
++#define hugepages_supported() cpu_has_pse
+ 
+ static inline int is_hugepage_only_range(struct mm_struct *mm,
+ 					 unsigned long addr,
+diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
+index ad59d70bcb1a..ef495511f019 100644
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -256,7 +256,8 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
+ 	struct irq_desc *desc;
+ 	int cpu, vector;
+ 
+-	BUG_ON(!data->cfg.vector);
++	if (!data->cfg.vector)
++		return;
+ 
+ 	vector = data->cfg.vector;
+ 	for_each_cpu_and(cpu, data->domain, cpu_online_mask)
+diff --git a/arch/x86/kernel/cpu/mcheck/mce-genpool.c b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
+index 0a850100c594..2658e2af74ec 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce-genpool.c
++++ b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
+@@ -29,7 +29,7 @@ static char gen_pool_buf[MCE_POOLSZ];
+ void mce_gen_pool_process(void)
+ {
+ 	struct llist_node *head;
+-	struct mce_evt_llist *node;
++	struct mce_evt_llist *node, *tmp;
+ 	struct mce *mce;
+ 
+ 	head = llist_del_all(&mce_event_llist);
+@@ -37,7 +37,7 @@ void mce_gen_pool_process(void)
+ 		return;
+ 
+ 	head = llist_reverse_order(head);
+-	llist_for_each_entry(node, head, llnode) {
++	llist_for_each_entry_safe(node, tmp, head, llnode) {
+ 		mce = &node->mce;
+ 		atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
+ 		gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node));
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index eca5bd9f0e47..ac4963c38aa3 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -697,7 +697,6 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
+ 		if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
+ 			return 1;
+ 	}
+-	kvm_put_guest_xcr0(vcpu);
+ 	vcpu->arch.xcr0 = xcr0;
+ 
+ 	if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
+@@ -6569,8 +6568,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ 	kvm_x86_ops->prepare_guest_switch(vcpu);
+ 	if (vcpu->fpu_active)
+ 		kvm_load_guest_fpu(vcpu);
+-	kvm_load_guest_xcr0(vcpu);
+-
+ 	vcpu->mode = IN_GUEST_MODE;
+ 
+ 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+@@ -6593,6 +6590,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ 		goto cancel_injection;
+ 	}
+ 
++	kvm_load_guest_xcr0(vcpu);
++
+ 	if (req_immediate_exit)
+ 		smp_send_reschedule(vcpu->cpu);
+ 
+@@ -6642,6 +6641,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ 	vcpu->mode = OUTSIDE_GUEST_MODE;
+ 	smp_wmb();
+ 
++	kvm_put_guest_xcr0(vcpu);
++
+ 	/* Interrupt is enabled by handle_external_intr() */
+ 	kvm_x86_ops->handle_external_intr(vcpu);
+ 
+@@ -7289,7 +7290,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
+ 	 * and assume host would use all available bits.
+ 	 * Guest xcr0 would be loaded later.
+ 	 */
+-	kvm_put_guest_xcr0(vcpu);
+ 	vcpu->guest_fpu_loaded = 1;
+ 	__kernel_fpu_begin();
+ 	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
+@@ -7298,8 +7298,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
+ 
+ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
+ {
+-	kvm_put_guest_xcr0(vcpu);
+-
+ 	if (!vcpu->guest_fpu_loaded) {
+ 		vcpu->fpu_counter = 0;
+ 		return;
+diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
+index 637ab34ed632..ddb2244b06a1 100644
+--- a/arch/x86/mm/kmmio.c
++++ b/arch/x86/mm/kmmio.c
+@@ -33,7 +33,7 @@
+ struct kmmio_fault_page {
+ 	struct list_head list;
+ 	struct kmmio_fault_page *release_next;
+-	unsigned long page; /* location of the fault page */
++	unsigned long addr; /* the requested address */
+ 	pteval_t old_presence; /* page presence prior to arming */
+ 	bool armed;
+ 
+@@ -70,9 +70,16 @@ unsigned int kmmio_count;
+ static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
+ static LIST_HEAD(kmmio_probes);
+ 
+-static struct list_head *kmmio_page_list(unsigned long page)
++static struct list_head *kmmio_page_list(unsigned long addr)
+ {
+-	return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
++	unsigned int l;
++	pte_t *pte = lookup_address(addr, &l);
++
++	if (!pte)
++		return NULL;
++	addr &= page_level_mask(l);
++
++	return &kmmio_page_table[hash_long(addr, KMMIO_PAGE_HASH_BITS)];
+ }
+ 
+ /* Accessed per-cpu */
+@@ -98,15 +105,19 @@ static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
+ }
+ 
+ /* You must be holding RCU read lock. */
+-static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
++static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
+ {
+ 	struct list_head *head;
+ 	struct kmmio_fault_page *f;
++	unsigned int l;
++	pte_t *pte = lookup_address(addr, &l);
+ 
+-	page &= PAGE_MASK;
+-	head = kmmio_page_list(page);
++	if (!pte)
++		return NULL;
++	addr &= page_level_mask(l);
++	head = kmmio_page_list(addr);
+ 	list_for_each_entry_rcu(f, head, list) {
+-		if (f->page == page)
++		if (f->addr == addr)
+ 			return f;
+ 	}
+ 	return NULL;
+@@ -137,10 +148,10 @@ static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
+ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
+ {
+ 	unsigned int level;
+-	pte_t *pte = lookup_address(f->page, &level);
++	pte_t *pte = lookup_address(f->addr, &level);
+ 
+ 	if (!pte) {
+-		pr_err("no pte for page 0x%08lx\n", f->page);
++		pr_err("no pte for addr 0x%08lx\n", f->addr);
+ 		return -1;
+ 	}
+ 
+@@ -156,7 +167,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
+ 		return -1;
+ 	}
+ 
+-	__flush_tlb_one(f->page);
++	__flush_tlb_one(f->addr);
+ 	return 0;
+ }
+ 
+@@ -176,12 +187,12 @@ static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
+ 	int ret;
+ 	WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
+ 	if (f->armed) {
+-		pr_warning("double-arm: page 0x%08lx, ref %d, old %d\n",
+-			   f->page, f->count, !!f->old_presence);
++		pr_warning("double-arm: addr 0x%08lx, ref %d, old %d\n",
++			   f->addr, f->count, !!f->old_presence);
+ 	}
+ 	ret = clear_page_presence(f, true);
+-	WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming 0x%08lx failed.\n"),
+-		  f->page);
++	WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming at 0x%08lx failed.\n"),
++		  f->addr);
+ 	f->armed = true;
+ 	return ret;
+ }
+@@ -191,7 +202,7 @@ static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
+ {
+ 	int ret = clear_page_presence(f, false);
+ 	WARN_ONCE(ret < 0,
+-			KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
++			KERN_ERR "kmmio disarming at 0x%08lx failed.\n", f->addr);
+ 	f->armed = false;
+ }
+ 
+@@ -215,6 +226,12 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
+ 	struct kmmio_context *ctx;
+ 	struct kmmio_fault_page *faultpage;
+ 	int ret = 0; /* default to fault not handled */
++	unsigned long page_base = addr;
++	unsigned int l;
++	pte_t *pte = lookup_address(addr, &l);
++	if (!pte)
++		return -EINVAL;
++	page_base &= page_level_mask(l);
+ 
+ 	/*
+ 	 * Preemption is now disabled to prevent process switch during
+@@ -227,7 +244,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
+ 	preempt_disable();
+ 	rcu_read_lock();
+ 
+-	faultpage = get_kmmio_fault_page(addr);
++	faultpage = get_kmmio_fault_page(page_base);
+ 	if (!faultpage) {
+ 		/*
+ 		 * Either this page fault is not caused by kmmio, or
+@@ -239,7 +256,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
+ 
+ 	ctx = &get_cpu_var(kmmio_ctx);
+ 	if (ctx->active) {
+-		if (addr == ctx->addr) {
++		if (page_base == ctx->addr) {
+ 			/*
+ 			 * A second fault on the same page means some other
+ 			 * condition needs handling by do_page_fault(), the
+@@ -267,9 +284,9 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
+ 	ctx->active++;
+ 
+ 	ctx->fpage = faultpage;
+-	ctx->probe = get_kmmio_probe(addr);
++	ctx->probe = get_kmmio_probe(page_base);
+ 	ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
+-	ctx->addr = addr;
++	ctx->addr = page_base;
+ 
+ 	if (ctx->probe && ctx->probe->pre_handler)
+ 		ctx->probe->pre_handler(ctx->probe, regs, addr);
+@@ -354,12 +371,11 @@ out:
+ }
+ 
+ /* You must be holding kmmio_lock. */
+-static int add_kmmio_fault_page(unsigned long page)
++static int add_kmmio_fault_page(unsigned long addr)
+ {
+ 	struct kmmio_fault_page *f;
+ 
+-	page &= PAGE_MASK;
+-	f = get_kmmio_fault_page(page);
++	f = get_kmmio_fault_page(addr);
+ 	if (f) {
+ 		if (!f->count)
+ 			arm_kmmio_fault_page(f);
+@@ -372,26 +388,25 @@ static int add_kmmio_fault_page(unsigned long page)
+ 		return -1;
+ 
+ 	f->count = 1;
+-	f->page = page;
++	f->addr = addr;
+ 
+ 	if (arm_kmmio_fault_page(f)) {
+ 		kfree(f);
+ 		return -1;
+ 	}
+ 
+-	list_add_rcu(&f->list, kmmio_page_list(f->page));
++	list_add_rcu(&f->list, kmmio_page_list(f->addr));
+ 
+ 	return 0;
+ }
+ 
+ /* You must be holding kmmio_lock. */
+-static void release_kmmio_fault_page(unsigned long page,
++static void release_kmmio_fault_page(unsigned long addr,
+ 				struct kmmio_fault_page **release_list)
+ {
+ 	struct kmmio_fault_page *f;
+ 
+-	page &= PAGE_MASK;
+-	f = get_kmmio_fault_page(page);
++	f = get_kmmio_fault_page(addr);
+ 	if (!f)
+ 		return;
+ 
+@@ -420,18 +435,27 @@ int register_kmmio_probe(struct kmmio_probe *p)
+ 	int ret = 0;
+ 	unsigned long size = 0;
+ 	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
++	unsigned int l;
++	pte_t *pte;
+ 
+ 	spin_lock_irqsave(&kmmio_lock, flags);
+ 	if (get_kmmio_probe(p->addr)) {
+ 		ret = -EEXIST;
+ 		goto out;
+ 	}
++
++	pte = lookup_address(p->addr, &l);
++	if (!pte) {
++		ret = -EINVAL;
++		goto out;
++	}
++
+ 	kmmio_count++;
+ 	list_add_rcu(&p->list, &kmmio_probes);
+ 	while (size < size_lim) {
+ 		if (add_kmmio_fault_page(p->addr + size))
+ 			pr_err("Unable to set page fault.\n");
+-		size += PAGE_SIZE;
++		size += page_level_size(l);
+ 	}
+ out:
+ 	spin_unlock_irqrestore(&kmmio_lock, flags);
+@@ -506,11 +530,17 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
+ 	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
+ 	struct kmmio_fault_page *release_list = NULL;
+ 	struct kmmio_delayed_release *drelease;
++	unsigned int l;
++	pte_t *pte;
++
++	pte = lookup_address(p->addr, &l);
++	if (!pte)
++		return;
+ 
+ 	spin_lock_irqsave(&kmmio_lock, flags);
+ 	while (size < size_lim) {
+ 		release_kmmio_fault_page(p->addr + size, &release_list);
+-		size += PAGE_SIZE;
++		size += page_level_size(l);
+ 	}
+ 	list_del_rcu(&p->list);
+ 	kmmio_count--;
+diff --git a/block/partition-generic.c b/block/partition-generic.c
+index fefd01b496a0..cfcfe1b0ecbc 100644
+--- a/block/partition-generic.c
++++ b/block/partition-generic.c
+@@ -350,15 +350,20 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
+ 			goto out_del;
+ 	}
+ 
++	err = hd_ref_init(p);
++	if (err) {
++		if (flags & ADDPART_FLAG_WHOLEDISK)
++			goto out_remove_file;
++		goto out_del;
++	}
++
+ 	/* everything is up and running, commence */
+ 	rcu_assign_pointer(ptbl->part[partno], p);
+ 
+ 	/* suppress uevent if the disk suppresses it */
+ 	if (!dev_get_uevent_suppress(ddev))
+ 		kobject_uevent(&pdev->kobj, KOBJ_ADD);
+-
+-	if (!hd_ref_init(p))
+-		return p;
++	return p;
+ 
+ out_free_info:
+ 	free_part_info(p);
+@@ -367,6 +372,8 @@ out_free_stats:
+ out_free:
+ 	kfree(p);
+ 	return ERR_PTR(err);
++out_remove_file:
++	device_remove_file(pdev, &dev_attr_whole_disk);
+ out_del:
+ 	kobject_put(p->holder_dir);
+ 	device_del(pdev);
+diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
+index 50f5c97e1087..0cbc5a5025c2 100644
+--- a/crypto/rsa-pkcs1pad.c
++++ b/crypto/rsa-pkcs1pad.c
+@@ -310,16 +310,16 @@ static int pkcs1pad_decrypt(struct akcipher_request *req)
+ 	req_ctx->child_req.src = req->src;
+ 	req_ctx->child_req.src_len = req->src_len;
+ 	req_ctx->child_req.dst = req_ctx->out_sg;
+-	req_ctx->child_req.dst_len = ctx->key_size - 1;
++	req_ctx->child_req.dst_len = ctx->key_size ;
+ 
+-	req_ctx->out_buf = kmalloc(ctx->key_size - 1,
++	req_ctx->out_buf = kmalloc(ctx->key_size,
+ 			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ 			GFP_KERNEL : GFP_ATOMIC);
+ 	if (!req_ctx->out_buf)
+ 		return -ENOMEM;
+ 
+ 	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
+-			ctx->key_size - 1, NULL);
++			    ctx->key_size, NULL);
+ 
+ 	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
+ 	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
+@@ -491,16 +491,16 @@ static int pkcs1pad_verify(struct akcipher_request *req)
+ 	req_ctx->child_req.src = req->src;
+ 	req_ctx->child_req.src_len = req->src_len;
+ 	req_ctx->child_req.dst = req_ctx->out_sg;
+-	req_ctx->child_req.dst_len = ctx->key_size - 1;
++	req_ctx->child_req.dst_len = ctx->key_size;
+ 
+-	req_ctx->out_buf = kmalloc(ctx->key_size - 1,
++	req_ctx->out_buf = kmalloc(ctx->key_size,
+ 			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ 			GFP_KERNEL : GFP_ATOMIC);
+ 	if (!req_ctx->out_buf)
+ 		return -ENOMEM;
+ 
+ 	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
+-			ctx->key_size - 1, NULL);
++			    ctx->key_size, NULL);
+ 
+ 	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
+ 	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
+diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
+index bd75d46234a4..ddb436f86415 100644
+--- a/drivers/acpi/acpica/nsinit.c
++++ b/drivers/acpi/acpica/nsinit.c
+@@ -83,6 +83,8 @@ acpi_status acpi_ns_initialize_objects(void)
+ 
+ 	ACPI_FUNCTION_TRACE(ns_initialize_objects);
+ 
++	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
++			  "[Init] Completing Initialization of ACPI Objects\n"));
+ 	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ 			  "**** Starting initialization of namespace objects ****\n"));
+ 	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
+diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
+index 278666e39563..c37d47982fbe 100644
+--- a/drivers/acpi/acpica/tbxfload.c
++++ b/drivers/acpi/acpica/tbxfload.c
+@@ -83,6 +83,20 @@ acpi_status __init acpi_load_tables(void)
+ 				"While loading namespace from ACPI tables"));
+ 	}
+ 
++	if (!acpi_gbl_group_module_level_code) {
++		/*
++		 * Initialize the objects that remain uninitialized. This
++		 * runs the executable AML that may be part of the
++		 * declaration of these objects:
++		 * operation_regions, buffer_fields, Buffers, and Packages.
++		 */
++		status = acpi_ns_initialize_objects();
++		if (ACPI_FAILURE(status)) {
++			return_ACPI_STATUS(status);
++		}
++	}
++
++	acpi_gbl_reg_methods_enabled = TRUE;
+ 	return_ACPI_STATUS(status);
+ }
+ 
+diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
+index 721b87cce908..638fbd4ad72b 100644
+--- a/drivers/acpi/acpica/utxfinit.c
++++ b/drivers/acpi/acpica/utxfinit.c
+@@ -267,7 +267,6 @@ acpi_status __init acpi_initialize_objects(u32 flags)
+ 	 * initialized, even if they contain executable AML (see the call to
+ 	 * acpi_ns_initialize_objects below).
+ 	 */
+-	acpi_gbl_reg_methods_enabled = TRUE;
+ 	if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
+ 		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ 				  "[Init] Executing _REG OpRegion methods\n"));
+@@ -299,20 +298,18 @@ acpi_status __init acpi_initialize_objects(u32 flags)
+ 	 */
+ 	if (acpi_gbl_group_module_level_code) {
+ 		acpi_ns_exec_module_code_list();
+-	}
+ 
+-	/*
+-	 * Initialize the objects that remain uninitialized. This runs the
+-	 * executable AML that may be part of the declaration of these objects:
+-	 * operation_regions, buffer_fields, Buffers, and Packages.
+-	 */
+-	if (!(flags & ACPI_NO_OBJECT_INIT)) {
+-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+-				  "[Init] Completing Initialization of ACPI Objects\n"));
+-
+-		status = acpi_ns_initialize_objects();
+-		if (ACPI_FAILURE(status)) {
+-			return_ACPI_STATUS(status);
++		/*
++		 * Initialize the objects that remain uninitialized. This
++		 * runs the executable AML that may be part of the
++		 * declaration of these objects:
++		 * operation_regions, buffer_fields, Buffers, and Packages.
++		 */
++		if (!(flags & ACPI_NO_OBJECT_INIT)) {
++			status = acpi_ns_initialize_objects();
++			if (ACPI_FAILURE(status)) {
++				return_ACPI_STATUS(status);
++			}
+ 		}
+ 	}
+ 
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 301b785f9f56..0caf92ae25f3 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -1378,7 +1378,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+ 	mutex_lock(&subdomain->lock);
+ 	mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+ 
+-	if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
++	if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
+ 		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
+ 			subdomain->name);
+ 		ret = -EBUSY;
+diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
+index cf351d3dab1c..0708f301ad97 100644
+--- a/drivers/base/power/opp/core.c
++++ b/drivers/base/power/opp/core.c
+@@ -844,8 +844,14 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
+ 	}
+ 
+ 	opp->u_volt = microvolt[0];
+-	opp->u_volt_min = microvolt[1];
+-	opp->u_volt_max = microvolt[2];
++
++	if (count == 1) {
++		opp->u_volt_min = opp->u_volt;
++		opp->u_volt_max = opp->u_volt;
++	} else {
++		opp->u_volt_min = microvolt[1];
++		opp->u_volt_max = microvolt[2];
++	}
+ 
+ 	/* Search for "opp-microamp-<name>" */
+ 	prop = NULL;
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 423f4ca7d712..80cf8add46ff 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -488,6 +488,12 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
+ 	bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
+ 	iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
+ 		      bio_segments(bio), blk_rq_bytes(cmd->rq));
++	/*
++	 * This bio may be started from the middle of the 'bvec'
++	 * because of bio splitting, so offset from the bvec must
++	 * be passed to iov iterator
++	 */
++	iter.iov_offset = bio->bi_iter.bi_bvec_done;
+ 
+ 	cmd->iocb.ki_pos = pos;
+ 	cmd->iocb.ki_filp = file;
+diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
+index 562b5a4ca7b7..78a39f736c64 100644
+--- a/drivers/block/paride/pd.c
++++ b/drivers/block/paride/pd.c
+@@ -126,7 +126,7 @@
+ */
+ #include <linux/types.h>
+ 
+-static bool verbose = 0;
++static int verbose = 0;
+ static int major = PD_MAJOR;
+ static char *name = PD_NAME;
+ static int cluster = 64;
+@@ -161,7 +161,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};
+ static DEFINE_MUTEX(pd_mutex);
+ static DEFINE_SPINLOCK(pd_lock);
+ 
+-module_param(verbose, bool, 0);
++module_param(verbose, int, 0);
+ module_param(major, int, 0);
+ module_param(name, charp, 0);
+ module_param(cluster, int, 0);
+diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
+index 1740d75e8a32..216a94fed5b4 100644
+--- a/drivers/block/paride/pt.c
++++ b/drivers/block/paride/pt.c
+@@ -117,7 +117,7 @@
+ 
+ */
+ 
+-static bool verbose = 0;
++static int verbose = 0;
+ static int major = PT_MAJOR;
+ static char *name = PT_NAME;
+ static int disable = 0;
+@@ -152,7 +152,7 @@ static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
+ 
+ #include <asm/uaccess.h>
+ 
+-module_param(verbose, bool, 0);
++module_param(verbose, int, 0);
+ module_param(major, int, 0);
+ module_param(name, charp, 0);
+ module_param_array(drive0, int, NULL, 0);
+diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
+index e98d15eaa799..1827fc4d15c1 100644
+--- a/drivers/bus/imx-weim.c
++++ b/drivers/bus/imx-weim.c
+@@ -150,7 +150,7 @@ static int __init weim_parse_dt(struct platform_device *pdev,
+ 			return ret;
+ 	}
+ 
+-	for_each_child_of_node(pdev->dev.of_node, child) {
++	for_each_available_child_of_node(pdev->dev.of_node, child) {
+ 		if (!child->name)
+ 			continue;
+ 
+diff --git a/drivers/bus/uniphier-system-bus.c b/drivers/bus/uniphier-system-bus.c
+index 834a2aeaf27a..350b7309c26d 100644
+--- a/drivers/bus/uniphier-system-bus.c
++++ b/drivers/bus/uniphier-system-bus.c
+@@ -108,7 +108,7 @@ static int uniphier_system_bus_check_overlap(
+ 
+ 	for (i = 0; i < ARRAY_SIZE(priv->bank); i++) {
+ 		for (j = i + 1; j < ARRAY_SIZE(priv->bank); j++) {
+-			if (priv->bank[i].end > priv->bank[j].base ||
++			if (priv->bank[i].end > priv->bank[j].base &&
+ 			    priv->bank[i].base < priv->bank[j].end) {
+ 				dev_err(priv->dev,
+ 					"region overlap between bank%d and bank%d\n",
+diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
+index 45a634016f95..b28e4da3d2cf 100644
+--- a/drivers/char/tpm/tpm2-cmd.c
++++ b/drivers/char/tpm/tpm2-cmd.c
+@@ -20,7 +20,11 @@
+ #include <keys/trusted-type.h>
+ 
+ enum tpm2_object_attributes {
+-	TPM2_ATTR_USER_WITH_AUTH	= BIT(6),
++	TPM2_OA_USER_WITH_AUTH		= BIT(6),
++};
++
++enum tpm2_session_attributes {
++	TPM2_SA_CONTINUE_SESSION	= BIT(0),
+ };
+ 
+ struct tpm2_startup_in {
+@@ -478,22 +482,18 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
+ 	tpm_buf_append_u8(&buf, payload->migratable);
+ 
+ 	/* public */
+-	if (options->policydigest)
+-		tpm_buf_append_u16(&buf, 14 + options->digest_len);
+-	else
+-		tpm_buf_append_u16(&buf, 14);
+-
++	tpm_buf_append_u16(&buf, 14 + options->policydigest_len);
+ 	tpm_buf_append_u16(&buf, TPM2_ALG_KEYEDHASH);
+ 	tpm_buf_append_u16(&buf, hash);
+ 
+ 	/* policy */
+-	if (options->policydigest) {
++	if (options->policydigest_len) {
+ 		tpm_buf_append_u32(&buf, 0);
+-		tpm_buf_append_u16(&buf, options->digest_len);
++		tpm_buf_append_u16(&buf, options->policydigest_len);
+ 		tpm_buf_append(&buf, options->policydigest,
+-			       options->digest_len);
++			       options->policydigest_len);
+ 	} else {
+-		tpm_buf_append_u32(&buf, TPM2_ATTR_USER_WITH_AUTH);
++		tpm_buf_append_u32(&buf, TPM2_OA_USER_WITH_AUTH);
+ 		tpm_buf_append_u16(&buf, 0);
+ 	}
+ 
+@@ -631,7 +631,7 @@ static int tpm2_unseal(struct tpm_chip *chip,
+ 			     options->policyhandle ?
+ 			     options->policyhandle : TPM2_RS_PW,
+ 			     NULL /* nonce */, 0,
+-			     0 /* session_attributes */,
++			     TPM2_SA_CONTINUE_SESSION,
+ 			     options->blobauth /* hmac */,
+ 			     TPM_DIGEST_SIZE);
+ 
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index cd83d477e32d..e89512383c3c 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -673,6 +673,11 @@ static int core_get_max_pstate(void)
+ 			if (err)
+ 				goto skip_tar;
+ 
++			/* For level 1 and 2, bits[23:16] contain the ratio */
++			if (tdp_ctrl)
++				tdp_ratio >>= 16;
++
++			tdp_ratio &= 0xff; /* ratios are only 8 bits long */
+ 			if (tdp_ratio - 1 == tar) {
+ 				max_pstate = tar;
+ 				pr_debug("max_pstate=TAC %x\n", max_pstate);
+diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+index 3d9acc53d247..60fc0fa26fd3 100644
+--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
++++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+@@ -225,6 +225,9 @@ static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
+ 	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+ 	struct ccp_aes_cmac_exp_ctx state;
+ 
++	/* Don't let anything leak to 'out' */
++	memset(&state, 0, sizeof(state));
++
+ 	state.null_msg = rctx->null_msg;
+ 	memcpy(state.iv, rctx->iv, sizeof(state.iv));
+ 	state.buf_count = rctx->buf_count;
+diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
+index 8ef06fad8b14..ab9945f2cb7a 100644
+--- a/drivers/crypto/ccp/ccp-crypto-sha.c
++++ b/drivers/crypto/ccp/ccp-crypto-sha.c
+@@ -212,6 +212,9 @@ static int ccp_sha_export(struct ahash_request *req, void *out)
+ 	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ 	struct ccp_sha_exp_ctx state;
+ 
++	/* Don't let anything leak to 'out' */
++	memset(&state, 0, sizeof(state));
++
+ 	state.type = rctx->type;
+ 	state.msg_bits = rctx->msg_bits;
+ 	state.first = rctx->first;
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index a0d4a08313ae..aae05547b924 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -63,6 +63,14 @@ static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
+ 		ptr->eptr = upper_32_bits(dma_addr);
+ }
+ 
++static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
++			     struct talitos_ptr *src_ptr, bool is_sec1)
++{
++	dst_ptr->ptr = src_ptr->ptr;
++	if (!is_sec1)
++		dst_ptr->eptr = src_ptr->eptr;
++}
++
+ static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
+ 			       bool is_sec1)
+ {
+@@ -1083,21 +1091,20 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ 	sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
+ 			      (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
+ 							   : DMA_TO_DEVICE);
+-
+ 	/* hmac data */
+ 	desc->ptr[1].len = cpu_to_be16(areq->assoclen);
+ 	if (sg_count > 1 &&
+ 	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
+ 					 areq->assoclen,
+ 					 &edesc->link_tbl[tbl_off])) > 1) {
+-		tbl_off += ret;
+-
+ 		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
+ 			       sizeof(struct talitos_ptr), 0);
+ 		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
+ 
+ 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+ 					   edesc->dma_len, DMA_BIDIRECTIONAL);
++
++		tbl_off += ret;
+ 	} else {
+ 		to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
+ 		desc->ptr[1].j_extent = 0;
+@@ -1126,11 +1133,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ 	if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
+ 		sg_link_tbl_len += authsize;
+ 
+-	if (sg_count > 1 &&
+-	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
+-					 sg_link_tbl_len,
+-					 &edesc->link_tbl[tbl_off])) > 1) {
+-		tbl_off += ret;
++	if (sg_count == 1) {
++		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
++			       areq->assoclen, 0);
++	} else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
++						areq->assoclen, sg_link_tbl_len,
++						&edesc->link_tbl[tbl_off])) >
++		   1) {
+ 		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
+ 		to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
+ 					      tbl_off *
+@@ -1138,8 +1147,10 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+ 					   edesc->dma_len,
+ 					   DMA_BIDIRECTIONAL);
+-	} else
+-		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
++		tbl_off += ret;
++	} else {
++		copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
++	}
+ 
+ 	/* cipher out */
+ 	desc->ptr[5].len = cpu_to_be16(cryptlen);
+@@ -1151,11 +1162,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ 
+ 	edesc->icv_ool = false;
+ 
+-	if (sg_count > 1 &&
+-	    (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
++	if (sg_count == 1) {
++		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
++			       areq->assoclen, 0);
++	} else if ((sg_count =
++			sg_to_link_tbl_offset(areq->dst, sg_count,
+ 					      areq->assoclen, cryptlen,
+-					      &edesc->link_tbl[tbl_off])) >
+-	    1) {
++					      &edesc->link_tbl[tbl_off])) > 1) {
+ 		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
+ 
+ 		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
+@@ -1178,8 +1191,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ 					   edesc->dma_len, DMA_BIDIRECTIONAL);
+ 
+ 		edesc->icv_ool = true;
+-	} else
+-		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
++	} else {
++		copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
++	}
+ 
+ 	/* iv out */
+ 	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
+@@ -2629,21 +2643,11 @@ struct talitos_crypto_alg {
+ 	struct talitos_alg_template algt;
+ };
+ 
+-static int talitos_cra_init(struct crypto_tfm *tfm)
++static int talitos_init_common(struct talitos_ctx *ctx,
++			       struct talitos_crypto_alg *talitos_alg)
+ {
+-	struct crypto_alg *alg = tfm->__crt_alg;
+-	struct talitos_crypto_alg *talitos_alg;
+-	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+ 	struct talitos_private *priv;
+ 
+-	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
+-		talitos_alg = container_of(__crypto_ahash_alg(alg),
+-					   struct talitos_crypto_alg,
+-					   algt.alg.hash);
+-	else
+-		talitos_alg = container_of(alg, struct talitos_crypto_alg,
+-					   algt.alg.crypto);
+-
+ 	/* update context with ptr to dev */
+ 	ctx->dev = talitos_alg->dev;
+ 
+@@ -2661,10 +2665,33 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
+ 	return 0;
+ }
+ 
++static int talitos_cra_init(struct crypto_tfm *tfm)
++{
++	struct crypto_alg *alg = tfm->__crt_alg;
++	struct talitos_crypto_alg *talitos_alg;
++	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
++
++	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
++		talitos_alg = container_of(__crypto_ahash_alg(alg),
++					   struct talitos_crypto_alg,
++					   algt.alg.hash);
++	else
++		talitos_alg = container_of(alg, struct talitos_crypto_alg,
++					   algt.alg.crypto);
++
++	return talitos_init_common(ctx, talitos_alg);
++}
++
+ static int talitos_cra_init_aead(struct crypto_aead *tfm)
+ {
+-	talitos_cra_init(crypto_aead_tfm(tfm));
+-	return 0;
++	struct aead_alg *alg = crypto_aead_alg(tfm);
++	struct talitos_crypto_alg *talitos_alg;
++	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
++
++	talitos_alg = container_of(alg, struct talitos_crypto_alg,
++				   algt.alg.aead);
++
++	return talitos_init_common(ctx, talitos_alg);
+ }
+ 
+ static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
+diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
+index 5ad0ec1f0e29..97199b3c25a2 100644
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -130,26 +130,14 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
+ static void dwc_initialize(struct dw_dma_chan *dwc)
+ {
+ 	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+-	struct dw_dma_slave *dws = dwc->chan.private;
+ 	u32 cfghi = DWC_CFGH_FIFO_MODE;
+ 	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
+ 
+ 	if (dwc->initialized == true)
+ 		return;
+ 
+-	if (dws) {
+-		/*
+-		 * We need controller-specific data to set up slave
+-		 * transfers.
+-		 */
+-		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
+-
+-		cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
+-		cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
+-	} else {
+-		cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
+-		cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
+-	}
++	cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
++	cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
+ 
+ 	channel_writel(dwc, CFG_LO, cfglo);
+ 	channel_writel(dwc, CFG_HI, cfghi);
+@@ -941,7 +929,7 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
+ 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ 	struct dw_dma_slave *dws = param;
+ 
+-	if (!dws || dws->dma_dev != chan->device->dev)
++	if (dws->dma_dev != chan->device->dev)
+ 		return false;
+ 
+ 	/* We have to copy data since dws can be temporary storage */
+@@ -1165,6 +1153,14 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
+ 	 * doesn't mean what you think it means), and status writeback.
+ 	 */
+ 
++	/*
++	 * We need controller-specific data to set up slave transfers.
++	 */
++	if (chan->private && !dw_dma_filter(chan, chan->private)) {
++		dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
++		return -EINVAL;
++	}
++
+ 	/* Enable controller here if needed */
+ 	if (!dw->in_use)
+ 		dw_dma_on(dw);
+@@ -1226,6 +1222,14 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
+ 	spin_lock_irqsave(&dwc->lock, flags);
+ 	list_splice_init(&dwc->free_list, &list);
+ 	dwc->descs_allocated = 0;
++
++	/* Clear custom channel configuration */
++	dwc->src_id = 0;
++	dwc->dst_id = 0;
++
++	dwc->src_master = 0;
++	dwc->dst_master = 0;
++
+ 	dwc->initialized = false;
+ 
+ 	/* Disable interrupts */
+diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
+index e3d7fcb69b4c..2dac314a2d7a 100644
+--- a/drivers/dma/edma.c
++++ b/drivers/dma/edma.c
+@@ -1563,32 +1563,6 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
+ 	return IRQ_HANDLED;
+ }
+ 
+-static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable)
+-{
+-	struct platform_device *tc_pdev;
+-	int ret;
+-
+-	if (!IS_ENABLED(CONFIG_OF) || !tc)
+-		return;
+-
+-	tc_pdev = of_find_device_by_node(tc->node);
+-	if (!tc_pdev) {
+-		pr_err("%s: TPTC device is not found\n", __func__);
+-		return;
+-	}
+-	if (!pm_runtime_enabled(&tc_pdev->dev))
+-		pm_runtime_enable(&tc_pdev->dev);
+-
+-	if (enable)
+-		ret = pm_runtime_get_sync(&tc_pdev->dev);
+-	else
+-		ret = pm_runtime_put_sync(&tc_pdev->dev);
+-
+-	if (ret < 0)
+-		pr_err("%s: pm_runtime_%s_sync() failed for %s\n", __func__,
+-		       enable ? "get" : "put", dev_name(&tc_pdev->dev));
+-}
+-
+ /* Alloc channel resources */
+ static int edma_alloc_chan_resources(struct dma_chan *chan)
+ {
+@@ -1625,8 +1599,6 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
+ 		EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
+ 		echan->hw_triggered ? "HW" : "SW");
+ 
+-	edma_tc_set_pm_state(echan->tc, true);
+-
+ 	return 0;
+ 
+ err_slot:
+@@ -1663,7 +1635,6 @@ static void edma_free_chan_resources(struct dma_chan *chan)
+ 		echan->alloced = false;
+ 	}
+ 
+-	edma_tc_set_pm_state(echan->tc, false);
+ 	echan->tc = NULL;
+ 	echan->hw_triggered = false;
+ 
+@@ -2408,10 +2379,8 @@ static int edma_pm_suspend(struct device *dev)
+ 	int i;
+ 
+ 	for (i = 0; i < ecc->num_channels; i++) {
+-		if (echan[i].alloced) {
++		if (echan[i].alloced)
+ 			edma_setup_interrupt(&echan[i], false);
+-			edma_tc_set_pm_state(echan[i].tc, false);
+-		}
+ 	}
+ 
+ 	return 0;
+@@ -2441,8 +2410,6 @@ static int edma_pm_resume(struct device *dev)
+ 
+ 			/* Set up channel -> slot mapping for the entry slot */
+ 			edma_set_chmap(&echan[i], echan[i].slot[0]);
+-
+-			edma_tc_set_pm_state(echan[i].tc, true);
+ 		}
+ 	}
+ 
+@@ -2466,7 +2433,8 @@ static struct platform_driver edma_driver = {
+ 
+ static int edma_tptc_probe(struct platform_device *pdev)
+ {
+-	return 0;
++	pm_runtime_enable(&pdev->dev);
++	return pm_runtime_get_sync(&pdev->dev);
+ }
+ 
+ static struct platform_driver edma_tptc_driver = {
+diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
+index eef145edb936..025d375fc3d7 100644
+--- a/drivers/dma/hsu/hsu.c
++++ b/drivers/dma/hsu/hsu.c
+@@ -135,7 +135,7 @@ static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
+ 	sr = hsu_chan_readl(hsuc, HSU_CH_SR);
+ 	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
+ 
+-	return sr;
++	return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
+ }
+ 
+ irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
+@@ -254,10 +254,13 @@ static void hsu_dma_issue_pending(struct dma_chan *chan)
+ static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
+ {
+ 	struct hsu_dma_desc *desc = hsuc->desc;
+-	size_t bytes = desc->length;
++	size_t bytes = 0;
+ 	int i;
+ 
+-	i = desc->active % HSU_DMA_CHAN_NR_DESC;
++	for (i = desc->active; i < desc->nents; i++)
++		bytes += desc->sg[i].len;
++
++	i = HSU_DMA_CHAN_NR_DESC - 1;
+ 	do {
+ 		bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
+ 	} while (--i >= 0);
+diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
+index 578a8ee8cd05..6b070c22b1df 100644
+--- a/drivers/dma/hsu/hsu.h
++++ b/drivers/dma/hsu/hsu.h
+@@ -41,6 +41,9 @@
+ #define HSU_CH_SR_DESCTO(x)	BIT(8 + (x))
+ #define HSU_CH_SR_DESCTO_ANY	(BIT(11) | BIT(10) | BIT(9) | BIT(8))
+ #define HSU_CH_SR_CHE		BIT(15)
++#define HSU_CH_SR_DESCE(x)	BIT(16 + (x))
++#define HSU_CH_SR_DESCE_ANY	(BIT(19) | BIT(18) | BIT(17) | BIT(16))
++#define HSU_CH_SR_CDESC_ANY	(BIT(31) | BIT(30))
+ 
+ /* Bits in HSU_CH_CR */
+ #define HSU_CH_CR_CHA		BIT(0)
+diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
+index 9794b073d7d7..a5ed9407c51b 100644
+--- a/drivers/dma/omap-dma.c
++++ b/drivers/dma/omap-dma.c
+@@ -48,6 +48,7 @@ struct omap_chan {
+ 	unsigned dma_sig;
+ 	bool cyclic;
+ 	bool paused;
++	bool running;
+ 
+ 	int dma_ch;
+ 	struct omap_desc *desc;
+@@ -294,6 +295,8 @@ static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
+ 
+ 	/* Enable channel */
+ 	omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
++
++	c->running = true;
+ }
+ 
+ static void omap_dma_stop(struct omap_chan *c)
+@@ -355,6 +358,8 @@ static void omap_dma_stop(struct omap_chan *c)
+ 
+ 		omap_dma_chan_write(c, CLNK_CTRL, val);
+ 	}
++
++	c->running = false;
+ }
+ 
+ static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
+@@ -673,15 +678,20 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
+ 	struct omap_chan *c = to_omap_dma_chan(chan);
+ 	struct virt_dma_desc *vd;
+ 	enum dma_status ret;
+-	uint32_t ccr;
+ 	unsigned long flags;
+ 
+-	ccr = omap_dma_chan_read(c, CCR);
+-	/* The channel is no longer active, handle the completion right away */
+-	if (!(ccr & CCR_ENABLE))
+-		omap_dma_callback(c->dma_ch, 0, c);
+-
+ 	ret = dma_cookie_status(chan, cookie, txstate);
++
++	if (!c->paused && c->running) {
++		uint32_t ccr = omap_dma_chan_read(c, CCR);
++		/*
++		 * The channel is no longer active, set the return value
++		 * accordingly
++		 */
++		if (!(ccr & CCR_ENABLE))
++			ret = DMA_COMPLETE;
++	}
++
+ 	if (ret == DMA_COMPLETE || !txstate)
+ 		return ret;
+ 
+diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
+index debca824bed6..77c1c44009d8 100644
+--- a/drivers/dma/pxa_dma.c
++++ b/drivers/dma/pxa_dma.c
+@@ -122,6 +122,7 @@ struct pxad_chan {
+ struct pxad_device {
+ 	struct dma_device		slave;
+ 	int				nr_chans;
++	int				nr_requestors;
+ 	void __iomem			*base;
+ 	struct pxad_phy			*phys;
+ 	spinlock_t			phy_lock;	/* Phy association */
+@@ -473,7 +474,7 @@ static void pxad_free_phy(struct pxad_chan *chan)
+ 		return;
+ 
+ 	/* clear the channel mapping in DRCMR */
+-	if (chan->drcmr <= DRCMR_CHLNUM) {
++	if (chan->drcmr <= pdev->nr_requestors) {
+ 		reg = pxad_drcmr(chan->drcmr);
+ 		writel_relaxed(0, chan->phy->base + reg);
+ 	}
+@@ -509,6 +510,7 @@ static bool is_running_chan_misaligned(struct pxad_chan *chan)
+ 
+ static void phy_enable(struct pxad_phy *phy, bool misaligned)
+ {
++	struct pxad_device *pdev;
+ 	u32 reg, dalgn;
+ 
+ 	if (!phy->vchan)
+@@ -518,7 +520,8 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned)
+ 		"%s(); phy=%p(%d) misaligned=%d\n", __func__,
+ 		phy, phy->idx, misaligned);
+ 
+-	if (phy->vchan->drcmr <= DRCMR_CHLNUM) {
++	pdev = to_pxad_dev(phy->vchan->vc.chan.device);
++	if (phy->vchan->drcmr <= pdev->nr_requestors) {
+ 		reg = pxad_drcmr(phy->vchan->drcmr);
+ 		writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+ 	}
+@@ -914,6 +917,7 @@ static void pxad_get_config(struct pxad_chan *chan,
+ {
+ 	u32 maxburst = 0, dev_addr = 0;
+ 	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
++	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
+ 
+ 	*dcmd = 0;
+ 	if (dir == DMA_DEV_TO_MEM) {
+@@ -922,7 +926,7 @@ static void pxad_get_config(struct pxad_chan *chan,
+ 		dev_addr = chan->cfg.src_addr;
+ 		*dev_src = dev_addr;
+ 		*dcmd |= PXA_DCMD_INCTRGADDR;
+-		if (chan->drcmr <= DRCMR_CHLNUM)
++		if (chan->drcmr <= pdev->nr_requestors)
+ 			*dcmd |= PXA_DCMD_FLOWSRC;
+ 	}
+ 	if (dir == DMA_MEM_TO_DEV) {
+@@ -931,7 +935,7 @@ static void pxad_get_config(struct pxad_chan *chan,
+ 		dev_addr = chan->cfg.dst_addr;
+ 		*dev_dst = dev_addr;
+ 		*dcmd |= PXA_DCMD_INCSRCADDR;
+-		if (chan->drcmr <= DRCMR_CHLNUM)
++		if (chan->drcmr <= pdev->nr_requestors)
+ 			*dcmd |= PXA_DCMD_FLOWTRG;
+ 	}
+ 	if (dir == DMA_MEM_TO_MEM)
+@@ -1341,13 +1345,15 @@ static struct dma_chan *pxad_dma_xlate(struct of_phandle_args *dma_spec,
+ 
+ static int pxad_init_dmadev(struct platform_device *op,
+ 			    struct pxad_device *pdev,
+-			    unsigned int nr_phy_chans)
++			    unsigned int nr_phy_chans,
++			    unsigned int nr_requestors)
+ {
+ 	int ret;
+ 	unsigned int i;
+ 	struct pxad_chan *c;
+ 
+ 	pdev->nr_chans = nr_phy_chans;
++	pdev->nr_requestors = nr_requestors;
+ 	INIT_LIST_HEAD(&pdev->slave.channels);
+ 	pdev->slave.device_alloc_chan_resources = pxad_alloc_chan_resources;
+ 	pdev->slave.device_free_chan_resources = pxad_free_chan_resources;
+@@ -1382,7 +1388,7 @@ static int pxad_probe(struct platform_device *op)
+ 	const struct of_device_id *of_id;
+ 	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
+ 	struct resource *iores;
+-	int ret, dma_channels = 0;
++	int ret, dma_channels = 0, nb_requestors = 0;
+ 	const enum dma_slave_buswidth widths =
+ 		DMA_SLAVE_BUSWIDTH_1_BYTE   | DMA_SLAVE_BUSWIDTH_2_BYTES |
+ 		DMA_SLAVE_BUSWIDTH_4_BYTES;
+@@ -1399,13 +1405,23 @@ static int pxad_probe(struct platform_device *op)
+ 		return PTR_ERR(pdev->base);
+ 
+ 	of_id = of_match_device(pxad_dt_ids, &op->dev);
+-	if (of_id)
++	if (of_id) {
+ 		of_property_read_u32(op->dev.of_node, "#dma-channels",
+ 				     &dma_channels);
+-	else if (pdata && pdata->dma_channels)
++		ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
++					   &nb_requestors);
++		if (ret) {
++			dev_warn(pdev->slave.dev,
++				 "#dma-requests set to default 32 as missing in OF: %d",
++				 ret);
++			nb_requestors = 32;
++		};
++	} else if (pdata && pdata->dma_channels) {
+ 		dma_channels = pdata->dma_channels;
+-	else
++		nb_requestors = pdata->nb_requestors;
++	} else {
+ 		dma_channels = 32;	/* default 32 channel */
++	}
+ 
+ 	dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
+ 	dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
+@@ -1423,7 +1439,7 @@ static int pxad_probe(struct platform_device *op)
+ 	pdev->slave.descriptor_reuse = true;
+ 
+ 	pdev->slave.dev = &op->dev;
+-	ret = pxad_init_dmadev(op, pdev, dma_channels);
++	ret = pxad_init_dmadev(op, pdev, dma_channels, nb_requestors);
+ 	if (ret) {
+ 		dev_err(pdev->slave.dev, "unable to register\n");
+ 		return ret;
+@@ -1442,7 +1458,8 @@ static int pxad_probe(struct platform_device *op)
+ 
+ 	platform_set_drvdata(op, pdev);
+ 	pxad_init_debugfs(pdev);
+-	dev_info(pdev->slave.dev, "initialized %d channels\n", dma_channels);
++	dev_info(pdev->slave.dev, "initialized %d channels on %d requestors\n",
++		 dma_channels, nb_requestors);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
+index 01087a38da22..792bdae2b91d 100644
+--- a/drivers/edac/i7core_edac.c
++++ b/drivers/edac/i7core_edac.c
+@@ -1866,7 +1866,7 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
+ 
+ 	i7_dev = get_i7core_dev(mce->socketid);
+ 	if (!i7_dev)
+-		return NOTIFY_BAD;
++		return NOTIFY_DONE;
+ 
+ 	mci = i7_dev->mci;
+ 	pvt = mci->pvt_info;
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index 93f0d4120289..8bf745d2da7e 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -362,6 +362,7 @@ struct sbridge_pvt {
+ 
+ 	/* Memory type detection */
+ 	bool			is_mirrored, is_lockstep, is_close_pg;
++	bool			is_chan_hash;
+ 
+ 	/* Fifo double buffers */
+ 	struct mce		mce_entry[MCE_LOG_LEN];
+@@ -1060,6 +1061,20 @@ static inline u8 sad_pkg_ha(u8 pkg)
+ 	return (pkg >> 2) & 0x1;
+ }
+ 
++static int haswell_chan_hash(int idx, u64 addr)
++{
++	int i;
++
++	/*
++	 * XOR even bits from 12:26 to bit0 of idx,
++	 *     odd bits from 13:27 to bit1
++	 */
++	for (i = 12; i < 28; i += 2)
++		idx ^= (addr >> i) & 3;
++
++	return idx;
++}
++
+ /****************************************************************************
+ 			Memory check routines
+  ****************************************************************************/
+@@ -1616,6 +1631,10 @@ static int get_dimm_config(struct mem_ctl_info *mci)
+ 		KNL_MAX_CHANNELS : NUM_CHANNELS;
+ 	u64 knl_mc_sizes[KNL_MAX_CHANNELS];
+ 
++	if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
++		pci_read_config_dword(pvt->pci_ha0, HASWELL_HASYSDEFEATURE2, &reg);
++		pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
++	}
+ 	if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
+ 			pvt->info.type == KNIGHTS_LANDING)
+ 		pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
+@@ -2118,12 +2137,15 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
+ 	}
+ 
+ 	ch_way = TAD_CH(reg) + 1;
+-	sck_way = 1 << TAD_SOCK(reg);
++	sck_way = TAD_SOCK(reg);
+ 
+ 	if (ch_way == 3)
+ 		idx = addr >> 6;
+-	else
++	else {
+ 		idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
++		if (pvt->is_chan_hash)
++			idx = haswell_chan_hash(idx, addr);
++	}
+ 	idx = idx % ch_way;
+ 
+ 	/*
+@@ -2157,7 +2179,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
+ 		switch(ch_way) {
+ 		case 2:
+ 		case 4:
+-			sck_xch = 1 << sck_way * (ch_way >> 1);
++			sck_xch = (1 << sck_way) * (ch_way >> 1);
+ 			break;
+ 		default:
+ 			sprintf(msg, "Invalid mirror set. Can't decode addr");
+@@ -2193,7 +2215,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
+ 
+ 	ch_addr = addr - offset;
+ 	ch_addr >>= (6 + shiftup);
+-	ch_addr /= ch_way * sck_way;
++	ch_addr /= sck_xch;
+ 	ch_addr <<= (6 + shiftup);
+ 	ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
+ 
+@@ -3146,7 +3168,7 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
+ 
+ 	mci = get_mci_for_node_id(mce->socketid);
+ 	if (!mci)
+-		return NOTIFY_BAD;
++		return NOTIFY_DONE;
+ 	pvt = mci->pvt_info;
+ 
+ 	/*
+diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
+index 74dfb7f4f277..d8cac4661cfe 100644
+--- a/drivers/extcon/extcon-max77843.c
++++ b/drivers/extcon/extcon-max77843.c
+@@ -803,7 +803,7 @@ static int max77843_muic_probe(struct platform_device *pdev)
+ 	/* Clear IRQ bits before request IRQs */
+ 	ret = regmap_bulk_read(max77843->regmap_muic,
+ 			MAX77843_MUIC_REG_INT1, info->status,
+-			MAX77843_MUIC_IRQ_NUM);
++			MAX77843_MUIC_STATUS_NUM);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "Failed to Clear IRQ bits\n");
+ 		goto err_muic_irq;
+diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
+index 9e15d571b53c..a76c35fc0b92 100644
+--- a/drivers/firmware/efi/arm-init.c
++++ b/drivers/firmware/efi/arm-init.c
+@@ -203,7 +203,19 @@ void __init efi_init(void)
+ 
+ 	reserve_regions();
+ 	early_memunmap(memmap.map, params.mmap_size);
+-	memblock_mark_nomap(params.mmap & PAGE_MASK,
+-			    PAGE_ALIGN(params.mmap_size +
+-				       (params.mmap & ~PAGE_MASK)));
++
++	if (IS_ENABLED(CONFIG_ARM)) {
++		/*
++		 * ARM currently does not allow ioremap_cache() to be called on
++		 * memory regions that are covered by struct page. So remove the
++		 * UEFI memory map from the linear mapping.
++		 */
++		memblock_mark_nomap(params.mmap & PAGE_MASK,
++				    PAGE_ALIGN(params.mmap_size +
++					       (params.mmap & ~PAGE_MASK)));
++	} else {
++		memblock_reserve(params.mmap & PAGE_MASK,
++				 PAGE_ALIGN(params.mmap_size +
++					    (params.mmap & ~PAGE_MASK)));
++	}
+ }
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index 2cd37dad67a6..c51f3b2fe3c0 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -182,6 +182,7 @@ static int generic_ops_register(void)
+ {
+ 	generic_ops.get_variable = efi.get_variable;
+ 	generic_ops.set_variable = efi.set_variable;
++	generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
+ 	generic_ops.get_next_variable = efi.get_next_variable;
+ 	generic_ops.query_variable_store = efi_query_variable_store;
+ 
+diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
+index 7f2ea21c730d..6f182fd91a6d 100644
+--- a/drivers/firmware/efi/vars.c
++++ b/drivers/firmware/efi/vars.c
+@@ -202,29 +202,44 @@ static const struct variable_validate variable_validate[] = {
+ 	{ NULL_GUID, "", NULL },
+ };
+ 
++/*
++ * Check if @var_name matches the pattern given in @match_name.
++ *
++ * @var_name: an array of @len non-NUL characters.
++ * @match_name: a NUL-terminated pattern string, optionally ending in "*". A
++ *              final "*" character matches any trailing characters @var_name,
++ *              including the case when there are none left in @var_name.
++ * @match: on output, the number of non-wildcard characters in @match_name
++ *         that @var_name matches, regardless of the return value.
++ * @return: whether @var_name fully matches @match_name.
++ */
+ static bool
+ variable_matches(const char *var_name, size_t len, const char *match_name,
+ 		 int *match)
+ {
+ 	for (*match = 0; ; (*match)++) {
+ 		char c = match_name[*match];
+-		char u = var_name[*match];
+ 
+-		/* Wildcard in the matching name means we've matched */
+-		if (c == '*')
++		switch (c) {
++		case '*':
++			/* Wildcard in @match_name means we've matched. */
+ 			return true;
+ 
+-		/* Case sensitive match */
+-		if (!c && *match == len)
+-			return true;
++		case '\0':
++			/* @match_name has ended. Has @var_name too? */
++			return (*match == len);
+ 
+-		if (c != u)
++		default:
++			/*
++			 * We've reached a non-wildcard char in @match_name.
++			 * Continue only if there's an identical character in
++			 * @var_name.
++			 */
++			if (*match < len && c == var_name[*match])
++				continue;
+ 			return false;
+-
+-		if (!c)
+-			return true;
++		}
+ 	}
+-	return true;
+ }
+ 
+ bool
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 5e7770f9a415..ff299752d5e6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1619,6 +1619,7 @@ struct amdgpu_uvd {
+ 	struct amdgpu_bo	*vcpu_bo;
+ 	void			*cpu_addr;
+ 	uint64_t		gpu_addr;
++	unsigned		fw_version;
+ 	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
+ 	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
+ 	struct delayed_work	idle_work;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+index 81dc6b65436f..3c895863fcf5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+@@ -63,10 +63,6 @@ bool amdgpu_has_atpx(void) {
+ 	return amdgpu_atpx_priv.atpx_detected;
+ }
+ 
+-bool amdgpu_has_atpx_dgpu_power_cntl(void) {
+-	return amdgpu_atpx_priv.atpx.functions.power_cntl;
+-}
+-
+ /**
+  * amdgpu_atpx_call - call an ATPX method
+  *
+@@ -146,6 +142,10 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
+  */
+ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
+ {
++	/* make sure required functions are enabled */
++	/* dGPU power control is required */
++	atpx->functions.power_cntl = true;
++
+ 	if (atpx->functions.px_params) {
+ 		union acpi_object *info;
+ 		struct atpx_px_params output;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index d6c68d00cbb0..51bfc114584e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -62,12 +62,6 @@ static const char *amdgpu_asic_name[] = {
+ 	"LAST",
+ };
+ 
+-#if defined(CONFIG_VGA_SWITCHEROO)
+-bool amdgpu_has_atpx_dgpu_power_cntl(void);
+-#else
+-static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
+-#endif
+-
+ bool amdgpu_device_is_px(struct drm_device *dev)
+ {
+ 	struct amdgpu_device *adev = dev->dev_private;
+@@ -1517,7 +1511,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
+ 
+ 	if (amdgpu_runtime_pm == 1)
+ 		runtime = true;
+-	if (amdgpu_device_is_px(ddev) && amdgpu_has_atpx_dgpu_power_cntl())
++	if (amdgpu_device_is_px(ddev))
+ 		runtime = true;
+ 	vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
+ 	if (runtime)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index e23843f4d877..4488e82f87b0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -303,7 +303,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ 			fw_info.feature = adev->vce.fb_version;
+ 			break;
+ 		case AMDGPU_INFO_FW_UVD:
+-			fw_info.ver = 0;
++			fw_info.ver = adev->uvd.fw_version;
+ 			fw_info.feature = 0;
+ 			break;
+ 		case AMDGPU_INFO_FW_GMC:
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+index fdc1be8550da..3b2d75d96ea0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+@@ -53,7 +53,7 @@ struct amdgpu_hpd;
+ 
+ #define AMDGPU_MAX_HPD_PINS 6
+ #define AMDGPU_MAX_CRTCS 6
+-#define AMDGPU_MAX_AFMT_BLOCKS 7
++#define AMDGPU_MAX_AFMT_BLOCKS 9
+ 
+ enum amdgpu_rmx_type {
+ 	RMX_OFF,
+@@ -309,8 +309,8 @@ struct amdgpu_mode_info {
+ 	struct atom_context *atom_context;
+ 	struct card_info *atom_card_info;
+ 	bool mode_config_initialized;
+-	struct amdgpu_crtc *crtcs[6];
+-	struct amdgpu_afmt *afmt[7];
++	struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
++	struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
+ 	/* DVI-I properties */
+ 	struct drm_property *coherent_mode_property;
+ 	/* DAC enable load detect */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index 53f987aeeacf..3b35ad83867c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -156,6 +156,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
+ 	DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
+ 		version_major, version_minor, family_id);
+ 
++	adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
++				(family_id << 8));
++
+ 	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
+ 		 +  AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
+ 	r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
+@@ -273,6 +276,8 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
+ 	memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
+ 		(adev->uvd.fw->size) - offset);
+ 
++	cancel_delayed_work_sync(&adev->uvd.idle_work);
++
+ 	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
+ 	size -= le32_to_cpu(hdr->ucode_size_bytes);
+ 	ptr = adev->uvd.cpu_addr;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+index a745eeeb5d82..bb0da76051a1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+@@ -220,6 +220,7 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
+ 	if (i == AMDGPU_MAX_VCE_HANDLES)
+ 		return 0;
+ 
++	cancel_delayed_work_sync(&adev->vce.idle_work);
+ 	/* TODO: suspending running encoding sessions isn't supported */
+ 	return -EINVAL;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+index 06602df707f8..9b1c43005c80 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+@@ -3628,7 +3628,7 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ 					unsigned vm_id, uint64_t pd_addr)
+ {
+ 	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+-	uint32_t seq = ring->fence_drv.sync_seq;
++	uint32_t seq = ring->fence_drv.sync_seq[ring->idx];
+ 	uint64_t addr = ring->fence_drv.gpu_addr;
+ 
+ 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+index c34c393e9aea..d5e19b5fbbfb 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+@@ -513,7 +513,7 @@ static int dbgdev_wave_control_set_registers(
+ 				union SQ_CMD_BITS *in_reg_sq_cmd,
+ 				union GRBM_GFX_INDEX_BITS *in_reg_gfx_index)
+ {
+-	int status;
++	int status = 0;
+ 	union SQ_CMD_BITS reg_sq_cmd;
+ 	union GRBM_GFX_INDEX_BITS reg_gfx_index;
+ 	struct HsaDbgWaveMsgAMDGen2 *pMsg;
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 27fbd79d0daf..71ea0521ea96 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -1672,13 +1672,19 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
+ 	u8 sinks[DRM_DP_MAX_SDP_STREAMS];
+ 	int i;
+ 
++	port = drm_dp_get_validated_port_ref(mgr, port);
++	if (!port)
++		return -EINVAL;
++
+ 	port_num = port->port_num;
+ 	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+ 	if (!mstb) {
+ 		mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
+ 
+-		if (!mstb)
++		if (!mstb) {
++			drm_dp_put_port(port);
+ 			return -EINVAL;
++		}
+ 	}
+ 
+ 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+@@ -1707,6 +1713,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
+ 	kfree(txmsg);
+ fail_put:
+ 	drm_dp_put_mst_branch_device(mstb);
++	drm_dp_put_port(port);
+ 	return ret;
+ }
+ 
+@@ -1789,6 +1796,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
+ 		req_payload.start_slot = cur_slots;
+ 		if (mgr->proposed_vcpis[i]) {
+ 			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
++			port = drm_dp_get_validated_port_ref(mgr, port);
++			if (!port) {
++				mutex_unlock(&mgr->payload_lock);
++				return -EINVAL;
++			}
+ 			req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
+ 			req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
+ 		} else {
+@@ -1816,6 +1828,9 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
+ 			mgr->payloads[i].payload_state = req_payload.payload_state;
+ 		}
+ 		cur_slots += req_payload.num_slots;
++
++		if (port)
++			drm_dp_put_port(port);
+ 	}
+ 
+ 	for (i = 0; i < mgr->max_payloads; i++) {
+@@ -2121,6 +2136,8 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
+ 
+ 	if (mgr->mst_primary) {
+ 		int sret;
++		u8 guid[16];
++
+ 		sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
+ 		if (sret != DP_RECEIVER_CAP_SIZE) {
+ 			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+@@ -2135,6 +2152,16 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
+ 			ret = -1;
+ 			goto out_unlock;
+ 		}
++
++		/* Some hubs forget their guids after they resume */
++		sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
++		if (sret != 16) {
++			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
++			ret = -1;
++			goto out_unlock;
++		}
++		drm_dp_check_mstb_guid(mgr->mst_primary, guid);
++
+ 		ret = 0;
+ 	} else
+ 		ret = -1;
+diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
+index 647d85e77c2f..597cfb5ca847 100644
+--- a/drivers/gpu/drm/i915/intel_csr.c
++++ b/drivers/gpu/drm/i915/intel_csr.c
+@@ -177,7 +177,8 @@ static const struct stepping_info kbl_stepping_info[] = {
+ static const struct stepping_info skl_stepping_info[] = {
+ 	{'A', '0'}, {'B', '0'}, {'C', '0'},
+ 	{'D', '0'}, {'E', '0'}, {'F', '0'},
+-	{'G', '0'}, {'H', '0'}, {'I', '0'}
++	{'G', '0'}, {'H', '0'}, {'I', '0'},
++	{'J', '0'}, {'K', '0'}
+ };
+ 
+ static const struct stepping_info bxt_stepping_info[] = {
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 46947fffd599..a9c35134f2e2 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -4455,7 +4455,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
+ 		      intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
+ 
+ 	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
+-		&state->scaler_state.scaler_id, DRM_ROTATE_0,
++		&state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
+ 		state->pipe_src_w, state->pipe_src_h,
+ 		adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
+ }
+diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
+index fa0dabf578dc..db6361b5a6ab 100644
+--- a/drivers/gpu/drm/i915/intel_dp_mst.c
++++ b/drivers/gpu/drm/i915/intel_dp_mst.c
+@@ -184,7 +184,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
+ 	intel_mst->port = found->port;
+ 
+ 	if (intel_dp->active_mst_links == 0) {
+-		intel_ddi_clk_select(encoder, intel_crtc->config);
++		intel_ddi_clk_select(&intel_dig_port->base, intel_crtc->config);
+ 
+ 		intel_dp_set_link_params(intel_dp, intel_crtc->config);
+ 
+@@ -499,6 +499,8 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+ 	struct intel_connector *intel_connector = to_intel_connector(connector);
+ 	struct drm_device *dev = connector->dev;
+ 
++	intel_connector->unregister(intel_connector);
++
+ 	/* need to nuke the connector */
+ 	drm_modeset_lock_all(dev);
+ 	if (connector->state->crtc) {
+@@ -512,11 +514,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+ 
+ 		WARN(ret, "Disabling mst crtc failed with %i\n", ret);
+ 	}
+-	drm_modeset_unlock_all(dev);
+ 
+-	intel_connector->unregister(intel_connector);
+-
+-	drm_modeset_lock_all(dev);
+ 	intel_connector_remove_from_fbdev(intel_connector);
+ 	drm_connector_cleanup(connector);
+ 	drm_modeset_unlock_all(dev);
+diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
+index f1fa756c5d5d..cfd5f9fff2f4 100644
+--- a/drivers/gpu/drm/i915/intel_lrc.c
++++ b/drivers/gpu/drm/i915/intel_lrc.c
+@@ -781,11 +781,11 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
+ 		if (unlikely(total_bytes > remain_usable)) {
+ 			/*
+ 			 * The base request will fit but the reserved space
+-			 * falls off the end. So only need to to wait for the
+-			 * reserved size after flushing out the remainder.
++			 * falls off the end. So don't need an immediate wrap
++			 * and only need to effectively wait for the reserved
++			 * size space from the start of ringbuffer.
+ 			 */
+ 			wait_bytes = remain_actual + ringbuf->reserved_size;
+-			need_wrap = true;
+ 		} else if (total_bytes > ringbuf->space) {
+ 			/* No wrapping required, just waiting. */
+ 			wait_bytes = total_bytes;
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index b28c29f20e75..7e4a9842b9ea 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -2281,6 +2281,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc *intel_crtc,
+ 		return PTR_ERR(cstate);
+ 
+ 	pipe_wm = &cstate->wm.optimal.ilk;
++	memset(pipe_wm, 0, sizeof(*pipe_wm));
+ 
+ 	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+ 		ps = drm_atomic_get_plane_state(state,
+@@ -3606,23 +3607,43 @@ static void skl_update_wm(struct drm_crtc *crtc)
+ 	dev_priv->wm.skl_hw = *results;
+ }
+ 
++static void ilk_compute_wm_config(struct drm_device *dev,
++				  struct intel_wm_config *config)
++{
++	struct intel_crtc *crtc;
++
++	/* Compute the currently _active_ config */
++	for_each_intel_crtc(dev, crtc) {
++		const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
++
++		if (!wm->pipe_enabled)
++			continue;
++
++		config->sprites_enabled |= wm->sprites_enabled;
++		config->sprites_scaled |= wm->sprites_scaled;
++		config->num_pipes_active++;
++	}
++}
++
+ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
+ {
+ 	struct drm_device *dev = dev_priv->dev;
+ 	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
+ 	struct ilk_wm_maximums max;
+-	struct intel_wm_config *config = &dev_priv->wm.config;
++	struct intel_wm_config config = {};
+ 	struct ilk_wm_values results = {};
+ 	enum intel_ddb_partitioning partitioning;
+ 
+-	ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_1_2, &max);
+-	ilk_wm_merge(dev, config, &max, &lp_wm_1_2);
++	ilk_compute_wm_config(dev, &config);
++
++	ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
++	ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
+ 
+ 	/* 5/6 split only in single pipe config on IVB+ */
+ 	if (INTEL_INFO(dev)->gen >= 7 &&
+-	    config->num_pipes_active == 1 && config->sprites_enabled) {
+-		ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_5_6, &max);
+-		ilk_wm_merge(dev, config, &max, &lp_wm_5_6);
++	    config.num_pipes_active == 1 && config.sprites_enabled) {
++		ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
++		ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
+ 
+ 		best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
+ 	} else {
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index 40c6aff57256..549afa7bc75f 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -951,7 +951,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
+ 
+ 	/* WaForceContextSaveRestoreNonCoherent:skl,bxt */
+ 	tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
+-	if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) ||
++	if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
+ 	    IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
+ 		tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
+ 	WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
+@@ -1044,7 +1044,8 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
+ 		WA_SET_BIT_MASKED(HIZ_CHICKEN,
+ 				  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
+ 
+-	if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) {
++	/* This is tied to WaForceContextSaveRestoreNonCoherent */
++	if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
+ 		/*
+ 		 *Use Force Non-Coherent whenever executing a 3D context. This
+ 		 * is a workaround for a possible hang in the unlikely event
+@@ -1901,6 +1902,17 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
+ 	return 0;
+ }
+ 
++static void cleanup_phys_status_page(struct intel_engine_cs *ring)
++{
++	struct drm_i915_private *dev_priv = to_i915(ring->dev);
++
++	if (!dev_priv->status_page_dmah)
++		return;
++
++	drm_pci_free(ring->dev, dev_priv->status_page_dmah);
++	ring->status_page.page_addr = NULL;
++}
++
+ static void cleanup_status_page(struct intel_engine_cs *ring)
+ {
+ 	struct drm_i915_gem_object *obj;
+@@ -1917,9 +1929,9 @@ static void cleanup_status_page(struct intel_engine_cs *ring)
+ 
+ static int init_status_page(struct intel_engine_cs *ring)
+ {
+-	struct drm_i915_gem_object *obj;
++	struct drm_i915_gem_object *obj = ring->status_page.obj;
+ 
+-	if ((obj = ring->status_page.obj) == NULL) {
++	if (obj == NULL) {
+ 		unsigned flags;
+ 		int ret;
+ 
+@@ -2019,10 +2031,12 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+ {
+ 	struct drm_i915_private *dev_priv = to_i915(dev);
+ 	struct drm_i915_gem_object *obj = ringbuf->obj;
++	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
++	unsigned flags = PIN_OFFSET_BIAS | 4096;
+ 	int ret;
+ 
+ 	if (HAS_LLC(dev_priv) && !obj->stolen) {
+-		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0);
++		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
+ 		if (ret)
+ 			return ret;
+ 
+@@ -2038,7 +2052,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+ 			return -ENOMEM;
+ 		}
+ 	} else {
+-		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
++		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
++					    flags | PIN_MAPPABLE);
+ 		if (ret)
+ 			return ret;
+ 
+@@ -2164,7 +2179,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
+ 		if (ret)
+ 			goto error;
+ 	} else {
+-		BUG_ON(ring->id != RCS);
++		WARN_ON(ring->id != RCS);
+ 		ret = init_phys_status_page(ring);
+ 		if (ret)
+ 			goto error;
+@@ -2210,7 +2225,12 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
+ 	if (ring->cleanup)
+ 		ring->cleanup(ring);
+ 
+-	cleanup_status_page(ring);
++	if (I915_NEED_GFX_HWS(ring->dev)) {
++		cleanup_status_page(ring);
++	} else {
++		WARN_ON(ring->id != RCS);
++		cleanup_phys_status_page(ring);
++	}
+ 
+ 	i915_cmd_parser_fini_ring(ring);
+ 	i915_gem_batch_pool_fini(&ring->batch_pool);
+@@ -2373,11 +2393,11 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
+ 		if (unlikely(total_bytes > remain_usable)) {
+ 			/*
+ 			 * The base request will fit but the reserved space
+-			 * falls off the end. So only need to to wait for the
+-			 * reserved size after flushing out the remainder.
++			 * falls off the end. So don't need an immediate wrap
++			 * and only need to effectively wait for the reserved
++			 * size space from the start of ringbuffer.
+ 			 */
+ 			wait_bytes = remain_actual + ringbuf->reserved_size;
+-			need_wrap = true;
+ 		} else if (total_bytes > ringbuf->space) {
+ 			/* No wrapping required, just waiting. */
+ 			wait_bytes = total_bytes;
+diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
+index 277e60ae0e47..08961f7d151c 100644
+--- a/drivers/gpu/drm/i915/intel_uncore.c
++++ b/drivers/gpu/drm/i915/intel_uncore.c
+@@ -1155,7 +1155,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
+ 	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ 		dev_priv->uncore.funcs.force_wake_get =
+ 			fw_domains_get_with_thread_status;
+-		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
++		if (IS_HASWELL(dev))
++			dev_priv->uncore.funcs.force_wake_put =
++				fw_domains_put_with_fifo;
++		else
++			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+ 		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ 			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
+ 	} else if (IS_IVYBRIDGE(dev)) {
+diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
+index 3216e157a8a0..89da47234016 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
++++ b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
+@@ -131,7 +131,7 @@ nvkm_ramht_del(struct nvkm_ramht **pramht)
+ 	struct nvkm_ramht *ramht = *pramht;
+ 	if (ramht) {
+ 		nvkm_gpuobj_del(&ramht->gpuobj);
+-		kfree(*pramht);
++		vfree(*pramht);
+ 		*pramht = NULL;
+ 	}
+ }
+@@ -143,8 +143,8 @@ nvkm_ramht_new(struct nvkm_device *device, u32 size, u32 align,
+ 	struct nvkm_ramht *ramht;
+ 	int ret, i;
+ 
+-	if (!(ramht = *pramht = kzalloc(sizeof(*ramht) + (size >> 3) *
+-					sizeof(*ramht->data), GFP_KERNEL)))
++	if (!(ramht = *pramht = vzalloc(sizeof(*ramht) +
++					(size >> 3) * sizeof(*ramht->data))))
+ 		return -ENOMEM;
+ 
+ 	ramht->device = device;
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+index 1f81069edc58..332b5fe687fe 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+@@ -1807,6 +1807,8 @@ gf100_gr_init(struct gf100_gr *gr)
+ 
+ 	gf100_gr_mmio(gr, gr->func->mmio);
+ 
++	nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
++
+ 	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+ 	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
+ 		do {
+diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
+index 86276519b2ef..47e52647c9e5 100644
+--- a/drivers/gpu/drm/qxl/qxl_display.c
++++ b/drivers/gpu/drm/qxl/qxl_display.c
+@@ -375,10 +375,15 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
+ 
+ 	qxl_bo_kunmap(user_bo);
+ 
++	qcrtc->cur_x += qcrtc->hot_spot_x - hot_x;
++	qcrtc->cur_y += qcrtc->hot_spot_y - hot_y;
++	qcrtc->hot_spot_x = hot_x;
++	qcrtc->hot_spot_y = hot_y;
++
+ 	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+ 	cmd->type = QXL_CURSOR_SET;
+-	cmd->u.set.position.x = qcrtc->cur_x;
+-	cmd->u.set.position.y = qcrtc->cur_y;
++	cmd->u.set.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
++	cmd->u.set.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
+ 
+ 	cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
+ 
+@@ -441,8 +446,8 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
+ 
+ 	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+ 	cmd->type = QXL_CURSOR_MOVE;
+-	cmd->u.position.x = qcrtc->cur_x;
+-	cmd->u.position.y = qcrtc->cur_y;
++	cmd->u.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
++	cmd->u.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
+ 	qxl_release_unmap(qdev, release, &cmd->release_info);
+ 
+ 	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
+index 6e6b9b1519b8..3f3897eb458c 100644
+--- a/drivers/gpu/drm/qxl/qxl_drv.h
++++ b/drivers/gpu/drm/qxl/qxl_drv.h
+@@ -135,6 +135,8 @@ struct qxl_crtc {
+ 	int index;
+ 	int cur_x;
+ 	int cur_y;
++	int hot_spot_x;
++	int hot_spot_y;
+ };
+ 
+ struct qxl_output {
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 2ad462896896..32491355a1d4 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -2608,10 +2608,152 @@ static void evergreen_agp_enable(struct radeon_device *rdev)
+ 	WREG32(VM_CONTEXT1_CNTL, 0);
+ }
+ 
++static const unsigned ni_dig_offsets[] =
++{
++	NI_DIG0_REGISTER_OFFSET,
++	NI_DIG1_REGISTER_OFFSET,
++	NI_DIG2_REGISTER_OFFSET,
++	NI_DIG3_REGISTER_OFFSET,
++	NI_DIG4_REGISTER_OFFSET,
++	NI_DIG5_REGISTER_OFFSET
++};
++
++static const unsigned ni_tx_offsets[] =
++{
++	NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
++	NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
++	NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
++	NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1,
++	NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1,
++	NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
++};
++
++static const unsigned evergreen_dp_offsets[] =
++{
++	EVERGREEN_DP0_REGISTER_OFFSET,
++	EVERGREEN_DP1_REGISTER_OFFSET,
++	EVERGREEN_DP2_REGISTER_OFFSET,
++	EVERGREEN_DP3_REGISTER_OFFSET,
++	EVERGREEN_DP4_REGISTER_OFFSET,
++	EVERGREEN_DP5_REGISTER_OFFSET
++};
++
++
++/*
++ * Assumption is that EVERGREEN_CRTC_MASTER_EN enable for requested crtc
++ * We go from crtc to connector and it is not relible  since it
++ * should be an opposite direction .If crtc is enable then
++ * find the dig_fe which selects this crtc and insure that it enable.
++ * if such dig_fe is found then find dig_be which selects found dig_be and
++ * insure that it enable and in DP_SST mode.
++ * if UNIPHY_PLL_CONTROL1.enable then we should disconnect timing
++ * from dp symbols clocks .
++ */
++static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev,
++					       unsigned crtc_id, unsigned *ret_dig_fe)
++{
++	unsigned i;
++	unsigned dig_fe;
++	unsigned dig_be;
++	unsigned dig_en_be;
++	unsigned uniphy_pll;
++	unsigned digs_fe_selected;
++	unsigned dig_be_mode;
++	unsigned dig_fe_mask;
++	bool is_enabled = false;
++	bool found_crtc = false;
++
++	/* loop through all running dig_fe to find selected crtc */
++	for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
++		dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]);
++		if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON &&
++		    crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) {
++			/* found running pipe */
++			found_crtc = true;
++			dig_fe_mask = 1 << i;
++			dig_fe = i;
++			break;
++		}
++	}
++
++	if (found_crtc) {
++		/* loop through all running dig_be to find selected dig_fe */
++		for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
++			dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]);
++			/* if dig_fe_selected by dig_be? */
++			digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be);
++			dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be);
++			if (dig_fe_mask &  digs_fe_selected &&
++			    /* if dig_be in sst mode? */
++			    dig_be_mode == NI_DIG_BE_DPSST) {
++				dig_en_be = RREG32(NI_DIG_BE_EN_CNTL +
++						   ni_dig_offsets[i]);
++				uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 +
++						    ni_tx_offsets[i]);
++				/* dig_be enable and tx is running */
++				if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE &&
++				    dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON &&
++				    uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) {
++					is_enabled = true;
++					*ret_dig_fe = dig_fe;
++					break;
++				}
++			}
++		}
++	}
++
++	return is_enabled;
++}
++
++/*
++ * Blank dig when in dp sst mode
++ * Dig ignores crtc timing
++ */
++static void evergreen_blank_dp_output(struct radeon_device *rdev,
++				      unsigned dig_fe)
++{
++	unsigned stream_ctrl;
++	unsigned fifo_ctrl;
++	unsigned counter = 0;
++
++	if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
++		DRM_ERROR("invalid dig_fe %d\n", dig_fe);
++		return;
++	}
++
++	stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
++			     evergreen_dp_offsets[dig_fe]);
++	if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
++		DRM_ERROR("dig %d , should be enable\n", dig_fe);
++		return;
++	}
++
++	stream_ctrl &=~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
++	WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
++	       evergreen_dp_offsets[dig_fe], stream_ctrl);
++
++	stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
++			     evergreen_dp_offsets[dig_fe]);
++	while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) {
++		msleep(1);
++		counter++;
++		stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
++				     evergreen_dp_offsets[dig_fe]);
++	}
++	if (counter >= 32 )
++		DRM_ERROR("counter exceeds %d\n", counter);
++
++	fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
++	fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
++	WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl);
++
++}
++
+ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
+ {
+ 	u32 crtc_enabled, tmp, frame_count, blackout;
+ 	int i, j;
++	unsigned dig_fe;
+ 
+ 	if (!ASIC_IS_NODCE(rdev)) {
+ 		save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
+@@ -2651,7 +2793,17 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
+ 					break;
+ 				udelay(1);
+ 			}
+-
++			/*we should disable dig if it drives dp sst*/
++			/*but we are in radeon_device_init and the topology is unknown*/
++			/*and it is available after radeon_modeset_init*/
++			/*the following method radeon_atom_encoder_dpms_dig*/
++			/*does the job if we initialize it properly*/
++			/*for now we do it this manually*/
++			/**/
++			if (ASIC_IS_DCE5(rdev) &&
++			    evergreen_is_dp_sst_stream_enabled(rdev, i ,&dig_fe))
++				evergreen_blank_dp_output(rdev, dig_fe);
++			/*we could remove 6 lines below*/
+ 			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+ 			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ 			tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
+index aa939dfed3a3..b436badf9efa 100644
+--- a/drivers/gpu/drm/radeon/evergreen_reg.h
++++ b/drivers/gpu/drm/radeon/evergreen_reg.h
+@@ -250,8 +250,43 @@
+ 
+ /* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
+ #define EVERGREEN_HDMI_BASE				0x7030
++/*DIG block*/
++#define NI_DIG0_REGISTER_OFFSET                 (0x7000  - 0x7000)
++#define NI_DIG1_REGISTER_OFFSET                 (0x7C00  - 0x7000)
++#define NI_DIG2_REGISTER_OFFSET                 (0x10800 - 0x7000)
++#define NI_DIG3_REGISTER_OFFSET                 (0x11400 - 0x7000)
++#define NI_DIG4_REGISTER_OFFSET                 (0x12000 - 0x7000)
++#define NI_DIG5_REGISTER_OFFSET                 (0x12C00 - 0x7000)
++
++
++#define NI_DIG_FE_CNTL                               0x7000
++#       define NI_DIG_FE_CNTL_SOURCE_SELECT(x)        ((x) & 0x3)
++#       define NI_DIG_FE_CNTL_SYMCLK_FE_ON            (1<<24)
++
++
++#define NI_DIG_BE_CNTL                    0x7140
++#       define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x)     (((x) >> 8 ) & 0x3F)
++#       define NI_DIG_FE_CNTL_MODE(x)                 (((x) >> 16) & 0x7 )
++
++#define NI_DIG_BE_EN_CNTL                              0x7144
++#       define NI_DIG_BE_EN_CNTL_ENABLE               (1 << 0)
++#       define NI_DIG_BE_EN_CNTL_SYMBCLK_ON           (1 << 8)
++#       define NI_DIG_BE_DPSST 0
+ 
+ /* Display Port block */
++#define EVERGREEN_DP0_REGISTER_OFFSET                 (0x730C  - 0x730C)
++#define EVERGREEN_DP1_REGISTER_OFFSET                 (0x7F0C  - 0x730C)
++#define EVERGREEN_DP2_REGISTER_OFFSET                 (0x10B0C - 0x730C)
++#define EVERGREEN_DP3_REGISTER_OFFSET                 (0x1170C - 0x730C)
++#define EVERGREEN_DP4_REGISTER_OFFSET                 (0x1230C - 0x730C)
++#define EVERGREEN_DP5_REGISTER_OFFSET                 (0x12F0C - 0x730C)
++
++
++#define EVERGREEN_DP_VID_STREAM_CNTL                    0x730C
++#       define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE     (1 << 0)
++#       define EVERGREEN_DP_VID_STREAM_STATUS          (1 <<16)
++#define EVERGREEN_DP_STEER_FIFO                         0x7310
++#       define EVERGREEN_DP_STEER_FIFO_RESET           (1 << 0)
+ #define EVERGREEN_DP_SEC_CNTL                           0x7280
+ #       define EVERGREEN_DP_SEC_STREAM_ENABLE           (1 << 0)
+ #       define EVERGREEN_DP_SEC_ASP_ENABLE              (1 << 4)
+@@ -266,4 +301,15 @@
+ #       define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x)      (((x) & 0xf) << 24)
+ #       define EVERGREEN_DP_SEC_SS_EN                   (1 << 28)
+ 
++/*DCIO_UNIPHY block*/
++#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1            (0x6600  -0x6600)
++#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1            (0x6640  -0x6600)
++#define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1            (0x6680 - 0x6600)
++#define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1            (0x66C0 - 0x6600)
++#define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1            (0x6700 - 0x6600)
++#define NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1            (0x6740 - 0x6600)
++
++#define NI_DCIO_UNIPHY0_PLL_CONTROL1                   0x6618
++#       define NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE     (1 << 0)
++
+ #endif
+diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+index 9bc408c9f9f6..c4b4f298a283 100644
+--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
++++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+@@ -62,10 +62,6 @@ bool radeon_has_atpx(void) {
+ 	return radeon_atpx_priv.atpx_detected;
+ }
+ 
+-bool radeon_has_atpx_dgpu_power_cntl(void) {
+-	return radeon_atpx_priv.atpx.functions.power_cntl;
+-}
+-
+ /**
+  * radeon_atpx_call - call an ATPX method
+  *
+@@ -145,6 +141,10 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
+  */
+ static int radeon_atpx_validate(struct radeon_atpx *atpx)
+ {
++	/* make sure required functions are enabled */
++	/* dGPU power control is required */
++	atpx->functions.power_cntl = true;
++
+ 	if (atpx->functions.px_params) {
+ 		union acpi_object *info;
+ 		struct atpx_px_params output;
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index 340f3f549f29..9cfc1c3e1965 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -1996,10 +1996,12 @@ radeon_add_atom_connector(struct drm_device *dev,
+ 						   rdev->mode_info.dither_property,
+ 						   RADEON_FMT_DITHER_DISABLE);
+ 
+-			if (radeon_audio != 0)
++			if (radeon_audio != 0) {
+ 				drm_object_attach_property(&radeon_connector->base.base,
+ 							   rdev->mode_info.audio_property,
+ 							   RADEON_AUDIO_AUTO);
++				radeon_connector->audio = RADEON_AUDIO_AUTO;
++			}
+ 			if (ASIC_IS_DCE5(rdev))
+ 				drm_object_attach_property(&radeon_connector->base.base,
+ 							   rdev->mode_info.output_csc_property,
+@@ -2124,6 +2126,7 @@ radeon_add_atom_connector(struct drm_device *dev,
+ 				drm_object_attach_property(&radeon_connector->base.base,
+ 							   rdev->mode_info.audio_property,
+ 							   RADEON_AUDIO_AUTO);
++				radeon_connector->audio = RADEON_AUDIO_AUTO;
+ 			}
+ 			if (connector_type == DRM_MODE_CONNECTOR_DVII) {
+ 				radeon_connector->dac_load_detect = true;
+@@ -2179,6 +2182,7 @@ radeon_add_atom_connector(struct drm_device *dev,
+ 				drm_object_attach_property(&radeon_connector->base.base,
+ 							   rdev->mode_info.audio_property,
+ 							   RADEON_AUDIO_AUTO);
++				radeon_connector->audio = RADEON_AUDIO_AUTO;
+ 			}
+ 			if (ASIC_IS_DCE5(rdev))
+ 				drm_object_attach_property(&radeon_connector->base.base,
+@@ -2231,6 +2235,7 @@ radeon_add_atom_connector(struct drm_device *dev,
+ 				drm_object_attach_property(&radeon_connector->base.base,
+ 							   rdev->mode_info.audio_property,
+ 							   RADEON_AUDIO_AUTO);
++				radeon_connector->audio = RADEON_AUDIO_AUTO;
+ 			}
+ 			if (ASIC_IS_DCE5(rdev))
+ 				drm_object_attach_property(&radeon_connector->base.base,
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index e2396336f9e8..4197ca1bb1e4 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -103,12 +103,6 @@ static const char radeon_family_name[][16] = {
+ 	"LAST",
+ };
+ 
+-#if defined(CONFIG_VGA_SWITCHEROO)
+-bool radeon_has_atpx_dgpu_power_cntl(void);
+-#else
+-static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
+-#endif
+-
+ #define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)
+ #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
+ 
+@@ -1439,7 +1433,7 @@ int radeon_device_init(struct radeon_device *rdev,
+ 	 * ignore it */
+ 	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
+ 
+-	if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl())
++	if (rdev->flags & RADEON_IS_PX)
+ 		runtime = true;
+ 	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
+ 	if (runtime)
+diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
+index e06ac546a90f..f342aad79cc6 100644
+--- a/drivers/gpu/drm/radeon/radeon_ttm.c
++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
+@@ -235,6 +235,8 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+ {
+ 	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+ 
++	if (radeon_ttm_tt_has_userptr(bo->ttm))
++		return -EPERM;
+ 	return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index 7285adb27099..caa73de584a5 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -2931,6 +2931,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
+ 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
+ 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
+ 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
++	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
+ 	{ 0, 0, 0, 0 },
+ };
+ 
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index 4cbf26555093..e3daafa1be13 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -230,22 +230,13 @@ EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
+ 
+ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
+ {
+-	struct ttm_bo_device *bdev = bo->bdev;
+-	struct ttm_mem_type_manager *man;
++	int put_count = 0;
+ 
+ 	lockdep_assert_held(&bo->resv->lock.base);
+ 
+-	if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
+-		list_del_init(&bo->swap);
+-		list_del_init(&bo->lru);
+-
+-	} else {
+-		if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
+-			list_move_tail(&bo->swap, &bo->glob->swap_lru);
+-
+-		man = &bdev->man[bo->mem.mem_type];
+-		list_move_tail(&bo->lru, &man->lru);
+-	}
++	put_count = ttm_bo_del_from_lru(bo);
++	ttm_bo_list_ref_sub(bo, put_count, true);
++	ttm_bo_add_to_lru(bo);
+ }
+ EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
+ 
+diff --git a/drivers/hwtracing/stm/Kconfig b/drivers/hwtracing/stm/Kconfig
+index 83e9f591a54b..e7a348807f0c 100644
+--- a/drivers/hwtracing/stm/Kconfig
++++ b/drivers/hwtracing/stm/Kconfig
+@@ -1,6 +1,7 @@
+ config STM
+ 	tristate "System Trace Module devices"
+ 	select CONFIGFS_FS
++	select SRCU
+ 	help
+ 	  A System Trace Module (STM) is a device exporting data in System
+ 	  Trace Protocol (STP) format as defined by MIPI STP standards.
+diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
+index 714bdc837769..b167ab25310a 100644
+--- a/drivers/i2c/busses/i2c-cpm.c
++++ b/drivers/i2c/busses/i2c-cpm.c
+@@ -116,8 +116,8 @@ struct cpm_i2c {
+ 	cbd_t __iomem *rbase;
+ 	u_char *txbuf[CPM_MAXBD];
+ 	u_char *rxbuf[CPM_MAXBD];
+-	u32 txdma[CPM_MAXBD];
+-	u32 rxdma[CPM_MAXBD];
++	dma_addr_t txdma[CPM_MAXBD];
++	dma_addr_t rxdma[CPM_MAXBD];
+ };
+ 
+ static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id)
+diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
+index b29c7500461a..f54ece8fce78 100644
+--- a/drivers/i2c/busses/i2c-exynos5.c
++++ b/drivers/i2c/busses/i2c-exynos5.c
+@@ -671,7 +671,9 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
+ 		return -EIO;
+ 	}
+ 
+-	clk_prepare_enable(i2c->clk);
++	ret = clk_enable(i2c->clk);
++	if (ret)
++		return ret;
+ 
+ 	for (i = 0; i < num; i++, msgs++) {
+ 		stop = (i == num - 1);
+@@ -695,7 +697,7 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
+ 	}
+ 
+  out:
+-	clk_disable_unprepare(i2c->clk);
++	clk_disable(i2c->clk);
+ 	return ret;
+ }
+ 
+@@ -747,7 +749,9 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
+ 		return -ENOENT;
+ 	}
+ 
+-	clk_prepare_enable(i2c->clk);
++	ret = clk_prepare_enable(i2c->clk);
++	if (ret)
++		return ret;
+ 
+ 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
+@@ -799,6 +803,10 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
+ 
+ 	platform_set_drvdata(pdev, i2c);
+ 
++	clk_disable(i2c->clk);
++
++	return 0;
++
+  err_clk:
+ 	clk_disable_unprepare(i2c->clk);
+ 	return ret;
+@@ -810,6 +818,8 @@ static int exynos5_i2c_remove(struct platform_device *pdev)
+ 
+ 	i2c_del_adapter(&i2c->adap);
+ 
++	clk_unprepare(i2c->clk);
++
+ 	return 0;
+ }
+ 
+@@ -821,6 +831,8 @@ static int exynos5_i2c_suspend_noirq(struct device *dev)
+ 
+ 	i2c->suspended = 1;
+ 
++	clk_unprepare(i2c->clk);
++
+ 	return 0;
+ }
+ 
+@@ -830,7 +842,9 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
+ 	struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
+ 	int ret = 0;
+ 
+-	clk_prepare_enable(i2c->clk);
++	ret = clk_prepare_enable(i2c->clk);
++	if (ret)
++		return ret;
+ 
+ 	ret = exynos5_hsi2c_clock_setup(i2c);
+ 	if (ret) {
+@@ -839,7 +853,7 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
+ 	}
+ 
+ 	exynos5_i2c_init(i2c);
+-	clk_disable_unprepare(i2c->clk);
++	clk_disable(i2c->clk);
+ 	i2c->suspended = 0;
+ 
+ 	return 0;
+diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
+index 53343ffbff7a..1b109b2a235e 100644
+--- a/drivers/infiniband/core/cache.c
++++ b/drivers/infiniband/core/cache.c
+@@ -691,7 +691,8 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
+ 			      NULL);
+ 
+ 		/* Coudn't find default GID location */
+-		WARN_ON(ix < 0);
++		if (WARN_ON(ix < 0))
++			goto release;
+ 
+ 		zattr_type.gid_type = gid_type;
+ 
+diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
+index 6b4e8a008bc0..564adf3116e8 100644
+--- a/drivers/infiniband/core/ucm.c
++++ b/drivers/infiniband/core/ucm.c
+@@ -48,6 +48,7 @@
+ 
+ #include <asm/uaccess.h>
+ 
++#include <rdma/ib.h>
+ #include <rdma/ib_cm.h>
+ #include <rdma/ib_user_cm.h>
+ #include <rdma/ib_marshall.h>
+@@ -1103,6 +1104,9 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
+ 	struct ib_ucm_cmd_hdr hdr;
+ 	ssize_t result;
+ 
++	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
++		return -EACCES;
++
+ 	if (len < sizeof(hdr))
+ 		return -EINVAL;
+ 
+diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
+index 8b5a934e1133..886f61ea6cc7 100644
+--- a/drivers/infiniband/core/ucma.c
++++ b/drivers/infiniband/core/ucma.c
+@@ -1574,6 +1574,9 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
+ 	struct rdma_ucm_cmd_hdr hdr;
+ 	ssize_t ret;
+ 
++	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
++		return -EACCES;
++
+ 	if (len < sizeof(hdr))
+ 		return -EINVAL;
+ 
+diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
+index 39680aed99dd..d3fb8aa46c59 100644
+--- a/drivers/infiniband/core/uverbs_main.c
++++ b/drivers/infiniband/core/uverbs_main.c
+@@ -48,6 +48,8 @@
+ 
+ #include <asm/uaccess.h>
+ 
++#include <rdma/ib.h>
++
+ #include "uverbs.h"
+ 
+ MODULE_AUTHOR("Roland Dreier");
+@@ -693,6 +695,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
+ 	int srcu_key;
+ 	ssize_t ret;
+ 
++	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
++		return -EACCES;
++
+ 	if (count < sizeof hdr)
+ 		return -EINVAL;
+ 
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 03c418ccbc98..ed9cefa1f6f1 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -517,7 +517,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
+ 		     sizeof(struct mlx5_wqe_ctrl_seg)) /
+ 		     sizeof(struct mlx5_wqe_data_seg);
+ 	props->max_sge = min(max_rq_sg, max_sq_sg);
+-	props->max_sge_rd = props->max_sge;
++	props->max_sge_rd	   = MLX5_MAX_SGE_RD;
+ 	props->max_cq		   = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
+ 	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
+ 	props->max_mr		   = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
+diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
+index e449e394963f..24f4a782e0f4 100644
+--- a/drivers/infiniband/hw/qib/qib_file_ops.c
++++ b/drivers/infiniband/hw/qib/qib_file_ops.c
+@@ -45,6 +45,8 @@
+ #include <linux/export.h>
+ #include <linux/uio.h>
+ 
++#include <rdma/ib.h>
++
+ #include "qib.h"
+ #include "qib_common.h"
+ #include "qib_user_sdma.h"
+@@ -2067,6 +2069,9 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
+ 	ssize_t ret = 0;
+ 	void *dest;
+ 
++	if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
++		return -EACCES;
++
+ 	if (count < sizeof(cmd.type)) {
+ 		ret = -EINVAL;
+ 		goto bail;
+diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
+index 3f02e0e03d12..67aab86048ad 100644
+--- a/drivers/input/misc/pmic8xxx-pwrkey.c
++++ b/drivers/input/misc/pmic8xxx-pwrkey.c
+@@ -353,7 +353,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
+ 	if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay))
+ 		kpd_delay = 15625;
+ 
+-	if (kpd_delay > 62500 || kpd_delay == 0) {
++	/* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. */
++	if (kpd_delay > USEC_PER_SEC * 2 || kpd_delay < USEC_PER_SEC / 64) {
+ 		dev_err(&pdev->dev, "invalid power key trigger delay\n");
+ 		return -EINVAL;
+ 	}
+@@ -385,8 +386,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
+ 	pwr->name = "pmic8xxx_pwrkey";
+ 	pwr->phys = "pmic8xxx_pwrkey/input0";
+ 
+-	delay = (kpd_delay << 10) / USEC_PER_SEC;
+-	delay = 1 + ilog2(delay);
++	delay = (kpd_delay << 6) / USEC_PER_SEC;
++	delay = ilog2(delay);
+ 
+ 	err = regmap_read(regmap, PON_CNTL_1, &pon_cntl);
+ 	if (err < 0) {
+diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
+index 3a7f3a4a4396..7c18249d6c8e 100644
+--- a/drivers/input/tablet/gtco.c
++++ b/drivers/input/tablet/gtco.c
+@@ -858,6 +858,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
+ 		goto err_free_buf;
+ 	}
+ 
++	/* Sanity check that a device has an endpoint */
++	if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
++		dev_err(&usbinterface->dev,
++			"Invalid number of endpoints\n");
++		error = -EINVAL;
++		goto err_free_urb;
++	}
++
+ 	/*
+ 	 * The endpoint is always altsetting 0, we know this since we know
+ 	 * this device only has one interrupt endpoint
+@@ -879,7 +887,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
+ 	 * HID report descriptor
+ 	 */
+ 	if (usb_get_extra_descriptor(usbinterface->cur_altsetting,
+-				     HID_DEVICE_TYPE, &hid_desc) != 0){
++				     HID_DEVICE_TYPE, &hid_desc) != 0) {
+ 		dev_err(&usbinterface->dev,
+ 			"Can't retrieve exta USB descriptor to get hid report descriptor length\n");
+ 		error = -EIO;
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 374c129219ef..5efadad4615b 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -92,6 +92,7 @@ struct iommu_dev_data {
+ 	struct list_head dev_data_list;	  /* For global dev_data_list */
+ 	struct protection_domain *domain; /* Domain the device is bound to */
+ 	u16 devid;			  /* PCI Device ID */
++	u16 alias;			  /* Alias Device ID */
+ 	bool iommu_v2;			  /* Device can make use of IOMMUv2 */
+ 	bool passthrough;		  /* Device is identity mapped */
+ 	struct {
+@@ -166,6 +167,13 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
+ 	return container_of(dom, struct protection_domain, domain);
+ }
+ 
++static inline u16 get_device_id(struct device *dev)
++{
++	struct pci_dev *pdev = to_pci_dev(dev);
++
++	return PCI_DEVID(pdev->bus->number, pdev->devfn);
++}
++
+ static struct iommu_dev_data *alloc_dev_data(u16 devid)
+ {
+ 	struct iommu_dev_data *dev_data;
+@@ -203,6 +211,68 @@ out_unlock:
+ 	return dev_data;
+ }
+ 
++static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
++{
++	*(u16 *)data = alias;
++	return 0;
++}
++
++static u16 get_alias(struct device *dev)
++{
++	struct pci_dev *pdev = to_pci_dev(dev);
++	u16 devid, ivrs_alias, pci_alias;
++
++	devid = get_device_id(dev);
++	ivrs_alias = amd_iommu_alias_table[devid];
++	pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
++
++	if (ivrs_alias == pci_alias)
++		return ivrs_alias;
++
++	/*
++	 * DMA alias showdown
++	 *
++	 * The IVRS is fairly reliable in telling us about aliases, but it
++	 * can't know about every screwy device.  If we don't have an IVRS
++	 * reported alias, use the PCI reported alias.  In that case we may
++	 * still need to initialize the rlookup and dev_table entries if the
++	 * alias is to a non-existent device.
++	 */
++	if (ivrs_alias == devid) {
++		if (!amd_iommu_rlookup_table[pci_alias]) {
++			amd_iommu_rlookup_table[pci_alias] =
++				amd_iommu_rlookup_table[devid];
++			memcpy(amd_iommu_dev_table[pci_alias].data,
++			       amd_iommu_dev_table[devid].data,
++			       sizeof(amd_iommu_dev_table[pci_alias].data));
++		}
++
++		return pci_alias;
++	}
++
++	pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
++		"for device %s[%04x:%04x], kernel reported alias "
++		"%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
++		PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
++		PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
++		PCI_FUNC(pci_alias));
++
++	/*
++	 * If we don't have a PCI DMA alias and the IVRS alias is on the same
++	 * bus, then the IVRS table may know about a quirk that we don't.
++	 */
++	if (pci_alias == devid &&
++	    PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
++		pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
++		pdev->dma_alias_devfn = ivrs_alias & 0xff;
++		pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
++			PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
++			dev_name(dev));
++	}
++
++	return ivrs_alias;
++}
++
+ static struct iommu_dev_data *find_dev_data(u16 devid)
+ {
+ 	struct iommu_dev_data *dev_data;
+@@ -215,13 +285,6 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
+ 	return dev_data;
+ }
+ 
+-static inline u16 get_device_id(struct device *dev)
+-{
+-	struct pci_dev *pdev = to_pci_dev(dev);
+-
+-	return PCI_DEVID(pdev->bus->number, pdev->devfn);
+-}
+-
+ static struct iommu_dev_data *get_dev_data(struct device *dev)
+ {
+ 	return dev->archdata.iommu;
+@@ -349,6 +412,8 @@ static int iommu_init_device(struct device *dev)
+ 	if (!dev_data)
+ 		return -ENOMEM;
+ 
++	dev_data->alias = get_alias(dev);
++
+ 	if (pci_iommuv2_capable(pdev)) {
+ 		struct amd_iommu *iommu;
+ 
+@@ -369,7 +434,7 @@ static void iommu_ignore_device(struct device *dev)
+ 	u16 devid, alias;
+ 
+ 	devid = get_device_id(dev);
+-	alias = amd_iommu_alias_table[devid];
++	alias = get_alias(dev);
+ 
+ 	memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
+ 	memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
+@@ -1061,7 +1126,7 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
+ 	int ret;
+ 
+ 	iommu = amd_iommu_rlookup_table[dev_data->devid];
+-	alias = amd_iommu_alias_table[dev_data->devid];
++	alias = dev_data->alias;
+ 
+ 	ret = iommu_flush_dte(iommu, dev_data->devid);
+ 	if (!ret && alias != dev_data->devid)
+@@ -2039,7 +2104,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
+ 	bool ats;
+ 
+ 	iommu = amd_iommu_rlookup_table[dev_data->devid];
+-	alias = amd_iommu_alias_table[dev_data->devid];
++	alias = dev_data->alias;
+ 	ats   = dev_data->ats.enabled;
+ 
+ 	/* Update data structures */
+@@ -2073,7 +2138,7 @@ static void do_detach(struct iommu_dev_data *dev_data)
+ 		return;
+ 
+ 	iommu = amd_iommu_rlookup_table[dev_data->devid];
+-	alias = amd_iommu_alias_table[dev_data->devid];
++	alias = dev_data->alias;
+ 
+ 	/* decrease reference counters */
+ 	dev_data->domain->dev_iommu[iommu->index] -= 1;
+diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
+index 72d6182666cb..58f2fe687a24 100644
+--- a/drivers/iommu/dma-iommu.c
++++ b/drivers/iommu/dma-iommu.c
+@@ -403,7 +403,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
+ 		unsigned int s_length = sg_dma_len(s);
+ 		unsigned int s_dma_len = s->length;
+ 
+-		s->offset = s_offset;
++		s->offset += s_offset;
+ 		s->length = s_length;
+ 		sg_dma_address(s) = dma_addr + s_offset;
+ 		dma_addr += s_dma_len;
+@@ -422,7 +422,7 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
+ 
+ 	for_each_sg(sg, s, nents, i) {
+ 		if (sg_dma_address(s) != DMA_ERROR_CODE)
+-			s->offset = sg_dma_address(s);
++			s->offset += sg_dma_address(s);
+ 		if (sg_dma_len(s))
+ 			s->length = sg_dma_len(s);
+ 		sg_dma_address(s) = DMA_ERROR_CODE;
+diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
+index efe50845939d..17304705f2cf 100644
+--- a/drivers/irqchip/irq-mxs.c
++++ b/drivers/irqchip/irq-mxs.c
+@@ -183,7 +183,7 @@ static void __iomem * __init icoll_init_iobase(struct device_node *np)
+ 	void __iomem *icoll_base;
+ 
+ 	icoll_base = of_io_request_and_map(np, 0, np->name);
+-	if (!icoll_base)
++	if (IS_ERR(icoll_base))
+ 		panic("%s: unable to map resource", np->full_name);
+ 	return icoll_base;
+ }
+diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
+index 0820f67cc9a7..668730c5cb66 100644
+--- a/drivers/irqchip/irq-sunxi-nmi.c
++++ b/drivers/irqchip/irq-sunxi-nmi.c
+@@ -160,9 +160,9 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
+ 
+ 	gc = irq_get_domain_generic_chip(domain, 0);
+ 	gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node));
+-	if (!gc->reg_base) {
++	if (IS_ERR(gc->reg_base)) {
+ 		pr_err("unable to map resource\n");
+-		ret = -ENOMEM;
++		ret = PTR_ERR(gc->reg_base);
+ 		goto fail_irqd_remove;
+ 	}
+ 
+diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
+index 27f2ef300f8b..3970cda10080 100644
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -867,39 +867,55 @@ static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
+ 	return 0;
+ }
+ 
+-#define WRITE_LOCK(cmd)	\
+-	down_write(&cmd->root_lock); \
+-	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
+-		up_write(&cmd->root_lock); \
+-		return -EINVAL; \
++static bool cmd_write_lock(struct dm_cache_metadata *cmd)
++{
++	down_write(&cmd->root_lock);
++	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
++		up_write(&cmd->root_lock);
++		return false;
+ 	}
++	return true;
++}
+ 
+-#define WRITE_LOCK_VOID(cmd) \
+-	down_write(&cmd->root_lock); \
+-	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
+-		up_write(&cmd->root_lock); \
+-		return; \
+-	}
++#define WRITE_LOCK(cmd)				\
++	do {					\
++		if (!cmd_write_lock((cmd)))	\
++			return -EINVAL;		\
++	} while(0)
++
++#define WRITE_LOCK_VOID(cmd)			\
++	do {					\
++		if (!cmd_write_lock((cmd)))	\
++			return;			\
++	} while(0)
+ 
+ #define WRITE_UNLOCK(cmd) \
+-	up_write(&cmd->root_lock)
++	up_write(&(cmd)->root_lock)
+ 
+-#define READ_LOCK(cmd) \
+-	down_read(&cmd->root_lock); \
+-	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
+-		up_read(&cmd->root_lock); \
+-		return -EINVAL; \
++static bool cmd_read_lock(struct dm_cache_metadata *cmd)
++{
++	down_read(&cmd->root_lock);
++	if (cmd->fail_io) {
++		up_read(&cmd->root_lock);
++		return false;
+ 	}
++	return true;
++}
+ 
+-#define READ_LOCK_VOID(cmd)	\
+-	down_read(&cmd->root_lock); \
+-	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
+-		up_read(&cmd->root_lock); \
+-		return; \
+-	}
++#define READ_LOCK(cmd)				\
++	do {					\
++		if (!cmd_read_lock((cmd)))	\
++			return -EINVAL;		\
++	} while(0)
++
++#define READ_LOCK_VOID(cmd)			\
++	do {					\
++		if (!cmd_read_lock((cmd)))	\
++			return;			\
++	} while(0)
+ 
+ #define READ_UNLOCK(cmd) \
+-	up_read(&cmd->root_lock)
++	up_read(&(cmd)->root_lock)
+ 
+ int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
+ {
+diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
+index de9ff3bb8edd..6996ab8db108 100644
+--- a/drivers/media/usb/usbvision/usbvision-video.c
++++ b/drivers/media/usb/usbvision/usbvision-video.c
+@@ -1461,13 +1461,6 @@ static int usbvision_probe(struct usb_interface *intf,
+ 	printk(KERN_INFO "%s: %s found\n", __func__,
+ 				usbvision_device_data[model].model_string);
+ 
+-	/*
+-	 * this is a security check.
+-	 * an exploit using an incorrect bInterfaceNumber is known
+-	 */
+-	if (ifnum >= USB_MAXINTERFACES || !dev->actconfig->interface[ifnum])
+-		return -ENODEV;
+-
+ 	if (usbvision_device_data[model].interface >= 0)
+ 		interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0];
+ 	else if (ifnum < dev->actconfig->desc.bNumInterfaces)
+diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
+index ff8953ae52d1..d7d7c52a3060 100644
+--- a/drivers/media/v4l2-core/videobuf2-core.c
++++ b/drivers/media/v4l2-core/videobuf2-core.c
+@@ -1643,7 +1643,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
+  * Will sleep if required for nonblocking == false.
+  */
+ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
+-				int nonblocking)
++			     void *pb, int nonblocking)
+ {
+ 	unsigned long flags;
+ 	int ret;
+@@ -1664,10 +1664,10 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
+ 	/*
+ 	 * Only remove the buffer from done_list if v4l2_buffer can handle all
+ 	 * the planes.
+-	 * Verifying planes is NOT necessary since it already has been checked
+-	 * before the buffer is queued/prepared. So it can never fail.
+ 	 */
+-	list_del(&(*vb)->done_entry);
++	ret = call_bufop(q, verify_planes_array, *vb, pb);
++	if (!ret)
++		list_del(&(*vb)->done_entry);
+ 	spin_unlock_irqrestore(&q->done_lock, flags);
+ 
+ 	return ret;
+@@ -1746,7 +1746,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
+ 	struct vb2_buffer *vb = NULL;
+ 	int ret;
+ 
+-	ret = __vb2_get_done_vb(q, &vb, nonblocking);
++	ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -2293,6 +2293,16 @@ unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
+ 		return POLLERR;
+ 
+ 	/*
++	 * If this quirk is set and QBUF hasn't been called yet then
++	 * return POLLERR as well. This only affects capture queues, output
++	 * queues will always initialize waiting_for_buffers to false.
++	 * This quirk is set by V4L2 for backwards compatibility reasons.
++	 */
++	if (q->quirk_poll_must_check_waiting_for_buffers &&
++	    q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
++		return POLLERR;
++
++	/*
+ 	 * For output streams you can call write() as long as there are fewer
+ 	 * buffers queued than there are buffers available.
+ 	 */
+diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
+index dbec5923fcf0..3c3b517f1d1c 100644
+--- a/drivers/media/v4l2-core/videobuf2-memops.c
++++ b/drivers/media/v4l2-core/videobuf2-memops.c
+@@ -49,7 +49,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
+ 	vec = frame_vector_create(nr);
+ 	if (!vec)
+ 		return ERR_PTR(-ENOMEM);
+-	ret = get_vaddr_frames(start, nr, write, 1, vec);
++	ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
+ 	if (ret < 0)
+ 		goto out_destroy;
+ 	/* We accept only complete set of PFNs */
+diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
+index 91f552124050..7f366f1b0377 100644
+--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
++++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
+@@ -74,6 +74,11 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer
+ 	return 0;
+ }
+ 
++static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
++{
++	return __verify_planes_array(vb, pb);
++}
++
+ /**
+  * __verify_length() - Verify that the bytesused value for each plane fits in
+  * the plane length and that the data offset doesn't exceed the bytesused value.
+@@ -437,6 +442,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
+ }
+ 
+ static const struct vb2_buf_ops v4l2_buf_ops = {
++	.verify_planes_array	= __verify_planes_array_core,
+ 	.fill_user_buffer	= __fill_v4l2_buffer,
+ 	.fill_vb2_buffer	= __fill_vb2_buffer,
+ 	.copy_timestamp		= __copy_timestamp,
+@@ -765,6 +771,12 @@ int vb2_queue_init(struct vb2_queue *q)
+ 	q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
+ 	q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK)
+ 			== V4L2_BUF_FLAG_TIMESTAMP_COPY;
++	/*
++	 * For compatibility with vb1: if QBUF hasn't been called yet, then
++	 * return POLLERR as well. This only affects capture queues, output
++	 * queues will always initialize waiting_for_buffers to false.
++	 */
++	q->quirk_poll_must_check_waiting_for_buffers = true;
+ 
+ 	return vb2_core_queue_init(q);
+ }
+@@ -818,14 +830,6 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
+ 			poll_wait(file, &fh->wait, wait);
+ 	}
+ 
+-	/*
+-	 * For compatibility with vb1: if QBUF hasn't been called yet, then
+-	 * return POLLERR as well. This only affects capture queues, output
+-	 * queues will always initialize waiting_for_buffers to false.
+-	 */
+-	if (q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
+-		return POLLERR;
+-
+ 	return res | vb2_core_poll(q, file, wait);
+ }
+ EXPORT_SYMBOL_GPL(vb2_poll);
+diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
+index 054fc10cb3b6..b22c03264270 100644
+--- a/drivers/misc/Kconfig
++++ b/drivers/misc/Kconfig
+@@ -440,7 +440,7 @@ config ARM_CHARLCD
+ 	  still useful.
+ 
+ config BMP085
+-	bool
++	tristate
+ 	depends on SYSFS
+ 
+ config BMP085_I2C
+diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
+index 15e88078ba1e..f1a0b99f5a9a 100644
+--- a/drivers/misc/ad525x_dpot.c
++++ b/drivers/misc/ad525x_dpot.c
+@@ -216,7 +216,7 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
+ 			 */
+ 			value = swab16(value);
+ 
+-			if (dpot->uid == DPOT_UID(AD5271_ID))
++			if (dpot->uid == DPOT_UID(AD5274_ID))
+ 				value = value >> 2;
+ 		return value;
+ 	default:
+diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
+index 09a406058c46..efbb6945eb18 100644
+--- a/drivers/misc/cxl/irq.c
++++ b/drivers/misc/cxl/irq.c
+@@ -288,7 +288,6 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
+ void cxl_unmap_irq(unsigned int virq, void *cookie)
+ {
+ 	free_irq(virq, cookie);
+-	irq_dispose_mapping(virq);
+ }
+ 
+ static int cxl_register_one_irq(struct cxl *adapter,
+diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
+index 8310b4dbff06..6a451bd65bf3 100644
+--- a/drivers/misc/mic/scif/scif_rma.c
++++ b/drivers/misc/mic/scif/scif_rma.c
+@@ -1511,7 +1511,7 @@ off_t scif_register_pinned_pages(scif_epd_t epd,
+ 	if ((map_flags & SCIF_MAP_FIXED) &&
+ 	    ((ALIGN(offset, PAGE_SIZE) != offset) ||
+ 	    (offset < 0) ||
+-	    (offset + (off_t)len < offset)))
++	    (len > LONG_MAX - offset)))
+ 		return -EINVAL;
+ 
+ 	might_sleep();
+@@ -1614,7 +1614,7 @@ off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset,
+ 	if ((map_flags & SCIF_MAP_FIXED) &&
+ 	    ((ALIGN(offset, PAGE_SIZE) != offset) ||
+ 	    (offset < 0) ||
+-	    (offset + (off_t)len < offset)))
++	    (len > LONG_MAX - offset)))
+ 		return -EINVAL;
+ 
+ 	/* Unsupported protection requested */
+@@ -1732,7 +1732,8 @@ scif_unregister(scif_epd_t epd, off_t offset, size_t len)
+ 
+ 	/* Offset is not page aligned or offset+len wraps around */
+ 	if ((ALIGN(offset, PAGE_SIZE) != offset) ||
+-	    (offset + (off_t)len < offset))
++	    (offset < 0) ||
++	    (len > LONG_MAX - offset))
+ 		return -EINVAL;
+ 
+ 	err = scif_verify_epd(ep);
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index 5fbffdb6b854..c6f36f3ca5d2 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -86,7 +86,6 @@ static int max_devices;
+ 
+ /* TODO: Replace these with struct ida */
+ static DECLARE_BITMAP(dev_use, MAX_DEVICES);
+-static DECLARE_BITMAP(name_use, MAX_DEVICES);
+ 
+ /*
+  * There is one mmc_blk_data per slot.
+@@ -105,7 +104,6 @@ struct mmc_blk_data {
+ 	unsigned int	usage;
+ 	unsigned int	read_only;
+ 	unsigned int	part_type;
+-	unsigned int	name_idx;
+ 	unsigned int	reset_done;
+ #define MMC_BLK_READ		BIT(0)
+ #define MMC_BLK_WRITE		BIT(1)
+@@ -2202,19 +2200,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
+ 		goto out;
+ 	}
+ 
+-	/*
+-	 * !subname implies we are creating main mmc_blk_data that will be
+-	 * associated with mmc_card with dev_set_drvdata. Due to device
+-	 * partitions, devidx will not coincide with a per-physical card
+-	 * index anymore so we keep track of a name index.
+-	 */
+-	if (!subname) {
+-		md->name_idx = find_first_zero_bit(name_use, max_devices);
+-		__set_bit(md->name_idx, name_use);
+-	} else
+-		md->name_idx = ((struct mmc_blk_data *)
+-				dev_to_disk(parent)->private_data)->name_idx;
+-
+ 	md->area_type = area_type;
+ 
+ 	/*
+@@ -2264,7 +2249,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
+ 	 */
+ 
+ 	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
+-		 "mmcblk%u%s", md->name_idx, subname ? subname : "");
++		 "mmcblk%u%s", card->host->index, subname ? subname : "");
+ 
+ 	if (mmc_card_mmc(card))
+ 		blk_queue_logical_block_size(md->queue.queue,
+@@ -2418,7 +2403,6 @@ static void mmc_blk_remove_parts(struct mmc_card *card,
+ 	struct list_head *pos, *q;
+ 	struct mmc_blk_data *part_md;
+ 
+-	__clear_bit(md->name_idx, name_use);
+ 	list_for_each_safe(pos, q, &md->part) {
+ 		part_md = list_entry(pos, struct mmc_blk_data, part);
+ 		list_del(pos);
+diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
+index 1526b8a10b09..3b944fc70eec 100644
+--- a/drivers/mmc/host/Kconfig
++++ b/drivers/mmc/host/Kconfig
+@@ -97,6 +97,7 @@ config MMC_RICOH_MMC
+ config MMC_SDHCI_ACPI
+ 	tristate "SDHCI support for ACPI enumerated SDHCI controllers"
+ 	depends on MMC_SDHCI && ACPI
++	select IOSF_MBI if X86
+ 	help
+ 	  This selects support for ACPI enumerated SDHCI controllers,
+ 	  identified by ACPI Compatibility ID PNP0D40 or specific
+diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
+index a5cda926d38e..975139f97498 100644
+--- a/drivers/mmc/host/sdhci-acpi.c
++++ b/drivers/mmc/host/sdhci-acpi.c
+@@ -41,6 +41,11 @@
+ #include <linux/mmc/pm.h>
+ #include <linux/mmc/slot-gpio.h>
+ 
++#ifdef CONFIG_X86
++#include <asm/cpu_device_id.h>
++#include <asm/iosf_mbi.h>
++#endif
++
+ #include "sdhci.h"
+ 
+ enum {
+@@ -146,6 +151,75 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
+ 	.ops = &sdhci_acpi_ops_int,
+ };
+ 
++#ifdef CONFIG_X86
++
++static bool sdhci_acpi_byt(void)
++{
++	static const struct x86_cpu_id byt[] = {
++		{ X86_VENDOR_INTEL, 6, 0x37 },
++		{}
++	};
++
++	return x86_match_cpu(byt);
++}
++
++#define BYT_IOSF_SCCEP			0x63
++#define BYT_IOSF_OCP_NETCTRL0		0x1078
++#define BYT_IOSF_OCP_TIMEOUT_BASE	GENMASK(10, 8)
++
++static void sdhci_acpi_byt_setting(struct device *dev)
++{
++	u32 val = 0;
++
++	if (!sdhci_acpi_byt())
++		return;
++
++	if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
++			  &val)) {
++		dev_err(dev, "%s read error\n", __func__);
++		return;
++	}
++
++	if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
++		return;
++
++	val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;
++
++	if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0,
++			   val)) {
++		dev_err(dev, "%s write error\n", __func__);
++		return;
++	}
++
++	dev_dbg(dev, "%s completed\n", __func__);
++}
++
++static bool sdhci_acpi_byt_defer(struct device *dev)
++{
++	if (!sdhci_acpi_byt())
++		return false;
++
++	if (!iosf_mbi_available())
++		return true;
++
++	sdhci_acpi_byt_setting(dev);
++
++	return false;
++}
++
++#else
++
++static inline void sdhci_acpi_byt_setting(struct device *dev)
++{
++}
++
++static inline bool sdhci_acpi_byt_defer(struct device *dev)
++{
++	return false;
++}
++
++#endif
++
+ static int bxt_get_cd(struct mmc_host *mmc)
+ {
+ 	int gpio_cd = mmc_gpio_get_cd(mmc);
+@@ -337,6 +411,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
+ 	if (acpi_bus_get_status(device) || !device->status.present)
+ 		return -ENODEV;
+ 
++	if (sdhci_acpi_byt_defer(dev))
++		return -EPROBE_DEFER;
++
+ 	hid = acpi_device_hid(device);
+ 	uid = device->pnp.unique_id;
+ 
+@@ -460,6 +537,8 @@ static int sdhci_acpi_resume(struct device *dev)
+ {
+ 	struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+ 
++	sdhci_acpi_byt_setting(&c->pdev->dev);
++
+ 	return sdhci_resume_host(c->host);
+ }
+ 
+@@ -483,6 +562,8 @@ static int sdhci_acpi_runtime_resume(struct device *dev)
+ {
+ 	struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+ 
++	sdhci_acpi_byt_setting(&c->pdev->dev);
++
+ 	return sdhci_runtime_resume_host(c->host);
+ }
+ 
+diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
+index 844fc07d22cd..f7009c1cb90c 100644
+--- a/drivers/mtd/nand/brcmnand/brcmnand.c
++++ b/drivers/mtd/nand/brcmnand/brcmnand.c
+@@ -311,6 +311,36 @@ static const u16 brcmnand_regs_v60[] = {
+ 	[BRCMNAND_FC_BASE]		= 0x400,
+ };
+ 
++/* BRCMNAND v7.1 */
++static const u16 brcmnand_regs_v71[] = {
++	[BRCMNAND_CMD_START]		=  0x04,
++	[BRCMNAND_CMD_EXT_ADDRESS]	=  0x08,
++	[BRCMNAND_CMD_ADDRESS]		=  0x0c,
++	[BRCMNAND_INTFC_STATUS]		=  0x14,
++	[BRCMNAND_CS_SELECT]		=  0x18,
++	[BRCMNAND_CS_XOR]		=  0x1c,
++	[BRCMNAND_LL_OP]		=  0x20,
++	[BRCMNAND_CS0_BASE]		=  0x50,
++	[BRCMNAND_CS1_BASE]		=     0,
++	[BRCMNAND_CORR_THRESHOLD]	=  0xdc,
++	[BRCMNAND_CORR_THRESHOLD_EXT]	=  0xe0,
++	[BRCMNAND_UNCORR_COUNT]		=  0xfc,
++	[BRCMNAND_CORR_COUNT]		= 0x100,
++	[BRCMNAND_CORR_EXT_ADDR]	= 0x10c,
++	[BRCMNAND_CORR_ADDR]		= 0x110,
++	[BRCMNAND_UNCORR_EXT_ADDR]	= 0x114,
++	[BRCMNAND_UNCORR_ADDR]		= 0x118,
++	[BRCMNAND_SEMAPHORE]		= 0x150,
++	[BRCMNAND_ID]			= 0x194,
++	[BRCMNAND_ID_EXT]		= 0x198,
++	[BRCMNAND_LL_RDATA]		= 0x19c,
++	[BRCMNAND_OOB_READ_BASE]	= 0x200,
++	[BRCMNAND_OOB_READ_10_BASE]	=     0,
++	[BRCMNAND_OOB_WRITE_BASE]	= 0x280,
++	[BRCMNAND_OOB_WRITE_10_BASE]	=     0,
++	[BRCMNAND_FC_BASE]		= 0x400,
++};
++
+ enum brcmnand_cs_reg {
+ 	BRCMNAND_CS_CFG_EXT = 0,
+ 	BRCMNAND_CS_CFG,
+@@ -406,7 +436,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
+ 	}
+ 
+ 	/* Register offsets */
+-	if (ctrl->nand_version >= 0x0600)
++	if (ctrl->nand_version >= 0x0701)
++		ctrl->reg_offsets = brcmnand_regs_v71;
++	else if (ctrl->nand_version >= 0x0600)
+ 		ctrl->reg_offsets = brcmnand_regs_v60;
+ 	else if (ctrl->nand_version >= 0x0500)
+ 		ctrl->reg_offsets = brcmnand_regs_v50;
+diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
+index f2c8ff398d6c..171d146645ba 100644
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -4021,7 +4021,6 @@ static int nand_dt_init(struct nand_chip *chip)
+  * This is the first phase of the normal nand_scan() function. It reads the
+  * flash ID and sets up MTD fields accordingly.
+  *
+- * The mtd->owner field must be set to the module of the caller.
+  */
+ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
+ 		    struct nand_flash_dev *table)
+@@ -4443,19 +4442,12 @@ EXPORT_SYMBOL(nand_scan_tail);
+  *
+  * This fills out all the uninitialized function pointers with the defaults.
+  * The flash ID is read and the mtd/chip structures are filled with the
+- * appropriate values. The mtd->owner field must be set to the module of the
+- * caller.
++ * appropriate values.
+  */
+ int nand_scan(struct mtd_info *mtd, int maxchips)
+ {
+ 	int ret;
+ 
+-	/* Many callers got this wrong, so check for it for a while... */
+-	if (!mtd->owner && caller_is_module()) {
+-		pr_crit("%s called with NULL mtd->owner!\n", __func__);
+-		BUG();
+-	}
+-
+ 	ret = nand_scan_ident(mtd, maxchips, NULL);
+ 	if (!ret)
+ 		ret = nand_scan_tail(mtd);
+diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
+index 86fc245dc71a..fd78644469fa 100644
+--- a/drivers/mtd/nand/pxa3xx_nand.c
++++ b/drivers/mtd/nand/pxa3xx_nand.c
+@@ -1738,7 +1738,7 @@ static int alloc_nand_resource(struct platform_device *pdev)
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	if (use_dma) {
++	if (!np && use_dma) {
+ 		r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ 		if (r == NULL) {
+ 			dev_err(&pdev->dev,
+diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
+index ed0c19c558b5..3028c06547c1 100644
+--- a/drivers/mtd/spi-nor/spi-nor.c
++++ b/drivers/mtd/spi-nor/spi-nor.c
+@@ -1100,45 +1100,6 @@ static int spansion_quad_enable(struct spi_nor *nor)
+ 	return 0;
+ }
+ 
+-static int micron_quad_enable(struct spi_nor *nor)
+-{
+-	int ret;
+-	u8 val;
+-
+-	ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
+-	if (ret < 0) {
+-		dev_err(nor->dev, "error %d reading EVCR\n", ret);
+-		return ret;
+-	}
+-
+-	write_enable(nor);
+-
+-	/* set EVCR, enable quad I/O */
+-	nor->cmd_buf[0] = val & ~EVCR_QUAD_EN_MICRON;
+-	ret = nor->write_reg(nor, SPINOR_OP_WD_EVCR, nor->cmd_buf, 1);
+-	if (ret < 0) {
+-		dev_err(nor->dev, "error while writing EVCR register\n");
+-		return ret;
+-	}
+-
+-	ret = spi_nor_wait_till_ready(nor);
+-	if (ret)
+-		return ret;
+-
+-	/* read EVCR and check it */
+-	ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
+-	if (ret < 0) {
+-		dev_err(nor->dev, "error %d reading EVCR\n", ret);
+-		return ret;
+-	}
+-	if (val & EVCR_QUAD_EN_MICRON) {
+-		dev_err(nor->dev, "Micron EVCR Quad bit not clear\n");
+-		return -EINVAL;
+-	}
+-
+-	return 0;
+-}
+-
+ static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
+ {
+ 	int status;
+@@ -1152,12 +1113,7 @@ static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
+ 		}
+ 		return status;
+ 	case SNOR_MFR_MICRON:
+-		status = micron_quad_enable(nor);
+-		if (status) {
+-			dev_err(nor->dev, "Micron quad-read not enabled\n");
+-			return -EINVAL;
+-		}
+-		return status;
++		return 0;
+ 	default:
+ 		status = spansion_quad_enable(nor);
+ 		if (status) {
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index d70a1716f3e0..1486f33a743e 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -1143,6 +1143,8 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
+ 	/* the fw is stopped, the aux sta is dead: clean up driver state */
+ 	iwl_mvm_del_aux_sta(mvm);
+ 
++	iwl_free_fw_paging(mvm);
++
+ 	/*
+ 	 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
+ 	 * won't be called in this case).
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+index e80be9a59520..89ea70deeb84 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+@@ -684,8 +684,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
+ 	for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
+ 		kfree(mvm->nvm_sections[i].data);
+ 
+-	iwl_free_fw_paging(mvm);
+-
+ 	iwl_mvm_tof_clean(mvm);
+ 
+ 	ieee80211_free_hw(mvm->hw);
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+index 5a854c609477..1198caac35c8 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -731,8 +731,8 @@ static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
+ 	 */
+ 	val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
+ 	if (val & (BIT(1) | BIT(17))) {
+-		IWL_INFO(trans,
+-			 "can't access the RSA semaphore it is write protected\n");
++		IWL_DEBUG_INFO(trans,
++			       "can't access the RSA semaphore it is write protected\n");
+ 		return 0;
+ 	}
+ 
+diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
+index ff3ee9dfbbd5..23bae87d4d3d 100644
+--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
++++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
+@@ -607,11 +607,13 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
+ 
+ 	case EVENT_PS_AWAKE:
+ 		mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n");
+-		if (!adapter->pps_uapsd_mode && priv->port_open &&
++		if (!adapter->pps_uapsd_mode &&
++		    (priv->port_open ||
++		     (priv->bss_mode == NL80211_IFTYPE_ADHOC)) &&
+ 		    priv->media_connected && adapter->sleep_period.period) {
+-				adapter->pps_uapsd_mode = true;
+-				mwifiex_dbg(adapter, EVENT,
+-					    "event: PPS/UAPSD mode activated\n");
++			adapter->pps_uapsd_mode = true;
++			mwifiex_dbg(adapter, EVENT,
++				    "event: PPS/UAPSD mode activated\n");
+ 		}
+ 		adapter->tx_lock_flag = false;
+ 		if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) {
+diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
+index acccd6734e3b..499e5a741c62 100644
+--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
++++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
+@@ -475,7 +475,8 @@ mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
+ 		priv = adapter->priv[i];
+ 		if (!priv)
+ 			continue;
+-		if (!priv->port_open)
++		if (!priv->port_open &&
++		    (priv->bss_mode != NL80211_IFTYPE_ADHOC))
+ 			continue;
+ 		if (adapter->if_ops.is_port_ready &&
+ 		    !adapter->if_ops.is_port_ready(priv))
+@@ -1099,7 +1100,8 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
+ 
+ 			priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv;
+ 
+-			if (!priv_tmp->port_open ||
++			if (((priv_tmp->bss_mode != NL80211_IFTYPE_ADHOC) &&
++			     !priv_tmp->port_open) ||
+ 			    (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0))
+ 				continue;
+ 
+diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
+index 588803ad6847..6ccba0d862df 100644
+--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
++++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
+@@ -357,20 +357,6 @@ static int amd_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
+ 	return 0;
+ }
+ 
+-static int amd_ntb_peer_db_addr(struct ntb_dev *ntb,
+-				phys_addr_t *db_addr,
+-				resource_size_t *db_size)
+-{
+-	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
+-
+-	if (db_addr)
+-		*db_addr = (phys_addr_t)(ndev->peer_mmio + AMD_DBREQ_OFFSET);
+-	if (db_size)
+-		*db_size = sizeof(u32);
+-
+-	return 0;
+-}
+-
+ static int amd_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
+ {
+ 	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
+@@ -415,20 +401,6 @@ static int amd_ntb_spad_write(struct ntb_dev *ntb,
+ 	return 0;
+ }
+ 
+-static int amd_ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
+-				  phys_addr_t *spad_addr)
+-{
+-	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
+-
+-	if (idx < 0 || idx >= ndev->spad_count)
+-		return -EINVAL;
+-
+-	if (spad_addr)
+-		*spad_addr = (phys_addr_t)(ndev->self_mmio + AMD_SPAD_OFFSET +
+-					   ndev->peer_spad + (idx << 2));
+-	return 0;
+-}
+-
+ static u32 amd_ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
+ {
+ 	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
+@@ -472,12 +444,10 @@ static const struct ntb_dev_ops amd_ntb_ops = {
+ 	.db_clear		= amd_ntb_db_clear,
+ 	.db_set_mask		= amd_ntb_db_set_mask,
+ 	.db_clear_mask		= amd_ntb_db_clear_mask,
+-	.peer_db_addr		= amd_ntb_peer_db_addr,
+ 	.peer_db_set		= amd_ntb_peer_db_set,
+ 	.spad_count		= amd_ntb_spad_count,
+ 	.spad_read		= amd_ntb_spad_read,
+ 	.spad_write		= amd_ntb_spad_write,
+-	.peer_spad_addr		= amd_ntb_peer_spad_addr,
+ 	.peer_spad_read		= amd_ntb_peer_spad_read,
+ 	.peer_spad_write	= amd_ntb_peer_spad_write,
+ };
+diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
+index c8a37ba4b4f9..6bdc1e7b7503 100644
+--- a/drivers/ntb/test/ntb_perf.c
++++ b/drivers/ntb/test/ntb_perf.c
+@@ -178,7 +178,7 @@ static void perf_copy_callback(void *data)
+ 	atomic_dec(&pctx->dma_sync);
+ }
+ 
+-static ssize_t perf_copy(struct pthr_ctx *pctx, char *dst,
++static ssize_t perf_copy(struct pthr_ctx *pctx, char __iomem *dst,
+ 			 char *src, size_t size)
+ {
+ 	struct perf_ctx *perf = pctx->perf;
+@@ -189,7 +189,8 @@ static ssize_t perf_copy(struct pthr_ctx *pctx, char *dst,
+ 	dma_cookie_t cookie;
+ 	size_t src_off, dst_off;
+ 	struct perf_mw *mw = &perf->mw;
+-	u64 vbase, dst_vaddr;
++	void __iomem *vbase;
++	void __iomem *dst_vaddr;
+ 	dma_addr_t dst_phys;
+ 	int retries = 0;
+ 
+@@ -204,14 +205,14 @@ static ssize_t perf_copy(struct pthr_ctx *pctx, char *dst,
+ 	}
+ 
+ 	device = chan->device;
+-	src_off = (size_t)src & ~PAGE_MASK;
+-	dst_off = (size_t)dst & ~PAGE_MASK;
++	src_off = (uintptr_t)src & ~PAGE_MASK;
++	dst_off = (uintptr_t __force)dst & ~PAGE_MASK;
+ 
+ 	if (!is_dma_copy_aligned(device, src_off, dst_off, size))
+ 		return -ENODEV;
+ 
+-	vbase = (u64)(u64 *)mw->vbase;
+-	dst_vaddr = (u64)(u64 *)dst;
++	vbase = mw->vbase;
++	dst_vaddr = dst;
+ 	dst_phys = mw->phys_addr + (dst_vaddr - vbase);
+ 
+ 	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
+@@ -261,13 +262,13 @@ err_get_unmap:
+ 	return 0;
+ }
+ 
+-static int perf_move_data(struct pthr_ctx *pctx, char *dst, char *src,
++static int perf_move_data(struct pthr_ctx *pctx, char __iomem *dst, char *src,
+ 			  u64 buf_size, u64 win_size, u64 total)
+ {
+ 	int chunks, total_chunks, i;
+ 	int copied_chunks = 0;
+ 	u64 copied = 0, result;
+-	char *tmp = dst;
++	char __iomem *tmp = dst;
+ 	u64 perf, diff_us;
+ 	ktime_t kstart, kstop, kdiff;
+ 
+@@ -324,7 +325,7 @@ static int ntb_perf_thread(void *data)
+ 	struct perf_ctx *perf = pctx->perf;
+ 	struct pci_dev *pdev = perf->ntb->pdev;
+ 	struct perf_mw *mw = &perf->mw;
+-	char *dst;
++	char __iomem *dst;
+ 	u64 win_size, buf_size, total;
+ 	void *src;
+ 	int rc, node, i;
+@@ -364,7 +365,7 @@ static int ntb_perf_thread(void *data)
+ 	if (buf_size > MAX_TEST_SIZE)
+ 		buf_size = MAX_TEST_SIZE;
+ 
+-	dst = (char *)mw->vbase;
++	dst = (char __iomem *)mw->vbase;
+ 
+ 	atomic_inc(&perf->tsync);
+ 	while (atomic_read(&perf->tsync) != perf->perf_threads)
+diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
+index fe600964fa50..88ccfeaa49c7 100644
+--- a/drivers/pci/host/pci-imx6.c
++++ b/drivers/pci/host/pci-imx6.c
+@@ -32,7 +32,7 @@
+ #define to_imx6_pcie(x)	container_of(x, struct imx6_pcie, pp)
+ 
+ struct imx6_pcie {
+-	struct gpio_desc	*reset_gpio;
++	int			reset_gpio;
+ 	struct clk		*pcie_bus;
+ 	struct clk		*pcie_phy;
+ 	struct clk		*pcie;
+@@ -287,10 +287,10 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
+ 	usleep_range(200, 500);
+ 
+ 	/* Some boards don't have PCIe reset GPIO. */
+-	if (imx6_pcie->reset_gpio) {
+-		gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 0);
++	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
++		gpio_set_value_cansleep(imx6_pcie->reset_gpio, 0);
+ 		msleep(100);
+-		gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 1);
++		gpio_set_value_cansleep(imx6_pcie->reset_gpio, 1);
+ 	}
+ 	return 0;
+ 
+@@ -561,6 +561,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
+ {
+ 	struct imx6_pcie *imx6_pcie;
+ 	struct pcie_port *pp;
++	struct device_node *np = pdev->dev.of_node;
+ 	struct resource *dbi_base;
+ 	int ret;
+ 
+@@ -581,8 +582,15 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
+ 		return PTR_ERR(pp->dbi_base);
+ 
+ 	/* Fetch GPIOs */
+-	imx6_pcie->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
+-							GPIOD_OUT_LOW);
++	imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
++	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
++		ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
++					    GPIOF_OUT_INIT_LOW, "PCIe reset");
++		if (ret) {
++			dev_err(&pdev->dev, "unable to get reset gpio\n");
++			return ret;
++		}
++	}
+ 
+ 	/* Fetch clocks */
+ 	imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
+diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+index e96e86d2e745..3878d23ca7a8 100644
+--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
++++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+@@ -949,7 +949,8 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
+ 	struct mtk_pinctrl *pctl = dev_get_drvdata(chip->parent);
+ 	int eint_num, virq, eint_offset;
+ 	unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask, dbnc;
+-	static const unsigned int dbnc_arr[] = {0 , 1, 16, 32, 64, 128, 256};
++	static const unsigned int debounce_time[] = {500, 1000, 16000, 32000, 64000,
++						128000, 256000};
+ 	const struct mtk_desc_pin *pin;
+ 	struct irq_data *d;
+ 
+@@ -967,9 +968,9 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
+ 	if (!mtk_eint_can_en_debounce(pctl, eint_num))
+ 		return -ENOSYS;
+ 
+-	dbnc = ARRAY_SIZE(dbnc_arr);
+-	for (i = 0; i < ARRAY_SIZE(dbnc_arr); i++) {
+-		if (debounce <= dbnc_arr[i]) {
++	dbnc = ARRAY_SIZE(debounce_time);
++	for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
++		if (debounce <= debounce_time[i]) {
+ 			dbnc = i;
+ 			break;
+ 		}
+diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
+index d24e5f1d1525..bd2e657163b8 100644
+--- a/drivers/pinctrl/pinctrl-single.c
++++ b/drivers/pinctrl/pinctrl-single.c
+@@ -1273,9 +1273,9 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
+ 
+ 		/* Parse pins in each row from LSB */
+ 		while (mask) {
+-			bit_pos = ffs(mask);
++			bit_pos = __ffs(mask);
+ 			pin_num_from_lsb = bit_pos / pcs->bits_per_pin;
+-			mask_pos = ((pcs->fmask) << (bit_pos - 1));
++			mask_pos = ((pcs->fmask) << bit_pos);
+ 			val_pos = val & mask_pos;
+ 			submask = mask & mask_pos;
+ 
+@@ -1844,7 +1844,7 @@ static int pcs_probe(struct platform_device *pdev)
+ 	ret = of_property_read_u32(np, "pinctrl-single,function-mask",
+ 				   &pcs->fmask);
+ 	if (!ret) {
+-		pcs->fshift = ffs(pcs->fmask) - 1;
++		pcs->fshift = __ffs(pcs->fmask);
+ 		pcs->fmax = pcs->fmask >> pcs->fshift;
+ 	} else {
+ 		/* If mask property doesn't exist, function mux is invalid. */
+diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
+index 73833079bac8..d6baea6a7544 100644
+--- a/drivers/platform/x86/toshiba_acpi.c
++++ b/drivers/platform/x86/toshiba_acpi.c
+@@ -133,7 +133,7 @@ MODULE_LICENSE("GPL");
+ /* Field definitions */
+ #define HCI_ACCEL_MASK			0x7fff
+ #define HCI_HOTKEY_DISABLE		0x0b
+-#define HCI_HOTKEY_ENABLE		0x01
++#define HCI_HOTKEY_ENABLE		0x09
+ #define HCI_HOTKEY_SPECIAL_FUNCTIONS	0x10
+ #define HCI_LCD_BRIGHTNESS_BITS		3
+ #define HCI_LCD_BRIGHTNESS_SHIFT	(16-HCI_LCD_BRIGHTNESS_BITS)
+diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c
+index 423ce087cd9c..5d5adee16886 100644
+--- a/drivers/pwm/pwm-brcmstb.c
++++ b/drivers/pwm/pwm-brcmstb.c
+@@ -274,8 +274,8 @@ static int brcmstb_pwm_probe(struct platform_device *pdev)
+ 
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	p->base = devm_ioremap_resource(&pdev->dev, res);
+-	if (!p->base) {
+-		ret = -ENOMEM;
++	if (IS_ERR(p->base)) {
++		ret = PTR_ERR(p->base);
+ 		goto out_clk;
+ 	}
+ 
+diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
+index 58f5d3b8e981..27343e1c43ef 100644
+--- a/drivers/regulator/s5m8767.c
++++ b/drivers/regulator/s5m8767.c
+@@ -202,9 +202,10 @@ static int s5m8767_get_register(struct s5m8767_info *s5m8767, int reg_id,
+ 		}
+ 	}
+ 
+-	if (i < s5m8767->num_regulators)
+-		*enable_ctrl =
+-		s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
++	if (i >= s5m8767->num_regulators)
++		return -EINVAL;
++
++	*enable_ctrl = s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
+ 
+ 	return 0;
+ }
+@@ -937,8 +938,12 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
+ 			else
+ 				regulators[id].vsel_mask = 0xff;
+ 
+-			s5m8767_get_register(s5m8767, id, &enable_reg,
++			ret = s5m8767_get_register(s5m8767, id, &enable_reg,
+ 					     &enable_val);
++			if (ret) {
++				dev_err(s5m8767->dev, "error reading registers\n");
++				return ret;
++			}
+ 			regulators[id].enable_reg = enable_reg;
+ 			regulators[id].enable_mask = S5M8767_ENCTRL_MASK;
+ 			regulators[id].enable_val = enable_val;
+diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
+index 535050fc5e9f..08e0ff8c786a 100644
+--- a/drivers/rtc/rtc-ds1685.c
++++ b/drivers/rtc/rtc-ds1685.c
+@@ -187,9 +187,9 @@ ds1685_rtc_end_data_access(struct ds1685_priv *rtc)
+  * Only use this where you are certain another lock will not be held.
+  */
+ static inline void
+-ds1685_rtc_begin_ctrl_access(struct ds1685_priv *rtc, unsigned long flags)
++ds1685_rtc_begin_ctrl_access(struct ds1685_priv *rtc, unsigned long *flags)
+ {
+-	spin_lock_irqsave(&rtc->lock, flags);
++	spin_lock_irqsave(&rtc->lock, *flags);
+ 	ds1685_rtc_switch_to_bank1(rtc);
+ }
+ 
+@@ -1300,7 +1300,7 @@ ds1685_rtc_sysfs_ctrl_regs_store(struct device *dev,
+ {
+ 	struct ds1685_priv *rtc = dev_get_drvdata(dev);
+ 	u8 reg = 0, bit = 0, tmp;
+-	unsigned long flags = 0;
++	unsigned long flags;
+ 	long int val = 0;
+ 	const struct ds1685_rtc_ctrl_regs *reg_info =
+ 		ds1685_rtc_sysfs_ctrl_regs_lookup(attr->attr.name);
+@@ -1321,7 +1321,7 @@ ds1685_rtc_sysfs_ctrl_regs_store(struct device *dev,
+ 	bit = reg_info->bit;
+ 
+ 	/* Safe to spinlock during a write. */
+-	ds1685_rtc_begin_ctrl_access(rtc, flags);
++	ds1685_rtc_begin_ctrl_access(rtc, &flags);
+ 	tmp = rtc->read(rtc, reg);
+ 	rtc->write(rtc, reg, (val ? (tmp | bit) : (tmp & ~(bit))));
+ 	ds1685_rtc_end_ctrl_access(rtc, flags);
+diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
+index 097325d96db5..b1b4746a0eab 100644
+--- a/drivers/rtc/rtc-hym8563.c
++++ b/drivers/rtc/rtc-hym8563.c
+@@ -144,7 +144,7 @@ static int hym8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
+ 	 * it does not seem to carry it over a subsequent write/read.
+ 	 * So we'll limit ourself to 100 years, starting at 2000 for now.
+ 	 */
+-	buf[6] = tm->tm_year - 100;
++	buf[6] = bin2bcd(tm->tm_year - 100);
+ 
+ 	/*
+ 	 * CTL1 only contains TEST-mode bits apart from stop,
+diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
+index 7184a0eda793..725dccae24e7 100644
+--- a/drivers/rtc/rtc-max77686.c
++++ b/drivers/rtc/rtc-max77686.c
+@@ -465,7 +465,7 @@ static int max77686_rtc_probe(struct platform_device *pdev)
+ 
+ 	info->virq = regmap_irq_get_virq(max77686->rtc_irq_data,
+ 					 MAX77686_RTCIRQ_RTCA1);
+-	if (!info->virq) {
++	if (info->virq <= 0) {
+ 		ret = -ENXIO;
+ 		goto err_rtc;
+ 	}
+diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
+index bd911bafb809..17341feadad1 100644
+--- a/drivers/rtc/rtc-rx8025.c
++++ b/drivers/rtc/rtc-rx8025.c
+@@ -65,7 +65,6 @@
+ 
+ static const struct i2c_device_id rx8025_id[] = {
+ 	{ "rx8025", 0 },
+-	{ "rv8803", 1 },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(i2c, rx8025_id);
+diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
+index f64c282275b3..e1b86bb01062 100644
+--- a/drivers/rtc/rtc-vr41xx.c
++++ b/drivers/rtc/rtc-vr41xx.c
+@@ -272,12 +272,13 @@ static irqreturn_t rtclong1_interrupt(int irq, void *dev_id)
+ }
+ 
+ static const struct rtc_class_ops vr41xx_rtc_ops = {
+-	.release	= vr41xx_rtc_release,
+-	.ioctl		= vr41xx_rtc_ioctl,
+-	.read_time	= vr41xx_rtc_read_time,
+-	.set_time	= vr41xx_rtc_set_time,
+-	.read_alarm	= vr41xx_rtc_read_alarm,
+-	.set_alarm	= vr41xx_rtc_set_alarm,
++	.release		= vr41xx_rtc_release,
++	.ioctl			= vr41xx_rtc_ioctl,
++	.read_time		= vr41xx_rtc_read_time,
++	.set_time		= vr41xx_rtc_set_time,
++	.read_alarm		= vr41xx_rtc_read_alarm,
++	.set_alarm		= vr41xx_rtc_set_alarm,
++	.alarm_irq_enable	= vr41xx_rtc_alarm_irq_enable,
+ };
+ 
+ static int rtc_probe(struct platform_device *pdev)
+diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig
+index e5647d59224f..0b331c9c0a8f 100644
+--- a/drivers/scsi/device_handler/Kconfig
++++ b/drivers/scsi/device_handler/Kconfig
+@@ -13,13 +13,13 @@ menuconfig SCSI_DH
+ 
+ config SCSI_DH_RDAC
+ 	tristate "LSI RDAC Device Handler"
+-	depends on SCSI_DH
++	depends on SCSI_DH && SCSI
+ 	help
+ 	If you have a LSI RDAC select y. Otherwise, say N.
+ 
+ config SCSI_DH_HP_SW
+ 	tristate "HP/COMPAQ MSA Device Handler"
+-	depends on SCSI_DH
++	depends on SCSI_DH && SCSI
+ 	help
+ 	If you have a HP/COMPAQ MSA device that requires START_STOP to
+ 	be sent to start it and cannot upgrade the firmware then select y.
+@@ -27,13 +27,13 @@ config SCSI_DH_HP_SW
+ 
+ config SCSI_DH_EMC
+ 	tristate "EMC CLARiiON Device Handler"
+-	depends on SCSI_DH
++	depends on SCSI_DH && SCSI
+ 	help
+ 	If you have a EMC CLARiiON select y. Otherwise, say N.
+ 
+ config SCSI_DH_ALUA
+ 	tristate "SPC-3 ALUA Device Handler"
+-	depends on SCSI_DH
++	depends on SCSI_DH && SCSI
+ 	help
+ 	  SCSI Device handler for generic SPC-3 Asymmetric Logical Unit
+ 	  Access (ALUA).
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 97a1c1c33b05..00ce3e269a43 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -6282,12 +6282,13 @@ out:
+ 	}
+ 
+ 	for (i = 0; i < ioc->sge_count; i++) {
+-		if (kbuff_arr[i])
++		if (kbuff_arr[i]) {
+ 			dma_free_coherent(&instance->pdev->dev,
+ 					  le32_to_cpu(kern_sge32[i].length),
+ 					  kbuff_arr[i],
+ 					  le32_to_cpu(kern_sge32[i].phys_addr));
+ 			kbuff_arr[i] = NULL;
++		}
+ 	}
+ 
+ 	megasas_return_cmd(instance, cmd);
+diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
+index 7cb1b2d710c1..475fb44c1883 100644
+--- a/drivers/spi/spi-rockchip.c
++++ b/drivers/spi/spi-rockchip.c
+@@ -265,7 +265,10 @@ static inline u32 rx_max(struct rockchip_spi *rs)
+ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
+ {
+ 	u32 ser;
+-	struct rockchip_spi *rs = spi_master_get_devdata(spi->master);
++	struct spi_master *master = spi->master;
++	struct rockchip_spi *rs = spi_master_get_devdata(master);
++
++	pm_runtime_get_sync(rs->dev);
+ 
+ 	ser = readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) & SER_MASK;
+ 
+@@ -290,6 +293,8 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
+ 		ser &= ~(1 << spi->chip_select);
+ 
+ 	writel_relaxed(ser, rs->regs + ROCKCHIP_SPI_SER);
++
++	pm_runtime_put_sync(rs->dev);
+ }
+ 
+ static int rockchip_spi_prepare_message(struct spi_master *master,
+diff --git a/drivers/staging/rdma/hfi1/TODO b/drivers/staging/rdma/hfi1/TODO
+index 05de0dad8762..4c6f1d7d2eaf 100644
+--- a/drivers/staging/rdma/hfi1/TODO
++++ b/drivers/staging/rdma/hfi1/TODO
+@@ -3,4 +3,4 @@ July, 2015
+ - Remove unneeded file entries in sysfs
+ - Remove software processing of IB protocol and place in library for use
+   by qib, ipath (if still present), hfi1, and eventually soft-roce
+-
++- Replace incorrect uAPI
+diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c
+index d57d549052c8..29ae777556d2 100644
+--- a/drivers/staging/rdma/hfi1/file_ops.c
++++ b/drivers/staging/rdma/hfi1/file_ops.c
+@@ -52,6 +52,8 @@
+ #include <linux/vmalloc.h>
+ #include <linux/io.h>
+ 
++#include <rdma/ib.h>
++
+ #include "hfi.h"
+ #include "pio.h"
+ #include "device.h"
+@@ -194,6 +196,10 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
+ 	int uctxt_required = 1;
+ 	int must_be_root = 0;
+ 
++	/* FIXME: This interface cannot continue out of staging */
++	if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
++		return -EACCES;
++
+ 	if (count < sizeof(cmd)) {
+ 		ret = -EINVAL;
+ 		goto bail;
+diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
+index b58e3fb9b311..433085a97626 100644
+--- a/drivers/thermal/rockchip_thermal.c
++++ b/drivers/thermal/rockchip_thermal.c
+@@ -693,15 +693,14 @@ static int rockchip_configure_from_dt(struct device *dev,
+ 			 thermal->chip->tshut_temp);
+ 		thermal->tshut_temp = thermal->chip->tshut_temp;
+ 	} else {
++		if (shut_temp > INT_MAX) {
++			dev_err(dev, "Invalid tshut temperature specified: %d\n",
++				shut_temp);
++			return -ERANGE;
++		}
+ 		thermal->tshut_temp = shut_temp;
+ 	}
+ 
+-	if (thermal->tshut_temp > INT_MAX) {
+-		dev_err(dev, "Invalid tshut temperature specified: %d\n",
+-			thermal->tshut_temp);
+-		return -ERANGE;
+-	}
+-
+ 	if (of_property_read_u32(np, "rockchip,hw-tshut-mode", &tshut_mode)) {
+ 		dev_warn(dev,
+ 			 "Missing tshut mode property, using default (%s)\n",
+diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
+index 9eb1cff28bd4..b8b580e5ae6e 100644
+--- a/drivers/usb/core/hcd-pci.c
++++ b/drivers/usb/core/hcd-pci.c
+@@ -74,6 +74,15 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd,
+ 		if (companion->bus != pdev->bus ||
+ 				PCI_SLOT(companion->devfn) != slot)
+ 			continue;
++
++		/*
++		 * Companion device should be either UHCI,OHCI or EHCI host
++		 * controller, otherwise skip.
++		 */
++		if (companion->class != CL_UHCI && companion->class != CL_OHCI &&
++				companion->class != CL_EHCI)
++			continue;
++
+ 		companion_hcd = pci_get_drvdata(companion);
+ 		if (!companion_hcd || !companion_hcd->self.root_hub)
+ 			continue;
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index cf43e9e18368..79d895c2dd71 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -646,6 +646,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
+ 						   work);
+ 	int ret = io_data->req->status ? io_data->req->status :
+ 					 io_data->req->actual;
++	bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
+ 
+ 	if (io_data->read && ret > 0) {
+ 		use_mm(io_data->mm);
+@@ -657,13 +658,11 @@ static void ffs_user_copy_worker(struct work_struct *work)
+ 
+ 	io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
+ 
+-	if (io_data->ffs->ffs_eventfd &&
+-	    !(io_data->kiocb->ki_flags & IOCB_EVENTFD))
++	if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
+ 		eventfd_signal(io_data->ffs->ffs_eventfd, 1);
+ 
+ 	usb_ep_free_request(io_data->ep, io_data->req);
+ 
+-	io_data->kiocb->private = NULL;
+ 	if (io_data->read)
+ 		kfree(io_data->to_free);
+ 	kfree(io_data->buf);
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 5cd080e0a685..743d9a20e248 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1873,6 +1873,12 @@ no_bw:
+ 	kfree(xhci->rh_bw);
+ 	kfree(xhci->ext_caps);
+ 
++	xhci->usb2_ports = NULL;
++	xhci->usb3_ports = NULL;
++	xhci->port_array = NULL;
++	xhci->rh_bw = NULL;
++	xhci->ext_caps = NULL;
++
+ 	xhci->page_size = 0;
+ 	xhci->page_shift = 0;
+ 	xhci->bus_state[0].bus_suspended = 0;
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index f0640b7a1c42..48672fac7ff3 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -48,6 +48,7 @@
+ #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI		0xa12f
+ #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI	0x9d2f
+ #define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI		0x0aa8
++#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI		0x1aa8
+ 
+ static const char hcd_name[] = "xhci_hcd";
+ 
+@@ -155,7 +156,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 		(pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
+ 		 pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
+ 		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+-		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) {
++		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
++		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI)) {
+ 		xhci->quirks |= XHCI_PME_STUCK_QUIRK;
+ 	}
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+@@ -302,6 +304,7 @@ static void xhci_pci_remove(struct pci_dev *dev)
+ 	struct xhci_hcd *xhci;
+ 
+ 	xhci = hcd_to_xhci(pci_get_drvdata(dev));
++	xhci->xhc_state |= XHCI_STATE_REMOVING;
+ 	if (xhci->shared_hcd) {
+ 		usb_remove_hcd(xhci->shared_hcd);
+ 		usb_put_hcd(xhci->shared_hcd);
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index d39d6bf1d090..d4962208be30 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -39,12 +39,25 @@ static const struct xhci_driver_overrides xhci_plat_overrides __initconst = {
+ 
+ static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
+ {
++	struct usb_hcd *hcd = xhci_to_hcd(xhci);
++
+ 	/*
+ 	 * As of now platform drivers don't provide MSI support so we ensure
+ 	 * here that the generic code does not try to make a pci_dev from our
+ 	 * dev struct in order to setup MSI
+ 	 */
+ 	xhci->quirks |= XHCI_PLAT;
++
++	/*
++	 * On R-Car Gen2 and Gen3, the AC64 bit (bit 0) of HCCPARAMS1 is set
++	 * to 1. However, these SoCs don't support 64-bit address memory
++	 * pointers. So, this driver clears the AC64 bit of xhci->hcc_params
++	 * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
++	 * xhci_gen_setup().
++	 */
++	if (xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2) ||
++	    xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3))
++		xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
+ }
+ 
+ /* called during probe() after chip reset completes */
+diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
+index 5a2e2e3936c4..529c3c40f901 100644
+--- a/drivers/usb/host/xhci-plat.h
++++ b/drivers/usb/host/xhci-plat.h
+@@ -14,7 +14,7 @@
+ #include "xhci.h"	/* for hcd_to_xhci() */
+ 
+ enum xhci_plat_type {
+-	XHCI_PLAT_TYPE_MARVELL_ARMADA,
++	XHCI_PLAT_TYPE_MARVELL_ARMADA = 1,
+ 	XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2,
+ 	XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3,
+ };
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 3915657e6078..a85a1c993d61 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -4014,7 +4014,8 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ 	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
+ 	int ret;
+ 
+-	if (xhci->xhc_state) {
++	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
++		(xhci->xhc_state & XHCI_STATE_HALTED)) {
+ 		xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
+ 		return -ESHUTDOWN;
+ 	}
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 0c8087d3c313..8e713cca58ed 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -147,7 +147,8 @@ static int xhci_start(struct xhci_hcd *xhci)
+ 				"waited %u microseconds.\n",
+ 				XHCI_MAX_HALT_USEC);
+ 	if (!ret)
+-		xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
++		/* clear state flags. Including dying, halted or removing */
++		xhci->xhc_state = 0;
+ 
+ 	return ret;
+ }
+@@ -1108,8 +1109,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ 		/* Resume root hubs only when have pending events. */
+ 		status = readl(&xhci->op_regs->status);
+ 		if (status & STS_EINT) {
+-			usb_hcd_resume_root_hub(hcd);
+ 			usb_hcd_resume_root_hub(xhci->shared_hcd);
++			usb_hcd_resume_root_hub(hcd);
+ 		}
+ 	}
+ 
+@@ -1124,10 +1125,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ 
+ 	/* Re-enable port polling. */
+ 	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
+-	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+-	usb_hcd_poll_rh_status(hcd);
+ 	set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
+ 	usb_hcd_poll_rh_status(xhci->shared_hcd);
++	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
++	usb_hcd_poll_rh_status(hcd);
+ 
+ 	return retval;
+ }
+@@ -2770,7 +2771,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+ 	if (ret <= 0)
+ 		return ret;
+ 	xhci = hcd_to_xhci(hcd);
+-	if (xhci->xhc_state & XHCI_STATE_DYING)
++	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
++		(xhci->xhc_state & XHCI_STATE_REMOVING))
+ 		return -ENODEV;
+ 
+ 	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+@@ -3817,7 +3819,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
+ 
+ 	mutex_lock(&xhci->mutex);
+ 
+-	if (xhci->xhc_state)	/* dying or halted */
++	if (xhci->xhc_state)	/* dying, removing or halted */
+ 		goto out;
+ 
+ 	if (!udev->slot_id) {
+@@ -4944,6 +4946,16 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
+ 		return retval;
+ 	xhci_dbg(xhci, "Reset complete\n");
+ 
++	/*
++	 * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
++	 * of HCCPARAMS1 is set to 1. However, the xHCs don't support 64-bit
++	 * address memory pointers actually. So, this driver clears the AC64
++	 * bit of xhci->hcc_params to call dma_set_coherent_mask(dev,
++	 * DMA_BIT_MASK(32)) in this xhci_gen_setup().
++	 */
++	if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
++		xhci->hcc_params &= ~BIT(0);
++
+ 	/* Set dma_mask and coherent_dma_mask to 64-bits,
+ 	 * if xHC supports 64-bit addressing */
+ 	if (HCC_64BIT_ADDR(xhci->hcc_params) &&
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index cc651383ce5a..1cdea4a8c895 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1596,6 +1596,7 @@ struct xhci_hcd {
+  */
+ #define XHCI_STATE_DYING	(1 << 0)
+ #define XHCI_STATE_HALTED	(1 << 1)
++#define XHCI_STATE_REMOVING	(1 << 2)
+ 	/* Statistics */
+ 	int			error_bitmask;
+ 	unsigned int		quirks;
+@@ -1632,6 +1633,7 @@ struct xhci_hcd {
+ #define XHCI_PME_STUCK_QUIRK	(1 << 20)
+ #define XHCI_MTK_HOST		(1 << 21)
+ #define XHCI_SSIC_PORT_UNUSED	(1 << 22)
++#define XHCI_NO_64BIT_SUPPORT	(1 << 23)
+ 	unsigned int		num_active_eps;
+ 	unsigned int		limit_active_eps;
+ 	/* There are two roothubs to keep track of bus suspend info for */
+diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
+index facaaf003f19..e40da7759a0e 100644
+--- a/drivers/usb/usbip/usbip_common.c
++++ b/drivers/usb/usbip/usbip_common.c
+@@ -741,6 +741,17 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
+ 	if (!(size > 0))
+ 		return 0;
+ 
++	if (size > urb->transfer_buffer_length) {
++		/* should not happen, probably malicious packet */
++		if (ud->side == USBIP_STUB) {
++			usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
++			return 0;
++		} else {
++			usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
++			return -EPIPE;
++		}
++	}
++
+ 	ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
+ 	if (ret != size) {
+ 		dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
+diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
+index 8ea45a5cd806..d889ef2048df 100644
+--- a/drivers/video/fbdev/Kconfig
++++ b/drivers/video/fbdev/Kconfig
+@@ -2246,7 +2246,6 @@ config XEN_FBDEV_FRONTEND
+ 	select FB_SYS_IMAGEBLIT
+ 	select FB_SYS_FOPS
+ 	select FB_DEFERRED_IO
+-	select INPUT_XEN_KBDDEV_FRONTEND if INPUT_MISC
+ 	select XEN_XENBUS_FRONTEND
+ 	default y
+ 	help
+diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
+index 9362424c2340..f9ef06d0cd48 100644
+--- a/drivers/video/fbdev/amba-clcd.c
++++ b/drivers/video/fbdev/amba-clcd.c
+@@ -440,13 +440,14 @@ static int clcdfb_register(struct clcd_fb *fb)
+ 		fb->off_ienb = CLCD_PL111_IENB;
+ 		fb->off_cntl = CLCD_PL111_CNTL;
+ 	} else {
+-#ifdef CONFIG_ARCH_VERSATILE
+-		fb->off_ienb = CLCD_PL111_IENB;
+-		fb->off_cntl = CLCD_PL111_CNTL;
+-#else
+-		fb->off_ienb = CLCD_PL110_IENB;
+-		fb->off_cntl = CLCD_PL110_CNTL;
+-#endif
++		if (of_machine_is_compatible("arm,versatile-ab") ||
++		    of_machine_is_compatible("arm,versatile-pb")) {
++			fb->off_ienb = CLCD_PL111_IENB;
++			fb->off_cntl = CLCD_PL111_CNTL;
++		} else {
++			fb->off_ienb = CLCD_PL110_IENB;
++			fb->off_cntl = CLCD_PL110_CNTL;
++		}
+ 	}
+ 
+ 	fb->clk = clk_get(&fb->dev->dev, NULL);
+diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
+index 6b2a06d09f2b..d8d583d32a37 100644
+--- a/drivers/video/fbdev/da8xx-fb.c
++++ b/drivers/video/fbdev/da8xx-fb.c
+@@ -209,8 +209,7 @@ static struct fb_videomode known_lcd_panels[] = {
+ 		.lower_margin   = 2,
+ 		.hsync_len      = 0,
+ 		.vsync_len      = 0,
+-		.sync           = FB_SYNC_CLK_INVERT |
+-			FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++		.sync           = FB_SYNC_CLK_INVERT,
+ 	},
+ 	/* Sharp LK043T1DG01 */
+ 	[1] = {
+@@ -224,7 +223,7 @@ static struct fb_videomode known_lcd_panels[] = {
+ 		.lower_margin   = 2,
+ 		.hsync_len      = 41,
+ 		.vsync_len      = 10,
+-		.sync           = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++		.sync           = 0,
+ 		.flag           = 0,
+ 	},
+ 	[2] = {
+@@ -239,7 +238,7 @@ static struct fb_videomode known_lcd_panels[] = {
+ 		.lower_margin   = 10,
+ 		.hsync_len      = 10,
+ 		.vsync_len      = 10,
+-		.sync           = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++		.sync           = 0,
+ 		.flag           = 0,
+ 	},
+ 	[3] = {
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 4545e2e2ad45..d8d68af5aef0 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -1830,7 +1830,7 @@ static int cleaner_kthread(void *arg)
+ 		 */
+ 		btrfs_delete_unused_bgs(root->fs_info);
+ sleep:
+-		if (!try_to_freeze() && !again) {
++		if (!again) {
+ 			set_current_state(TASK_INTERRUPTIBLE);
+ 			if (!kthread_should_stop())
+ 				schedule();
+diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
+index 0e1e61a7ec23..d39f714dabeb 100644
+--- a/fs/btrfs/tests/btrfs-tests.c
++++ b/fs/btrfs/tests/btrfs-tests.c
+@@ -189,12 +189,6 @@ btrfs_alloc_dummy_block_group(unsigned long length)
+ 		kfree(cache);
+ 		return NULL;
+ 	}
+-	cache->fs_info = btrfs_alloc_dummy_fs_info();
+-	if (!cache->fs_info) {
+-		kfree(cache->free_space_ctl);
+-		kfree(cache);
+-		return NULL;
+-	}
+ 
+ 	cache->key.objectid = 0;
+ 	cache->key.offset = length;
+diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c
+index d05fe1ab4808..7cea4462acd5 100644
+--- a/fs/btrfs/tests/free-space-tree-tests.c
++++ b/fs/btrfs/tests/free-space-tree-tests.c
+@@ -485,6 +485,7 @@ static int run_test(test_func_t test_func, int bitmaps)
+ 	cache->bitmap_low_thresh = 0;
+ 	cache->bitmap_high_thresh = (u32)-1;
+ 	cache->needs_free_space = 1;
++	cache->fs_info = root->fs_info;
+ 
+ 	btrfs_init_dummy_trans(&trans);
+ 
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index bece948b363d..8580831ed237 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -457,7 +457,7 @@ struct dentry *debugfs_create_automount(const char *name,
+ 	if (unlikely(!inode))
+ 		return failed_creating(dentry);
+ 
+-	inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
++	make_empty_dir_inode(inode);
+ 	inode->i_flags |= S_AUTOMOUNT;
+ 	inode->i_private = data;
+ 	dentry->d_fsdata = (void *)f;
+diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
+index ecb54394492a..25634c353191 100644
+--- a/fs/ext4/crypto.c
++++ b/fs/ext4/crypto.c
+@@ -34,6 +34,7 @@
+ #include <linux/random.h>
+ #include <linux/scatterlist.h>
+ #include <linux/spinlock_types.h>
++#include <linux/namei.h>
+ 
+ #include "ext4_extents.h"
+ #include "xattr.h"
+@@ -479,6 +480,9 @@ static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
+ 	struct ext4_crypt_info *ci;
+ 	int dir_has_key, cached_with_key;
+ 
++	if (flags & LOOKUP_RCU)
++		return -ECHILD;
++
+ 	dir = dget_parent(dentry);
+ 	if (!ext4_encrypted_inode(d_inode(dir))) {
+ 		dput(dir);
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index aee960b1af34..e6218cbc8332 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -5261,6 +5261,8 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
+ 	might_sleep();
+ 	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
+ 	err = ext4_reserve_inode_write(handle, inode, &iloc);
++	if (err)
++		return err;
+ 	if (ext4_handle_valid(handle) &&
+ 	    EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
+ 	    !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
+@@ -5291,9 +5293,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
+ 			}
+ 		}
+ 	}
+-	if (!err)
+-		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
+-	return err;
++	return ext4_mark_iloc_dirty(handle, inode, &iloc);
+ }
+ 
+ /*
+diff --git a/fs/f2fs/crypto_policy.c b/fs/f2fs/crypto_policy.c
+index d4a96af513c2..596f02490f27 100644
+--- a/fs/f2fs/crypto_policy.c
++++ b/fs/f2fs/crypto_policy.c
+@@ -192,7 +192,8 @@ int f2fs_inherit_context(struct inode *parent, struct inode *child,
+ 		return res;
+ 
+ 	ci = F2FS_I(parent)->i_crypt_info;
+-	BUG_ON(ci == NULL);
++	if (ci == NULL)
++		return -ENOKEY;
+ 
+ 	ctx.format = F2FS_ENCRYPTION_CONTEXT_FORMAT_V1;
+ 
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 5c06db17e41f..44802599fa67 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -67,7 +67,6 @@ static void f2fs_write_end_io(struct bio *bio)
+ 		f2fs_restore_and_release_control_page(&page);
+ 
+ 		if (unlikely(bio->bi_error)) {
+-			set_page_dirty(page);
+ 			set_bit(AS_EIO, &page->mapping->flags);
+ 			f2fs_stop_checkpoint(sbi);
+ 		}
+@@ -504,7 +503,7 @@ static int __allocate_data_blocks(struct inode *inode, loff_t offset,
+ 	struct dnode_of_data dn;
+ 	u64 start = F2FS_BYTES_TO_BLK(offset);
+ 	u64 len = F2FS_BYTES_TO_BLK(count);
+-	bool allocated;
++	bool allocated = false;
+ 	u64 end_offset;
+ 	int err = 0;
+ 
+@@ -546,7 +545,7 @@ static int __allocate_data_blocks(struct inode *inode, loff_t offset,
+ 		f2fs_put_dnode(&dn);
+ 		f2fs_unlock_op(sbi);
+ 
+-		f2fs_balance_fs(sbi, dn.node_changed);
++		f2fs_balance_fs(sbi, allocated);
+ 	}
+ 	return err;
+ 
+@@ -556,7 +555,7 @@ sync_out:
+ 	f2fs_put_dnode(&dn);
+ out:
+ 	f2fs_unlock_op(sbi);
+-	f2fs_balance_fs(sbi, dn.node_changed);
++	f2fs_balance_fs(sbi, allocated);
+ 	return err;
+ }
+ 
+@@ -650,14 +649,14 @@ get_next:
+ 	if (dn.ofs_in_node >= end_offset) {
+ 		if (allocated)
+ 			sync_inode_page(&dn);
+-		allocated = false;
+ 		f2fs_put_dnode(&dn);
+ 
+ 		if (create) {
+ 			f2fs_unlock_op(sbi);
+-			f2fs_balance_fs(sbi, dn.node_changed);
++			f2fs_balance_fs(sbi, allocated);
+ 			f2fs_lock_op(sbi);
+ 		}
++		allocated = false;
+ 
+ 		set_new_dnode(&dn, inode, NULL, NULL, 0);
+ 		err = get_dnode_of_data(&dn, pgofs, mode);
+@@ -715,7 +714,7 @@ put_out:
+ unlock_out:
+ 	if (create) {
+ 		f2fs_unlock_op(sbi);
+-		f2fs_balance_fs(sbi, dn.node_changed);
++		f2fs_balance_fs(sbi, allocated);
+ 	}
+ out:
+ 	trace_f2fs_map_blocks(inode, map, err);
+diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
+index faa7495e2d7e..30e6b6563494 100644
+--- a/fs/f2fs/dir.c
++++ b/fs/f2fs/dir.c
+@@ -892,11 +892,19 @@ out:
+ 	return err;
+ }
+ 
++static int f2fs_dir_open(struct inode *inode, struct file *filp)
++{
++	if (f2fs_encrypted_inode(inode))
++		return f2fs_get_encryption_info(inode) ? -EACCES : 0;
++	return 0;
++}
++
+ const struct file_operations f2fs_dir_operations = {
+ 	.llseek		= generic_file_llseek,
+ 	.read		= generic_read_dir,
+ 	.iterate	= f2fs_readdir,
+ 	.fsync		= f2fs_sync_file,
++	.open		= f2fs_dir_open,
+ 	.unlocked_ioctl	= f2fs_ioctl,
+ #ifdef CONFIG_COMPAT
+ 	.compat_ioctl   = f2fs_compat_ioctl,
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index ea272be62677..5a322bc00ac4 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -425,6 +425,8 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
+ 		err = f2fs_get_encryption_info(inode);
+ 		if (err)
+ 			return 0;
++		if (!f2fs_encrypted_inode(inode))
++			return -ENOKEY;
+ 	}
+ 
+ 	/* we don't need to use inline_data strictly */
+@@ -444,7 +446,9 @@ static int f2fs_file_open(struct inode *inode, struct file *filp)
+ 	if (!ret && f2fs_encrypted_inode(inode)) {
+ 		ret = f2fs_get_encryption_info(inode);
+ 		if (ret)
+-			ret = -EACCES;
++			return -EACCES;
++		if (!f2fs_encrypted_inode(inode))
++			return -ENOKEY;
+ 	}
+ 	return ret;
+ }
+diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
+index 6f944e5eb76e..7e9e38769660 100644
+--- a/fs/f2fs/namei.c
++++ b/fs/f2fs/namei.c
+@@ -980,12 +980,6 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
+ 	}
+ 	memcpy(cstr.name, sd->encrypted_path, cstr.len);
+ 
+-	/* this is broken symlink case */
+-	if (unlikely(cstr.name[0] == 0)) {
+-		res = -ENOENT;
+-		goto errout;
+-	}
+-
+ 	if ((cstr.len + sizeof(struct f2fs_encrypted_symlink_data) - 1) >
+ 								max_size) {
+ 		/* Symlink data on the disk is corrupted */
+@@ -1002,6 +996,12 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
+ 
+ 	kfree(cstr.name);
+ 
++	/* this is broken symlink case */
++	if (unlikely(pstr.name[0] == 0)) {
++		res = -ENOENT;
++		goto errout;
++	}
++
+ 	paddr = pstr.name;
+ 
+ 	/* Null-terminate the name */
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 6134832baaaf..013a62b2f8ca 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -926,9 +926,25 @@ static loff_t max_file_blocks(void)
+ 	return result;
+ }
+ 
++static int __f2fs_commit_super(struct buffer_head *bh,
++			struct f2fs_super_block *super)
++{
++	lock_buffer(bh);
++	if (super)
++		memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
++	set_buffer_uptodate(bh);
++	set_buffer_dirty(bh);
++	unlock_buffer(bh);
++
++	/* it's rare case, we can do fua all the time */
++	return __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
++}
++
+ static inline bool sanity_check_area_boundary(struct super_block *sb,
+-					struct f2fs_super_block *raw_super)
++					struct buffer_head *bh)
+ {
++	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
++					(bh->b_data + F2FS_SUPER_OFFSET);
+ 	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
+ 	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
+ 	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
+@@ -942,6 +958,10 @@ static inline bool sanity_check_area_boundary(struct super_block *sb,
+ 	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
+ 	u32 segment_count = le32_to_cpu(raw_super->segment_count);
+ 	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
++	u64 main_end_blkaddr = main_blkaddr +
++				(segment_count_main << log_blocks_per_seg);
++	u64 seg_end_blkaddr = segment0_blkaddr +
++				(segment_count << log_blocks_per_seg);
+ 
+ 	if (segment0_blkaddr != cp_blkaddr) {
+ 		f2fs_msg(sb, KERN_INFO,
+@@ -986,22 +1006,45 @@ static inline bool sanity_check_area_boundary(struct super_block *sb,
+ 		return true;
+ 	}
+ 
+-	if (main_blkaddr + (segment_count_main << log_blocks_per_seg) !=
+-		segment0_blkaddr + (segment_count << log_blocks_per_seg)) {
++	if (main_end_blkaddr > seg_end_blkaddr) {
+ 		f2fs_msg(sb, KERN_INFO,
+-			"Wrong MAIN_AREA boundary, start(%u) end(%u) blocks(%u)",
++			"Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
+ 			main_blkaddr,
+-			segment0_blkaddr + (segment_count << log_blocks_per_seg),
++			segment0_blkaddr +
++				(segment_count << log_blocks_per_seg),
+ 			segment_count_main << log_blocks_per_seg);
+ 		return true;
++	} else if (main_end_blkaddr < seg_end_blkaddr) {
++		int err = 0;
++		char *res;
++
++		/* fix in-memory information all the time */
++		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
++				segment0_blkaddr) >> log_blocks_per_seg);
++
++		if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
++			res = "internally";
++		} else {
++			err = __f2fs_commit_super(bh, NULL);
++			res = err ? "failed" : "done";
++		}
++		f2fs_msg(sb, KERN_INFO,
++			"Fix alignment : %s, start(%u) end(%u) block(%u)",
++			res, main_blkaddr,
++			segment0_blkaddr +
++				(segment_count << log_blocks_per_seg),
++			segment_count_main << log_blocks_per_seg);
++		if (err)
++			return true;
+ 	}
+-
+ 	return false;
+ }
+ 
+ static int sanity_check_raw_super(struct super_block *sb,
+-			struct f2fs_super_block *raw_super)
++				struct buffer_head *bh)
+ {
++	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
++					(bh->b_data + F2FS_SUPER_OFFSET);
+ 	unsigned int blocksize;
+ 
+ 	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
+@@ -1068,7 +1111,7 @@ static int sanity_check_raw_super(struct super_block *sb,
+ 	}
+ 
+ 	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
+-	if (sanity_check_area_boundary(sb, raw_super))
++	if (sanity_check_area_boundary(sb, bh))
+ 		return 1;
+ 
+ 	return 0;
+@@ -1134,103 +1177,87 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
+ 
+ /*
+  * Read f2fs raw super block.
+- * Because we have two copies of super block, so read the first one at first,
+- * if the first one is invalid, move to read the second one.
++ * Because we have two copies of super block, so read both of them
++ * to get the first valid one. If any one of them is broken, we pass
++ * them recovery flag back to the caller.
+  */
+ static int read_raw_super_block(struct super_block *sb,
+ 			struct f2fs_super_block **raw_super,
+ 			int *valid_super_block, int *recovery)
+ {
+-	int block = 0;
++	int block;
+ 	struct buffer_head *bh;
+-	struct f2fs_super_block *super, *buf;
++	struct f2fs_super_block *super;
+ 	int err = 0;
+ 
+ 	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
+ 	if (!super)
+ 		return -ENOMEM;
+-retry:
+-	bh = sb_bread(sb, block);
+-	if (!bh) {
+-		*recovery = 1;
+-		f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
++
++	for (block = 0; block < 2; block++) {
++		bh = sb_bread(sb, block);
++		if (!bh) {
++			f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
+ 				block + 1);
+-		err = -EIO;
+-		goto next;
+-	}
++			err = -EIO;
++			continue;
++		}
+ 
+-	buf = (struct f2fs_super_block *)(bh->b_data + F2FS_SUPER_OFFSET);
++		/* sanity checking of raw super */
++		if (sanity_check_raw_super(sb, bh)) {
++			f2fs_msg(sb, KERN_ERR,
++				"Can't find valid F2FS filesystem in %dth superblock",
++				block + 1);
++			err = -EINVAL;
++			brelse(bh);
++			continue;
++		}
+ 
+-	/* sanity checking of raw super */
+-	if (sanity_check_raw_super(sb, buf)) {
++		if (!*raw_super) {
++			memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
++							sizeof(*super));
++			*valid_super_block = block;
++			*raw_super = super;
++		}
+ 		brelse(bh);
+-		*recovery = 1;
+-		f2fs_msg(sb, KERN_ERR,
+-			"Can't find valid F2FS filesystem in %dth superblock",
+-								block + 1);
+-		err = -EINVAL;
+-		goto next;
+ 	}
+ 
+-	if (!*raw_super) {
+-		memcpy(super, buf, sizeof(*super));
+-		*valid_super_block = block;
+-		*raw_super = super;
+-	}
+-	brelse(bh);
+-
+-next:
+-	/* check the validity of the second superblock */
+-	if (block == 0) {
+-		block++;
+-		goto retry;
+-	}
++	/* Fail to read any one of the superblocks*/
++	if (err < 0)
++		*recovery = 1;
+ 
+ 	/* No valid superblock */
+-	if (!*raw_super) {
++	if (!*raw_super)
+ 		kfree(super);
+-		return err;
+-	}
++	else
++		err = 0;
+ 
+-	return 0;
++	return err;
+ }
+ 
+-static int __f2fs_commit_super(struct f2fs_sb_info *sbi, int block)
++int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
+ {
+-	struct f2fs_super_block *super = F2FS_RAW_SUPER(sbi);
+ 	struct buffer_head *bh;
+ 	int err;
+ 
+-	bh = sb_getblk(sbi->sb, block);
++	/* write back-up superblock first */
++	bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0: 1);
+ 	if (!bh)
+ 		return -EIO;
+-
+-	lock_buffer(bh);
+-	memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
+-	set_buffer_uptodate(bh);
+-	set_buffer_dirty(bh);
+-	unlock_buffer(bh);
+-
+-	/* it's rare case, we can do fua all the time */
+-	err = __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
++	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
+ 	brelse(bh);
+ 
+-	return err;
+-}
+-
+-int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
+-{
+-	int err;
+-
+-	/* write back-up superblock first */
+-	err = __f2fs_commit_super(sbi, sbi->valid_super_block ? 0 : 1);
+-
+ 	/* if we are in recovery path, skip writing valid superblock */
+ 	if (recover || err)
+ 		return err;
+ 
+ 	/* write current valid superblock */
+-	return __f2fs_commit_super(sbi, sbi->valid_super_block);
++	bh = sb_getblk(sbi->sb, sbi->valid_super_block);
++	if (!bh)
++		return -EIO;
++	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
++	brelse(bh);
++	return err;
+ }
+ 
+ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index fa95ab2d3674..9d2f3e0a6360 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -1504,6 +1504,32 @@ static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
+ 	return page;
+ }
+ 
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
++					      struct vm_area_struct *vma,
++					      unsigned long addr)
++{
++	struct page *page;
++	int nid;
++
++	if (!pmd_present(pmd))
++		return NULL;
++
++	page = vm_normal_page_pmd(vma, addr, pmd);
++	if (!page)
++		return NULL;
++
++	if (PageReserved(page))
++		return NULL;
++
++	nid = page_to_nid(page);
++	if (!node_isset(nid, node_states[N_MEMORY]))
++		return NULL;
++
++	return page;
++}
++#endif
++
+ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
+ 		unsigned long end, struct mm_walk *walk)
+ {
+@@ -1513,14 +1539,14 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
+ 	pte_t *orig_pte;
+ 	pte_t *pte;
+ 
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ 	ptl = pmd_trans_huge_lock(pmd, vma);
+ 	if (ptl) {
+-		pte_t huge_pte = *(pte_t *)pmd;
+ 		struct page *page;
+ 
+-		page = can_gather_numa_stats(huge_pte, vma, addr);
++		page = can_gather_numa_stats_pmd(*pmd, vma, addr);
+ 		if (page)
+-			gather_stats(page, md, pte_dirty(huge_pte),
++			gather_stats(page, md, pmd_dirty(*pmd),
+ 				     HPAGE_PMD_SIZE/PAGE_SIZE);
+ 		spin_unlock(ptl);
+ 		return 0;
+@@ -1528,6 +1554,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
+ 
+ 	if (pmd_trans_unstable(pmd))
+ 		return 0;
++#endif
+ 	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+ 	do {
+ 		struct page *page = can_gather_numa_stats(*pte, vma, addr);
+diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
+index e56272c919b5..bf2d34c9d804 100644
+--- a/include/asm-generic/futex.h
++++ b/include/asm-generic/futex.h
+@@ -108,11 +108,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ 	u32 val;
+ 
+ 	preempt_disable();
+-	if (unlikely(get_user(val, uaddr) != 0))
++	if (unlikely(get_user(val, uaddr) != 0)) {
++		preempt_enable();
+ 		return -EFAULT;
++	}
+ 
+-	if (val == oldval && unlikely(put_user(newval, uaddr) != 0))
++	if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
++		preempt_enable();
+ 		return -EFAULT;
++	}
+ 
+ 	*uval = val;
+ 	preempt_enable();
+diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
+index 461a0558bca4..cebecff536a3 100644
+--- a/include/drm/drm_cache.h
++++ b/include/drm/drm_cache.h
+@@ -39,6 +39,8 @@ static inline bool drm_arch_can_wc_memory(void)
+ {
+ #if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
+ 	return false;
++#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
++	return false;
+ #else
+ 	return true;
+ #endif
+diff --git a/include/keys/trusted-type.h b/include/keys/trusted-type.h
+index 42cf2d991bf4..4ea7e55f20b0 100644
+--- a/include/keys/trusted-type.h
++++ b/include/keys/trusted-type.h
+@@ -38,7 +38,7 @@ struct trusted_key_options {
+ 	unsigned char pcrinfo[MAX_PCRINFO_SIZE];
+ 	int pcrlock;
+ 	uint32_t hash;
+-	uint32_t digest_len;
++	uint32_t policydigest_len;
+ 	unsigned char policydigest[MAX_DIGEST_SIZE];
+ 	uint32_t policyhandle;
+ };
+diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
+index 89d944b25d87..7fc7cb7872e3 100644
+--- a/include/linux/cgroup-defs.h
++++ b/include/linux/cgroup-defs.h
+@@ -442,6 +442,7 @@ struct cgroup_subsys {
+ 	int (*can_attach)(struct cgroup_taskset *tset);
+ 	void (*cancel_attach)(struct cgroup_taskset *tset);
+ 	void (*attach)(struct cgroup_taskset *tset);
++	void (*post_attach)(void);
+ 	int (*can_fork)(struct task_struct *task);
+ 	void (*cancel_fork)(struct task_struct *task);
+ 	void (*fork)(struct task_struct *task);
+diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
+index fea160ee5803..85a868ccb493 100644
+--- a/include/linux/cpuset.h
++++ b/include/linux/cpuset.h
+@@ -137,8 +137,6 @@ static inline void set_mems_allowed(nodemask_t nodemask)
+ 	task_unlock(current);
+ }
+ 
+-extern void cpuset_post_attach_flush(void);
+-
+ #else /* !CONFIG_CPUSETS */
+ 
+ static inline bool cpusets_enabled(void) { return false; }
+@@ -245,10 +243,6 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
+ 	return false;
+ }
+ 
+-static inline void cpuset_post_attach_flush(void)
+-{
+-}
+-
+ #endif /* !CONFIG_CPUSETS */
+ 
+ #endif /* _LINUX_CPUSET_H */
+diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
+index 987764afa65c..f8b83792939b 100644
+--- a/include/linux/mlx5/device.h
++++ b/include/linux/mlx5/device.h
+@@ -363,6 +363,17 @@ enum {
+ 	MLX5_CAP_OFF_CMDIF_CSUM		= 46,
+ };
+ 
++enum {
++	/*
++	 * Max wqe size for rdma read is 512 bytes, so this
++	 * limits our max_sge_rd as the wqe needs to fit:
++	 * - ctrl segment (16 bytes)
++	 * - rdma segment (16 bytes)
++	 * - scatter elements (16 bytes each)
++	 */
++	MLX5_MAX_SGE_RD	= (512 - 16 - 16) / 16
++};
++
+ struct mlx5_inbox_hdr {
+ 	__be16		opcode;
+ 	u8		rsvd[4];
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 516e14944339..a6c240e885c0 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1010,6 +1010,8 @@ static inline bool page_mapped(struct page *page)
+ 	page = compound_head(page);
+ 	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
+ 		return true;
++	if (PageHuge(page))
++		return false;
+ 	for (i = 0; i < hpage_nr_pages(page); i++) {
+ 		if (atomic_read(&page[i]._mapcount) >= 0)
+ 			return true;
+@@ -1117,6 +1119,8 @@ struct zap_details {
+ 
+ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+ 		pte_t pte);
++struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
++				pmd_t pmd);
+ 
+ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+ 		unsigned long size);
+diff --git a/include/linux/platform_data/mmp_dma.h b/include/linux/platform_data/mmp_dma.h
+index 2a330ec9e2af..d1397c8ed94e 100644
+--- a/include/linux/platform_data/mmp_dma.h
++++ b/include/linux/platform_data/mmp_dma.h
+@@ -14,6 +14,7 @@
+ 
+ struct mmp_dma_platdata {
+ 	int dma_channels;
++	int nb_requestors;
+ };
+ 
+ #endif /* MMP_DMA_H */
+diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
+index 8a0f55b6c2ba..88e3ab496e8f 100644
+--- a/include/media/videobuf2-core.h
++++ b/include/media/videobuf2-core.h
+@@ -375,6 +375,9 @@ struct vb2_ops {
+ /**
+  * struct vb2_ops - driver-specific callbacks
+  *
++ * @verify_planes_array: Verify that a given user space structure contains
++ *			enough planes for the buffer. This is called
++ *			for each dequeued buffer.
+  * @fill_user_buffer:	given a vb2_buffer fill in the userspace structure.
+  *			For V4L2 this is a struct v4l2_buffer.
+  * @fill_vb2_buffer:	given a userspace structure, fill in the vb2_buffer.
+@@ -384,6 +387,7 @@ struct vb2_ops {
+  *			the vb2_buffer struct.
+  */
+ struct vb2_buf_ops {
++	int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb);
+ 	void (*fill_user_buffer)(struct vb2_buffer *vb, void *pb);
+ 	int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb,
+ 				struct vb2_plane *planes);
+@@ -400,6 +404,9 @@ struct vb2_buf_ops {
+  * @fileio_read_once:		report EOF after reading the first buffer
+  * @fileio_write_immediately:	queue buffer after each write() call
+  * @allow_zero_bytesused:	allow bytesused == 0 to be passed to the driver
++ * @quirk_poll_must_check_waiting_for_buffers: Return POLLERR at poll when QBUF
++ *              has not been called. This is a vb1 idiom that has been adopted
++ *              also by vb2.
+  * @lock:	pointer to a mutex that protects the vb2_queue struct. The
+  *		driver can set this to a mutex to let the v4l2 core serialize
+  *		the queuing ioctls. If the driver wants to handle locking
+@@ -463,6 +470,7 @@ struct vb2_queue {
+ 	unsigned			fileio_read_once:1;
+ 	unsigned			fileio_write_immediately:1;
+ 	unsigned			allow_zero_bytesused:1;
++	unsigned		   quirk_poll_must_check_waiting_for_buffers:1;
+ 
+ 	struct mutex			*lock;
+ 	void				*owner;
+diff --git a/include/rdma/ib.h b/include/rdma/ib.h
+index cf8f9e700e48..a6b93706b0fc 100644
+--- a/include/rdma/ib.h
++++ b/include/rdma/ib.h
+@@ -34,6 +34,7 @@
+ #define _RDMA_IB_H
+ 
+ #include <linux/types.h>
++#include <linux/sched.h>
+ 
+ struct ib_addr {
+ 	union {
+@@ -86,4 +87,19 @@ struct sockaddr_ib {
+ 	__u64			sib_scope_id;
+ };
+ 
++/*
++ * The IB interfaces that use write() as bi-directional ioctl() are
++ * fundamentally unsafe, since there are lots of ways to trigger "write()"
++ * calls from various contexts with elevated privileges. That includes the
++ * traditional suid executable error message writes, but also various kernel
++ * interfaces that can write to file descriptors.
++ *
++ * This function provides protection for the legacy API by restricting the
++ * calling context.
++ */
++static inline bool ib_safe_file_access(struct file *filp)
++{
++	return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
++}
++
+ #endif /* _RDMA_IB_H */
+diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h
+index fa341fcb5829..f5842bcd9c94 100644
+--- a/include/sound/hda_i915.h
++++ b/include/sound/hda_i915.h
+@@ -9,7 +9,7 @@
+ #ifdef CONFIG_SND_HDA_I915
+ int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
+ int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
+-int snd_hdac_get_display_clk(struct hdac_bus *bus);
++void snd_hdac_i915_set_bclk(struct hdac_bus *bus);
+ int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, int rate);
+ int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid,
+ 			   bool *audio_enabled, char *buffer, int max_bytes);
+@@ -25,9 +25,8 @@ static inline int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
+ {
+ 	return 0;
+ }
+-static inline int snd_hdac_get_display_clk(struct hdac_bus *bus)
++static inline void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
+ {
+-	return 0;
+ }
+ static inline int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid,
+ 					   int rate)
+diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
+index c039f1d68a09..086168e18ca8 100644
+--- a/include/uapi/linux/v4l2-dv-timings.h
++++ b/include/uapi/linux/v4l2-dv-timings.h
+@@ -183,7 +183,8 @@
+ 
+ #define V4L2_DV_BT_CEA_3840X2160P24 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, \
+ 		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+@@ -191,14 +192,16 @@
+ 
+ #define V4L2_DV_BT_CEA_3840X2160P25 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+ }
+ 
+ #define V4L2_DV_BT_CEA_3840X2160P30 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, \
+ 		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+@@ -206,14 +209,16 @@
+ 
+ #define V4L2_DV_BT_CEA_3840X2160P50 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+ }
+ 
+ #define V4L2_DV_BT_CEA_3840X2160P60 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, \
+ 		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+@@ -221,7 +226,8 @@
+ 
+ #define V4L2_DV_BT_CEA_4096X2160P24 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, \
+ 		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+@@ -229,14 +235,16 @@
+ 
+ #define V4L2_DV_BT_CEA_4096X2160P25 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+ }
+ 
+ #define V4L2_DV_BT_CEA_4096X2160P30 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, \
+ 		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+@@ -244,14 +252,16 @@
+ 
+ #define V4L2_DV_BT_CEA_4096X2160P50 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+ }
+ 
+ #define V4L2_DV_BT_CEA_4096X2160P60 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, \
+ 		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index 6a498daf2eec..355cd5f2b416 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -2697,9 +2697,10 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
+ 				    size_t nbytes, loff_t off, bool threadgroup)
+ {
+ 	struct task_struct *tsk;
++	struct cgroup_subsys *ss;
+ 	struct cgroup *cgrp;
+ 	pid_t pid;
+-	int ret;
++	int ssid, ret;
+ 
+ 	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
+ 		return -EINVAL;
+@@ -2747,8 +2748,10 @@ out_unlock_rcu:
+ 	rcu_read_unlock();
+ out_unlock_threadgroup:
+ 	percpu_up_write(&cgroup_threadgroup_rwsem);
++	for_each_subsys(ss, ssid)
++		if (ss->post_attach)
++			ss->post_attach();
+ 	cgroup_kn_unlock(of->kn);
+-	cpuset_post_attach_flush();
+ 	return ret ?: nbytes;
+ }
+ 
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index 41989ab4db57..df16d0c9349f 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -58,7 +58,6 @@
+ #include <asm/uaccess.h>
+ #include <linux/atomic.h>
+ #include <linux/mutex.h>
+-#include <linux/workqueue.h>
+ #include <linux/cgroup.h>
+ #include <linux/wait.h>
+ 
+@@ -1016,7 +1015,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
+ 	}
+ }
+ 
+-void cpuset_post_attach_flush(void)
++static void cpuset_post_attach(void)
+ {
+ 	flush_workqueue(cpuset_migrate_mm_wq);
+ }
+@@ -2087,6 +2086,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
+ 	.can_attach	= cpuset_can_attach,
+ 	.cancel_attach	= cpuset_cancel_attach,
+ 	.attach		= cpuset_attach,
++	.post_attach	= cpuset_post_attach,
+ 	.bind		= cpuset_bind,
+ 	.legacy_cftypes	= files,
+ 	.early_init	= 1,
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index f0b4b328d8f5..a0ef98b258d7 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -2402,14 +2402,24 @@ static void ctx_sched_out(struct perf_event_context *ctx,
+ 			cpuctx->task_ctx = NULL;
+ 	}
+ 
+-	is_active ^= ctx->is_active; /* changed bits */
+-
++	/*
++	 * Always update time if it was set; not only when it changes.
++	 * Otherwise we can 'forget' to update time for any but the last
++	 * context we sched out. For example:
++	 *
++	 *   ctx_sched_out(.event_type = EVENT_FLEXIBLE)
++	 *   ctx_sched_out(.event_type = EVENT_PINNED)
++	 *
++	 * would only update time for the pinned events.
++	 */
+ 	if (is_active & EVENT_TIME) {
+ 		/* update (and stop) ctx time */
+ 		update_context_time(ctx);
+ 		update_cgrp_time_from_cpuctx(cpuctx);
+ 	}
+ 
++	is_active ^= ctx->is_active; /* changed bits */
++
+ 	if (!ctx->nr_active || !(is_active & EVENT_ALL))
+ 		return;
+ 
+@@ -8479,6 +8489,7 @@ SYSCALL_DEFINE5(perf_event_open,
+ 					f_flags);
+ 	if (IS_ERR(event_file)) {
+ 		err = PTR_ERR(event_file);
++		event_file = NULL;
+ 		goto err_context;
+ 	}
+ 
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 5d6ce6413ef1..11b502159f3a 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -1212,10 +1212,20 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
+ 	if (unlikely(should_fail_futex(true)))
+ 		ret = -EFAULT;
+ 
+-	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
++	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
+ 		ret = -EFAULT;
+-	else if (curval != uval)
+-		ret = -EINVAL;
++	} else if (curval != uval) {
++		/*
++		 * If a unconditional UNLOCK_PI operation (user space did not
++		 * try the TID->0 transition) raced with a waiter setting the
++		 * FUTEX_WAITERS flag between get_user() and locking the hash
++		 * bucket lock, retry the operation.
++		 */
++		if ((FUTEX_TID_MASK & curval) == uval)
++			ret = -EAGAIN;
++		else
++			ret = -EINVAL;
++	}
+ 	if (ret) {
+ 		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+ 		return ret;
+@@ -1442,8 +1452,8 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
+ 	if (likely(&hb1->chain != &hb2->chain)) {
+ 		plist_del(&q->list, &hb1->chain);
+ 		hb_waiters_dec(hb1);
+-		plist_add(&q->list, &hb2->chain);
+ 		hb_waiters_inc(hb2);
++		plist_add(&q->list, &hb2->chain);
+ 		q->lock_ptr = &hb2->lock;
+ 	}
+ 	get_futex_key_refs(key2);
+@@ -2536,6 +2546,15 @@ retry:
+ 		if (ret == -EFAULT)
+ 			goto pi_faulted;
+ 		/*
++		 * A unconditional UNLOCK_PI op raced against a waiter
++		 * setting the FUTEX_WAITERS bit. Try again.
++		 */
++		if (ret == -EAGAIN) {
++			spin_unlock(&hb->lock);
++			put_futex_key(&key);
++			goto retry;
++		}
++		/*
+ 		 * wake_futex_pi has detected invalid state. Tell user
+ 		 * space.
+ 		 */
+diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
+index 5b9102a47ea5..c835270f0c2f 100644
+--- a/kernel/locking/mcs_spinlock.h
++++ b/kernel/locking/mcs_spinlock.h
+@@ -67,7 +67,13 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
+ 	node->locked = 0;
+ 	node->next   = NULL;
+ 
+-	prev = xchg_acquire(lock, node);
++	/*
++	 * We rely on the full barrier with global transitivity implied by the
++	 * below xchg() to order the initialization stores above against any
++	 * observation of @node. And to provide the ACQUIRE ordering associated
++	 * with a LOCK primitive.
++	 */
++	prev = xchg(lock, node);
+ 	if (likely(prev == NULL)) {
+ 		/*
+ 		 * Lock acquired, don't need to set node->locked to 1. Threads
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index a74073f8c08c..1c1d2a00ad95 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -7802,7 +7802,7 @@ void set_curr_task(int cpu, struct task_struct *p)
+ /* task_group_lock serializes the addition/removal of task groups */
+ static DEFINE_SPINLOCK(task_group_lock);
+ 
+-static void free_sched_group(struct task_group *tg)
++static void sched_free_group(struct task_group *tg)
+ {
+ 	free_fair_sched_group(tg);
+ 	free_rt_sched_group(tg);
+@@ -7828,7 +7828,7 @@ struct task_group *sched_create_group(struct task_group *parent)
+ 	return tg;
+ 
+ err:
+-	free_sched_group(tg);
++	sched_free_group(tg);
+ 	return ERR_PTR(-ENOMEM);
+ }
+ 
+@@ -7848,17 +7848,16 @@ void sched_online_group(struct task_group *tg, struct task_group *parent)
+ }
+ 
+ /* rcu callback to free various structures associated with a task group */
+-static void free_sched_group_rcu(struct rcu_head *rhp)
++static void sched_free_group_rcu(struct rcu_head *rhp)
+ {
+ 	/* now it should be safe to free those cfs_rqs */
+-	free_sched_group(container_of(rhp, struct task_group, rcu));
++	sched_free_group(container_of(rhp, struct task_group, rcu));
+ }
+ 
+-/* Destroy runqueue etc associated with a task group */
+ void sched_destroy_group(struct task_group *tg)
+ {
+ 	/* wait for possible concurrent references to cfs_rqs complete */
+-	call_rcu(&tg->rcu, free_sched_group_rcu);
++	call_rcu(&tg->rcu, sched_free_group_rcu);
+ }
+ 
+ void sched_offline_group(struct task_group *tg)
+@@ -8319,31 +8318,26 @@ cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
+ 	if (IS_ERR(tg))
+ 		return ERR_PTR(-ENOMEM);
+ 
++	sched_online_group(tg, parent);
++
+ 	return &tg->css;
+ }
+ 
+-static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
++static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
+ {
+ 	struct task_group *tg = css_tg(css);
+-	struct task_group *parent = css_tg(css->parent);
+ 
+-	if (parent)
+-		sched_online_group(tg, parent);
+-	return 0;
++	sched_offline_group(tg);
+ }
+ 
+ static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
+ {
+ 	struct task_group *tg = css_tg(css);
+ 
+-	sched_destroy_group(tg);
+-}
+-
+-static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
+-{
+-	struct task_group *tg = css_tg(css);
+-
+-	sched_offline_group(tg);
++	/*
++	 * Relies on the RCU grace period between css_released() and this.
++	 */
++	sched_free_group(tg);
+ }
+ 
+ static void cpu_cgroup_fork(struct task_struct *task)
+@@ -8703,9 +8697,8 @@ static struct cftype cpu_files[] = {
+ 
+ struct cgroup_subsys cpu_cgrp_subsys = {
+ 	.css_alloc	= cpu_cgroup_css_alloc,
++	.css_released	= cpu_cgroup_css_released,
+ 	.css_free	= cpu_cgroup_css_free,
+-	.css_online	= cpu_cgroup_css_online,
+-	.css_offline	= cpu_cgroup_css_offline,
+ 	.fork		= cpu_cgroup_fork,
+ 	.can_attach	= cpu_cgroup_can_attach,
+ 	.attach		= cpu_cgroup_attach,
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 7ff5dc7d2ac5..9e82d0450fad 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -667,6 +667,35 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
+ 	 */
+ 	smp_wmb();
+ 	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
++	/*
++	 * The following mb guarantees that previous clear of a PENDING bit
++	 * will not be reordered with any speculative LOADS or STORES from
++	 * work->current_func, which is executed afterwards.  This possible
++	 * reordering can lead to a missed execution on attempt to qeueue
++	 * the same @work.  E.g. consider this case:
++	 *
++	 *   CPU#0                         CPU#1
++	 *   ----------------------------  --------------------------------
++	 *
++	 * 1  STORE event_indicated
++	 * 2  queue_work_on() {
++	 * 3    test_and_set_bit(PENDING)
++	 * 4 }                             set_..._and_clear_pending() {
++	 * 5                                 set_work_data() # clear bit
++	 * 6                                 smp_mb()
++	 * 7                               work->current_func() {
++	 * 8				      LOAD event_indicated
++	 *				   }
++	 *
++	 * Without an explicit full barrier speculative LOAD on line 8 can
++	 * be executed before CPU#0 does STORE on line 1.  If that happens,
++	 * CPU#0 observes the PENDING bit is still set and new execution of
++	 * a @work is not queued in a hope, that CPU#1 will eventually
++	 * finish the queued @work.  Meanwhile CPU#1 does not see
++	 * event_indicated is set, because speculative LOAD was executed
++	 * before actual STORE.
++	 */
++	smp_mb();
+ }
+ 
+ static void clear_work_data(struct work_struct *work)
+diff --git a/lib/assoc_array.c b/lib/assoc_array.c
+index 03dd576e6773..59fd7c0b119c 100644
+--- a/lib/assoc_array.c
++++ b/lib/assoc_array.c
+@@ -524,7 +524,9 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
+ 			free_slot = i;
+ 			continue;
+ 		}
+-		if (ops->compare_object(assoc_array_ptr_to_leaf(ptr), index_key)) {
++		if (assoc_array_ptr_is_leaf(ptr) &&
++		    ops->compare_object(assoc_array_ptr_to_leaf(ptr),
++					index_key)) {
+ 			pr_devel("replace in slot %d\n", i);
+ 			edit->leaf_p = &node->slots[i];
+ 			edit->dead_leaf = node->slots[i];
+diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
+index abcecdc2d0f2..0710a62ad2f6 100644
+--- a/lib/lz4/lz4defs.h
++++ b/lib/lz4/lz4defs.h
+@@ -11,8 +11,7 @@
+ /*
+  * Detects 64 bits mode
+  */
+-#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) \
+-	|| defined(__ppc64__) || defined(__LP64__))
++#if defined(CONFIG_64BIT)
+ #define LZ4_ARCH64 1
+ #else
+ #define LZ4_ARCH64 0
+@@ -35,6 +34,10 @@ typedef struct _U64_S { u64 v; } U64_S;
+ 
+ #define PUT4(s, d) (A32(d) = A32(s))
+ #define PUT8(s, d) (A64(d) = A64(s))
++
++#define LZ4_READ_LITTLEENDIAN_16(d, s, p)	\
++	(d = s - A16(p))
++
+ #define LZ4_WRITE_LITTLEENDIAN_16(p, v)	\
+ 	do {	\
+ 		A16(p) = v; \
+@@ -51,10 +54,13 @@ typedef struct _U64_S { u64 v; } U64_S;
+ #define PUT8(s, d) \
+ 	put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)
+ 
+-#define LZ4_WRITE_LITTLEENDIAN_16(p, v)	\
+-	do {	\
+-		put_unaligned(v, (u16 *)(p)); \
+-		p += 2; \
++#define LZ4_READ_LITTLEENDIAN_16(d, s, p)	\
++	(d = s - get_unaligned_le16(p))
++
++#define LZ4_WRITE_LITTLEENDIAN_16(p, v)			\
++	do {						\
++		put_unaligned_le16(v, (u16 *)(p));	\
++		p += 2;					\
+ 	} while (0)
+ #endif
+ 
+@@ -140,9 +146,6 @@ typedef struct _U64_S { u64 v; } U64_S;
+ 
+ #endif
+ 
+-#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
+-	(d = s - get_unaligned_le16(p))
+-
+ #define LZ4_WILDCOPY(s, d, e)		\
+ 	do {				\
+ 		LZ4_COPYPACKET(s, d);	\
+diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
+index ec533a6c77b5..eb15e7dc7b65 100644
+--- a/lib/mpi/mpicoder.c
++++ b/lib/mpi/mpicoder.c
+@@ -128,6 +128,23 @@ leave:
+ }
+ EXPORT_SYMBOL_GPL(mpi_read_from_buffer);
+ 
++static int count_lzeros(MPI a)
++{
++	mpi_limb_t alimb;
++	int i, lzeros = 0;
++
++	for (i = a->nlimbs - 1; i >= 0; i--) {
++		alimb = a->d[i];
++		if (alimb == 0) {
++			lzeros += sizeof(mpi_limb_t);
++		} else {
++			lzeros += count_leading_zeros(alimb) / 8;
++			break;
++		}
++	}
++	return lzeros;
++}
++
+ /**
+  * mpi_read_buffer() - read MPI to a bufer provided by user (msb first)
+  *
+@@ -148,7 +165,7 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
+ 	uint8_t *p;
+ 	mpi_limb_t alimb;
+ 	unsigned int n = mpi_get_size(a);
+-	int i, lzeros = 0;
++	int i, lzeros;
+ 
+ 	if (!buf || !nbytes)
+ 		return -EINVAL;
+@@ -156,14 +173,7 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
+ 	if (sign)
+ 		*sign = a->sign;
+ 
+-	p = (void *)&a->d[a->nlimbs] - 1;
+-
+-	for (i = a->nlimbs * sizeof(alimb) - 1; i >= 0; i--, p--) {
+-		if (!*p)
+-			lzeros++;
+-		else
+-			break;
+-	}
++	lzeros = count_lzeros(a);
+ 
+ 	if (buf_len < n - lzeros) {
+ 		*nbytes = n - lzeros;
+@@ -351,7 +361,7 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
+ 	u8 *p, *p2;
+ 	mpi_limb_t alimb, alimb2;
+ 	unsigned int n = mpi_get_size(a);
+-	int i, x, y = 0, lzeros = 0, buf_len;
++	int i, x, y = 0, lzeros, buf_len;
+ 
+ 	if (!nbytes)
+ 		return -EINVAL;
+@@ -359,14 +369,7 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
+ 	if (sign)
+ 		*sign = a->sign;
+ 
+-	p = (void *)&a->d[a->nlimbs] - 1;
+-
+-	for (i = a->nlimbs * sizeof(alimb) - 1; i >= 0; i--, p--) {
+-		if (!*p)
+-			lzeros++;
+-		else
+-			break;
+-	}
++	lzeros = count_lzeros(a);
+ 
+ 	if (*nbytes < n - lzeros) {
+ 		*nbytes = n - lzeros;
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index e10a4fee88d2..a7db0a2db1ab 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1919,10 +1919,9 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
+ 		 * page fault if needed.
+ 		 */
+ 		return 0;
+-	if (vma->vm_ops)
++	if (vma->vm_ops || (vm_flags & VM_NO_THP))
+ 		/* khugepaged not yet working on file or special mappings */
+ 		return 0;
+-	VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
+ 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
+ 	hend = vma->vm_end & HPAGE_PMD_MASK;
+ 	if (hstart < hend)
+@@ -2310,8 +2309,7 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
+ 		return false;
+ 	if (is_vma_temporary_stack(vma))
+ 		return false;
+-	VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
+-	return true;
++	return !(vma->vm_flags & VM_NO_THP);
+ }
+ 
+ static void collapse_huge_page(struct mm_struct *mm,
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index caf3bf73b533..a65ad1d59232 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -207,6 +207,7 @@ static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
+ /* "mc" and its members are protected by cgroup_mutex */
+ static struct move_charge_struct {
+ 	spinlock_t	  lock; /* for from, to */
++	struct mm_struct  *mm;
+ 	struct mem_cgroup *from;
+ 	struct mem_cgroup *to;
+ 	unsigned long flags;
+@@ -4730,6 +4731,8 @@ static void __mem_cgroup_clear_mc(void)
+ 
+ static void mem_cgroup_clear_mc(void)
+ {
++	struct mm_struct *mm = mc.mm;
++
+ 	/*
+ 	 * we must clear moving_task before waking up waiters at the end of
+ 	 * task migration.
+@@ -4739,7 +4742,10 @@ static void mem_cgroup_clear_mc(void)
+ 	spin_lock(&mc.lock);
+ 	mc.from = NULL;
+ 	mc.to = NULL;
++	mc.mm = NULL;
+ 	spin_unlock(&mc.lock);
++
++	mmput(mm);
+ }
+ 
+ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
+@@ -4796,6 +4802,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
+ 		VM_BUG_ON(mc.moved_swap);
+ 
+ 		spin_lock(&mc.lock);
++		mc.mm = mm;
+ 		mc.from = from;
+ 		mc.to = memcg;
+ 		mc.flags = move_flags;
+@@ -4805,8 +4812,9 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
+ 		ret = mem_cgroup_precharge_mc(mm);
+ 		if (ret)
+ 			mem_cgroup_clear_mc();
++	} else {
++		mmput(mm);
+ 	}
+-	mmput(mm);
+ 	return ret;
+ }
+ 
+@@ -4915,11 +4923,11 @@ put:			/* get_mctgt_type() gets the page */
+ 	return ret;
+ }
+ 
+-static void mem_cgroup_move_charge(struct mm_struct *mm)
++static void mem_cgroup_move_charge(void)
+ {
+ 	struct mm_walk mem_cgroup_move_charge_walk = {
+ 		.pmd_entry = mem_cgroup_move_charge_pte_range,
+-		.mm = mm,
++		.mm = mc.mm,
+ 	};
+ 
+ 	lru_add_drain_all();
+@@ -4931,7 +4939,7 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
+ 	atomic_inc(&mc.from->moving_account);
+ 	synchronize_rcu();
+ retry:
+-	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
++	if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
+ 		/*
+ 		 * Someone who are holding the mmap_sem might be waiting in
+ 		 * waitq. So we cancel all extra charges, wake up all waiters,
+@@ -4948,23 +4956,16 @@ retry:
+ 	 * additional charge, the page walk just aborts.
+ 	 */
+ 	walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
+-	up_read(&mm->mmap_sem);
++	up_read(&mc.mm->mmap_sem);
+ 	atomic_dec(&mc.from->moving_account);
+ }
+ 
+-static void mem_cgroup_move_task(struct cgroup_taskset *tset)
++static void mem_cgroup_move_task(void)
+ {
+-	struct cgroup_subsys_state *css;
+-	struct task_struct *p = cgroup_taskset_first(tset, &css);
+-	struct mm_struct *mm = get_task_mm(p);
+-
+-	if (mm) {
+-		if (mc.to)
+-			mem_cgroup_move_charge(mm);
+-		mmput(mm);
+-	}
+-	if (mc.to)
++	if (mc.to) {
++		mem_cgroup_move_charge();
+ 		mem_cgroup_clear_mc();
++	}
+ }
+ #else	/* !CONFIG_MMU */
+ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
+@@ -4974,7 +4975,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
+ static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
+ {
+ }
+-static void mem_cgroup_move_task(struct cgroup_taskset *tset)
++static void mem_cgroup_move_task(void)
+ {
+ }
+ #endif
+@@ -5246,7 +5247,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
+ 	.css_reset = mem_cgroup_css_reset,
+ 	.can_attach = mem_cgroup_can_attach,
+ 	.cancel_attach = mem_cgroup_cancel_attach,
+-	.attach = mem_cgroup_move_task,
++	.post_attach = mem_cgroup_move_task,
+ 	.bind = mem_cgroup_bind,
+ 	.dfl_cftypes = memory_files,
+ 	.legacy_cftypes = mem_cgroup_legacy_files,
+diff --git a/mm/memory.c b/mm/memory.c
+index 8132787ae4d5..3345dcf862cf 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -792,6 +792,46 @@ out:
+ 	return pfn_to_page(pfn);
+ }
+ 
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
++				pmd_t pmd)
++{
++	unsigned long pfn = pmd_pfn(pmd);
++
++	/*
++	 * There is no pmd_special() but there may be special pmds, e.g.
++	 * in a direct-access (dax) mapping, so let's just replicate the
++	 * !HAVE_PTE_SPECIAL case from vm_normal_page() here.
++	 */
++	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
++		if (vma->vm_flags & VM_MIXEDMAP) {
++			if (!pfn_valid(pfn))
++				return NULL;
++			goto out;
++		} else {
++			unsigned long off;
++			off = (addr - vma->vm_start) >> PAGE_SHIFT;
++			if (pfn == vma->vm_pgoff + off)
++				return NULL;
++			if (!is_cow_mapping(vma->vm_flags))
++				return NULL;
++		}
++	}
++
++	if (is_zero_pfn(pfn))
++		return NULL;
++	if (unlikely(pfn > highest_memmap_pfn))
++		return NULL;
++
++	/*
++	 * NOTE! We still have PageReserved() pages in the page tables.
++	 * eg. VDSO mappings can cause them to exist.
++	 */
++out:
++	return pfn_to_page(pfn);
++}
++#endif
++
+ /*
+  * copy one vm_area from one task to the other. Assumes the page tables
+  * already present in the new task to be cleared in the whole range
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 3ad0fea5c438..625741faa068 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -967,7 +967,13 @@ out:
+ 		dec_zone_page_state(page, NR_ISOLATED_ANON +
+ 				page_is_file_cache(page));
+ 		/* Soft-offlined page shouldn't go through lru cache list */
+-		if (reason == MR_MEMORY_FAILURE) {
++		if (reason == MR_MEMORY_FAILURE && rc == MIGRATEPAGE_SUCCESS) {
++			/*
++			 * With this release, we free successfully migrated
++			 * page and set PG_HWPoison on just freed page
++			 * intentionally. Although it's rather weird, it's how
++			 * HWPoison flag works at the moment.
++			 */
+ 			put_page(page);
+ 			if (!test_set_page_hwpoison(page))
+ 				num_poisoned_pages_inc();
+diff --git a/mm/slub.c b/mm/slub.c
+index d8fbd4a6ed59..2a722e141958 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -2815,6 +2815,7 @@ struct detached_freelist {
+ 	void *tail;
+ 	void *freelist;
+ 	int cnt;
++	struct kmem_cache *s;
+ };
+ 
+ /*
+@@ -2829,8 +2830,9 @@ struct detached_freelist {
+  * synchronization primitive.  Look ahead in the array is limited due
+  * to performance reasons.
+  */
+-static int build_detached_freelist(struct kmem_cache *s, size_t size,
+-				   void **p, struct detached_freelist *df)
++static inline
++int build_detached_freelist(struct kmem_cache *s, size_t size,
++			    void **p, struct detached_freelist *df)
+ {
+ 	size_t first_skipped_index = 0;
+ 	int lookahead = 3;
+@@ -2846,8 +2848,11 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
+ 	if (!object)
+ 		return 0;
+ 
++	/* Support for memcg, compiler can optimize this out */
++	df->s = cache_from_obj(s, object);
++
+ 	/* Start new detached freelist */
+-	set_freepointer(s, object, NULL);
++	set_freepointer(df->s, object, NULL);
+ 	df->page = virt_to_head_page(object);
+ 	df->tail = object;
+ 	df->freelist = object;
+@@ -2862,7 +2867,7 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
+ 		/* df->page is always set at this point */
+ 		if (df->page == virt_to_head_page(object)) {
+ 			/* Opportunity build freelist */
+-			set_freepointer(s, object, df->freelist);
++			set_freepointer(df->s, object, df->freelist);
+ 			df->freelist = object;
+ 			df->cnt++;
+ 			p[size] = NULL; /* mark object processed */
+@@ -2881,25 +2886,20 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
+ 	return first_skipped_index;
+ }
+ 
+-
+ /* Note that interrupts must be enabled when calling this function. */
+-void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
++void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
+ {
+ 	if (WARN_ON(!size))
+ 		return;
+ 
+ 	do {
+ 		struct detached_freelist df;
+-		struct kmem_cache *s;
+-
+-		/* Support for memcg */
+-		s = cache_from_obj(orig_s, p[size - 1]);
+ 
+ 		size = build_detached_freelist(s, size, p, &df);
+ 		if (unlikely(!df.page))
+ 			continue;
+ 
+-		slab_free(s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
++		slab_free(df.s, df.page, df.freelist, df.tail, df.cnt,_RET_IP_);
+ 	} while (likely(size));
+ }
+ EXPORT_SYMBOL(kmem_cache_free_bulk);
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 71b1c29948db..c712b016e0ab 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2561,7 +2561,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
+ 		sc->gfp_mask |= __GFP_HIGHMEM;
+ 
+ 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
+-					requested_highidx, sc->nodemask) {
++					gfp_zone(sc->gfp_mask), sc->nodemask) {
+ 		enum zone_type classzone_idx;
+ 
+ 		if (!populated_zone(zone))
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index f1ffb34e253f..d2bc03f0b4d7 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1305,7 +1305,7 @@ static int netlink_release(struct socket *sock)
+ 
+ 	skb_queue_purge(&sk->sk_write_queue);
+ 
+-	if (nlk->portid) {
++	if (nlk->portid && nlk->bound) {
+ 		struct netlink_notify n = {
+ 						.net = sock_net(sk),
+ 						.protocol = sk->sk_protocol,
+diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
+index 273bc3a35425..008c25d1b9f9 100644
+--- a/net/sunrpc/cache.c
++++ b/net/sunrpc/cache.c
+@@ -1182,14 +1182,14 @@ int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
+ 	}
+ 
+ 	crq->q.reader = 0;
+-	crq->item = cache_get(h);
+ 	crq->buf = buf;
+ 	crq->len = 0;
+ 	crq->readers = 0;
+ 	spin_lock(&queue_lock);
+-	if (test_bit(CACHE_PENDING, &h->flags))
++	if (test_bit(CACHE_PENDING, &h->flags)) {
++		crq->item = cache_get(h);
+ 		list_add_tail(&crq->q.list, &detail->queue);
+-	else
++	} else
+ 		/* Lost a race, no longer PENDING, so don't enqueue */
+ 		ret = -EAGAIN;
+ 	spin_unlock(&queue_lock);
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 711cb7ad6ae0..ab62d305b48b 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -13201,7 +13201,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
+ 	struct wireless_dev *wdev;
+ 	struct cfg80211_beacon_registration *reg, *tmp;
+ 
+-	if (state != NETLINK_URELEASE)
++	if (state != NETLINK_URELEASE || notify->protocol != NETLINK_GENERIC)
+ 		return NOTIFY_DONE;
+ 
+ 	rcu_read_lock();
+diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
+index 0b7dc2fd7bac..dd243d2abd87 100644
+--- a/scripts/kconfig/confdata.c
++++ b/scripts/kconfig/confdata.c
+@@ -267,10 +267,8 @@ int conf_read_simple(const char *name, int def)
+ 		if (in)
+ 			goto load;
+ 		sym_add_change_count(1);
+-		if (!sym_defconfig_list) {
+-			sym_calc_value(modules_sym);
++		if (!sym_defconfig_list)
+ 			return 1;
+-		}
+ 
+ 		for_all_defaults(sym_defconfig_list, prop) {
+ 			if (expr_calc_value(prop->visible.expr) == no ||
+@@ -403,7 +401,6 @@ setsym:
+ 	}
+ 	free(line);
+ 	fclose(in);
+-	sym_calc_value(modules_sym);
+ 	return 0;
+ }
+ 
+@@ -414,8 +411,12 @@ int conf_read(const char *name)
+ 
+ 	sym_set_change_count(0);
+ 
+-	if (conf_read_simple(name, S_DEF_USER))
++	if (conf_read_simple(name, S_DEF_USER)) {
++		sym_calc_value(modules_sym);
+ 		return 1;
++	}
++
++	sym_calc_value(modules_sym);
+ 
+ 	for_all_symbols(i, sym) {
+ 		sym_calc_value(sym);
+@@ -846,6 +847,7 @@ static int conf_split_config(void)
+ 
+ 	name = conf_get_autoconfig_name();
+ 	conf_read_simple(name, S_DEF_AUTO);
++	sym_calc_value(modules_sym);
+ 
+ 	if (chdir("include/config"))
+ 		return 1;
+diff --git a/security/keys/trusted.c b/security/keys/trusted.c
+index 0dcab20cdacd..90d61751ff12 100644
+--- a/security/keys/trusted.c
++++ b/security/keys/trusted.c
+@@ -744,6 +744,7 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
+ 	unsigned long handle;
+ 	unsigned long lock;
+ 	unsigned long token_mask = 0;
++	unsigned int digest_len;
+ 	int i;
+ 	int tpm2;
+ 
+@@ -752,7 +753,6 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
+ 		return tpm2;
+ 
+ 	opt->hash = tpm2 ? HASH_ALGO_SHA256 : HASH_ALGO_SHA1;
+-	opt->digest_len = hash_digest_size[opt->hash];
+ 
+ 	while ((p = strsep(&c, " \t"))) {
+ 		if (*p == '\0' || *p == ' ' || *p == '\t')
+@@ -812,8 +812,6 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
+ 			for (i = 0; i < HASH_ALGO__LAST; i++) {
+ 				if (!strcmp(args[0].from, hash_algo_name[i])) {
+ 					opt->hash = i;
+-					opt->digest_len =
+-						hash_digest_size[opt->hash];
+ 					break;
+ 				}
+ 			}
+@@ -825,13 +823,14 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
+ 			}
+ 			break;
+ 		case Opt_policydigest:
+-			if (!tpm2 ||
+-			    strlen(args[0].from) != (2 * opt->digest_len))
++			digest_len = hash_digest_size[opt->hash];
++			if (!tpm2 || strlen(args[0].from) != (2 * digest_len))
+ 				return -EINVAL;
+ 			res = hex2bin(opt->policydigest, args[0].from,
+-				      opt->digest_len);
++				      digest_len);
+ 			if (res < 0)
+ 				return -EINVAL;
++			opt->policydigest_len = digest_len;
+ 			break;
+ 		case Opt_policyhandle:
+ 			if (!tpm2)
+diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
+index f6854dbd7d8d..69ead7150a5c 100644
+--- a/sound/hda/hdac_i915.c
++++ b/sound/hda/hdac_i915.c
+@@ -20,6 +20,7 @@
+ #include <sound/core.h>
+ #include <sound/hdaudio.h>
+ #include <sound/hda_i915.h>
++#include <sound/hda_register.h>
+ 
+ static struct i915_audio_component *hdac_acomp;
+ 
+@@ -97,26 +98,65 @@ int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
+ }
+ EXPORT_SYMBOL_GPL(snd_hdac_display_power);
+ 
++#define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \
++				((pci)->device == 0x0c0c) || \
++				((pci)->device == 0x0d0c) || \
++				((pci)->device == 0x160c))
++
+ /**
+- * snd_hdac_get_display_clk - Get CDCLK in kHz
++ * snd_hdac_i915_set_bclk - Reprogram BCLK for HSW/BDW
+  * @bus: HDA core bus
+  *
+- * This function is supposed to be used only by a HD-audio controller
+- * driver that needs the interaction with i915 graphics.
++ * Intel HSW/BDW display HDA controller is in GPU. Both its power and link BCLK
++ * depends on GPU. Two Extended Mode registers EM4 (M value) and EM5 (N Value)
++ * are used to convert CDClk (Core Display Clock) to 24MHz BCLK:
++ * BCLK = CDCLK * M / N
++ * The values will be lost when the display power well is disabled and need to
++ * be restored to avoid abnormal playback speed.
+  *
+- * This function queries CDCLK value in kHz from the graphics driver and
+- * returns the value.  A negative code is returned in error.
++ * Call this function at initializing and changing power well, as well as
++ * at ELD notifier for the hotplug.
+  */
+-int snd_hdac_get_display_clk(struct hdac_bus *bus)
++void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
+ {
+ 	struct i915_audio_component *acomp = bus->audio_component;
++	struct pci_dev *pci = to_pci_dev(bus->dev);
++	int cdclk_freq;
++	unsigned int bclk_m, bclk_n;
++
++	if (!acomp || !acomp->ops || !acomp->ops->get_cdclk_freq)
++		return; /* only for i915 binding */
++	if (!CONTROLLER_IN_GPU(pci))
++		return; /* only HSW/BDW */
++
++	cdclk_freq = acomp->ops->get_cdclk_freq(acomp->dev);
++	switch (cdclk_freq) {
++	case 337500:
++		bclk_m = 16;
++		bclk_n = 225;
++		break;
++
++	case 450000:
++	default: /* default CDCLK 450MHz */
++		bclk_m = 4;
++		bclk_n = 75;
++		break;
++
++	case 540000:
++		bclk_m = 4;
++		bclk_n = 90;
++		break;
++
++	case 675000:
++		bclk_m = 8;
++		bclk_n = 225;
++		break;
++	}
+ 
+-	if (!acomp || !acomp->ops)
+-		return -ENODEV;
+-
+-	return acomp->ops->get_cdclk_freq(acomp->dev);
++	snd_hdac_chip_writew(bus, HSW_EM4, bclk_m);
++	snd_hdac_chip_writew(bus, HSW_EM5, bclk_n);
+ }
+-EXPORT_SYMBOL_GPL(snd_hdac_get_display_clk);
++EXPORT_SYMBOL_GPL(snd_hdac_i915_set_bclk);
+ 
+ /* There is a fixed mapping between audio pin node and display port
+  * on current Intel platforms:
+diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
+index 7ca5b89f088a..dfaf1a93fb8a 100644
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -826,7 +826,7 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
+ 				   bool allow_powerdown)
+ {
+ 	hda_nid_t nid, changed = 0;
+-	int i, state;
++	int i, state, power;
+ 
+ 	for (i = 0; i < path->depth; i++) {
+ 		nid = path->path[i];
+@@ -838,7 +838,9 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
+ 			state = AC_PWRST_D0;
+ 		else
+ 			state = AC_PWRST_D3;
+-		if (!snd_hda_check_power_state(codec, nid, state)) {
++		power = snd_hda_codec_read(codec, nid, 0,
++					   AC_VERB_GET_POWER_STATE, 0);
++		if (power != (state | (state << 4))) {
+ 			snd_hda_codec_write(codec, nid, 0,
+ 					    AC_VERB_SET_POWER_STATE, state);
+ 			changed = nid;
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index e5240cb3749f..c0b772bb49af 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -857,50 +857,6 @@ static int param_set_xint(const char *val, const struct kernel_param *kp)
+ #define azx_del_card_list(chip) /* NOP */
+ #endif /* CONFIG_PM */
+ 
+-/* Intel HSW/BDW display HDA controller is in GPU. Both its power and link BCLK
+- * depends on GPU. Two Extended Mode registers EM4 (M value) and EM5 (N Value)
+- * are used to convert CDClk (Core Display Clock) to 24MHz BCLK:
+- * BCLK = CDCLK * M / N
+- * The values will be lost when the display power well is disabled and need to
+- * be restored to avoid abnormal playback speed.
+- */
+-static void haswell_set_bclk(struct hda_intel *hda)
+-{
+-	struct azx *chip = &hda->chip;
+-	int cdclk_freq;
+-	unsigned int bclk_m, bclk_n;
+-
+-	if (!hda->need_i915_power)
+-		return;
+-
+-	cdclk_freq = snd_hdac_get_display_clk(azx_bus(chip));
+-	switch (cdclk_freq) {
+-	case 337500:
+-		bclk_m = 16;
+-		bclk_n = 225;
+-		break;
+-
+-	case 450000:
+-	default: /* default CDCLK 450MHz */
+-		bclk_m = 4;
+-		bclk_n = 75;
+-		break;
+-
+-	case 540000:
+-		bclk_m = 4;
+-		bclk_n = 90;
+-		break;
+-
+-	case 675000:
+-		bclk_m = 8;
+-		bclk_n = 225;
+-		break;
+-	}
+-
+-	azx_writew(chip, HSW_EM4, bclk_m);
+-	azx_writew(chip, HSW_EM5, bclk_n);
+-}
+-
+ #if defined(CONFIG_PM_SLEEP) || defined(SUPPORT_VGA_SWITCHEROO)
+ /*
+  * power management
+@@ -958,7 +914,7 @@ static int azx_resume(struct device *dev)
+ 	if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
+ 		&& hda->need_i915_power) {
+ 		snd_hdac_display_power(azx_bus(chip), true);
+-		haswell_set_bclk(hda);
++		snd_hdac_i915_set_bclk(azx_bus(chip));
+ 	}
+ 	if (chip->msi)
+ 		if (pci_enable_msi(pci) < 0)
+@@ -1058,7 +1014,7 @@ static int azx_runtime_resume(struct device *dev)
+ 		bus = azx_bus(chip);
+ 		if (hda->need_i915_power) {
+ 			snd_hdac_display_power(bus, true);
+-			haswell_set_bclk(hda);
++			snd_hdac_i915_set_bclk(bus);
+ 		} else {
+ 			/* toggle codec wakeup bit for STATESTS read */
+ 			snd_hdac_set_codec_wakeup(bus, true);
+@@ -1796,12 +1752,8 @@ static int azx_first_init(struct azx *chip)
+ 	/* initialize chip */
+ 	azx_init_pci(chip);
+ 
+-	if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
+-		struct hda_intel *hda;
+-
+-		hda = container_of(chip, struct hda_intel, chip);
+-		haswell_set_bclk(hda);
+-	}
++	if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
++		snd_hdac_i915_set_bclk(bus);
+ 
+ 	hda_intel_init_chip(chip, (probe_only[dev] & 2) == 0);
+ 
+@@ -2232,6 +2184,9 @@ static const struct pci_device_id azx_ids[] = {
+ 	/* Broxton-P(Apollolake) */
+ 	{ PCI_DEVICE(0x8086, 0x5a98),
+ 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
++	/* Broxton-T */
++	{ PCI_DEVICE(0x8086, 0x1a98),
++	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
+ 	/* Haswell */
+ 	{ PCI_DEVICE(0x8086, 0x0a0c),
+ 	  .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
+diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
+index a47e8ae0eb30..80bbadc83721 100644
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -361,6 +361,7 @@ static int cs_parse_auto_config(struct hda_codec *codec)
+ {
+ 	struct cs_spec *spec = codec->spec;
+ 	int err;
++	int i;
+ 
+ 	err = snd_hda_parse_pin_defcfg(codec, &spec->gen.autocfg, NULL, 0);
+ 	if (err < 0)
+@@ -370,6 +371,19 @@ static int cs_parse_auto_config(struct hda_codec *codec)
+ 	if (err < 0)
+ 		return err;
+ 
++	/* keep the ADCs powered up when it's dynamically switchable */
++	if (spec->gen.dyn_adc_switch) {
++		unsigned int done = 0;
++		for (i = 0; i < spec->gen.input_mux.num_items; i++) {
++			int idx = spec->gen.dyn_adc_idx[i];
++			if (done & (1 << idx))
++				continue;
++			snd_hda_gen_fix_pin_power(codec,
++						  spec->gen.adc_nids[idx]);
++			done |= 1 << idx;
++		}
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 0c9585602bf3..c98e404afbe0 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -2452,6 +2452,7 @@ static void intel_pin_eld_notify(void *audio_ptr, int port)
+ 	if (atomic_read(&(codec)->core.in_pm))
+ 		return;
+ 
++	snd_hdac_i915_set_bclk(&codec->bus->core);
+ 	check_presence_and_report(codec, pin_nid);
+ }
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 1402ba954b3d..ac4490a96863 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5449,6 +5449,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13),
++	SND_PCI_QUIRK(0x1028, 0x0669, "Dell Optiplex 9020m", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK),
+ 	SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+@@ -5583,6 +5584,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
++	SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
+ 	SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+diff --git a/sound/pci/pcxhr/pcxhr_core.c b/sound/pci/pcxhr/pcxhr_core.c
+index c5194f5b150a..d7e71f309299 100644
+--- a/sound/pci/pcxhr/pcxhr_core.c
++++ b/sound/pci/pcxhr/pcxhr_core.c
+@@ -1341,5 +1341,6 @@ irqreturn_t pcxhr_threaded_irq(int irq, void *dev_id)
+ 	}
+ 
+ 	pcxhr_msg_thread(mgr);
++	mutex_unlock(&mgr->lock);
+ 	return IRQ_HANDLED;
+ }
+diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
+index 11d032cdc658..48dbb2fdeb09 100644
+--- a/sound/soc/codecs/rt5640.c
++++ b/sound/soc/codecs/rt5640.c
+@@ -359,7 +359,7 @@ static const DECLARE_TLV_DB_RANGE(bst_tlv,
+ 
+ /* Interface data select */
+ static const char * const rt5640_data_select[] = {
+-	"Normal", "left copy to right", "right copy to left", "Swap"};
++	"Normal", "Swap", "left copy to right", "right copy to left"};
+ 
+ static SOC_ENUM_SINGLE_DECL(rt5640_if1_dac_enum, RT5640_DIG_INF_DATA,
+ 			    RT5640_IF1_DAC_SEL_SFT, rt5640_data_select);
+diff --git a/sound/soc/codecs/rt5640.h b/sound/soc/codecs/rt5640.h
+index 83a7150ddc24..f84231e7d1dd 100644
+--- a/sound/soc/codecs/rt5640.h
++++ b/sound/soc/codecs/rt5640.h
+@@ -442,39 +442,39 @@
+ #define RT5640_IF1_DAC_SEL_MASK			(0x3 << 14)
+ #define RT5640_IF1_DAC_SEL_SFT			14
+ #define RT5640_IF1_DAC_SEL_NOR			(0x0 << 14)
+-#define RT5640_IF1_DAC_SEL_L2R			(0x1 << 14)
+-#define RT5640_IF1_DAC_SEL_R2L			(0x2 << 14)
+-#define RT5640_IF1_DAC_SEL_SWAP			(0x3 << 14)
++#define RT5640_IF1_DAC_SEL_SWAP			(0x1 << 14)
++#define RT5640_IF1_DAC_SEL_L2R			(0x2 << 14)
++#define RT5640_IF1_DAC_SEL_R2L			(0x3 << 14)
+ #define RT5640_IF1_ADC_SEL_MASK			(0x3 << 12)
+ #define RT5640_IF1_ADC_SEL_SFT			12
+ #define RT5640_IF1_ADC_SEL_NOR			(0x0 << 12)
+-#define RT5640_IF1_ADC_SEL_L2R			(0x1 << 12)
+-#define RT5640_IF1_ADC_SEL_R2L			(0x2 << 12)
+-#define RT5640_IF1_ADC_SEL_SWAP			(0x3 << 12)
++#define RT5640_IF1_ADC_SEL_SWAP			(0x1 << 12)
++#define RT5640_IF1_ADC_SEL_L2R			(0x2 << 12)
++#define RT5640_IF1_ADC_SEL_R2L			(0x3 << 12)
+ #define RT5640_IF2_DAC_SEL_MASK			(0x3 << 10)
+ #define RT5640_IF2_DAC_SEL_SFT			10
+ #define RT5640_IF2_DAC_SEL_NOR			(0x0 << 10)
+-#define RT5640_IF2_DAC_SEL_L2R			(0x1 << 10)
+-#define RT5640_IF2_DAC_SEL_R2L			(0x2 << 10)
+-#define RT5640_IF2_DAC_SEL_SWAP			(0x3 << 10)
++#define RT5640_IF2_DAC_SEL_SWAP			(0x1 << 10)
++#define RT5640_IF2_DAC_SEL_L2R			(0x2 << 10)
++#define RT5640_IF2_DAC_SEL_R2L			(0x3 << 10)
+ #define RT5640_IF2_ADC_SEL_MASK			(0x3 << 8)
+ #define RT5640_IF2_ADC_SEL_SFT			8
+ #define RT5640_IF2_ADC_SEL_NOR			(0x0 << 8)
+-#define RT5640_IF2_ADC_SEL_L2R			(0x1 << 8)
+-#define RT5640_IF2_ADC_SEL_R2L			(0x2 << 8)
+-#define RT5640_IF2_ADC_SEL_SWAP			(0x3 << 8)
++#define RT5640_IF2_ADC_SEL_SWAP			(0x1 << 8)
++#define RT5640_IF2_ADC_SEL_L2R			(0x2 << 8)
++#define RT5640_IF2_ADC_SEL_R2L			(0x3 << 8)
+ #define RT5640_IF3_DAC_SEL_MASK			(0x3 << 6)
+ #define RT5640_IF3_DAC_SEL_SFT			6
+ #define RT5640_IF3_DAC_SEL_NOR			(0x0 << 6)
+-#define RT5640_IF3_DAC_SEL_L2R			(0x1 << 6)
+-#define RT5640_IF3_DAC_SEL_R2L			(0x2 << 6)
+-#define RT5640_IF3_DAC_SEL_SWAP			(0x3 << 6)
++#define RT5640_IF3_DAC_SEL_SWAP			(0x1 << 6)
++#define RT5640_IF3_DAC_SEL_L2R			(0x2 << 6)
++#define RT5640_IF3_DAC_SEL_R2L			(0x3 << 6)
+ #define RT5640_IF3_ADC_SEL_MASK			(0x3 << 4)
+ #define RT5640_IF3_ADC_SEL_SFT			4
+ #define RT5640_IF3_ADC_SEL_NOR			(0x0 << 4)
+-#define RT5640_IF3_ADC_SEL_L2R			(0x1 << 4)
+-#define RT5640_IF3_ADC_SEL_R2L			(0x2 << 4)
+-#define RT5640_IF3_ADC_SEL_SWAP			(0x3 << 4)
++#define RT5640_IF3_ADC_SEL_SWAP			(0x1 << 4)
++#define RT5640_IF3_ADC_SEL_L2R			(0x2 << 4)
++#define RT5640_IF3_ADC_SEL_R2L			(0x3 << 4)
+ 
+ /* REC Left Mixer Control 1 (0x3b) */
+ #define RT5640_G_HP_L_RM_L_MASK			(0x7 << 13)
+diff --git a/sound/soc/codecs/ssm4567.c b/sound/soc/codecs/ssm4567.c
+index e619d5651b09..080c78e88e10 100644
+--- a/sound/soc/codecs/ssm4567.c
++++ b/sound/soc/codecs/ssm4567.c
+@@ -352,6 +352,11 @@ static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable)
+ 	regcache_cache_only(ssm4567->regmap, !enable);
+ 
+ 	if (enable) {
++		ret = regmap_write(ssm4567->regmap, SSM4567_REG_SOFT_RESET,
++			0x00);
++		if (ret)
++			return ret;
++
+ 		ret = regmap_update_bits(ssm4567->regmap,
+ 			SSM4567_REG_POWER_CTRL,
+ 			SSM4567_POWER_SPWDN, 0x00);
+diff --git a/sound/soc/samsung/s3c-i2s-v2.c b/sound/soc/samsung/s3c-i2s-v2.c
+index df65c5b494b1..b6ab3fc5789e 100644
+--- a/sound/soc/samsung/s3c-i2s-v2.c
++++ b/sound/soc/samsung/s3c-i2s-v2.c
+@@ -709,7 +709,7 @@ static int s3c2412_i2s_resume(struct snd_soc_dai *dai)
+ #endif
+ 
+ int s3c_i2sv2_register_component(struct device *dev, int id,
+-			   struct snd_soc_component_driver *cmp_drv,
++			   const struct snd_soc_component_driver *cmp_drv,
+ 			   struct snd_soc_dai_driver *dai_drv)
+ {
+ 	struct snd_soc_dai_ops *ops = (struct snd_soc_dai_ops *)dai_drv->ops;
+diff --git a/sound/soc/samsung/s3c-i2s-v2.h b/sound/soc/samsung/s3c-i2s-v2.h
+index 90abab364b49..d0684145ed1f 100644
+--- a/sound/soc/samsung/s3c-i2s-v2.h
++++ b/sound/soc/samsung/s3c-i2s-v2.h
+@@ -101,7 +101,7 @@ extern int s3c_i2sv2_probe(struct snd_soc_dai *dai,
+  * soc core.
+  */
+ extern int s3c_i2sv2_register_component(struct device *dev, int id,
+-					struct snd_soc_component_driver *cmp_drv,
++					const struct snd_soc_component_driver *cmp_drv,
+ 					struct snd_soc_dai_driver *dai_drv);
+ 
+ #endif /* __SND_SOC_S3C24XX_S3C_I2SV2_I2S_H */
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index 581175a51ecf..5e811dc02fb9 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -2188,6 +2188,13 @@ static ssize_t dapm_widget_show_component(struct snd_soc_component *cmpnt,
+ 	int count = 0;
+ 	char *state = "not set";
+ 
++	/* card won't be set for the dummy component, as a spot fix
++	 * we're checking for that case specifically here but in future
++	 * we will ensure that the dummy component looks like others.
++	 */
++	if (!cmpnt->card)
++		return 0;
++
+ 	list_for_each_entry(w, &cmpnt->card->widgets, list) {
+ 		if (w->dapm != dapm)
+ 			continue;
+diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
+index 52ef7a9d50aa..14d9e8ffaff7 100644
+--- a/tools/perf/Documentation/perf-stat.txt
++++ b/tools/perf/Documentation/perf-stat.txt
+@@ -69,6 +69,14 @@ report::
+ --scale::
+ 	scale/normalize counter values
+ 
++-d::
++--detailed::
++	print more detailed statistics, can be specified up to 3 times
++
++	   -d:          detailed events, L1 and LLC data cache
++        -d -d:     more detailed events, dTLB and iTLB events
++     -d -d -d:     very detailed events, adding prefetch events
++
+ -r::
+ --repeat=<n>::
+ 	repeat command and print average + stddev (max: 100). 0 means forever.
+diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
+index 08c09ad755d2..7bb47424bc49 100644
+--- a/tools/perf/ui/browsers/hists.c
++++ b/tools/perf/ui/browsers/hists.c
+@@ -302,7 +302,7 @@ static void callchain_node__init_have_children(struct callchain_node *node,
+ 	chain = list_entry(node->val.next, struct callchain_list, list);
+ 	chain->has_children = has_sibling;
+ 
+-	if (node->val.next != node->val.prev) {
++	if (!list_empty(&node->val)) {
+ 		chain = list_entry(node->val.prev, struct callchain_list, list);
+ 		chain->has_children = !RB_EMPTY_ROOT(&node->rb_root);
+ 	}
+@@ -844,7 +844,7 @@ next:
+ 	return row - first_row;
+ }
+ 
+-static int hist_browser__show_callchain(struct hist_browser *browser,
++static int hist_browser__show_callchain_graph(struct hist_browser *browser,
+ 					struct rb_root *root, int level,
+ 					unsigned short row, u64 total,
+ 					print_callchain_entry_fn print,
+@@ -898,7 +898,7 @@ static int hist_browser__show_callchain(struct hist_browser *browser,
+ 			else
+ 				new_total = total;
+ 
+-			row += hist_browser__show_callchain(browser, &child->rb_root,
++			row += hist_browser__show_callchain_graph(browser, &child->rb_root,
+ 							    new_level, row, new_total,
+ 							    print, arg, is_output_full);
+ 		}
+@@ -910,6 +910,43 @@ out:
+ 	return row - first_row;
+ }
+ 
++static int hist_browser__show_callchain(struct hist_browser *browser,
++					struct hist_entry *entry, int level,
++					unsigned short row,
++					print_callchain_entry_fn print,
++					struct callchain_print_arg *arg,
++					check_output_full_fn is_output_full)
++{
++	u64 total = hists__total_period(entry->hists);
++	int printed;
++
++	if (callchain_param.mode == CHAIN_GRAPH_REL) {
++		if (symbol_conf.cumulate_callchain)
++			total = entry->stat_acc->period;
++		else
++			total = entry->stat.period;
++	}
++
++	if (callchain_param.mode == CHAIN_FLAT) {
++		printed = hist_browser__show_callchain_flat(browser,
++						&entry->sorted_chain, row, total,
++						print, arg, is_output_full);
++	} else if (callchain_param.mode == CHAIN_FOLDED) {
++		printed = hist_browser__show_callchain_folded(browser,
++						&entry->sorted_chain, row, total,
++						print, arg, is_output_full);
++	} else {
++		printed = hist_browser__show_callchain_graph(browser,
++						&entry->sorted_chain, level, row, total,
++						print, arg, is_output_full);
++	}
++
++	if (arg->is_current_entry)
++		browser->he_selection = entry;
++
++	return printed;
++}
++
+ struct hpp_arg {
+ 	struct ui_browser *b;
+ 	char folded_sign;
+@@ -1084,38 +1121,14 @@ static int hist_browser__show_entry(struct hist_browser *browser,
+ 		--row_offset;
+ 
+ 	if (folded_sign == '-' && row != browser->b.rows) {
+-		u64 total = hists__total_period(entry->hists);
+ 		struct callchain_print_arg arg = {
+ 			.row_offset = row_offset,
+ 			.is_current_entry = current_entry,
+ 		};
+ 
+-		if (callchain_param.mode == CHAIN_GRAPH_REL) {
+-			if (symbol_conf.cumulate_callchain)
+-				total = entry->stat_acc->period;
+-			else
+-				total = entry->stat.period;
+-		}
+-
+-		if (callchain_param.mode == CHAIN_FLAT) {
+-			printed += hist_browser__show_callchain_flat(browser,
+-					&entry->sorted_chain, row, total,
++		printed += hist_browser__show_callchain(browser, entry, 1, row,
+ 					hist_browser__show_callchain_entry, &arg,
+ 					hist_browser__check_output_full);
+-		} else if (callchain_param.mode == CHAIN_FOLDED) {
+-			printed += hist_browser__show_callchain_folded(browser,
+-					&entry->sorted_chain, row, total,
+-					hist_browser__show_callchain_entry, &arg,
+-					hist_browser__check_output_full);
+-		} else {
+-			printed += hist_browser__show_callchain(browser,
+-					&entry->sorted_chain, 1, row, total,
+-					hist_browser__show_callchain_entry, &arg,
+-					hist_browser__check_output_full);
+-		}
+-
+-		if (arg.is_current_entry)
+-			browser->he_selection = entry;
+ 	}
+ 
+ 	return printed;
+@@ -1380,15 +1393,11 @@ do_offset:
+ static int hist_browser__fprintf_callchain(struct hist_browser *browser,
+ 					   struct hist_entry *he, FILE *fp)
+ {
+-	u64 total = hists__total_period(he->hists);
+ 	struct callchain_print_arg arg  = {
+ 		.fp = fp,
+ 	};
+ 
+-	if (symbol_conf.cumulate_callchain)
+-		total = he->stat_acc->period;
+-
+-	hist_browser__show_callchain(browser, &he->sorted_chain, 1, 0, total,
++	hist_browser__show_callchain(browser, he, 1, 0,
+ 				     hist_browser__fprintf_callchain_entry, &arg,
+ 				     hist_browser__check_dump_full);
+ 	return arg.printed;
+@@ -2320,10 +2329,12 @@ skip_annotation:
+ 			 *
+ 			 * See hist_browser__show_entry.
+ 			 */
+-			nr_options += add_script_opt(browser,
+-						     &actions[nr_options],
+-						     &options[nr_options],
+-						     NULL, browser->selection->sym);
++			if (sort__has_sym && browser->selection->sym) {
++				nr_options += add_script_opt(browser,
++							     &actions[nr_options],
++							     &options[nr_options],
++							     NULL, browser->selection->sym);
++			}
+ 		}
+ 		nr_options += add_script_opt(browser, &actions[nr_options],
+ 					     &options[nr_options], NULL, NULL);
+diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
+index 85155e91b61b..7bad5c3fa7b7 100644
+--- a/tools/perf/util/event.c
++++ b/tools/perf/util/event.c
+@@ -282,7 +282,7 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
+ 		strcpy(execname, "");
+ 
+ 		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
+-		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n",
++		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
+ 		       &event->mmap2.start, &event->mmap2.len, prot,
+ 		       &event->mmap2.pgoff, &event->mmap2.maj,
+ 		       &event->mmap2.min,
+diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
+index d81f13de2476..a7eb0eae9938 100644
+--- a/tools/perf/util/evlist.c
++++ b/tools/perf/util/evlist.c
+@@ -1181,12 +1181,12 @@ void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
+ 	 */
+ 	if (cpus != evlist->cpus) {
+ 		cpu_map__put(evlist->cpus);
+-		evlist->cpus = cpus;
++		evlist->cpus = cpu_map__get(cpus);
+ 	}
+ 
+ 	if (threads != evlist->threads) {
+ 		thread_map__put(evlist->threads);
+-		evlist->threads = threads;
++		evlist->threads = thread_map__get(threads);
+ 	}
+ 
+ 	perf_evlist__propagate_maps(evlist);
+diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
+index 8e75434bd01c..4d8037a3d8a4 100644
+--- a/tools/perf/util/evsel.h
++++ b/tools/perf/util/evsel.h
+@@ -93,10 +93,8 @@ struct perf_evsel {
+ 	const char		*unit;
+ 	struct event_format	*tp_format;
+ 	off_t			id_offset;
+-	union {
+-		void		*priv;
+-		u64		db_id;
+-	};
++	void			*priv;
++	u64			db_id;
+ 	struct cgroup_sel	*cgrp;
+ 	void			*handler;
+ 	struct cpu_map		*cpus;
+diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
+index 05d815851be1..4e1590ba8902 100644
+--- a/tools/perf/util/intel-pt.c
++++ b/tools/perf/util/intel-pt.c
+@@ -1127,7 +1127,7 @@ static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
+ 		pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n",
+ 		       ret);
+ 
+-	if (pt->synth_opts.callchain)
++	if (pt->synth_opts.last_branch)
+ 		intel_pt_reset_last_branch_rb(ptq);
+ 
+ 	return ret;
+diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
+index ea6064696fe4..a7b9022b5c8f 100644
+--- a/virt/kvm/arm/arch_timer.c
++++ b/virt/kvm/arm/arch_timer.c
+@@ -86,6 +86,8 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
+ 	vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
+ 	vcpu->arch.timer_cpu.armed = false;
+ 
++	WARN_ON(!kvm_timer_should_fire(vcpu));
++
+ 	/*
+ 	 * If the vcpu is blocked we want to wake it up so that it will see
+ 	 * the timer has expired when entering the guest.
+@@ -93,10 +95,46 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
+ 	kvm_vcpu_kick(vcpu);
+ }
+ 
++static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu)
++{
++	cycle_t cval, now;
++
++	cval = vcpu->arch.timer_cpu.cntv_cval;
++	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
++
++	if (now < cval) {
++		u64 ns;
++
++		ns = cyclecounter_cyc2ns(timecounter->cc,
++					 cval - now,
++					 timecounter->mask,
++					 &timecounter->frac);
++		return ns;
++	}
++
++	return 0;
++}
++
+ static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
+ {
+ 	struct arch_timer_cpu *timer;
++	struct kvm_vcpu *vcpu;
++	u64 ns;
++
+ 	timer = container_of(hrt, struct arch_timer_cpu, timer);
++	vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);
++
++	/*
++	 * Check that the timer has really expired from the guest's
++	 * PoV (NTP on the host may have forced it to expire
++	 * early). If we should have slept longer, restart it.
++	 */
++	ns = kvm_timer_compute_delta(vcpu);
++	if (unlikely(ns)) {
++		hrtimer_forward_now(hrt, ns_to_ktime(ns));
++		return HRTIMER_RESTART;
++	}
++
+ 	queue_work(wqueue, &timer->expired);
+ 	return HRTIMER_NORESTART;
+ }
+@@ -170,8 +208,6 @@ static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
+ void kvm_timer_schedule(struct kvm_vcpu *vcpu)
+ {
+ 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+-	u64 ns;
+-	cycle_t cval, now;
+ 
+ 	BUG_ON(timer_is_armed(timer));
+ 
+@@ -191,14 +227,7 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu)
+ 		return;
+ 
+ 	/*  The timer has not yet expired, schedule a background timer */
+-	cval = timer->cntv_cval;
+-	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+-
+-	ns = cyclecounter_cyc2ns(timecounter->cc,
+-				 cval - now,
+-				 timecounter->mask,
+-				 &timecounter->frac);
+-	timer_arm(timer, ns);
++	timer_arm(timer, kvm_timer_compute_delta(vcpu));
+ }
+ 
+ void kvm_timer_unschedule(struct kvm_vcpu *vcpu)


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [gentoo-commits] proj/linux-patches:4.5 commit in: /
@ 2016-05-12  0:16 Mike Pagano
  0 siblings, 0 replies; 12+ messages in thread
From: Mike Pagano @ 2016-05-12  0:16 UTC (permalink / raw
  To: gentoo-commits

commit:     4040e317a60e2caff24458b02c1f87a41a84c644
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu May 12 00:15:55 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu May 12 00:15:55 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4040e317

Linux patch 4.5.4

 0000_README            |    4 +
 1003_linux-4.5.4.patch | 2354 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2358 insertions(+)

diff --git a/0000_README b/0000_README
index 0147ad9..a736c59 100644
--- a/0000_README
+++ b/0000_README
@@ -55,6 +55,10 @@ Patch:  1002_linux-4.5.3.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.5.3
 
+Patch:  1003_linux-4.5.4.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.5.4
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1003_linux-4.5.4.patch b/1003_linux-4.5.4.patch
new file mode 100644
index 0000000..a783ee2
--- /dev/null
+++ b/1003_linux-4.5.4.patch
@@ -0,0 +1,2354 @@
+diff --git a/Documentation/devicetree/bindings/ata/ahci-platform.txt b/Documentation/devicetree/bindings/ata/ahci-platform.txt
+index c2340eeeb97f..c000832a7fb9 100644
+--- a/Documentation/devicetree/bindings/ata/ahci-platform.txt
++++ b/Documentation/devicetree/bindings/ata/ahci-platform.txt
+@@ -30,6 +30,10 @@ Optional properties:
+ - target-supply     : regulator for SATA target power
+ - phys              : reference to the SATA PHY node
+ - phy-names         : must be "sata-phy"
++- ports-implemented : Mask that indicates which ports that the HBA supports
++		      are available for software to use. Useful if PORTS_IMPL
++		      is not programmed by the BIOS, which is true with
++		      some embedded SOC's.
+ 
+ Required properties when using sub-nodes:
+ - #address-cells    : number of cells to encode an address
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 5a389bc68e0e..77e4c10b4c06 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -4163,8 +4163,8 @@ F:	Documentation/efi-stub.txt
+ F:	arch/ia64/kernel/efi.c
+ F:	arch/x86/boot/compressed/eboot.[ch]
+ F:	arch/x86/include/asm/efi.h
+-F:	arch/x86/platform/efi/*
+-F:	drivers/firmware/efi/*
++F:	arch/x86/platform/efi/
++F:	drivers/firmware/efi/
+ F:	include/linux/efi*.h
+ 
+ EFI VARIABLE FILESYSTEM
+diff --git a/Makefile b/Makefile
+index 9b56a6c5e36f..d64eade37241 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 5
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+ 
+diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
+index 27b17adea50d..cb69299a492e 100644
+--- a/arch/arc/include/asm/io.h
++++ b/arch/arc/include/asm/io.h
+@@ -13,6 +13,15 @@
+ #include <asm/byteorder.h>
+ #include <asm/page.h>
+ 
++#ifdef CONFIG_ISA_ARCV2
++#include <asm/barrier.h>
++#define __iormb()		rmb()
++#define __iowmb()		wmb()
++#else
++#define __iormb()		do { } while (0)
++#define __iowmb()		do { } while (0)
++#endif
++
+ extern void __iomem *ioremap(unsigned long physaddr, unsigned long size);
+ extern void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
+ 				  unsigned long flags);
+@@ -22,6 +31,15 @@ extern void iounmap(const void __iomem *addr);
+ #define ioremap_wc(phy, sz)		ioremap(phy, sz)
+ #define ioremap_wt(phy, sz)		ioremap(phy, sz)
+ 
++/*
++ * io{read,write}{16,32}be() macros
++ */
++#define ioread16be(p)		({ u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
++#define ioread32be(p)		({ u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
++
++#define iowrite16be(v,p)	({ __iowmb(); __raw_writew((__force u16)cpu_to_be16(v), p); })
++#define iowrite32be(v,p)	({ __iowmb(); __raw_writel((__force u32)cpu_to_be32(v), p); })
++
+ /* Change struct page to physical address */
+ #define page_to_phys(page)		(page_to_pfn(page) << PAGE_SHIFT)
+ 
+@@ -99,15 +117,6 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
+ 
+ }
+ 
+-#ifdef CONFIG_ISA_ARCV2
+-#include <asm/barrier.h>
+-#define __iormb()		rmb()
+-#define __iowmb()		wmb()
+-#else
+-#define __iormb()		do { } while (0)
+-#define __iowmb()		do { } while (0)
+-#endif
+-
+ /*
+  * MMIO can also get buffered/optimized in micro-arch, so barriers needed
+  * Based on ARM model for the typical use case
+diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
+index ed521e85e208..e8bc7e8bedd2 100644
+--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
++++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
+@@ -665,7 +665,7 @@
+ 		};
+ 
+ 		sata0: sata@29000000 {
+-			compatible		= "generic-ahci";
++			compatible		= "qcom,apq8064-ahci", "generic-ahci";
+ 			status			= "disabled";
+ 			reg			= <0x29000000 0x180>;
+ 			interrupts		= <GIC_SPI 209 IRQ_TYPE_NONE>;
+@@ -687,6 +687,7 @@
+ 
+ 			phys			= <&sata_phy0>;
+ 			phy-names		= "sata-phy";
++			ports-implemented	= <0x1>;
+ 		};
+ 
+ 		/* Temporary fixed regulator */
+diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
+index 47905a50e075..318394ed5c7a 100644
+--- a/arch/arm/mach-cns3xxx/pcie.c
++++ b/arch/arm/mach-cns3xxx/pcie.c
+@@ -220,13 +220,13 @@ static void cns3xxx_write_config(struct cns3xxx_pcie *cnspci,
+ 	u32 mask = (0x1ull << (size * 8)) - 1;
+ 	int shift = (where % 4) * 8;
+ 
+-	v = readl_relaxed(base + (where & 0xffc));
++	v = readl_relaxed(base);
+ 
+ 	v &= ~(mask << shift);
+ 	v |= (val & mask) << shift;
+ 
+-	writel_relaxed(v, base + (where & 0xffc));
+-	readl_relaxed(base + (where & 0xffc));
++	writel_relaxed(v, base);
++	readl_relaxed(base);
+ }
+ 
+ static void __init cns3xxx_pcie_hw_init(struct cns3xxx_pcie *cnspci)
+diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
+index 7c21760f590f..875a2bab64f6 100644
+--- a/arch/arm/mach-exynos/pm_domains.c
++++ b/arch/arm/mach-exynos/pm_domains.c
+@@ -92,7 +92,7 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
+ 			if (IS_ERR(pd->clk[i]))
+ 				break;
+ 
+-			if (IS_ERR(pd->clk[i]))
++			if (IS_ERR(pd->pclk[i]))
+ 				continue; /* Skip on first power up */
+ 			if (clk_set_parent(pd->clk[i], pd->pclk[i]))
+ 				pr_err("%s: error setting parent to clock%d\n",
+diff --git a/arch/arm/mach-socfpga/headsmp.S b/arch/arm/mach-socfpga/headsmp.S
+index 5d94b7a2fb10..c160fa3007e9 100644
+--- a/arch/arm/mach-socfpga/headsmp.S
++++ b/arch/arm/mach-socfpga/headsmp.S
+@@ -13,6 +13,7 @@
+ #include <asm/assembler.h>
+ 
+ 	.arch	armv7-a
++	.arm
+ 
+ ENTRY(secondary_trampoline)
+ 	/* CPU1 will always fetch from 0x0 when it is brought out of reset.
+diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
+index c976ebfe2269..57b4836b7ecd 100644
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -344,7 +344,7 @@ tracesys_next:
+ #endif
+ 
+ 	cmpib,COND(=),n -1,%r20,tracesys_exit /* seccomp may have returned -1 */
+-	comiclr,>>=	__NR_Linux_syscalls, %r20, %r0
++	comiclr,>>	__NR_Linux_syscalls, %r20, %r0
+ 	b,n	.Ltracesys_nosys
+ 
+ 	LDREGX  %r20(%r19), %r19
+diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
+index e4396a7d0f7c..4afe66aa1400 100644
+--- a/arch/powerpc/include/asm/word-at-a-time.h
++++ b/arch/powerpc/include/asm/word-at-a-time.h
+@@ -82,7 +82,7 @@ static inline unsigned long create_zero_mask(unsigned long bits)
+ 	    "andc	%1,%1,%2\n\t"
+ 	    "popcntd	%0,%1"
+ 		: "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask)
+-		: "r" (bits));
++		: "b" (bits));
+ 
+ 	return leading_zero_bits;
+ }
+diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
+index 2c5aaf8c2e2f..05538582a809 100644
+--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
++++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
+@@ -385,6 +385,9 @@ static void intel_thermal_interrupt(void)
+ {
+ 	__u64 msr_val;
+ 
++	if (static_cpu_has(X86_FEATURE_HWP))
++		wrmsrl_safe(MSR_HWP_STATUS, 0);
++
+ 	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
+ 
+ 	/* Check for violation of core thermal thresholds*/
+diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c
+index b285d4e8c68e..5da924bbf0a0 100644
+--- a/arch/x86/kernel/sysfb_efi.c
++++ b/arch/x86/kernel/sysfb_efi.c
+@@ -106,14 +106,24 @@ static int __init efifb_set_system(const struct dmi_system_id *id)
+ 					continue;
+ 				for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ 					resource_size_t start, end;
++					unsigned long flags;
++
++					flags = pci_resource_flags(dev, i);
++					if (!(flags & IORESOURCE_MEM))
++						continue;
++
++					if (flags & IORESOURCE_UNSET)
++						continue;
++
++					if (pci_resource_len(dev, i) == 0)
++						continue;
+ 
+ 					start = pci_resource_start(dev, i);
+-					if (start == 0)
+-						break;
+ 					end = pci_resource_end(dev, i);
+ 					if (screen_info.lfb_base >= start &&
+ 					    screen_info.lfb_base < end) {
+ 						found_bar = 1;
++						break;
+ 					}
+ 				}
+ 			}
+diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
+index 92ae6acac8a7..6aa0f4d9eea6 100644
+--- a/arch/x86/kernel/tsc_msr.c
++++ b/arch/x86/kernel/tsc_msr.c
+@@ -92,7 +92,7 @@ unsigned long try_msr_calibrate_tsc(void)
+ 
+ 	if (freq_desc_tables[cpu_index].msr_plat) {
+ 		rdmsr(MSR_PLATFORM_INFO, lo, hi);
+-		ratio = (lo >> 8) & 0x1f;
++		ratio = (lo >> 8) & 0xff;
+ 	} else {
+ 		rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
+ 		ratio = (hi >> 8) & 0x1f;
+diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
+index 6979186dbd4b..9f77943653fb 100644
+--- a/drivers/acpi/acpi_processor.c
++++ b/drivers/acpi/acpi_processor.c
+@@ -491,6 +491,58 @@ static void acpi_processor_remove(struct acpi_device *device)
+ }
+ #endif /* CONFIG_ACPI_HOTPLUG_CPU */
+ 
++#ifdef CONFIG_X86
++static bool acpi_hwp_native_thermal_lvt_set;
++static acpi_status __init acpi_hwp_native_thermal_lvt_osc(acpi_handle handle,
++							  u32 lvl,
++							  void *context,
++							  void **rv)
++{
++	u8 sb_uuid_str[] = "4077A616-290C-47BE-9EBD-D87058713953";
++	u32 capbuf[2];
++	struct acpi_osc_context osc_context = {
++		.uuid_str = sb_uuid_str,
++		.rev = 1,
++		.cap.length = 8,
++		.cap.pointer = capbuf,
++	};
++
++	if (acpi_hwp_native_thermal_lvt_set)
++		return AE_CTRL_TERMINATE;
++
++	capbuf[0] = 0x0000;
++	capbuf[1] = 0x1000; /* set bit 12 */
++
++	if (ACPI_SUCCESS(acpi_run_osc(handle, &osc_context))) {
++		if (osc_context.ret.pointer && osc_context.ret.length > 1) {
++			u32 *capbuf_ret = osc_context.ret.pointer;
++
++			if (capbuf_ret[1] & 0x1000) {
++				acpi_handle_info(handle,
++					"_OSC native thermal LVT Acked\n");
++				acpi_hwp_native_thermal_lvt_set = true;
++			}
++		}
++		kfree(osc_context.ret.pointer);
++	}
++
++	return AE_OK;
++}
++
++void __init acpi_early_processor_osc(void)
++{
++	if (boot_cpu_has(X86_FEATURE_HWP)) {
++		acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
++				    ACPI_UINT32_MAX,
++				    acpi_hwp_native_thermal_lvt_osc,
++				    NULL, NULL, NULL);
++		acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID,
++				 acpi_hwp_native_thermal_lvt_osc,
++				 NULL, NULL);
++	}
++}
++#endif
++
+ /*
+  * The following ACPI IDs are known to be suitable for representing as
+  * processor devices.
+diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
+index 6a72047aae1c..c3a052d43317 100644
+--- a/drivers/acpi/acpica/dsmethod.c
++++ b/drivers/acpi/acpica/dsmethod.c
+@@ -428,6 +428,9 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
+ 				obj_desc->method.mutex->mutex.
+ 				    original_sync_level =
+ 				    obj_desc->method.mutex->mutex.sync_level;
++
++				obj_desc->method.mutex->mutex.thread_id =
++				    acpi_os_get_thread_id();
+ 			}
+ 		}
+ 
+diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
+index 891c42d1cd65..f9081b791b81 100644
+--- a/drivers/acpi/bus.c
++++ b/drivers/acpi/bus.c
+@@ -1005,6 +1005,9 @@ static int __init acpi_bus_init(void)
+ 		goto error1;
+ 	}
+ 
++	/* Set capability bits for _OSC under processor scope */
++	acpi_early_processor_osc();
++
+ 	/*
+ 	 * _OSC method may exist in module level code,
+ 	 * so it must be run after ACPI_FULL_INITIALIZATION
+diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
+index 1e6833a5cd44..6f41c73f82bb 100644
+--- a/drivers/acpi/internal.h
++++ b/drivers/acpi/internal.h
+@@ -138,6 +138,12 @@ void acpi_early_processor_set_pdc(void);
+ static inline void acpi_early_processor_set_pdc(void) {}
+ #endif
+ 
++#ifdef CONFIG_X86
++void acpi_early_processor_osc(void);
++#else
++static inline void acpi_early_processor_osc(void) {}
++#endif
++
+ /* --------------------------------------------------------------------------
+                                   Embedded Controller
+    -------------------------------------------------------------------------- */
+diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
+index 04975b851c23..639adb1f8abd 100644
+--- a/drivers/ata/ahci_platform.c
++++ b/drivers/ata/ahci_platform.c
+@@ -51,6 +51,9 @@ static int ahci_probe(struct platform_device *pdev)
+ 	if (rc)
+ 		return rc;
+ 
++	of_property_read_u32(dev->of_node,
++			     "ports-implemented", &hpriv->force_port_map);
++
+ 	if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci"))
+ 		hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
+ 
+diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
+index 8e3f7faf00d3..73b19b277138 100644
+--- a/drivers/ata/ahci_xgene.c
++++ b/drivers/ata/ahci_xgene.c
+@@ -821,9 +821,9 @@ static int xgene_ahci_probe(struct platform_device *pdev)
+ 				dev_warn(&pdev->dev, "%s: Error reading device info. Assume version1\n",
+ 					__func__);
+ 				version = XGENE_AHCI_V1;
+-			}
+-			if (info->valid & ACPI_VALID_CID)
++			} else if (info->valid & ACPI_VALID_CID) {
+ 				version = XGENE_AHCI_V2;
++			}
+ 		}
+ 	}
+ #endif
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index 85ea5142a095..bb050ea26101 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -469,6 +469,7 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
+ 		dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
+ 			 port_map, hpriv->force_port_map);
+ 		port_map = hpriv->force_port_map;
++		hpriv->saved_port_map = port_map;
+ 	}
+ 
+ 	if (hpriv->mask_port_map) {
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index e4c5cc107934..c65d41f4007a 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -618,8 +618,8 @@ static void nbd_request_handler(struct request_queue *q)
+ 			req, req->cmd_type);
+ 
+ 		if (unlikely(!nbd->sock)) {
+-			dev_err(disk_to_dev(nbd->disk),
+-				"Attempted send on closed socket\n");
++			dev_err_ratelimited(disk_to_dev(nbd->disk),
++					    "Attempted send on closed socket\n");
+ 			req->errors++;
+ 			nbd_end_request(nbd, req);
+ 			spin_lock_irq(q->queue_lock);
+diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
+index 64a7b5971b57..cab97593ba54 100644
+--- a/drivers/block/null_blk.c
++++ b/drivers/block/null_blk.c
+@@ -742,10 +742,11 @@ static int null_add_dev(void)
+ 
+ 	add_disk(disk);
+ 
++done:
+ 	mutex_lock(&lock);
+ 	list_add_tail(&nullb->list, &nullb_list);
+ 	mutex_unlock(&lock);
+-done:
++
+ 	return 0;
+ 
+ out_cleanup_lightnvm:
+diff --git a/drivers/clk/bcm/clk-bcm2835-aux.c b/drivers/clk/bcm/clk-bcm2835-aux.c
+index e4f89e28b5ec..3a177ade6e6c 100644
+--- a/drivers/clk/bcm/clk-bcm2835-aux.c
++++ b/drivers/clk/bcm/clk-bcm2835-aux.c
+@@ -38,8 +38,8 @@ static int bcm2835_aux_clk_probe(struct platform_device *pdev)
+ 
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	reg = devm_ioremap_resource(dev, res);
+-	if (!reg)
+-		return -ENODEV;
++	if (IS_ERR(reg))
++		return PTR_ERR(reg);
+ 
+ 	onecell = devm_kmalloc(dev, sizeof(*onecell), GFP_KERNEL);
+ 	if (!onecell)
+diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
+index ded3ff4b91b9..aa1dacdaa39d 100644
+--- a/drivers/clk/clk-divider.c
++++ b/drivers/clk/clk-divider.c
+@@ -423,6 +423,12 @@ const struct clk_ops clk_divider_ops = {
+ };
+ EXPORT_SYMBOL_GPL(clk_divider_ops);
+ 
++const struct clk_ops clk_divider_ro_ops = {
++	.recalc_rate = clk_divider_recalc_rate,
++	.round_rate = clk_divider_round_rate,
++};
++EXPORT_SYMBOL_GPL(clk_divider_ro_ops);
++
+ static struct clk *_register_divider(struct device *dev, const char *name,
+ 		const char *parent_name, unsigned long flags,
+ 		void __iomem *reg, u8 shift, u8 width,
+@@ -446,7 +452,10 @@ static struct clk *_register_divider(struct device *dev, const char *name,
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	init.name = name;
+-	init.ops = &clk_divider_ops;
++	if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
++		init.ops = &clk_divider_ro_ops;
++	else
++		init.ops = &clk_divider_ops;
+ 	init.flags = flags | CLK_IS_BASIC;
+ 	init.parent_names = (parent_name ? &parent_name: NULL);
+ 	init.num_parents = (parent_name ? 1 : 0);
+diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
+index 10224b01b97c..b134a8b15e2c 100644
+--- a/drivers/clk/clk-xgene.c
++++ b/drivers/clk/clk-xgene.c
+@@ -351,8 +351,8 @@ static int xgene_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ 		/* Set new divider */
+ 		data = xgene_clk_read(pclk->param.divider_reg +
+ 				pclk->param.reg_divider_offset);
+-		data &= ~((1 << pclk->param.reg_divider_width) - 1)
+-				<< pclk->param.reg_divider_shift;
++		data &= ~(((1 << pclk->param.reg_divider_width) - 1)
++				<< pclk->param.reg_divider_shift);
+ 		data |= divider;
+ 		xgene_clk_write(data, pclk->param.divider_reg +
+ 					pclk->param.reg_divider_offset);
+diff --git a/drivers/clk/meson/clkc.c b/drivers/clk/meson/clkc.c
+index c83ae1367abc..d920d410b51d 100644
+--- a/drivers/clk/meson/clkc.c
++++ b/drivers/clk/meson/clkc.c
+@@ -198,7 +198,7 @@ meson_clk_register_fixed_rate(const struct clk_conf *clk_conf,
+ }
+ 
+ void __init meson_clk_register_clks(const struct clk_conf *clk_confs,
+-				    size_t nr_confs,
++				    unsigned int nr_confs,
+ 				    void __iomem *clk_base)
+ {
+ 	unsigned int i;
+diff --git a/drivers/clk/nxp/clk-lpc18xx-ccu.c b/drivers/clk/nxp/clk-lpc18xx-ccu.c
+index 13aabbb3acbe..558da89555af 100644
+--- a/drivers/clk/nxp/clk-lpc18xx-ccu.c
++++ b/drivers/clk/nxp/clk-lpc18xx-ccu.c
+@@ -222,7 +222,7 @@ static void lpc18xx_ccu_register_branch_gate_div(struct lpc18xx_clk_branch *bran
+ 		div->width = 1;
+ 
+ 		div_hw = &div->hw;
+-		div_ops = &clk_divider_ops;
++		div_ops = &clk_divider_ro_ops;
+ 	}
+ 
+ 	branch->gate.reg = branch->offset + reg_base;
+diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
+index 983dd7dc89a7..0a0c1f533249 100644
+--- a/drivers/clk/qcom/gcc-msm8960.c
++++ b/drivers/clk/qcom/gcc-msm8960.c
+@@ -2753,7 +2753,7 @@ static struct clk_rcg ce3_src = {
+ 	},
+ 	.freq_tbl = clk_tbl_ce3,
+ 	.clkr = {
+-		.enable_reg = 0x2c08,
++		.enable_reg = 0x36c0,
+ 		.enable_mask = BIT(7),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "ce3_src",
+@@ -2769,7 +2769,7 @@ static struct clk_branch ce3_core_clk = {
+ 	.halt_reg = 0x2fdc,
+ 	.halt_bit = 5,
+ 	.clkr = {
+-		.enable_reg = 0x36c4,
++		.enable_reg = 0x36cc,
+ 		.enable_mask = BIT(4),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "ce3_core_clk",
+diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
+index 981a50205339..97f49aab8d42 100644
+--- a/drivers/clk/rockchip/clk-rk3228.c
++++ b/drivers/clk/rockchip/clk-rk3228.c
+@@ -605,13 +605,13 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
+ 
+ 	/* PD_MMC */
+ 	MMC(SCLK_SDMMC_DRV,    "sdmmc_drv",    "sclk_sdmmc", RK3228_SDMMC_CON0, 1),
+-	MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK3228_SDMMC_CON1, 1),
++	MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK3228_SDMMC_CON1, 0),
+ 
+ 	MMC(SCLK_SDIO_DRV,     "sdio_drv",     "sclk_sdio",  RK3228_SDIO_CON0,  1),
+-	MMC(SCLK_SDIO_SAMPLE,  "sdio_sample",  "sclk_sdio",  RK3228_SDIO_CON1,  1),
++	MMC(SCLK_SDIO_SAMPLE,  "sdio_sample",  "sclk_sdio",  RK3228_SDIO_CON1,  0),
+ 
+ 	MMC(SCLK_EMMC_DRV,     "emmc_drv",     "sclk_emmc",  RK3228_EMMC_CON0,  1),
+-	MMC(SCLK_EMMC_SAMPLE,  "emmc_sample",  "sclk_emmc",  RK3228_EMMC_CON1,  1),
++	MMC(SCLK_EMMC_SAMPLE,  "emmc_sample",  "sclk_emmc",  RK3228_EMMC_CON1,  0),
+ };
+ 
+ static const char *const rk3228_critical_clocks[] __initconst = {
+diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
+index d9a0b5d4d47f..226af5720c9e 100644
+--- a/drivers/clk/rockchip/clk.c
++++ b/drivers/clk/rockchip/clk.c
+@@ -70,7 +70,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
+ 	if (gate_offset >= 0) {
+ 		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ 		if (!gate)
+-			return ERR_PTR(-ENOMEM);
++			goto err_gate;
+ 
+ 		gate->flags = gate_flags;
+ 		gate->reg = base + gate_offset;
+@@ -82,7 +82,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
+ 	if (div_width > 0) {
+ 		div = kzalloc(sizeof(*div), GFP_KERNEL);
+ 		if (!div)
+-			return ERR_PTR(-ENOMEM);
++			goto err_div;
+ 
+ 		div->flags = div_flags;
+ 		div->reg = base + muxdiv_offset;
+@@ -90,7 +90,9 @@ static struct clk *rockchip_clk_register_branch(const char *name,
+ 		div->width = div_width;
+ 		div->lock = lock;
+ 		div->table = div_table;
+-		div_ops = &clk_divider_ops;
++		div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
++						? &clk_divider_ro_ops
++						: &clk_divider_ops;
+ 	}
+ 
+ 	clk = clk_register_composite(NULL, name, parent_names, num_parents,
+@@ -100,6 +102,11 @@ static struct clk *rockchip_clk_register_branch(const char *name,
+ 				     flags);
+ 
+ 	return clk;
++err_div:
++	kfree(gate);
++err_gate:
++	kfree(mux);
++	return ERR_PTR(-ENOMEM);
+ }
+ 
+ struct rockchip_clk_frac {
+diff --git a/drivers/clk/sunxi/clk-sun8i-apb0.c b/drivers/clk/sunxi/clk-sun8i-apb0.c
+index 7ba61103a6f5..2ea61debffc1 100644
+--- a/drivers/clk/sunxi/clk-sun8i-apb0.c
++++ b/drivers/clk/sunxi/clk-sun8i-apb0.c
+@@ -36,7 +36,7 @@ static struct clk *sun8i_a23_apb0_register(struct device_node *node,
+ 
+ 	/* The A23 APB0 clock is a standard 2 bit wide divider clock */
+ 	clk = clk_register_divider(NULL, clk_name, clk_parent, 0, reg,
+-				   0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
++				   0, 2, 0, NULL);
+ 	if (IS_ERR(clk))
+ 		return clk;
+ 
+diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c
+index e78755e0ef78..1fe1e8d970cf 100644
+--- a/drivers/clk/versatile/clk-sp810.c
++++ b/drivers/clk/versatile/clk-sp810.c
+@@ -92,6 +92,7 @@ static void __init clk_sp810_of_setup(struct device_node *node)
+ 	int num = ARRAY_SIZE(parent_names);
+ 	char name[12];
+ 	struct clk_init_data init;
++	static int instance;
+ 	int i;
+ 	bool deprecated;
+ 
+@@ -117,7 +118,7 @@ static void __init clk_sp810_of_setup(struct device_node *node)
+ 	deprecated = !of_find_property(node, "assigned-clock-parents", NULL);
+ 
+ 	for (i = 0; i < ARRAY_SIZE(sp810->timerclken); i++) {
+-		snprintf(name, ARRAY_SIZE(name), "timerclken%d", i);
++		snprintf(name, sizeof(name), "sp810_%d_%d", instance, i);
+ 
+ 		sp810->timerclken[i].sp810 = sp810;
+ 		sp810->timerclken[i].channel = i;
+@@ -138,5 +139,6 @@ static void __init clk_sp810_of_setup(struct device_node *node)
+ 	}
+ 
+ 	of_clk_add_provider(node, clk_sp810_timerclken_of_get, sp810);
++	instance++;
+ }
+ CLK_OF_DECLARE(sp810, "arm,sp810", clk_sp810_of_setup);
+diff --git a/drivers/clocksource/tango_xtal.c b/drivers/clocksource/tango_xtal.c
+index 2bcecafdeaea..c407c47a3232 100644
+--- a/drivers/clocksource/tango_xtal.c
++++ b/drivers/clocksource/tango_xtal.c
+@@ -42,7 +42,7 @@ static void __init tango_clocksource_init(struct device_node *np)
+ 
+ 	ret = clocksource_mmio_init(xtal_in_cnt, "tango-xtal", xtal_freq, 350,
+ 				    32, clocksource_mmio_readl_up);
+-	if (!ret) {
++	if (ret) {
+ 		pr_err("%s: registration failed\n", np->full_name);
+ 		return;
+ 	}
+diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
+index a9c659f58974..04042038ec4b 100644
+--- a/drivers/cpufreq/sti-cpufreq.c
++++ b/drivers/cpufreq/sti-cpufreq.c
+@@ -259,6 +259,10 @@ static int sti_cpufreq_init(void)
+ {
+ 	int ret;
+ 
++	if ((!of_machine_is_compatible("st,stih407")) &&
++		(!of_machine_is_compatible("st,stih410")))
++		return -ENODEV;
++
+ 	ddata.cpu = get_cpu_device(0);
+ 	if (!ddata.cpu) {
+ 		dev_err(ddata.cpu, "Failed to get device for CPU0\n");
+diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
+index 545069d5fdfb..e342565e8715 100644
+--- a/drivers/cpuidle/cpuidle-arm.c
++++ b/drivers/cpuidle/cpuidle-arm.c
+@@ -50,7 +50,7 @@ static int arm_enter_idle_state(struct cpuidle_device *dev,
+ 		 * call the CPU ops suspend protocol with idle index as a
+ 		 * parameter.
+ 		 */
+-		arm_cpuidle_suspend(idx);
++		ret = arm_cpuidle_suspend(idx);
+ 
+ 		cpu_pm_exit();
+ 	}
+diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
+index 540cbc88c7a2..cc4d9bd0839e 100644
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -977,7 +977,7 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
+ 		lookup = kmalloc(sizeof(*lookup), GFP_KERNEL);
+ 		if (lookup) {
+ 			lookup->adev = adev;
+-			lookup->con_id = con_id;
++			lookup->con_id = kstrdup(con_id, GFP_KERNEL);
+ 			list_add_tail(&lookup->node, &acpi_crs_lookup_list);
+ 		}
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index b8fbbd7699e4..73628c7599e7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -540,6 +540,7 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
+ 	if (!metadata_size) {
+ 		if (bo->metadata_size) {
+ 			kfree(bo->metadata);
++			bo->metadata = NULL;
+ 			bo->metadata_size = 0;
+ 		}
+ 		return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+index 1e0bba29e167..1cd6de575305 100644
+--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
++++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+@@ -298,6 +298,10 @@ bool amdgpu_atombios_encoder_mode_fixup(struct drm_encoder *encoder,
+ 	    && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
+ 		adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+ 
++	/* vertical FP must be at least 1 */
++	if (mode->crtc_vsync_start == mode->crtc_vdisplay)
++		adjusted_mode->crtc_vsync_start++;
++
+ 	/* get the native mode for scaling */
+ 	if (amdgpu_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
+ 		amdgpu_panel_mode_fixup(encoder, adjusted_mode);
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index f357058c74d9..2e832fa07e09 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -797,7 +797,7 @@ static int i915_drm_resume(struct drm_device *dev)
+ static int i915_drm_resume_early(struct drm_device *dev)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+-	int ret = 0;
++	int ret;
+ 
+ 	/*
+ 	 * We have a resume ordering issue with the snd-hda driver also
+@@ -808,6 +808,36 @@ static int i915_drm_resume_early(struct drm_device *dev)
+ 	 * FIXME: This should be solved with a special hdmi sink device or
+ 	 * similar so that power domains can be employed.
+ 	 */
++
++	/*
++	 * Note that we need to set the power state explicitly, since we
++	 * powered off the device during freeze and the PCI core won't power
++	 * it back up for us during thaw. Powering off the device during
++	 * freeze is not a hard requirement though, and during the
++	 * suspend/resume phases the PCI core makes sure we get here with the
++	 * device powered on. So in case we change our freeze logic and keep
++	 * the device powered we can also remove the following set power state
++	 * call.
++	 */
++	ret = pci_set_power_state(dev->pdev, PCI_D0);
++	if (ret) {
++		DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
++		goto out;
++	}
++
++	/*
++	 * Note that pci_enable_device() first enables any parent bridge
++	 * device and only then sets the power state for this device. The
++	 * bridge enabling is a nop though, since bridge devices are resumed
++	 * first. The order of enabling power and enabling the device is
++	 * imposed by the PCI core as described above, so here we preserve the
++	 * same order for the freeze/thaw phases.
++	 *
++	 * TODO: eventually we should remove pci_disable_device() /
++	 * pci_enable_enable_device() from suspend/resume. Due to how they
++	 * depend on the device enable refcount we can't anyway depend on them
++	 * disabling/enabling the device.
++	 */
+ 	if (pci_enable_device(dev->pdev)) {
+ 		ret = -EIO;
+ 		goto out;
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 4897728713f6..9b6737c85acb 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -2898,7 +2898,14 @@ enum skl_disp_power_wells {
+ #define GEN6_RP_STATE_CAP	_MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998)
+ #define BXT_RP_STATE_CAP        _MMIO(0x138170)
+ 
+-#define INTERVAL_1_28_US(us)	(((us) * 100) >> 7)
++/*
++ * Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS
++ * 8300) freezing up around GPU hangs. Looks as if even
++ * scheduling/timer interrupts start misbehaving if the RPS
++ * EI/thresholds are "bad", leading to a very sluggish or even
++ * frozen machine.
++ */
++#define INTERVAL_1_28_US(us)	roundup(((us) * 100) >> 7, 25)
+ #define INTERVAL_1_33_US(us)	(((us) * 3)   >> 2)
+ #define INTERVAL_0_833_US(us)	(((us) * 6) / 5)
+ #define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
+diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
+index 084d5586585d..33b8e0a2b3fd 100644
+--- a/drivers/gpu/drm/i915/intel_ddi.c
++++ b/drivers/gpu/drm/i915/intel_ddi.c
+@@ -464,9 +464,17 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
+ 	} else if (IS_BROADWELL(dev)) {
+ 		ddi_translations_fdi = bdw_ddi_translations_fdi;
+ 		ddi_translations_dp = bdw_ddi_translations_dp;
+-		ddi_translations_edp = bdw_ddi_translations_edp;
++
++		if (dev_priv->edp_low_vswing) {
++			ddi_translations_edp = bdw_ddi_translations_edp;
++			n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
++		} else {
++			ddi_translations_edp = bdw_ddi_translations_dp;
++			n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
++		}
++
+ 		ddi_translations_hdmi = bdw_ddi_translations_hdmi;
+-		n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
++
+ 		n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
+ 		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+ 		hdmi_default_entry = 7;
+@@ -3260,12 +3268,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
+ 	intel_ddi_clock_get(encoder, pipe_config);
+ }
+ 
+-static void intel_ddi_destroy(struct drm_encoder *encoder)
+-{
+-	/* HDMI has nothing special to destroy, so we can go with this. */
+-	intel_dp_encoder_destroy(encoder);
+-}
+-
+ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
+ 				     struct intel_crtc_state *pipe_config)
+ {
+@@ -3284,7 +3286,8 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
+ }
+ 
+ static const struct drm_encoder_funcs intel_ddi_funcs = {
+-	.destroy = intel_ddi_destroy,
++	.reset = intel_dp_encoder_reset,
++	.destroy = intel_dp_encoder_destroy,
+ };
+ 
+ static struct intel_connector *
+@@ -3356,6 +3359,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
+ 	intel_encoder->post_disable = intel_ddi_post_disable;
+ 	intel_encoder->get_hw_state = intel_ddi_get_hw_state;
+ 	intel_encoder->get_config = intel_ddi_get_config;
++	intel_encoder->suspend = intel_dp_encoder_suspend;
+ 
+ 	intel_dig_port->port = port;
+ 	intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index a9c35134f2e2..c023a04c44d0 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -13429,6 +13429,9 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
+ 	}
+ 
+ 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
++		if (state->legacy_cursor_update)
++			continue;
++
+ 		ret = intel_crtc_wait_for_pending_flips(crtc);
+ 		if (ret)
+ 			return ret;
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index cdc2c15873dc..3cd4996c791c 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -4905,7 +4905,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+ 	kfree(intel_dig_port);
+ }
+ 
+-static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
++void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
+ {
+ 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ 
+@@ -4947,7 +4947,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
+ 	edp_panel_vdd_schedule_off(intel_dp);
+ }
+ 
+-static void intel_dp_encoder_reset(struct drm_encoder *encoder)
++void intel_dp_encoder_reset(struct drm_encoder *encoder)
+ {
+ 	struct intel_dp *intel_dp;
+ 
+diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
+index df7f3cb66056..3ce3bee368fe 100644
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -1234,6 +1234,8 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
+ void intel_dp_start_link_train(struct intel_dp *intel_dp);
+ void intel_dp_stop_link_train(struct intel_dp *intel_dp);
+ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
++void intel_dp_encoder_reset(struct drm_encoder *encoder);
++void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
+ void intel_dp_encoder_destroy(struct drm_encoder *encoder);
+ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
+ bool intel_dp_compute_config(struct intel_encoder *encoder,
+diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
+index 616108c4bc3e..43fdae8ff3c3 100644
+--- a/drivers/gpu/drm/i915/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/intel_hdmi.c
+@@ -1407,8 +1407,16 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
+ 				hdmi_to_dig_port(intel_hdmi));
+ 	}
+ 
+-	if (!live_status)
+-		DRM_DEBUG_KMS("Live status not up!");
++	if (!live_status) {
++		DRM_DEBUG_KMS("HDMI live status down\n");
++		/*
++		 * Live status register is not reliable on all intel platforms.
++		 * So consider live_status only for certain platforms, for
++		 * others, read EDID to determine presence of sink.
++		 */
++		if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv))
++			live_status = true;
++	}
+ 
+ 	intel_hdmi_unset_edid(connector);
+ 
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index 6104d7d7449e..9de6503b10d8 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -310,6 +310,10 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
+ 	    && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
+ 		adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+ 
++	/* vertical FP must be at least 1 */
++	if (mode->crtc_vsync_start == mode->crtc_vdisplay)
++		adjusted_mode->crtc_vsync_start++;
++
+ 	/* get the native mode for scaling */
+ 	if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
+ 		radeon_panel_mode_fixup(encoder, adjusted_mode);
+diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
+index e00db3f510dd..abb98c77bad2 100644
+--- a/drivers/gpu/ipu-v3/ipu-common.c
++++ b/drivers/gpu/ipu-v3/ipu-common.c
+@@ -1068,7 +1068,6 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
+ 			goto err_register;
+ 		}
+ 
+-		pdev->dev.of_node = of_node;
+ 		pdev->dev.parent = dev;
+ 
+ 		ret = platform_device_add_data(pdev, &reg->pdata,
+@@ -1079,6 +1078,12 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
+ 			platform_device_put(pdev);
+ 			goto err_register;
+ 		}
++
++		/*
++		 * Set of_node only after calling platform_device_add. Otherwise
++		 * the platform:imx-ipuv3-crtc modalias won't be used.
++		 */
++		pdev->dev.of_node = of_node;
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index b6ff6e78ac54..14c14c82795c 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -255,6 +255,7 @@
+ #define USB_DEVICE_ID_CORSAIR_K90	0x1b02
+ 
+ #define USB_VENDOR_ID_CREATIVELABS	0x041e
++#define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51	0x322c
+ #define USB_DEVICE_ID_PRODIKEYS_PCMIDI	0x2801
+ 
+ #define USB_VENDOR_ID_CVTOUCH		0x1ff7
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index 7dd0953cd70f..dc8e6adf95a4 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -70,6 +70,7 @@ static const struct hid_blacklist {
+ 	{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
++	{ USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
+ 	{ USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 94a8875b38fb..f71187aad0d0 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -3378,6 +3378,10 @@ static const struct wacom_features wacom_features_0x33E =
+ 	{ "Wacom Intuos PT M 2", 21600, 13500, 2047, 63,
+ 	  INTUOSHT2, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16,
+ 	  .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
++static const struct wacom_features wacom_features_0x343 =
++	{ "Wacom DTK1651", 34616, 19559, 1023, 0,
++	  DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
++	  WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
+ 
+ static const struct wacom_features wacom_features_HID_ANY_ID =
+ 	{ "Wacom HID", .type = HID_GENERIC };
+@@ -3543,6 +3547,7 @@ const struct hid_device_id wacom_ids[] = {
+ 	{ USB_DEVICE_WACOM(0x33C) },
+ 	{ USB_DEVICE_WACOM(0x33D) },
+ 	{ USB_DEVICE_WACOM(0x33E) },
++	{ USB_DEVICE_WACOM(0x343) },
+ 	{ USB_DEVICE_WACOM(0x4001) },
+ 	{ USB_DEVICE_WACOM(0x4004) },
+ 	{ USB_DEVICE_WACOM(0x5000) },
+diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
+index b53702ce692f..e35560b955b1 100644
+--- a/drivers/hv/ring_buffer.c
++++ b/drivers/hv/ring_buffer.c
+@@ -103,15 +103,29 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
+  *    there is room for the producer to send the pending packet.
+  */
+ 
+-static bool hv_need_to_signal_on_read(u32 prev_write_sz,
+-				      struct hv_ring_buffer_info *rbi)
++static bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
+ {
+ 	u32 cur_write_sz;
+ 	u32 r_size;
+-	u32 write_loc = rbi->ring_buffer->write_index;
++	u32 write_loc;
+ 	u32 read_loc = rbi->ring_buffer->read_index;
+-	u32 pending_sz = rbi->ring_buffer->pending_send_sz;
++	u32 pending_sz;
+ 
++	/*
++	 * Issue a full memory barrier before making the signaling decision.
++	 * Here is the reason for having this barrier:
++	 * If the reading of the pend_sz (in this function)
++	 * were to be reordered and read before we commit the new read
++	 * index (in the calling function)  we could
++	 * have a problem. If the host were to set the pending_sz after we
++	 * have sampled pending_sz and go to sleep before we commit the
++	 * read index, we could miss sending the interrupt. Issue a full
++	 * memory barrier to address this.
++	 */
++	mb();
++
++	pending_sz = rbi->ring_buffer->pending_send_sz;
++	write_loc = rbi->ring_buffer->write_index;
+ 	/* If the other end is not blocked on write don't bother. */
+ 	if (pending_sz == 0)
+ 		return false;
+@@ -120,7 +134,7 @@ static bool hv_need_to_signal_on_read(u32 prev_write_sz,
+ 	cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
+ 			read_loc - write_loc;
+ 
+-	if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
++	if (cur_write_sz >= pending_sz)
+ 		return true;
+ 
+ 	return false;
+@@ -458,7 +472,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
+ 	/* Update the read index */
+ 	hv_set_next_read_location(inring_info, next_read_location);
+ 
+-	*signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info);
++	*signal = hv_need_to_signal_on_read(inring_info);
+ 
+ out_unlock:
+ 	spin_unlock_irqrestore(&inring_info->ring_lock, flags);
+diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
+index b13936dacc78..f2a7f72f7aa6 100644
+--- a/drivers/iio/magnetometer/ak8975.c
++++ b/drivers/iio/magnetometer/ak8975.c
+@@ -462,6 +462,8 @@ static int ak8975_setup_irq(struct ak8975_data *data)
+ 	int rc;
+ 	int irq;
+ 
++	init_waitqueue_head(&data->data_ready_queue);
++	clear_bit(0, &data->flags);
+ 	if (client->irq)
+ 		irq = client->irq;
+ 	else
+@@ -477,8 +479,6 @@ static int ak8975_setup_irq(struct ak8975_data *data)
+ 		return rc;
+ 	}
+ 
+-	init_waitqueue_head(&data->data_ready_queue);
+-	clear_bit(0, &data->flags);
+ 	data->eoc_irq = irq;
+ 
+ 	return rc;
+@@ -732,7 +732,7 @@ static int ak8975_probe(struct i2c_client *client,
+ 	int eoc_gpio;
+ 	int err;
+ 	const char *name = NULL;
+-	enum asahi_compass_chipset chipset;
++	enum asahi_compass_chipset chipset = AK_MAX_TYPE;
+ 
+ 	/* Grab and set up the supplied GPIO. */
+ 	if (client->dev.platform_data)
+diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
+index cf21df4a8bf5..4e94cff5ba71 100644
+--- a/drivers/infiniband/hw/cxgb4/cq.c
++++ b/drivers/infiniband/hw/cxgb4/cq.c
+@@ -162,7 +162,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
+ 	cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
+ 				      &cq->bar2_qid,
+ 				      user ? &cq->bar2_pa : NULL);
+-	if (user && !cq->bar2_va) {
++	if (user && !cq->bar2_pa) {
+ 		pr_warn(MOD "%s: cqid %u not in BAR2 range.\n",
+ 			pci_name(rdev->lldi.pdev), cq->cqid);
+ 		ret = -EINVAL;
+diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
+index e99345eb875a..8ff690bf09d9 100644
+--- a/drivers/infiniband/hw/cxgb4/qp.c
++++ b/drivers/infiniband/hw/cxgb4/qp.c
+@@ -185,6 +185,10 @@ void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
+ 
+ 	if (pbar2_pa)
+ 		*pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;
++
++	if (is_t4(rdev->lldi.adapter_type))
++		return NULL;
++
+ 	return rdev->bar2_kva + bar2_qoffset;
+ }
+ 
+@@ -270,7 +274,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
+ 	/*
+ 	 * User mode must have bar2 access.
+ 	 */
+-	if (user && (!wq->sq.bar2_va || !wq->rq.bar2_va)) {
++	if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) {
+ 		pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n",
+ 			pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
+ 		goto free_dma;
+diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
+index 9bbadaaf6bc3..7b3845aa5983 100644
+--- a/drivers/input/touchscreen/zforce_ts.c
++++ b/drivers/input/touchscreen/zforce_ts.c
+@@ -370,8 +370,8 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
+ 			point.coord_x = point.coord_y = 0;
+ 		}
+ 
+-		point.state = payload[9 * i + 5] & 0x03;
+-		point.id = (payload[9 * i + 5] & 0xfc) >> 2;
++		point.state = payload[9 * i + 5] & 0x0f;
++		point.id = (payload[9 * i + 5] & 0xf0) >> 4;
+ 
+ 		/* determine touch major, minor and orientation */
+ 		point.area_major = max(payload[9 * i + 6],
+diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
+index 307db1ea22de..b7ddfb352792 100644
+--- a/drivers/lightnvm/rrpc.c
++++ b/drivers/lightnvm/rrpc.c
+@@ -499,12 +499,21 @@ static void rrpc_gc_queue(struct work_struct *work)
+ 	struct rrpc *rrpc = gcb->rrpc;
+ 	struct rrpc_block *rblk = gcb->rblk;
+ 	struct nvm_lun *lun = rblk->parent->lun;
++	struct nvm_block *blk = rblk->parent;
+ 	struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
+ 
+ 	spin_lock(&rlun->lock);
+ 	list_add_tail(&rblk->prio, &rlun->prio_list);
+ 	spin_unlock(&rlun->lock);
+ 
++	spin_lock(&lun->lock);
++	lun->nr_open_blocks--;
++	lun->nr_closed_blocks++;
++	blk->state &= ~NVM_BLK_ST_OPEN;
++	blk->state |= NVM_BLK_ST_CLOSED;
++	list_move_tail(&rblk->list, &rlun->closed_list);
++	spin_unlock(&lun->lock);
++
+ 	mempool_free(gcb, rrpc->gcb_pool);
+ 	pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
+ 							rblk->parent->id);
+@@ -668,20 +677,8 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
+ 		lun = rblk->parent->lun;
+ 
+ 		cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
+-		if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk)) {
+-			struct nvm_block *blk = rblk->parent;
+-			struct rrpc_lun *rlun = rblk->rlun;
+-
+-			spin_lock(&lun->lock);
+-			lun->nr_open_blocks--;
+-			lun->nr_closed_blocks++;
+-			blk->state &= ~NVM_BLK_ST_OPEN;
+-			blk->state |= NVM_BLK_ST_CLOSED;
+-			list_move_tail(&rblk->list, &rlun->closed_list);
+-			spin_unlock(&lun->lock);
+-
++		if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk))
+ 			rrpc_run_gc(rrpc, rblk);
+-		}
+ 	}
+ }
+ 
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index e55e6cf9ec17..7551278030d8 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -284,6 +284,8 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
+ 	 * go away inside make_request
+ 	 */
+ 	sectors = bio_sectors(bio);
++	/* bio could be mergeable after passing to underlayer */
++	bio->bi_rw &= ~REQ_NOMERGE;
+ 	mddev->pers->make_request(mddev, bio);
+ 
+ 	cpu = part_stat_lock();
+diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
+index 1743788f1595..1bbbe877ba7e 100644
+--- a/drivers/mfd/intel-lpss.c
++++ b/drivers/mfd/intel-lpss.c
+@@ -453,6 +453,7 @@ int intel_lpss_probe(struct device *dev,
+ err_remove_ltr:
+ 	intel_lpss_debugfs_remove(lpss);
+ 	intel_lpss_ltr_hide(lpss);
++	intel_lpss_unregister_clock(lpss);
+ 
+ err_clk_register:
+ 	ida_simple_remove(&intel_lpss_devid_ida, lpss->devid);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 1545a944c309..b86fe50d5d93 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -423,6 +423,10 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
+ 					 HCA_CAP_OPMOD_GET_CUR);
+ 		if (err)
+ 			return err;
++		err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
++					 HCA_CAP_OPMOD_GET_MAX);
++		if (err)
++			return err;
+ 	} else {
+ 		return 0;
+ 	}
+diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
+index 91afa3ae414c..a192d451dab2 100644
+--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
++++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
+@@ -2143,11 +2143,7 @@ EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
+ void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
+ 					     struct sk_buff *skb)
+ {
+-	struct ath10k_pktlog_10_4_hdr *hdr =
+-		(struct ath10k_pktlog_10_4_hdr *)skb->data;
+-
+-	trace_ath10k_htt_pktlog(ar, hdr->payload,
+-				sizeof(*hdr) + __le16_to_cpu(hdr->size));
++	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
+ 	dev_kfree_skb_any(skb);
+ }
+ EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
+diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+index 8f8793004b9f..1b271b99c49e 100644
+--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
++++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+@@ -274,6 +274,9 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
+ 	};
+ 	static const int inc[4] = { 0, 100, 0, 0 };
+ 
++	memset(&mask_m, 0, sizeof(int8_t) * 123);
++	memset(&mask_p, 0, sizeof(int8_t) * 123);
++
+ 	cur_bin = -6000;
+ 	upper = bin + 100;
+ 	lower = bin - 100;
+@@ -424,14 +427,9 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
+ 	int tmp, new;
+ 	int i;
+ 
+-	int8_t mask_m[123];
+-	int8_t mask_p[123];
+ 	int cur_bb_spur;
+ 	bool is2GHz = IS_CHAN_2GHZ(chan);
+ 
+-	memset(&mask_m, 0, sizeof(int8_t) * 123);
+-	memset(&mask_p, 0, sizeof(int8_t) * 123);
+-
+ 	for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
+ 		cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
+ 		if (AR_NO_SPUR == cur_bb_spur)
+diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+index db6624527d99..53d7445a5d12 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
++++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+@@ -178,14 +178,9 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
+ 	int i;
+ 	struct chan_centers centers;
+ 
+-	int8_t mask_m[123];
+-	int8_t mask_p[123];
+ 	int cur_bb_spur;
+ 	bool is2GHz = IS_CHAN_2GHZ(chan);
+ 
+-	memset(&mask_m, 0, sizeof(int8_t) * 123);
+-	memset(&mask_p, 0, sizeof(int8_t) * 123);
+-
+ 	ath9k_hw_get_channel_centers(ah, chan, &centers);
+ 	freq = centers.synth_center;
+ 
+diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+index 6a4fc5d183cf..d7db6f23e728 100644
+--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
++++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+@@ -314,6 +314,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
+ 			mwifiex_dbg(adapter, ERROR,
+ 				    "Attempt to reconnect on csa closed chan(%d)\n",
+ 				    bss_desc->channel);
++			ret = -1;
+ 			goto done;
+ 		}
+ 
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c
+index 28f7010e7108..1aca77719521 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rc.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rc.c
+@@ -41,7 +41,7 @@ static u8 _rtl_rc_get_highest_rix(struct rtl_priv *rtlpriv,
+ 	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ 	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ 	struct rtl_sta_info *sta_entry = NULL;
+-	u8 wireless_mode = 0;
++	u16 wireless_mode = 0;
+ 
+ 	/*
+ 	 *this rate is no use for true rate, firmware
+@@ -99,7 +99,7 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
+ {
+ 	struct rtl_mac *mac = rtl_mac(rtlpriv);
+ 	struct rtl_sta_info *sta_entry = NULL;
+-	u8 wireless_mode = 0;
++	u16 wireless_mode = 0;
+ 	u8 sgi_20 = 0, sgi_40 = 0, sgi_80 = 0;
+ 
+ 	if (sta) {
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+index bbb789f8990b..5e5719b26774 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+@@ -3855,7 +3855,7 @@ void rtl8821ae_update_channel_access_setting(struct ieee80211_hw *hw)
+ {
+ 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+ 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+-	u8 wireless_mode = mac->mode;
++	u16 wireless_mode = mac->mode;
+ 	u8 sifs_timer, r2t_sifs;
+ 
+ 	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
+diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
+index 4544752a2ba8..84397b190cc0 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
++++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
+@@ -1323,14 +1323,13 @@ struct rtl_tid_data {
+ 
+ struct rtl_sta_info {
+ 	struct list_head list;
+-	u8 ratr_index;
+-	u8 wireless_mode;
+-	u8 mimo_ps;
+-	u8 mac_addr[ETH_ALEN];
+ 	struct rtl_tid_data tids[MAX_TID_COUNT];
+-
+ 	/* just used for ap adhoc or mesh*/
+ 	struct rssi_sta rssi_stat;
++	u16 wireless_mode;
++	u8 ratr_index;
++	u8 mimo_ps;
++	u8 mac_addr[ETH_ALEN];
+ } __packed;
+ 
+ struct rtl_priv;
+diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
+index c96405498bf4..4b59f67724de 100644
+--- a/drivers/net/wireless/ti/wlcore/event.c
++++ b/drivers/net/wireless/ti/wlcore/event.c
+@@ -38,7 +38,7 @@
+ 
+ int wlcore_event_fw_logger(struct wl1271 *wl)
+ {
+-	u32 ret;
++	int ret;
+ 	struct fw_logger_information fw_log;
+ 	u8  *buffer;
+ 	u32 internal_fw_addrbase = WL18XX_DATA_RAM_BASE_ADDRESS;
+diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
+index 544b802a594c..02c574f8ccb3 100644
+--- a/drivers/nvdimm/pmem.c
++++ b/drivers/nvdimm/pmem.c
+@@ -314,9 +314,16 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
+ 	 * implementation will limit the pfns advertised through
+ 	 * ->direct_access() to those that are included in the memmap.
+ 	 */
+-	if (nd_pfn->mode == PFN_MODE_PMEM)
+-		offset = ALIGN(SZ_8K + 64 * npfns, nd_pfn->align);
+-	else if (nd_pfn->mode == PFN_MODE_RAM)
++	if (nd_pfn->mode == PFN_MODE_PMEM) {
++		unsigned long memmap_size;
++
++		/*
++		 * vmemmap_populate_hugepages() allocates the memmap array in
++		 * HPAGE_SIZE chunks.
++		 */
++		memmap_size = ALIGN(64 * npfns, PMD_SIZE);
++		offset = ALIGN(SZ_8K + memmap_size, nd_pfn->align);
++	} else if (nd_pfn->mode == PFN_MODE_RAM)
+ 		offset = ALIGN(SZ_8K, nd_pfn->align);
+ 	else
+ 		goto err;
+diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c
+index 8ba19bba3156..2bb3c5799ac4 100644
+--- a/drivers/nvmem/mxs-ocotp.c
++++ b/drivers/nvmem/mxs-ocotp.c
+@@ -94,7 +94,7 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
+ 	if (ret)
+ 		goto close_banks;
+ 
+-	while (val_size) {
++	while (val_size >= reg_size) {
+ 		if ((offset < OCOTP_DATA_OFFSET) || (offset % 16)) {
+ 			/* fill up non-data register */
+ 			*buf = 0;
+@@ -103,7 +103,7 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
+ 		}
+ 
+ 		buf++;
+-		val_size--;
++		val_size -= reg_size;
+ 		offset += reg_size;
+ 	}
+ 
+diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
+index 826634ec0d5c..e0679eb399f6 100644
+--- a/drivers/pwm/pwm-omap-dmtimer.c
++++ b/drivers/pwm/pwm-omap-dmtimer.c
+@@ -31,6 +31,7 @@
+ #include <linux/time.h>
+ 
+ #define DM_TIMER_LOAD_MIN 0xfffffffe
++#define DM_TIMER_MAX      0xffffffff
+ 
+ struct pwm_omap_dmtimer_chip {
+ 	struct pwm_chip chip;
+@@ -46,13 +47,9 @@ to_pwm_omap_dmtimer_chip(struct pwm_chip *chip)
+ 	return container_of(chip, struct pwm_omap_dmtimer_chip, chip);
+ }
+ 
+-static int pwm_omap_dmtimer_calc_value(unsigned long clk_rate, int ns)
++static u32 pwm_omap_dmtimer_get_clock_cycles(unsigned long clk_rate, int ns)
+ {
+-	u64 c = (u64)clk_rate * ns;
+-
+-	do_div(c, NSEC_PER_SEC);
+-
+-	return DM_TIMER_LOAD_MIN - c;
++	return DIV_ROUND_CLOSEST_ULL((u64)clk_rate * ns, NSEC_PER_SEC);
+ }
+ 
+ static void pwm_omap_dmtimer_start(struct pwm_omap_dmtimer_chip *omap)
+@@ -99,7 +96,8 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
+ 				   int duty_ns, int period_ns)
+ {
+ 	struct pwm_omap_dmtimer_chip *omap = to_pwm_omap_dmtimer_chip(chip);
+-	int load_value, match_value;
++	u32 period_cycles, duty_cycles;
++	u32 load_value, match_value;
+ 	struct clk *fclk;
+ 	unsigned long clk_rate;
+ 	bool timer_active;
+@@ -117,15 +115,13 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
+ 	fclk = omap->pdata->get_fclk(omap->dm_timer);
+ 	if (!fclk) {
+ 		dev_err(chip->dev, "invalid pmtimer fclk\n");
+-		mutex_unlock(&omap->mutex);
+-		return -EINVAL;
++		goto err_einval;
+ 	}
+ 
+ 	clk_rate = clk_get_rate(fclk);
+ 	if (!clk_rate) {
+ 		dev_err(chip->dev, "invalid pmtimer fclk rate\n");
+-		mutex_unlock(&omap->mutex);
+-		return -EINVAL;
++		goto err_einval;
+ 	}
+ 
+ 	dev_dbg(chip->dev, "clk rate: %luHz\n", clk_rate);
+@@ -133,11 +129,45 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
+ 	/*
+ 	 * Calculate the appropriate load and match values based on the
+ 	 * specified period and duty cycle. The load value determines the
+-	 * cycle time and the match value determines the duty cycle.
++	 * period time and the match value determines the duty time.
++	 *
++	 * The period lasts for (DM_TIMER_MAX-load_value+1) clock cycles.
++	 * Similarly, the active time lasts (match_value-load_value+1) cycles.
++	 * The non-active time is the remainder: (DM_TIMER_MAX-match_value)
++	 * clock cycles.
++	 *
++	 * NOTE: It is required that: load_value <= match_value < DM_TIMER_MAX
++	 *
++	 * References:
++	 *   OMAP4430/60/70 TRM sections 22.2.4.10 and 22.2.4.11
++	 *   AM335x Sitara TRM sections 20.1.3.5 and 20.1.3.6
+ 	 */
+-	load_value = pwm_omap_dmtimer_calc_value(clk_rate, period_ns);
+-	match_value = pwm_omap_dmtimer_calc_value(clk_rate,
+-						  period_ns - duty_ns);
++	period_cycles = pwm_omap_dmtimer_get_clock_cycles(clk_rate, period_ns);
++	duty_cycles = pwm_omap_dmtimer_get_clock_cycles(clk_rate, duty_ns);
++
++	if (period_cycles < 2) {
++		dev_info(chip->dev,
++			 "period %d ns too short for clock rate %lu Hz\n",
++			 period_ns, clk_rate);
++		goto err_einval;
++	}
++
++	if (duty_cycles < 1) {
++		dev_dbg(chip->dev,
++			"duty cycle %d ns is too short for clock rate %lu Hz\n",
++			duty_ns, clk_rate);
++		dev_dbg(chip->dev, "using minimum of 1 clock cycle\n");
++		duty_cycles = 1;
++	} else if (duty_cycles >= period_cycles) {
++		dev_dbg(chip->dev,
++			"duty cycle %d ns is too long for period %d ns at clock rate %lu Hz\n",
++			duty_ns, period_ns, clk_rate);
++		dev_dbg(chip->dev, "using maximum of 1 clock cycle less than period\n");
++		duty_cycles = period_cycles - 1;
++	}
++
++	load_value = (DM_TIMER_MAX - period_cycles) + 1;
++	match_value = load_value + duty_cycles - 1;
+ 
+ 	/*
+ 	 * We MUST stop the associated dual-mode timer before attempting to
+@@ -166,6 +196,11 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
+ 	mutex_unlock(&omap->mutex);
+ 
+ 	return 0;
++
++err_einval:
++	mutex_unlock(&omap->mutex);
++
++	return -EINVAL;
+ }
+ 
+ static int pwm_omap_dmtimer_set_polarity(struct pwm_chip *chip,
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index a544366a367e..f57d02c3b6cf 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -2860,7 +2860,7 @@ lpfc_online(struct lpfc_hba *phba)
+ 	}
+ 
+ 	vports = lpfc_create_vport_work_array(phba);
+-	if (vports != NULL)
++	if (vports != NULL) {
+ 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ 			struct Scsi_Host *shost;
+ 			shost = lpfc_shost_from_vport(vports[i]);
+@@ -2877,7 +2877,8 @@ lpfc_online(struct lpfc_hba *phba)
+ 			}
+ 			spin_unlock_irq(shost->host_lock);
+ 		}
+-		lpfc_destroy_vport_work_array(phba, vports);
++	}
++	lpfc_destroy_vport_work_array(phba, vports);
+ 
+ 	lpfc_unblock_mgmt_io(phba);
+ 	return 0;
+diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
+index 534c58937a56..4a65c5bda146 100644
+--- a/drivers/soc/rockchip/pm_domains.c
++++ b/drivers/soc/rockchip/pm_domains.c
+@@ -419,6 +419,7 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
+ 		if (error) {
+ 			dev_err(dev, "failed to handle node %s: %d\n",
+ 				node->name, error);
++			of_node_put(node);
+ 			goto err_out;
+ 		}
+ 	}
+diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
+index 14718a9ffcfb..460c855be0d0 100644
+--- a/drivers/usb/core/port.c
++++ b/drivers/usb/core/port.c
+@@ -249,18 +249,12 @@ static int usb_port_runtime_suspend(struct device *dev)
+ 
+ 	return retval;
+ }
+-
+-static int usb_port_prepare(struct device *dev)
+-{
+-	return 1;
+-}
+ #endif
+ 
+ static const struct dev_pm_ops usb_port_pm_ops = {
+ #ifdef CONFIG_PM
+ 	.runtime_suspend =	usb_port_runtime_suspend,
+ 	.runtime_resume =	usb_port_runtime_resume,
+-	.prepare =		usb_port_prepare,
+ #endif
+ };
+ 
+diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
+index ebb29caa3fe4..77e4c9bc0ab1 100644
+--- a/drivers/usb/core/usb.c
++++ b/drivers/usb/core/usb.c
+@@ -311,13 +311,7 @@ static int usb_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
+ 
+ static int usb_dev_prepare(struct device *dev)
+ {
+-	struct usb_device *udev = to_usb_device(dev);
+-
+-	/* Return 0 if the current wakeup setting is wrong, otherwise 1 */
+-	if (udev->do_remote_wakeup != device_may_wakeup(dev))
+-		return 0;
+-
+-	return 1;
++	return 0;		/* Implement eventually? */
+ }
+ 
+ static void usb_dev_complete(struct device *dev)
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 9bc0e090b881..a543cdc0f88f 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -109,6 +109,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
+ 	{ USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
+ 	{ USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
++	{ USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
+ 	{ USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
+ 	{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
+ 	{ USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
+@@ -118,6 +119,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
+ 	{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
+ 	{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
++	{ USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
+ 	{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
+ 	{ USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
+ 	{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
+@@ -141,6 +143,8 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */
+ 	{ USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */
+ 	{ USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
++	{ USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */
++	{ USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */
+ 	{ USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
+ 	{ USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
+ 	{ USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
+diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
+index 12eab503efd1..364bc44610c1 100644
+--- a/drivers/xen/balloon.c
++++ b/drivers/xen/balloon.c
+@@ -152,6 +152,8 @@ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
+ static void balloon_process(struct work_struct *work);
+ static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
+ 
++static void release_memory_resource(struct resource *resource);
++
+ /* When ballooning out (allocating memory to return to Xen) we don't really
+    want the kernel to try too hard since that can trigger the oom killer. */
+ #define GFP_BALLOON \
+@@ -268,6 +270,20 @@ static struct resource *additional_memory_resource(phys_addr_t size)
+ 		return NULL;
+ 	}
+ 
++#ifdef CONFIG_SPARSEMEM
++	{
++		unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
++		unsigned long pfn = res->start >> PAGE_SHIFT;
++
++		if (pfn > limit) {
++			pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
++			       pfn, limit);
++			release_memory_resource(res);
++			return NULL;
++		}
++	}
++#endif
++
+ 	return res;
+ }
+ 
+diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
+index 38272ad24551..f4edd6df3df2 100644
+--- a/drivers/xen/evtchn.c
++++ b/drivers/xen/evtchn.c
+@@ -316,7 +316,6 @@ static int evtchn_resize_ring(struct per_user_data *u)
+ {
+ 	unsigned int new_size;
+ 	evtchn_port_t *new_ring, *old_ring;
+-	unsigned int p, c;
+ 
+ 	/*
+ 	 * Ensure the ring is large enough to capture all possible
+@@ -346,20 +345,17 @@ static int evtchn_resize_ring(struct per_user_data *u)
+ 	/*
+ 	 * Copy the old ring contents to the new ring.
+ 	 *
+-	 * If the ring contents crosses the end of the current ring,
+-	 * it needs to be copied in two chunks.
++	 * To take care of wrapping, a full ring, and the new index
++	 * pointing into the second half, simply copy the old contents
++	 * twice.
+ 	 *
+ 	 * +---------+    +------------------+
+-	 * |34567  12| -> |       1234567    |
+-	 * +-----p-c-+    +------------------+
++	 * |34567  12| -> |34567  1234567  12|
++	 * +-----p-c-+    +-------c------p---+
+ 	 */
+-	p = evtchn_ring_offset(u, u->ring_prod);
+-	c = evtchn_ring_offset(u, u->ring_cons);
+-	if (p < c) {
+-		memcpy(new_ring + c, u->ring + c, (u->ring_size - c) * sizeof(*u->ring));
+-		memcpy(new_ring + u->ring_size, u->ring, p * sizeof(*u->ring));
+-	} else
+-		memcpy(new_ring + c, u->ring + c, (p - c) * sizeof(*u->ring));
++	memcpy(new_ring, old_ring, u->ring_size * sizeof(*u->ring));
++	memcpy(new_ring + u->ring_size, old_ring,
++	       u->ring_size * sizeof(*u->ring));
+ 
+ 	u->ring = new_ring;
+ 	u->ring_size = new_size;
+diff --git a/fs/pnode.c b/fs/pnode.c
+index c524fdddc7fb..99899705b105 100644
+--- a/fs/pnode.c
++++ b/fs/pnode.c
+@@ -198,7 +198,7 @@ static struct mount *next_group(struct mount *m, struct mount *origin)
+ 
+ /* all accesses are serialized by namespace_sem */
+ static struct user_namespace *user_ns;
+-static struct mount *last_dest, *last_source, *dest_master;
++static struct mount *last_dest, *first_source, *last_source, *dest_master;
+ static struct mountpoint *mp;
+ static struct hlist_head *list;
+ 
+@@ -221,20 +221,22 @@ static int propagate_one(struct mount *m)
+ 		type = CL_MAKE_SHARED;
+ 	} else {
+ 		struct mount *n, *p;
++		bool done;
+ 		for (n = m; ; n = p) {
+ 			p = n->mnt_master;
+-			if (p == dest_master || IS_MNT_MARKED(p)) {
+-				while (last_dest->mnt_master != p) {
+-					last_source = last_source->mnt_master;
+-					last_dest = last_source->mnt_parent;
+-				}
+-				if (!peers(n, last_dest)) {
+-					last_source = last_source->mnt_master;
+-					last_dest = last_source->mnt_parent;
+-				}
++			if (p == dest_master || IS_MNT_MARKED(p))
+ 				break;
+-			}
+ 		}
++		do {
++			struct mount *parent = last_source->mnt_parent;
++			if (last_source == first_source)
++				break;
++			done = parent->mnt_master == p;
++			if (done && peers(n, parent))
++				break;
++			last_source = last_source->mnt_master;
++		} while (!done);
++
+ 		type = CL_SLAVE;
+ 		/* beginning of peer group among the slaves? */
+ 		if (IS_MNT_SHARED(m))
+@@ -286,6 +288,7 @@ int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
+ 	 */
+ 	user_ns = current->nsproxy->mnt_ns->user_ns;
+ 	last_dest = dest_mnt;
++	first_source = source_mnt;
+ 	last_source = source_mnt;
+ 	mp = dest_mp;
+ 	list = tree_list;
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 4f764c2ac1a5..45f2162e55b2 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -955,7 +955,8 @@ static ssize_t environ_read(struct file *file, char __user *buf,
+ 	struct mm_struct *mm = file->private_data;
+ 	unsigned long env_start, env_end;
+ 
+-	if (!mm)
++	/* Ensure the process spawned far enough to have an environment. */
++	if (!mm || !mm->env_end)
+ 		return 0;
+ 
+ 	page = (char *)__get_free_page(GFP_TEMPORARY);
+diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
+index 1143e38555a4..408a60dca353 100644
+--- a/include/linux/clk-provider.h
++++ b/include/linux/clk-provider.h
+@@ -385,6 +385,7 @@ struct clk_divider {
+ #define CLK_DIVIDER_MAX_AT_ZERO		BIT(6)
+ 
+ extern const struct clk_ops clk_divider_ops;
++extern const struct clk_ops clk_divider_ro_ops;
+ 
+ unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
+ 		unsigned int val, const struct clk_div_table *table,
+diff --git a/include/linux/hash.h b/include/linux/hash.h
+index 1afde47e1528..79c52fa81cac 100644
+--- a/include/linux/hash.h
++++ b/include/linux/hash.h
+@@ -32,12 +32,28 @@
+ #error Wordsize not 32 or 64
+ #endif
+ 
++/*
++ * The above primes are actively bad for hashing, since they are
++ * too sparse. The 32-bit one is mostly ok, the 64-bit one causes
++ * real problems. Besides, the "prime" part is pointless for the
++ * multiplicative hash.
++ *
++ * Although a random odd number will do, it turns out that the golden
++ * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
++ * properties.
++ *
++ * These are the negative, (1 - phi) = (phi^2) = (3 - sqrt(5))/2.
++ * (See Knuth vol 3, section 6.4, exercise 9.)
++ */
++#define GOLDEN_RATIO_32 0x61C88647
++#define GOLDEN_RATIO_64 0x61C8864680B583EBull
++
+ static __always_inline u64 hash_64(u64 val, unsigned int bits)
+ {
+ 	u64 hash = val;
+ 
+-#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
+-	hash = hash * GOLDEN_RATIO_PRIME_64;
++#if BITS_PER_LONG == 64
++	hash = hash * GOLDEN_RATIO_64;
+ #else
+ 	/*  Sigh, gcc can't optimise this alone like it does for 32 bits. */
+ 	u64 n = hash;
+diff --git a/include/linux/swap.h b/include/linux/swap.h
+index d18b65c53dbb..5fa4aa4ddd05 100644
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -533,6 +533,10 @@ static inline swp_entry_t get_swap_page(void)
+ #ifdef CONFIG_MEMCG
+ static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
+ {
++	/* Cgroup2 doesn't have per-cgroup swappiness */
++	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
++		return vm_swappiness;
++
+ 	/* root ? */
+ 	if (mem_cgroup_disabled() || !memcg->css.parent)
+ 		return vm_swappiness;
+diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
+index 0816c872b689..a6cc576fd467 100644
+--- a/include/net/ip_vs.h
++++ b/include/net/ip_vs.h
+@@ -1588,6 +1588,23 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
+ }
+ #endif /* CONFIG_IP_VS_NFCT */
+ 
++/* Really using conntrack? */
++static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp,
++					     struct sk_buff *skb)
++{
++#ifdef CONFIG_IP_VS_NFCT
++	enum ip_conntrack_info ctinfo;
++	struct nf_conn *ct;
++
++	if (!(cp->flags & IP_VS_CONN_F_NFCT))
++		return false;
++	ct = nf_ct_get(skb, &ctinfo);
++	if (ct && !nf_ct_is_untracked(ct))
++		return true;
++#endif
++	return false;
++}
++
+ static inline int
+ ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
+ {
+diff --git a/include/xen/page.h b/include/xen/page.h
+index 96294ac93755..9dc46cb8a0fd 100644
+--- a/include/xen/page.h
++++ b/include/xen/page.h
+@@ -15,9 +15,9 @@
+  */
+ 
+ #define xen_pfn_to_page(xen_pfn)	\
+-	((pfn_to_page(((unsigned long)(xen_pfn) << XEN_PAGE_SHIFT) >> PAGE_SHIFT)))
++	(pfn_to_page((unsigned long)(xen_pfn) >> (PAGE_SHIFT - XEN_PAGE_SHIFT)))
+ #define page_to_xen_pfn(page)		\
+-	(((page_to_pfn(page)) << PAGE_SHIFT) >> XEN_PAGE_SHIFT)
++	((page_to_pfn(page)) << (PAGE_SHIFT - XEN_PAGE_SHIFT))
+ 
+ #define XEN_PFN_PER_PAGE	(PAGE_SIZE / XEN_PAGE_SIZE)
+ 
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 05ddc0820771..6f965864cc02 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -2095,8 +2095,13 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file)
+ 	trace_create_file("filter", 0644, file->dir, file,
+ 			  &ftrace_event_filter_fops);
+ 
+-	trace_create_file("trigger", 0644, file->dir, file,
+-			  &event_trigger_fops);
++	/*
++	 * Only event directories that can be enabled should have
++	 * triggers.
++	 */
++	if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
++		trace_create_file("trigger", 0644, file->dir, file,
++				  &event_trigger_fops);
+ 
+ 	trace_create_file("format", 0444, file->dir, call,
+ 			  &ftrace_event_format_fops);
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 585de54dbe8c..29fb26970fba 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -880,16 +880,8 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
+ 		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
+ 							ISOLATE_UNEVICTABLE);
+ 
+-		/*
+-		 * In case of fatal failure, release everything that might
+-		 * have been isolated in the previous iteration, and signal
+-		 * the failure back to caller.
+-		 */
+-		if (!pfn) {
+-			putback_movable_pages(&cc->migratepages);
+-			cc->nr_migratepages = 0;
++		if (!pfn)
+ 			break;
+-		}
+ 
+ 		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
+ 			break;
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index 6fe7d15bd1f7..62bbf350ddf7 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -1909,7 +1909,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
+ 	if (gdtc->dirty > gdtc->bg_thresh)
+ 		return true;
+ 
+-	if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(gdtc))
++	if (wb_stat(wb, WB_RECLAIMABLE) >
++	    wb_calc_thresh(gdtc->wb, gdtc->bg_thresh))
+ 		return true;
+ 
+ 	if (mdtc) {
+@@ -1923,7 +1924,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
+ 		if (mdtc->dirty > mdtc->bg_thresh)
+ 			return true;
+ 
+-		if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(mdtc))
++		if (wb_stat(wb, WB_RECLAIMABLE) >
++		    wb_calc_thresh(mdtc->wb, mdtc->bg_thresh))
+ 			return true;
+ 	}
+ 
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 9d9044e91ac5..629ce645cffd 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -6214,7 +6214,7 @@ int __meminit init_per_zone_wmark_min(void)
+ 	setup_per_zone_inactive_ratio();
+ 	return 0;
+ }
+-module_init(init_per_zone_wmark_min)
++core_initcall(init_per_zone_wmark_min)
+ 
+ /*
+  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
+diff --git a/mm/zswap.c b/mm/zswap.c
+index bf14508afd64..340261946fda 100644
+--- a/mm/zswap.c
++++ b/mm/zswap.c
+@@ -170,6 +170,8 @@ static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
+ static LIST_HEAD(zswap_pools);
+ /* protects zswap_pools list modification */
+ static DEFINE_SPINLOCK(zswap_pools_lock);
++/* pool counter to provide unique names to zpool */
++static atomic_t zswap_pools_count = ATOMIC_INIT(0);
+ 
+ /* used by param callback function */
+ static bool zswap_init_started;
+@@ -565,6 +567,7 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
+ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
+ {
+ 	struct zswap_pool *pool;
++	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
+ 	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
+ 
+ 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+@@ -573,7 +576,10 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
+ 		return NULL;
+ 	}
+ 
+-	pool->zpool = zpool_create_pool(type, "zswap", gfp, &zswap_zpool_ops);
++	/* unique name for each pool specifically required by zsmalloc */
++	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
++
++	pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
+ 	if (!pool->zpool) {
+ 		pr_err("%s zpool not available\n", type);
+ 		goto error;
+diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
+index a49c705fb86b..5f19133c5530 100644
+--- a/net/batman-adv/distributed-arp-table.c
++++ b/net/batman-adv/distributed-arp-table.c
+@@ -553,6 +553,7 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
+  * be sent to
+  * @bat_priv: the bat priv with all the soft interface information
+  * @ip_dst: ipv4 to look up in the DHT
++ * @vid: VLAN identifier
+  *
+  * An originator O is selected if and only if its DHT_ID value is one of three
+  * closest values (from the LEFT, with wrap around if needed) then the hash
+@@ -561,7 +562,8 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
+  * Returns the candidate array of size BATADV_DAT_CANDIDATE_NUM.
+  */
+ static struct batadv_dat_candidate *
+-batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
++batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst,
++			     unsigned short vid)
+ {
+ 	int select;
+ 	batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key;
+@@ -577,7 +579,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
+ 		return NULL;
+ 
+ 	dat.ip = ip_dst;
+-	dat.vid = 0;
++	dat.vid = vid;
+ 	ip_key = (batadv_dat_addr_t)batadv_hash_dat(&dat,
+ 						    BATADV_DAT_ADDR_MAX);
+ 
+@@ -597,6 +599,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
+  * @bat_priv: the bat priv with all the soft interface information
+  * @skb: payload to send
+  * @ip: the DHT key
++ * @vid: VLAN identifier
+  * @packet_subtype: unicast4addr packet subtype to use
+  *
+  * This function copies the skb with pskb_copy() and is sent as unicast packet
+@@ -607,7 +610,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
+  */
+ static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
+ 				 struct sk_buff *skb, __be32 ip,
+-				 int packet_subtype)
++				 unsigned short vid, int packet_subtype)
+ {
+ 	int i;
+ 	bool ret = false;
+@@ -616,7 +619,7 @@ static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
+ 	struct sk_buff *tmp_skb;
+ 	struct batadv_dat_candidate *cand;
+ 
+-	cand = batadv_dat_select_candidates(bat_priv, ip);
++	cand = batadv_dat_select_candidates(bat_priv, ip, vid);
+ 	if (!cand)
+ 		goto out;
+ 
+@@ -1004,7 +1007,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
+ 		ret = true;
+ 	} else {
+ 		/* Send the request to the DHT */
+-		ret = batadv_dat_send_data(bat_priv, skb, ip_dst,
++		ret = batadv_dat_send_data(bat_priv, skb, ip_dst, vid,
+ 					   BATADV_P_DAT_DHT_GET);
+ 	}
+ out:
+@@ -1132,8 +1135,8 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
+ 	/* Send the ARP reply to the candidates for both the IP addresses that
+ 	 * the node obtained from the ARP reply
+ 	 */
+-	batadv_dat_send_data(bat_priv, skb, ip_src, BATADV_P_DAT_DHT_PUT);
+-	batadv_dat_send_data(bat_priv, skb, ip_dst, BATADV_P_DAT_DHT_PUT);
++	batadv_dat_send_data(bat_priv, skb, ip_src, vid, BATADV_P_DAT_DHT_PUT);
++	batadv_dat_send_data(bat_priv, skb, ip_dst, vid, BATADV_P_DAT_DHT_PUT);
+ }
+ 
+ /**
+diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
+index e4f2646d9246..43d15d6c4b6a 100644
+--- a/net/batman-adv/routing.c
++++ b/net/batman-adv/routing.c
+@@ -104,6 +104,15 @@ static void _batadv_update_route(struct batadv_priv *bat_priv,
+ 		neigh_node = NULL;
+ 
+ 	spin_lock_bh(&orig_node->neigh_list_lock);
++	/* curr_router used earlier may not be the current orig_ifinfo->router
++	 * anymore because it was dereferenced outside of the neigh_list_lock
++	 * protected region. After the new best neighbor has replace the current
++	 * best neighbor the reference counter needs to decrease. Consequently,
++	 * the code needs to ensure the curr_router variable contains a pointer
++	 * to the replaced best neighbor.
++	 */
++	curr_router = rcu_dereference_protected(orig_ifinfo->router, true);
++
+ 	rcu_assign_pointer(orig_ifinfo->router, neigh_node);
+ 	spin_unlock_bh(&orig_node->neigh_list_lock);
+ 	batadv_orig_ifinfo_free_ref(orig_ifinfo);
+diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
+index 782fa33ec296..45bfdefa15a5 100644
+--- a/net/batman-adv/send.c
++++ b/net/batman-adv/send.c
+@@ -629,6 +629,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
+ 
+ 		if (pending) {
+ 			hlist_del(&forw_packet->list);
++			if (!forw_packet->own)
++				atomic_inc(&bat_priv->bcast_queue_left);
++
+ 			batadv_forw_packet_free(forw_packet);
+ 		}
+ 	}
+@@ -656,6 +659,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
+ 
+ 		if (pending) {
+ 			hlist_del(&forw_packet->list);
++			if (!forw_packet->own)
++				atomic_inc(&bat_priv->batman_queue_left);
++
+ 			batadv_forw_packet_free(forw_packet);
+ 		}
+ 	}
+diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
+index ac4d08de5df4..720f1a5b81ac 100644
+--- a/net/batman-adv/soft-interface.c
++++ b/net/batman-adv/soft-interface.c
+@@ -407,11 +407,17 @@ void batadv_interface_rx(struct net_device *soft_iface,
+ 	 */
+ 	nf_reset(skb);
+ 
++	if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
++		goto dropped;
++
+ 	vid = batadv_get_vid(skb, 0);
+ 	ethhdr = eth_hdr(skb);
+ 
+ 	switch (ntohs(ethhdr->h_proto)) {
+ 	case ETH_P_8021Q:
++		if (!pskb_may_pull(skb, VLAN_ETH_HLEN))
++			goto dropped;
++
+ 		vhdr = (struct vlan_ethhdr *)skb->data;
+ 
+ 		if (vhdr->h_vlan_encapsulated_proto != ethertype)
+@@ -423,8 +429,6 @@ void batadv_interface_rx(struct net_device *soft_iface,
+ 	}
+ 
+ 	/* skb->dev & skb->pkt_type are set here */
+-	if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
+-		goto dropped;
+ 	skb->protocol = eth_type_trans(skb, soft_iface);
+ 
+ 	/* should not be necessary anymore as we use skb_pull_rcsum()
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index 7a2b7915093b..bcb0a1b64556 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -1750,7 +1750,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
+ 
+ 		ret = dev_alloc_name(ndev, ndev->name);
+ 		if (ret < 0) {
+-			free_netdev(ndev);
++			ieee80211_if_free(ndev);
+ 			return ret;
+ 		}
+ 
+@@ -1836,7 +1836,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
+ 
+ 		ret = register_netdevice(ndev);
+ 		if (ret) {
+-			free_netdev(ndev);
++			ieee80211_if_free(ndev);
+ 			return ret;
+ 		}
+ 	}
+diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
+index f57b4dcdb233..4da560005b0e 100644
+--- a/net/netfilter/ipvs/ip_vs_core.c
++++ b/net/netfilter/ipvs/ip_vs_core.c
+@@ -1757,15 +1757,34 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
+ 	cp = pp->conn_in_get(ipvs, af, skb, &iph);
+ 
+ 	conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
+-	if (conn_reuse_mode && !iph.fragoffs &&
+-	    is_new_conn(skb, &iph) && cp &&
+-	    ((unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
+-	      unlikely(!atomic_read(&cp->dest->weight))) ||
+-	     unlikely(is_new_conn_expected(cp, conn_reuse_mode)))) {
+-		if (!atomic_read(&cp->n_control))
+-			ip_vs_conn_expire_now(cp);
+-		__ip_vs_conn_put(cp);
+-		cp = NULL;
++	if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
++		bool uses_ct = false, resched = false;
++
++		if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
++		    unlikely(!atomic_read(&cp->dest->weight))) {
++			resched = true;
++			uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
++		} else if (is_new_conn_expected(cp, conn_reuse_mode)) {
++			uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
++			if (!atomic_read(&cp->n_control)) {
++				resched = true;
++			} else {
++				/* Do not reschedule controlling connection
++				 * that uses conntrack while it is still
++				 * referenced by controlled connection(s).
++				 */
++				resched = !uses_ct;
++			}
++		}
++
++		if (resched) {
++			if (!atomic_read(&cp->n_control))
++				ip_vs_conn_expire_now(cp);
++			__ip_vs_conn_put(cp);
++			if (uses_ct)
++				return NF_DROP;
++			cp = NULL;
++		}
+ 	}
+ 
+ 	if (unlikely(!cp)) {
+diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
+index 1b8d594e493a..0a6eb5c0d9e9 100644
+--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
++++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
+@@ -70,10 +70,10 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
+ 	const char *dptr;
+ 	int retc;
+ 
+-	ip_vs_fill_iph_skb(p->af, skb, false, &iph);
++	retc = ip_vs_fill_iph_skb(p->af, skb, false, &iph);
+ 
+ 	/* Only useful with UDP */
+-	if (iph.protocol != IPPROTO_UDP)
++	if (!retc || iph.protocol != IPPROTO_UDP)
+ 		return -EINVAL;
+ 	/* todo: IPv6 fragments:
+ 	 *       I think this only should be done for the first fragment. /HS
+@@ -88,7 +88,7 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
+ 	dptr = skb->data + dataoff;
+ 	datalen = skb->len - dataoff;
+ 
+-	if (get_callid(dptr, dataoff, datalen, &matchoff, &matchlen))
++	if (get_callid(dptr, 0, datalen, &matchoff, &matchlen))
+ 		return -EINVAL;
+ 
+ 	/* N.B: pe_data is only set on success,
+diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
+index 161dd0d67da8..a9155077feef 100644
+--- a/scripts/mod/file2alias.c
++++ b/scripts/mod/file2alias.c
+@@ -371,6 +371,49 @@ static void do_usb_table(void *symval, unsigned long size,
+ 		do_usb_entry_multi(symval + i, mod);
+ }
+ 
++static void do_of_entry_multi(void *symval, struct module *mod)
++{
++	char alias[500];
++	int len;
++	char *tmp;
++
++	DEF_FIELD_ADDR(symval, of_device_id, name);
++	DEF_FIELD_ADDR(symval, of_device_id, type);
++	DEF_FIELD_ADDR(symval, of_device_id, compatible);
++
++	len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
++		      (*type)[0] ? *type : "*");
++
++	if (compatible[0])
++		sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
++			*compatible);
++
++	/* Replace all whitespace with underscores */
++	for (tmp = alias; tmp && *tmp; tmp++)
++		if (isspace(*tmp))
++			*tmp = '_';
++
++	buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"%s\");\n", alias);
++	strcat(alias, "C");
++	add_wildcard(alias);
++	buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"%s\");\n", alias);
++}
++
++static void do_of_table(void *symval, unsigned long size,
++			struct module *mod)
++{
++	unsigned int i;
++	const unsigned long id_size = SIZE_of_device_id;
++
++	device_id_check(mod->name, "of", size, id_size, symval);
++
++	/* Leave last one: it's the terminator. */
++	size -= id_size;
++
++	for (i = 0; i < size; i += id_size)
++		do_of_entry_multi(symval + i, mod);
++}
++
+ /* Looks like: hid:bNvNpN */
+ static int do_hid_entry(const char *filename,
+ 			     void *symval, char *alias)
+@@ -684,30 +727,6 @@ static int do_pcmcia_entry(const char *filename,
+ }
+ ADD_TO_DEVTABLE("pcmcia", pcmcia_device_id, do_pcmcia_entry);
+ 
+-static int do_of_entry (const char *filename, void *symval, char *alias)
+-{
+-	int len;
+-	char *tmp;
+-	DEF_FIELD_ADDR(symval, of_device_id, name);
+-	DEF_FIELD_ADDR(symval, of_device_id, type);
+-	DEF_FIELD_ADDR(symval, of_device_id, compatible);
+-
+-	len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
+-		      (*type)[0] ? *type : "*");
+-
+-	if (compatible[0])
+-		sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
+-			*compatible);
+-
+-	/* Replace all whitespace with underscores */
+-	for (tmp = alias; tmp && *tmp; tmp++)
+-		if (isspace (*tmp))
+-			*tmp = '_';
+-
+-	return 1;
+-}
+-ADD_TO_DEVTABLE("of", of_device_id, do_of_entry);
+-
+ static int do_vio_entry(const char *filename, void *symval,
+ 		char *alias)
+ {
+@@ -1348,6 +1367,8 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
+ 	/* First handle the "special" cases */
+ 	if (sym_is(name, namelen, "usb"))
+ 		do_usb_table(symval, sym->st_size, mod);
++	if (sym_is(name, namelen, "of"))
++		do_of_table(symval, sym->st_size, mod);
+ 	else if (sym_is(name, namelen, "pnp"))
+ 		do_pnp_device_entry(symval, sym->st_size, mod);
+ 	else if (sym_is(name, namelen, "pnp_card"))


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [gentoo-commits] proj/linux-patches:4.5 commit in: /
@ 2016-05-19 13:02 Mike Pagano
  0 siblings, 0 replies; 12+ messages in thread
From: Mike Pagano @ 2016-05-19 13:02 UTC (permalink / raw
  To: gentoo-commits

commit:     d972e3821da819db6c85352cbab00b43e6c85506
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu May 19 13:02:07 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu May 19 13:02:07 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d972e382

Linux patch 4.5.5

 0000_README            |    4 +
 1004_linux-4.5.5.patch | 4824 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4828 insertions(+)

diff --git a/0000_README b/0000_README
index a736c59..f936e50 100644
--- a/0000_README
+++ b/0000_README
@@ -59,6 +59,10 @@ Patch:  1003_linux-4.5.4.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.5.4
 
+Patch:  1004_linux-4.5.5.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.5.5
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1004_linux-4.5.5.patch b/1004_linux-4.5.5.patch
new file mode 100644
index 0000000..6a73830
--- /dev/null
+++ b/1004_linux-4.5.5.patch
@@ -0,0 +1,4824 @@
+diff --git a/Makefile b/Makefile
+index d64eade37241..a23df411d393 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 5
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+ 
+diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
+index 0827d594b1f0..cd0cd5fd09a3 100644
+--- a/arch/arm/boot/dts/at91sam9x5.dtsi
++++ b/arch/arm/boot/dts/at91sam9x5.dtsi
+@@ -106,7 +106,7 @@
+ 
+ 			pmc: pmc@fffffc00 {
+ 				compatible = "atmel,at91sam9x5-pmc", "syscon";
+-				reg = <0xfffffc00 0x100>;
++				reg = <0xfffffc00 0x200>;
+ 				interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+ 				interrupt-controller;
+ 				#address-cells = <1>;
+diff --git a/crypto/ahash.c b/crypto/ahash.c
+index d19b52324cf5..dac1c24e9c3e 100644
+--- a/crypto/ahash.c
++++ b/crypto/ahash.c
+@@ -69,8 +69,9 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
+ 	struct scatterlist *sg;
+ 
+ 	sg = walk->sg;
+-	walk->pg = sg_page(sg);
+ 	walk->offset = sg->offset;
++	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
++	walk->offset = offset_in_page(walk->offset);
+ 	walk->entrylen = sg->length;
+ 
+ 	if (walk->entrylen > walk->total)
+diff --git a/crypto/testmgr.c b/crypto/testmgr.c
+index ae8c57fd8bc7..d4944318ca1f 100644
+--- a/crypto/testmgr.c
++++ b/crypto/testmgr.c
+@@ -1849,6 +1849,7 @@ static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,
+ static int do_test_rsa(struct crypto_akcipher *tfm,
+ 		       struct akcipher_testvec *vecs)
+ {
++	char *xbuf[XBUFSIZE];
+ 	struct akcipher_request *req;
+ 	void *outbuf_enc = NULL;
+ 	void *outbuf_dec = NULL;
+@@ -1857,9 +1858,12 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
+ 	int err = -ENOMEM;
+ 	struct scatterlist src, dst, src_tab[2];
+ 
++	if (testmgr_alloc_buf(xbuf))
++		return err;
++
+ 	req = akcipher_request_alloc(tfm, GFP_KERNEL);
+ 	if (!req)
+-		return err;
++		goto free_xbuf;
+ 
+ 	init_completion(&result.completion);
+ 
+@@ -1877,9 +1881,14 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
+ 	if (!outbuf_enc)
+ 		goto free_req;
+ 
++	if (WARN_ON(vecs->m_size > PAGE_SIZE))
++		goto free_all;
++
++	memcpy(xbuf[0], vecs->m, vecs->m_size);
++
+ 	sg_init_table(src_tab, 2);
+-	sg_set_buf(&src_tab[0], vecs->m, 8);
+-	sg_set_buf(&src_tab[1], vecs->m + 8, vecs->m_size - 8);
++	sg_set_buf(&src_tab[0], xbuf[0], 8);
++	sg_set_buf(&src_tab[1], xbuf[0] + 8, vecs->m_size - 8);
+ 	sg_init_one(&dst, outbuf_enc, out_len_max);
+ 	akcipher_request_set_crypt(req, src_tab, &dst, vecs->m_size,
+ 				   out_len_max);
+@@ -1898,7 +1907,7 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
+ 		goto free_all;
+ 	}
+ 	/* verify that encrypted message is equal to expected */
+-	if (memcmp(vecs->c, sg_virt(req->dst), vecs->c_size)) {
++	if (memcmp(vecs->c, outbuf_enc, vecs->c_size)) {
+ 		pr_err("alg: rsa: encrypt test failed. Invalid output\n");
+ 		err = -EINVAL;
+ 		goto free_all;
+@@ -1913,7 +1922,13 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
+ 		err = -ENOMEM;
+ 		goto free_all;
+ 	}
+-	sg_init_one(&src, vecs->c, vecs->c_size);
++
++	if (WARN_ON(vecs->c_size > PAGE_SIZE))
++		goto free_all;
++
++	memcpy(xbuf[0], vecs->c, vecs->c_size);
++
++	sg_init_one(&src, xbuf[0], vecs->c_size);
+ 	sg_init_one(&dst, outbuf_dec, out_len_max);
+ 	init_completion(&result.completion);
+ 	akcipher_request_set_crypt(req, &src, &dst, vecs->c_size, out_len_max);
+@@ -1940,6 +1955,8 @@ free_all:
+ 	kfree(outbuf_enc);
+ free_req:
+ 	akcipher_request_free(req);
++free_xbuf:
++	testmgr_free_buf(xbuf);
+ 	return err;
+ }
+ 
+diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
+index 7e58f6560399..4a36e415e938 100644
+--- a/drivers/base/regmap/regmap-spmi.c
++++ b/drivers/base/regmap/regmap-spmi.c
+@@ -142,7 +142,7 @@ static int regmap_spmi_ext_read(void *context,
+ 	while (val_size) {
+ 		len = min_t(size_t, val_size, 8);
+ 
+-		err = spmi_ext_register_readl(context, addr, val, val_size);
++		err = spmi_ext_register_readl(context, addr, val, len);
+ 		if (err)
+ 			goto err_out;
+ 
+diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
+index 0e82ce3c383e..976b01e58afb 100644
+--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
++++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
+@@ -236,6 +236,8 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+ 				 uint32_t vf_mask);
+ void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+ void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
++int adf_init_pf_wq(void);
++void adf_exit_pf_wq(void);
+ #else
+ static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
+ {
+@@ -253,5 +255,14 @@ static inline void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+ static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+ {
+ }
++
++static inline int adf_init_pf_wq(void)
++{
++	return 0;
++}
++
++static inline void adf_exit_pf_wq(void)
++{
++}
+ #endif
+ #endif
+diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+index 5c897e6e7994..3c3f948290ca 100644
+--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
++++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+@@ -462,12 +462,17 @@ static int __init adf_register_ctl_device_driver(void)
+ 	if (adf_init_aer())
+ 		goto err_aer;
+ 
++	if (adf_init_pf_wq())
++		goto err_pf_wq;
++
+ 	if (qat_crypto_register())
+ 		goto err_crypto_register;
+ 
+ 	return 0;
+ 
+ err_crypto_register:
++	adf_exit_pf_wq();
++err_pf_wq:
+ 	adf_exit_aer();
+ err_aer:
+ 	adf_chr_drv_destroy();
+@@ -480,6 +485,7 @@ static void __exit adf_unregister_ctl_device_driver(void)
+ {
+ 	adf_chr_drv_destroy();
+ 	adf_exit_aer();
++	adf_exit_pf_wq();
+ 	qat_crypto_unregister();
+ 	adf_clean_vf_map(false);
+ 	mutex_destroy(&adf_ctl_lock);
+diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
+index 1117a8b58280..38a0415e767d 100644
+--- a/drivers/crypto/qat/qat_common/adf_sriov.c
++++ b/drivers/crypto/qat/qat_common/adf_sriov.c
+@@ -119,11 +119,6 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
+ 	int i;
+ 	u32 reg;
+ 
+-	/* Workqueue for PF2VF responses */
+-	pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
+-	if (!pf2vf_resp_wq)
+-		return -ENOMEM;
+-
+ 	for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
+ 	     i++, vf_info++) {
+ 		/* This ptr will be populated when VFs will be created */
+@@ -216,11 +211,6 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
+ 
+ 	kfree(accel_dev->pf.vf_info);
+ 	accel_dev->pf.vf_info = NULL;
+-
+-	if (pf2vf_resp_wq) {
+-		destroy_workqueue(pf2vf_resp_wq);
+-		pf2vf_resp_wq = NULL;
+-	}
+ }
+ EXPORT_SYMBOL_GPL(adf_disable_sriov);
+ 
+@@ -304,3 +294,19 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
+ 	return numvfs;
+ }
+ EXPORT_SYMBOL_GPL(adf_sriov_configure);
++
++int __init adf_init_pf_wq(void)
++{
++	/* Workqueue for PF2VF responses */
++	pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
++
++	return !pf2vf_resp_wq ? -ENOMEM : 0;
++}
++
++void adf_exit_pf_wq(void)
++{
++	if (pf2vf_resp_wq) {
++		destroy_workqueue(pf2vf_resp_wq);
++		pf2vf_resp_wq = NULL;
++	}
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+index bf731e9f643e..7f85c2c1d681 100644
+--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
++++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+@@ -276,8 +276,8 @@ static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector
+ 			}
+ 		}
+ 	} else {
+-		for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+-			for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
++		for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
++			for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+ 				max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+ 				if (max_pix_clock >= pix_clock) {
+ 					*dp_lanes = lane_num;
+diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
+index cf39ed3133d6..4d0bc2a8843c 100644
+--- a/drivers/gpu/drm/i915/i915_debugfs.c
++++ b/drivers/gpu/drm/i915/i915_debugfs.c
+@@ -2860,20 +2860,6 @@ static void intel_dp_info(struct seq_file *m,
+ 		intel_panel_info(m, &intel_connector->panel);
+ }
+ 
+-static void intel_dp_mst_info(struct seq_file *m,
+-			  struct intel_connector *intel_connector)
+-{
+-	struct intel_encoder *intel_encoder = intel_connector->encoder;
+-	struct intel_dp_mst_encoder *intel_mst =
+-		enc_to_mst(&intel_encoder->base);
+-	struct intel_digital_port *intel_dig_port = intel_mst->primary;
+-	struct intel_dp *intel_dp = &intel_dig_port->dp;
+-	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
+-					intel_connector->port);
+-
+-	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
+-}
+-
+ static void intel_hdmi_info(struct seq_file *m,
+ 			    struct intel_connector *intel_connector)
+ {
+@@ -2917,8 +2903,6 @@ static void intel_connector_info(struct seq_file *m,
+ 			intel_hdmi_info(m, intel_connector);
+ 		else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
+ 			intel_lvds_info(m, intel_connector);
+-		else if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
+-			intel_dp_mst_info(m, intel_connector);
+ 	}
+ 
+ 	seq_printf(m, "\tmodes:\n");
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 9b6737c85acb..5672b57e65d5 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -7412,6 +7412,8 @@ enum skl_disp_power_wells {
+ #define  TRANS_CLK_SEL_DISABLED		(0x0<<29)
+ #define  TRANS_CLK_SEL_PORT(x)		(((x)+1)<<29)
+ 
++#define CDCLK_FREQ			_MMIO(0x46200)
++
+ #define _TRANSA_MSA_MISC		0x60410
+ #define _TRANSB_MSA_MISC		0x61410
+ #define _TRANSC_MSA_MISC		0x62410
+diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
+index 30f921421b0c..7d281b40064a 100644
+--- a/drivers/gpu/drm/i915/intel_audio.c
++++ b/drivers/gpu/drm/i915/intel_audio.c
+@@ -262,8 +262,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
+ 	tmp |= AUD_CONFIG_N_PROG_ENABLE;
+ 	tmp &= ~AUD_CONFIG_UPPER_N_MASK;
+ 	tmp &= ~AUD_CONFIG_LOWER_N_MASK;
+-	if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+-	    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DP_MST))
++	if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+ 		tmp |= AUD_CONFIG_N_VALUE_INDEX;
+ 	I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+ 
+@@ -476,8 +475,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
+ 	tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+ 	tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+ 	tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
+-	if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+-	    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DP_MST))
++	if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+ 		tmp |= AUD_CONFIG_N_VALUE_INDEX;
+ 	else
+ 		tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
+@@ -515,8 +513,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
+ 
+ 	/* ELD Conn_Type */
+ 	connector->eld[5] &= ~(3 << 2);
+-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+-	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DP_MST))
++	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+ 		connector->eld[5] |= (1 << 2);
+ 
+ 	connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
+diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
+index a7b4a524fadd..dbacc2901d47 100644
+--- a/drivers/gpu/drm/i915/intel_crt.c
++++ b/drivers/gpu/drm/i915/intel_crt.c
+@@ -255,8 +255,14 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
+ 		pipe_config->has_pch_encoder = true;
+ 
+ 	/* LPT FDI RX only supports 8bpc. */
+-	if (HAS_PCH_LPT(dev))
++	if (HAS_PCH_LPT(dev)) {
++		if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
++			DRM_DEBUG_KMS("LPT only supports 24bpp\n");
++			return false;
++		}
++
+ 		pipe_config->pipe_bpp = 24;
++	}
+ 
+ 	/* FDI must always be 2.7 GHz */
+ 	if (HAS_DDI(dev)) {
+diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
+index 33b8e0a2b3fd..40511493914c 100644
+--- a/drivers/gpu/drm/i915/intel_ddi.c
++++ b/drivers/gpu/drm/i915/intel_ddi.c
+@@ -3165,23 +3165,6 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc)
+ 	I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+ }
+ 
+-bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
+-				 struct intel_crtc *intel_crtc)
+-{
+-	u32 temp;
+-
+-	if (intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
+-		temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+-
+-		intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
+-
+-		if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
+-			return true;
+-	}
+-
+-	return false;
+-}
+-
+ void intel_ddi_get_config(struct intel_encoder *encoder,
+ 			  struct intel_crtc_state *pipe_config)
+ {
+@@ -3242,8 +3225,11 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
+ 		break;
+ 	}
+ 
+-	pipe_config->has_audio =
+-		intel_ddi_is_audio_enabled(dev_priv, intel_crtc);
++	if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
++		temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
++		if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
++			pipe_config->has_audio = true;
++	}
+ 
+ 	if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp &&
+ 	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index c023a04c44d0..39b00b9daf2d 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -9793,6 +9793,8 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
+ 	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
+ 	mutex_unlock(&dev_priv->rps.hw_lock);
+ 
++	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
++
+ 	intel_update_cdclk(dev);
+ 
+ 	WARN(cdclk != dev_priv->cdclk_freq,
+diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
+index db6361b5a6ab..e8e6984b8053 100644
+--- a/drivers/gpu/drm/i915/intel_dp_mst.c
++++ b/drivers/gpu/drm/i915/intel_dp_mst.c
+@@ -78,8 +78,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
+ 		return false;
+ 	}
+ 
+-	if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, found->port))
+-		pipe_config->has_audio = true;
+ 	mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
+ 
+ 	pipe_config->pbn = mst_pbn;
+@@ -104,11 +102,6 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder)
+ 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ 	struct intel_dp *intel_dp = &intel_dig_port->dp;
+-	struct drm_device *dev = encoder->base.dev;
+-	struct drm_i915_private *dev_priv = dev->dev_private;
+-	struct drm_crtc *crtc = encoder->base.crtc;
+-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+-
+ 	int ret;
+ 
+ 	DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+@@ -119,10 +112,6 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder)
+ 	if (ret) {
+ 		DRM_ERROR("failed to update payload %d\n", ret);
+ 	}
+-	if (intel_crtc->config->has_audio) {
+-		intel_audio_codec_disable(encoder);
+-		intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
+-	}
+ }
+ 
+ static void intel_mst_post_disable_dp(struct intel_encoder *encoder)
+@@ -219,7 +208,6 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder)
+ 	struct intel_dp *intel_dp = &intel_dig_port->dp;
+ 	struct drm_device *dev = intel_dig_port->base.base.dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+-	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ 	enum port port = intel_dig_port->port;
+ 	int ret;
+ 
+@@ -232,13 +220,6 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder)
+ 	ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
+ 
+ 	ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr);
+-
+-	if (crtc->config->has_audio) {
+-		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
+-				 pipe_name(crtc->pipe));
+-		intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
+-		intel_audio_codec_enable(encoder);
+-	}
+ }
+ 
+ static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
+@@ -264,9 +245,6 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
+ 
+ 	pipe_config->has_dp_encoder = true;
+ 
+-	pipe_config->has_audio =
+-		intel_ddi_is_audio_enabled(dev_priv, crtc);
+-
+ 	temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ 	if (temp & TRANS_DDI_PHSYNC)
+ 		flags |= DRM_MODE_FLAG_PHSYNC;
+diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
+index 3ce3bee368fe..1ae61f488987 100644
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -1013,8 +1013,6 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
+ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
+ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
+ void intel_ddi_fdi_disable(struct drm_crtc *crtc);
+-bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
+-				 struct intel_crtc *intel_crtc);
+ void intel_ddi_get_config(struct intel_encoder *encoder,
+ 			  struct intel_crtc_state *pipe_config);
+ struct intel_encoder *
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index 7e4a9842b9ea..0eae3994e5e3 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -6565,6 +6565,12 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
+ 	misccpctl = I915_READ(GEN7_MISCCPCTL);
+ 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+ 	I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
++	/*
++	 * Wait at least 100 clocks before re-enabling clock gating. See
++	 * the definition of L3SQCREG1 in BSpec.
++	 */
++	POSTING_READ(GEN8_L3SQCREG1);
++	udelay(1);
+ 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+ 
+ 	/*
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index 801dd60ac192..7f52142d37d5 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -1740,6 +1740,7 @@ static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
+ static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
+ {
+ 	struct drm_device *dev = crtc->dev;
++	struct radeon_device *rdev = dev->dev_private;
+ 	struct drm_crtc *test_crtc;
+ 	struct radeon_crtc *test_radeon_crtc;
+ 
+@@ -1749,6 +1750,10 @@ static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
+ 		test_radeon_crtc = to_radeon_crtc(test_crtc);
+ 		if (test_radeon_crtc->encoder &&
+ 		    ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
++			/* PPLL2 is exclusive to UNIPHYA on DCE61 */
++			if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
++			    test_radeon_crtc->pll_id == ATOM_PPLL2)
++				continue;
+ 			/* for DP use the same PLL for all */
+ 			if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
+ 				return test_radeon_crtc->pll_id;
+@@ -1770,6 +1775,7 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
+ {
+ 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ 	struct drm_device *dev = crtc->dev;
++	struct radeon_device *rdev = dev->dev_private;
+ 	struct drm_crtc *test_crtc;
+ 	struct radeon_crtc *test_radeon_crtc;
+ 	u32 adjusted_clock, test_adjusted_clock;
+@@ -1785,6 +1791,10 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
+ 		test_radeon_crtc = to_radeon_crtc(test_crtc);
+ 		if (test_radeon_crtc->encoder &&
+ 		    !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
++			/* PPLL2 is exclusive to UNIPHYA on DCE61 */
++			if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
++			    test_radeon_crtc->pll_id == ATOM_PPLL2)
++				continue;
+ 			/* check if we are already driving this connector with another crtc */
+ 			if (test_radeon_crtc->connector == radeon_crtc->connector) {
+ 				/* if we are, return that pll */
+diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
+index 6af832545bc5..b5760851195c 100644
+--- a/drivers/gpu/drm/radeon/atombios_dp.c
++++ b/drivers/gpu/drm/radeon/atombios_dp.c
+@@ -326,8 +326,8 @@ int radeon_dp_get_dp_link_config(struct drm_connector *connector,
+ 			}
+ 		}
+ 	} else {
+-		for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+-			for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
++		for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
++			for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+ 				max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+ 				if (max_pix_clock >= pix_clock) {
+ 					*dp_lanes = lane_num;
+diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+index 3b0c229d7dcd..db64e0062689 100644
+--- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
++++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+@@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
+ 
+ 	tmp &= AUX_HPD_SEL(0x7);
+ 	tmp |= AUX_HPD_SEL(chan->rec.hpd);
+-	tmp |= AUX_EN | AUX_LS_READ_EN;
++	tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1);
+ 
+ 	WREG32(AUX_CONTROL + aux_offset[instance], tmp);
+ 
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index ed9cefa1f6f1..eaed31d04468 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -654,8 +654,8 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
+ 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ 	struct mlx5_core_dev *mdev = dev->mdev;
+ 	struct mlx5_hca_vport_context *rep;
+-	int max_mtu;
+-	int oper_mtu;
++	u16 max_mtu;
++	u16 oper_mtu;
+ 	int err;
+ 	u8 ib_link_width_oper;
+ 	u8 vl_hw_cap;
+diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
+index a806ba3818f7..8d6326d7e7be 100644
+--- a/drivers/input/misc/max8997_haptic.c
++++ b/drivers/input/misc/max8997_haptic.c
+@@ -255,12 +255,14 @@ static int max8997_haptic_probe(struct platform_device *pdev)
+ 	struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ 	const struct max8997_platform_data *pdata =
+ 					dev_get_platdata(iodev->dev);
+-	const struct max8997_haptic_platform_data *haptic_pdata =
+-					pdata->haptic_pdata;
++	const struct max8997_haptic_platform_data *haptic_pdata = NULL;
+ 	struct max8997_haptic *chip;
+ 	struct input_dev *input_dev;
+ 	int error;
+ 
++	if (pdata)
++		haptic_pdata = pdata->haptic_pdata;
++
+ 	if (!haptic_pdata) {
+ 		dev_err(&pdev->dev, "no haptic platform data\n");
+ 		return -EINVAL;
+diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
+index 7f366f1b0377..0b1b8c7b6ce5 100644
+--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
++++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
+@@ -74,11 +74,6 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer
+ 	return 0;
+ }
+ 
+-static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
+-{
+-	return __verify_planes_array(vb, pb);
+-}
+-
+ /**
+  * __verify_length() - Verify that the bytesused value for each plane fits in
+  * the plane length and that the data offset doesn't exceed the bytesused value.
+@@ -442,7 +437,6 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
+ }
+ 
+ static const struct vb2_buf_ops v4l2_buf_ops = {
+-	.verify_planes_array	= __verify_planes_array_core,
+ 	.fill_user_buffer	= __fill_v4l2_buffer,
+ 	.fill_vb2_buffer	= __fill_vb2_buffer,
+ 	.copy_timestamp		= __copy_timestamp,
+diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
+index 8f76f4558a88..2ff465848b65 100644
+--- a/drivers/net/ethernet/atheros/atlx/atl2.c
++++ b/drivers/net/ethernet/atheros/atlx/atl2.c
+@@ -1412,7 +1412,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 
+ 	err = -EIO;
+ 
+-	netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX;
++	netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
+ 	netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
+ 
+ 	/* Init PHY as early as possible due to power saving issue  */
+diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+index 767347b1f631..f50bdbfaae7c 100644
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+@@ -519,6 +519,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
+ 		nicvf_config_vlan_stripping(nic, nic->netdev->features);
+ 
+ 	/* Enable Receive queue */
++	memset(&rq_cfg, 0, sizeof(struct rq_cfg));
+ 	rq_cfg.ena = 1;
+ 	rq_cfg.tcp_ena = 0;
+ 	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
+@@ -551,6 +552,7 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
+ 			      qidx, (u64)(cq->dmem.phys_base));
+ 
+ 	/* Enable Completion queue */
++	memset(&cq_cfg, 0, sizeof(struct cq_cfg));
+ 	cq_cfg.ena = 1;
+ 	cq_cfg.reset = 0;
+ 	cq_cfg.caching = 0;
+@@ -599,6 +601,7 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
+ 			      qidx, (u64)(sq->dmem.phys_base));
+ 
+ 	/* Enable send queue  & set queue size */
++	memset(&sq_cfg, 0, sizeof(struct sq_cfg));
+ 	sq_cfg.ena = 1;
+ 	sq_cfg.reset = 0;
+ 	sq_cfg.ldwb = 0;
+@@ -635,6 +638,7 @@ static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
+ 
+ 	/* Enable RBDR  & set queue size */
+ 	/* Buffer size should be in multiples of 128 bytes */
++	memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg));
+ 	rbdr_cfg.ena = 1;
+ 	rbdr_cfg.reset = 0;
+ 	rbdr_cfg.ldwb = 0;
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 41c81f6ec630..0d6e8c177650 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -1566,9 +1566,15 @@ fec_enet_rx(struct net_device *ndev, int budget)
+ 	struct fec_enet_private *fep = netdev_priv(ndev);
+ 
+ 	for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
+-		clear_bit(queue_id, &fep->work_rx);
+-		pkt_received += fec_enet_rx_queue(ndev,
++		int ret;
++
++		ret = fec_enet_rx_queue(ndev,
+ 					budget - pkt_received, queue_id);
++
++		if (ret < budget - pkt_received)
++			clear_bit(queue_id, &fep->work_rx);
++
++		pkt_received += ret;
+ 	}
+ 	return pkt_received;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+index 41440b2b20a3..03ef9aca21e4 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -704,7 +704,7 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
+ 
+ 	if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS)
+ 		return -1;
+-	hw_checksum = csum_add(hw_checksum, (__force __wsum)(ipv6h->nexthdr << 8));
++	hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));
+ 
+ 	csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
+ 				       sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+index e0946ab22010..0debb611da8b 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+@@ -402,7 +402,6 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
+ 	u32 packets = 0;
+ 	u32 bytes = 0;
+ 	int factor = priv->cqe_factor;
+-	u64 timestamp = 0;
+ 	int done = 0;
+ 	int budget = priv->tx_work_limit;
+ 	u32 last_nr_txbb;
+@@ -442,9 +441,12 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
+ 		new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
+ 
+ 		do {
++			u64 timestamp = 0;
++
+ 			txbbs_skipped += last_nr_txbb;
+ 			ring_index = (ring_index + last_nr_txbb) & size_mask;
+-			if (ring->tx_info[ring_index].ts_requested)
++
++			if (unlikely(ring->tx_info[ring_index].ts_requested))
+ 				timestamp = mlx4_en_get_cqe_ts(cqe);
+ 
+ 			/* free next descriptor */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 402994bf7e16..e293a2ec2775 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -1389,24 +1389,50 @@ static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
+ 	return 0;
+ }
+ 
+-static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
++static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
+ {
+-	struct mlx5e_priv *priv = netdev_priv(netdev);
+ 	struct mlx5_core_dev *mdev = priv->mdev;
+-	int hw_mtu;
++	u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
+ 	int err;
+ 
+-	err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
++	err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
+ 	if (err)
+ 		return err;
+ 
+-	mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
++	/* Update vport context MTU */
++	mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
++	return 0;
++}
++
++static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
++{
++	struct mlx5_core_dev *mdev = priv->mdev;
++	u16 hw_mtu = 0;
++	int err;
++
++	err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
++	if (err || !hw_mtu) /* fallback to port oper mtu */
++		mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
++
++	*mtu = MLX5E_HW2SW_MTU(hw_mtu);
++}
++
++static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
++{
++	struct mlx5e_priv *priv = netdev_priv(netdev);
++	u16 mtu;
++	int err;
+ 
+-	if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
+-		netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
+-			    __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);
++	err = mlx5e_set_mtu(priv, netdev->mtu);
++	if (err)
++		return err;
+ 
+-	netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);
++	mlx5e_query_mtu(priv, &mtu);
++	if (mtu != netdev->mtu)
++		netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
++			    __func__, mtu, netdev->mtu);
++
++	netdev->mtu = mtu;
+ 	return 0;
+ }
+ 
+@@ -1906,22 +1932,27 @@ static int mlx5e_set_features(struct net_device *netdev,
+ 	return err;
+ }
+ 
++#define MXL5_HW_MIN_MTU 64
++#define MXL5E_MIN_MTU (MXL5_HW_MIN_MTU + ETH_FCS_LEN)
++
+ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
+ {
+ 	struct mlx5e_priv *priv = netdev_priv(netdev);
+ 	struct mlx5_core_dev *mdev = priv->mdev;
+ 	bool was_opened;
+-	int max_mtu;
++	u16 max_mtu;
++	u16 min_mtu;
+ 	int err = 0;
+ 
+ 	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
+ 
+ 	max_mtu = MLX5E_HW2SW_MTU(max_mtu);
++	min_mtu = MLX5E_HW2SW_MTU(MXL5E_MIN_MTU);
+ 
+-	if (new_mtu > max_mtu) {
++	if (new_mtu > max_mtu || new_mtu < min_mtu) {
+ 		netdev_err(netdev,
+-			   "%s: Bad MTU (%d) > (%d) Max\n",
+-			   __func__, new_mtu, max_mtu);
++			   "%s: Bad MTU (%d), valid range is: [%d..%d]\n",
++			   __func__, new_mtu, min_mtu, max_mtu);
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index 6f68dba8d7ed..cc901852f1a9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -957,33 +957,6 @@ unlock_fg:
+ 	return rule;
+ }
+ 
+-static struct mlx5_flow_rule *add_rule_to_auto_fg(struct mlx5_flow_table *ft,
+-						  u8 match_criteria_enable,
+-						  u32 *match_criteria,
+-						  u32 *match_value,
+-						  u8 action,
+-						  u32 flow_tag,
+-						  struct mlx5_flow_destination *dest)
+-{
+-	struct mlx5_flow_rule *rule;
+-	struct mlx5_flow_group *g;
+-
+-	g = create_autogroup(ft, match_criteria_enable, match_criteria);
+-	if (IS_ERR(g))
+-		return (void *)g;
+-
+-	rule = add_rule_fg(g, match_value,
+-			   action, flow_tag, dest);
+-	if (IS_ERR(rule)) {
+-		/* Remove assumes refcount > 0 and autogroup creates a group
+-		 * with a refcount = 0.
+-		 */
+-		tree_get_node(&g->node);
+-		tree_remove_node(&g->node);
+-	}
+-	return rule;
+-}
+-
+ struct mlx5_flow_rule *
+ mlx5_add_flow_rule(struct mlx5_flow_table *ft,
+ 		   u8 match_criteria_enable,
+@@ -1008,8 +981,23 @@ mlx5_add_flow_rule(struct mlx5_flow_table *ft,
+ 				goto unlock;
+ 		}
+ 
+-	rule = add_rule_to_auto_fg(ft, match_criteria_enable, match_criteria,
+-				   match_value, action, flow_tag, dest);
++	g = create_autogroup(ft, match_criteria_enable, match_criteria);
++	if (IS_ERR(g)) {
++		rule = (void *)g;
++		goto unlock;
++	}
++
++	rule = add_rule_fg(g, match_value,
++			   action, flow_tag, dest);
++	if (IS_ERR(rule)) {
++		/* Remove assumes refcount > 0 and autogroup creates a group
++		 * with a refcount = 0.
++		 */
++		unlock_ref_node(&ft->node);
++		tree_get_node(&g->node);
++		tree_remove_node(&g->node);
++		return rule;
++	}
+ unlock:
+ 	unlock_ref_node(&ft->node);
+ 	return rule;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
+index a87e773e93f3..53a793bc2e3d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
+@@ -246,8 +246,8 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+ }
+ EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
+ 
+-static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
+-				int *max_mtu, int *oper_mtu, u8 port)
++static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
++				u16 *max_mtu, u16 *oper_mtu, u8 port)
+ {
+ 	u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
+ 	u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
+@@ -267,7 +267,7 @@ static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
+ 		*admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
+ }
+ 
+-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
++int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
+ {
+ 	u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
+ 	u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
+@@ -282,14 +282,14 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
+ }
+ EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
+ 
+-void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
++void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu,
+ 			     u8 port)
+ {
+ 	mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port);
+ }
+ EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
+ 
+-void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
++void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
+ 			      u8 port)
+ {
+ 	mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+index c7398b95aecd..6d5f56e73b5d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+@@ -196,6 +196,46 @@ int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
+ }
+ EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);
+ 
++int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
++{
++	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
++	u32 *out;
++	int err;
++
++	out = mlx5_vzalloc(outlen);
++	if (!out)
++		return -ENOMEM;
++
++	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
++	if (!err)
++		*mtu = MLX5_GET(query_nic_vport_context_out, out,
++				nic_vport_context.mtu);
++
++	kvfree(out);
++	return err;
++}
++EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);
++
++int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
++{
++	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
++	void *in;
++	int err;
++
++	in = mlx5_vzalloc(inlen);
++	if (!in)
++		return -ENOMEM;
++
++	MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
++	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
++
++	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
++
++	kvfree(in);
++	return err;
++}
++EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);
++
+ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
+ 				  u32 vport,
+ 				  enum mlx5_list_type list_type,
+diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
+index bdd83d95ec0a..96a5028621c8 100644
+--- a/drivers/net/usb/cdc_mbim.c
++++ b/drivers/net/usb/cdc_mbim.c
+@@ -617,8 +617,13 @@ static const struct usb_device_id mbim_devs[] = {
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+ 	  .driver_info = (unsigned long)&cdc_mbim_info,
+ 	},
+-	/* Huawei E3372 fails unless NDP comes after the IP packets */
+-	{ USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
++
++	/* Some Huawei devices, ME906s-158 (12d1:15c1) and E3372
++	 * (12d1:157d), are known to fail unless the NDP is placed
++	 * after the IP packets.  Applying the quirk to all Huawei
++	 * devices is broader than necessary, but harmless.
++	 */
++	{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+ 	  .driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end,
+ 	},
+ 	/* default entry */
+diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
+index ee69db6ae1c7..e1c0d4e1bb33 100644
+--- a/drivers/pinctrl/pinctrl-at91-pio4.c
++++ b/drivers/pinctrl/pinctrl-at91-pio4.c
+@@ -722,9 +722,11 @@ static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
+ 			break;
+ 		case PIN_CONFIG_BIAS_PULL_UP:
+ 			conf |= ATMEL_PIO_PUEN_MASK;
++			conf &= (~ATMEL_PIO_PDEN_MASK);
+ 			break;
+ 		case PIN_CONFIG_BIAS_PULL_DOWN:
+ 			conf |= ATMEL_PIO_PDEN_MASK;
++			conf &= (~ATMEL_PIO_PUEN_MASK);
+ 			break;
+ 		case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ 			if (arg == 0)
+diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
+index f2e1a39ce0f3..5cf4a97e0304 100644
+--- a/drivers/regulator/axp20x-regulator.c
++++ b/drivers/regulator/axp20x-regulator.c
+@@ -221,10 +221,10 @@ static const struct regulator_desc axp22x_regulators[] = {
+ 		 AXP22X_ELDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(1)),
+ 	AXP_DESC(AXP22X, ELDO3, "eldo3", "eldoin", 700, 3300, 100,
+ 		 AXP22X_ELDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(2)),
+-	AXP_DESC_IO(AXP22X, LDO_IO0, "ldo_io0", "ips", 1800, 3300, 100,
++	AXP_DESC_IO(AXP22X, LDO_IO0, "ldo_io0", "ips", 700, 3300, 100,
+ 		    AXP22X_LDO_IO0_V_OUT, 0x1f, AXP20X_GPIO0_CTRL, 0x07,
+ 		    AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
+-	AXP_DESC_IO(AXP22X, LDO_IO1, "ldo_io1", "ips", 1800, 3300, 100,
++	AXP_DESC_IO(AXP22X, LDO_IO1, "ldo_io1", "ips", 700, 3300, 100,
+ 		    AXP22X_LDO_IO1_V_OUT, 0x1f, AXP20X_GPIO1_CTRL, 0x07,
+ 		    AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
+ 	AXP_DESC_FIXED(AXP22X, RTC_LDO, "rtc_ldo", "ips", 3000),
+diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
+index 3242ffc0cb25..09dce49609c1 100644
+--- a/drivers/regulator/s2mps11.c
++++ b/drivers/regulator/s2mps11.c
+@@ -306,7 +306,7 @@ static struct regulator_ops s2mps11_buck_ops = {
+ 	.enable_mask	= S2MPS11_ENABLE_MASK			\
+ }
+ 
+-#define regulator_desc_s2mps11_buck6_10(num, min, step) {	\
++#define regulator_desc_s2mps11_buck67810(num, min, step) {	\
+ 	.name		= "BUCK"#num,				\
+ 	.id		= S2MPS11_BUCK##num,			\
+ 	.ops		= &s2mps11_buck_ops,			\
+@@ -322,6 +322,22 @@ static struct regulator_ops s2mps11_buck_ops = {
+ 	.enable_mask	= S2MPS11_ENABLE_MASK			\
+ }
+ 
++#define regulator_desc_s2mps11_buck9 {				\
++	.name		= "BUCK9",				\
++	.id		= S2MPS11_BUCK9,			\
++	.ops		= &s2mps11_buck_ops,			\
++	.type		= REGULATOR_VOLTAGE,			\
++	.owner		= THIS_MODULE,				\
++	.min_uV		= MIN_3000_MV,				\
++	.uV_step	= STEP_25_MV,				\
++	.n_voltages	= S2MPS11_BUCK9_N_VOLTAGES,		\
++	.ramp_delay	= S2MPS11_RAMP_DELAY,			\
++	.vsel_reg	= S2MPS11_REG_B9CTRL2,			\
++	.vsel_mask	= S2MPS11_BUCK9_VSEL_MASK,		\
++	.enable_reg	= S2MPS11_REG_B9CTRL1,			\
++	.enable_mask	= S2MPS11_ENABLE_MASK			\
++}
++
+ static const struct regulator_desc s2mps11_regulators[] = {
+ 	regulator_desc_s2mps11_ldo(1, STEP_25_MV),
+ 	regulator_desc_s2mps11_ldo(2, STEP_50_MV),
+@@ -366,11 +382,11 @@ static const struct regulator_desc s2mps11_regulators[] = {
+ 	regulator_desc_s2mps11_buck1_4(3),
+ 	regulator_desc_s2mps11_buck1_4(4),
+ 	regulator_desc_s2mps11_buck5,
+-	regulator_desc_s2mps11_buck6_10(6, MIN_600_MV, STEP_6_25_MV),
+-	regulator_desc_s2mps11_buck6_10(7, MIN_600_MV, STEP_6_25_MV),
+-	regulator_desc_s2mps11_buck6_10(8, MIN_600_MV, STEP_6_25_MV),
+-	regulator_desc_s2mps11_buck6_10(9, MIN_3000_MV, STEP_25_MV),
+-	regulator_desc_s2mps11_buck6_10(10, MIN_750_MV, STEP_12_5_MV),
++	regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV),
++	regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_6_25_MV),
++	regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_6_25_MV),
++	regulator_desc_s2mps11_buck9,
++	regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV),
+ };
+ 
+ static struct regulator_ops s2mps14_reg_ops;
+diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
+index 5d0ec42a9317..634254a52301 100644
+--- a/drivers/scsi/qla1280.c
++++ b/drivers/scsi/qla1280.c
+@@ -4214,7 +4214,7 @@ static struct scsi_host_template qla1280_driver_template = {
+ 	.eh_bus_reset_handler	= qla1280_eh_bus_reset,
+ 	.eh_host_reset_handler	= qla1280_eh_adapter_reset,
+ 	.bios_param		= qla1280_biosparam,
+-	.can_queue		= 0xfffff,
++	.can_queue		= MAX_OUTSTANDING_COMMANDS,
+ 	.this_id		= -1,
+ 	.sg_tablesize		= SG_ALL,
+ 	.use_clustering		= ENABLE_CLUSTERING,
+diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
+index ab9914ad8365..64232ecbb821 100644
+--- a/drivers/spi/spi-pxa2xx.c
++++ b/drivers/spi/spi-pxa2xx.c
+@@ -111,7 +111,7 @@ static const struct lpss_config lpss_platforms[] = {
+ 		.reg_general = -1,
+ 		.reg_ssp = 0x20,
+ 		.reg_cs_ctrl = 0x24,
+-		.reg_capabilities = 0xfc,
++		.reg_capabilities = -1,
+ 		.rx_threshold = 1,
+ 		.tx_threshold_lo = 32,
+ 		.tx_threshold_hi = 56,
+diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
+index 64318fcfacf2..5044c6198332 100644
+--- a/drivers/spi/spi-ti-qspi.c
++++ b/drivers/spi/spi-ti-qspi.c
+@@ -94,6 +94,7 @@ struct ti_qspi {
+ #define QSPI_FLEN(n)			((n - 1) << 0)
+ #define QSPI_WLEN_MAX_BITS		128
+ #define QSPI_WLEN_MAX_BYTES		16
++#define QSPI_WLEN_MASK			QSPI_WLEN(QSPI_WLEN_MAX_BITS)
+ 
+ /* STATUS REGISTER */
+ #define BUSY				0x01
+@@ -224,16 +225,16 @@ static inline int ti_qspi_poll_wc(struct ti_qspi *qspi)
+ 	return  -ETIMEDOUT;
+ }
+ 
+-static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
++static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t,
++			  int count)
+ {
+-	int wlen, count, xfer_len;
++	int wlen, xfer_len;
+ 	unsigned int cmd;
+ 	const u8 *txbuf;
+ 	u32 data;
+ 
+ 	txbuf = t->tx_buf;
+ 	cmd = qspi->cmd | QSPI_WR_SNGL;
+-	count = t->len;
+ 	wlen = t->bits_per_word >> 3;	/* in bytes */
+ 	xfer_len = wlen;
+ 
+@@ -293,9 +294,10 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
+ 	return 0;
+ }
+ 
+-static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
++static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
++			 int count)
+ {
+-	int wlen, count;
++	int wlen;
+ 	unsigned int cmd;
+ 	u8 *rxbuf;
+ 
+@@ -312,7 +314,6 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
+ 		cmd |= QSPI_RD_SNGL;
+ 		break;
+ 	}
+-	count = t->len;
+ 	wlen = t->bits_per_word >> 3;	/* in bytes */
+ 
+ 	while (count) {
+@@ -343,12 +344,13 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
+ 	return 0;
+ }
+ 
+-static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t)
++static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t,
++			     int count)
+ {
+ 	int ret;
+ 
+ 	if (t->tx_buf) {
+-		ret = qspi_write_msg(qspi, t);
++		ret = qspi_write_msg(qspi, t, count);
+ 		if (ret) {
+ 			dev_dbg(qspi->dev, "Error while writing\n");
+ 			return ret;
+@@ -356,7 +358,7 @@ static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t)
+ 	}
+ 
+ 	if (t->rx_buf) {
+-		ret = qspi_read_msg(qspi, t);
++		ret = qspi_read_msg(qspi, t, count);
+ 		if (ret) {
+ 			dev_dbg(qspi->dev, "Error while reading\n");
+ 			return ret;
+@@ -373,7 +375,8 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
+ 	struct spi_device *spi = m->spi;
+ 	struct spi_transfer *t;
+ 	int status = 0, ret;
+-	int frame_length;
++	unsigned int frame_len_words, transfer_len_words;
++	int wlen;
+ 
+ 	/* setup device control reg */
+ 	qspi->dc = 0;
+@@ -385,30 +388,38 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
+ 	if (spi->mode & SPI_CS_HIGH)
+ 		qspi->dc |= QSPI_CSPOL(spi->chip_select);
+ 
+-	frame_length = (m->frame_length << 3) / spi->bits_per_word;
+-
+-	frame_length = clamp(frame_length, 0, QSPI_FRAME);
++	frame_len_words = 0;
++	list_for_each_entry(t, &m->transfers, transfer_list)
++		frame_len_words += t->len / (t->bits_per_word >> 3);
++	frame_len_words = min_t(unsigned int, frame_len_words, QSPI_FRAME);
+ 
+ 	/* setup command reg */
+ 	qspi->cmd = 0;
+ 	qspi->cmd |= QSPI_EN_CS(spi->chip_select);
+-	qspi->cmd |= QSPI_FLEN(frame_length);
++	qspi->cmd |= QSPI_FLEN(frame_len_words);
+ 
+ 	ti_qspi_write(qspi, qspi->dc, QSPI_SPI_DC_REG);
+ 
+ 	mutex_lock(&qspi->list_lock);
+ 
+ 	list_for_each_entry(t, &m->transfers, transfer_list) {
+-		qspi->cmd |= QSPI_WLEN(t->bits_per_word);
++		qspi->cmd = ((qspi->cmd & ~QSPI_WLEN_MASK) |
++			     QSPI_WLEN(t->bits_per_word));
++
++		wlen = t->bits_per_word >> 3;
++		transfer_len_words = min(t->len / wlen, frame_len_words);
+ 
+-		ret = qspi_transfer_msg(qspi, t);
++		ret = qspi_transfer_msg(qspi, t, transfer_len_words * wlen);
+ 		if (ret) {
+ 			dev_dbg(qspi->dev, "transfer message failed\n");
+ 			mutex_unlock(&qspi->list_lock);
+ 			return -EINVAL;
+ 		}
+ 
+-		m->actual_length += t->len;
++		m->actual_length += transfer_len_words * wlen;
++		frame_len_words -= transfer_len_words;
++		if (frame_len_words == 0)
++			break;
+ 	}
+ 
+ 	mutex_unlock(&qspi->list_lock);
+diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/wilc_spi.c
+index 86de50c9f7f5..b3d6541b3896 100644
+--- a/drivers/staging/wilc1000/wilc_spi.c
++++ b/drivers/staging/wilc1000/wilc_spi.c
+@@ -120,8 +120,6 @@ static u8 crc7(u8 crc, const u8 *buffer, u32 len)
+ 
+ #define USE_SPI_DMA     0
+ 
+-static const struct wilc1000_ops wilc1000_spi_ops;
+-
+ static int wilc_bus_probe(struct spi_device *spi)
+ {
+ 	int ret, gpio;
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index 769e0ff1b4ce..dea6486a7508 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -19,6 +19,7 @@
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <linux/rbtree.h>
++#include <linux/vmalloc.h>
+ #include "ctree.h"
+ #include "disk-io.h"
+ #include "transaction.h"
+@@ -5361,10 +5362,13 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
+ 		goto out;
+ 	}
+ 
+-	tmp_buf = kmalloc(left_root->nodesize, GFP_NOFS);
++	tmp_buf = kmalloc(left_root->nodesize, GFP_KERNEL | __GFP_NOWARN);
+ 	if (!tmp_buf) {
+-		ret = -ENOMEM;
+-		goto out;
++		tmp_buf = vmalloc(left_root->nodesize);
++		if (!tmp_buf) {
++			ret = -ENOMEM;
++			goto out;
++		}
+ 	}
+ 
+ 	left_path->search_commit_root = 1;
+@@ -5565,7 +5569,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
+ out:
+ 	btrfs_free_path(left_path);
+ 	btrfs_free_path(right_path);
+-	kfree(tmp_buf);
++	kvfree(tmp_buf);
+ 	return ret;
+ }
+ 
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index bfe4a337fb4d..6661ad8b4088 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -2252,7 +2252,7 @@ struct btrfs_ioctl_defrag_range_args {
+ #define BTRFS_MOUNT_FREE_SPACE_TREE	(1 << 26)
+ 
+ #define BTRFS_DEFAULT_COMMIT_INTERVAL	(30)
+-#define BTRFS_DEFAULT_MAX_INLINE	(8192)
++#define BTRFS_DEFAULT_MAX_INLINE	(2048)
+ 
+ #define btrfs_clear_opt(o, opt)		((o) &= ~BTRFS_MOUNT_##opt)
+ #define btrfs_set_opt(o, opt)		((o) |= BTRFS_MOUNT_##opt)
+diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
+index cbb7dbfb3fff..218f51a5dbab 100644
+--- a/fs/btrfs/dev-replace.c
++++ b/fs/btrfs/dev-replace.c
+@@ -394,6 +394,8 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
+ 	dev_replace->cursor_right = 0;
+ 	dev_replace->is_valid = 1;
+ 	dev_replace->item_needs_writeback = 1;
++	atomic64_set(&dev_replace->num_write_errors, 0);
++	atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0);
+ 	args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
+ 	btrfs_dev_replace_unlock(dev_replace);
+ 
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index d8d68af5aef0..ae6e3e36fdf0 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -303,7 +303,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
+ 		err = map_private_extent_buffer(buf, offset, 32,
+ 					&kaddr, &map_start, &map_len);
+ 		if (err)
+-			return 1;
++			return err;
+ 		cur_len = min(len, map_len - (offset - map_start));
+ 		crc = btrfs_csum_data(kaddr + offset - map_start,
+ 				      crc, cur_len);
+@@ -313,7 +313,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
+ 	if (csum_size > sizeof(inline_result)) {
+ 		result = kzalloc(csum_size, GFP_NOFS);
+ 		if (!result)
+-			return 1;
++			return -ENOMEM;
+ 	} else {
+ 		result = (char *)&inline_result;
+ 	}
+@@ -334,7 +334,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
+ 				val, found, btrfs_header_level(buf));
+ 			if (result != (char *)&inline_result)
+ 				kfree(result);
+-			return 1;
++			return -EUCLEAN;
+ 		}
+ 	} else {
+ 		write_extent_buffer(buf, result, 0, csum_size);
+@@ -513,11 +513,21 @@ static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
+ 	eb = (struct extent_buffer *)page->private;
+ 	if (page != eb->pages[0])
+ 		return 0;
++
+ 	found_start = btrfs_header_bytenr(eb);
+-	if (WARN_ON(found_start != start || !PageUptodate(page)))
+-		return 0;
+-	csum_tree_block(fs_info, eb, 0);
+-	return 0;
++	/*
++	 * Please do not consolidate these warnings into a single if.
++	 * It is useful to know what went wrong.
++	 */
++	if (WARN_ON(found_start != start))
++		return -EUCLEAN;
++	if (WARN_ON(!PageUptodate(page)))
++		return -EUCLEAN;
++
++	ASSERT(memcmp_extent_buffer(eb, fs_info->fsid,
++			btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);
++
++	return csum_tree_block(fs_info, eb, 0);
+ }
+ 
+ static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
+@@ -660,10 +670,8 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
+ 				       eb, found_level);
+ 
+ 	ret = csum_tree_block(root->fs_info, eb, 1);
+-	if (ret) {
+-		ret = -EIO;
++	if (ret)
+ 		goto err;
+-	}
+ 
+ 	/*
+ 	 * If this is a leaf block and it is corrupt, set the corrupt bit so
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 9a30ca64066b..5d956b869e03 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -1996,10 +1996,11 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ 	 */
+ 	smp_mb();
+ 	if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
+-	    (BTRFS_I(inode)->last_trans <=
+-	     root->fs_info->last_trans_committed &&
+-	     (full_sync ||
+-	      !btrfs_have_ordered_extents_in_range(inode, start, len)))) {
++	    (full_sync && BTRFS_I(inode)->last_trans <=
++	     root->fs_info->last_trans_committed) ||
++	    (!btrfs_have_ordered_extents_in_range(inode, start, len) &&
++	     BTRFS_I(inode)->last_trans
++	     <= root->fs_info->last_trans_committed)) {
+ 		/*
+ 		 * We'v had everything committed since the last time we were
+ 		 * modified so clear this flag in case it was set for whatever
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index d96f5cf38a2d..f407e487c687 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4211,11 +4211,20 @@ static int truncate_space_check(struct btrfs_trans_handle *trans,
+ {
+ 	int ret;
+ 
++	/*
++	 * This is only used to apply pressure to the enospc system, we don't
++	 * intend to use this reservation at all.
++	 */
+ 	bytes_deleted = btrfs_csum_bytes_to_leaves(root, bytes_deleted);
++	bytes_deleted *= root->nodesize;
+ 	ret = btrfs_block_rsv_add(root, &root->fs_info->trans_block_rsv,
+ 				  bytes_deleted, BTRFS_RESERVE_NO_FLUSH);
+-	if (!ret)
++	if (!ret) {
++		trace_btrfs_space_reservation(root->fs_info, "transaction",
++					      trans->transid,
++					      bytes_deleted, 1);
+ 		trans->bytes_reserved += bytes_deleted;
++	}
+ 	return ret;
+ 
+ }
+@@ -7414,7 +7423,26 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
+ 				     cached_state, GFP_NOFS);
+ 
+ 		if (ordered) {
+-			btrfs_start_ordered_extent(inode, ordered, 1);
++			/*
++			 * If we are doing a DIO read and the ordered extent we
++			 * found is for a buffered write, we can not wait for it
++			 * to complete and retry, because if we do so we can
++			 * deadlock with concurrent buffered writes on page
++			 * locks. This happens only if our DIO read covers more
++			 * than one extent map, if at this point has already
++			 * created an ordered extent for a previous extent map
++			 * and locked its range in the inode's io tree, and a
++			 * concurrent write against that previous extent map's
++			 * range and this range started (we unlock the ranges
++			 * in the io tree only when the bios complete and
++			 * buffered writes always lock pages before attempting
++			 * to lock range in the io tree).
++			 */
++			if (writing ||
++			    test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
++				btrfs_start_ordered_extent(inode, ordered, 1);
++			else
++				ret = -ENOTBLK;
+ 			btrfs_put_ordered_extent(ordered);
+ 		} else {
+ 			/*
+@@ -7431,9 +7459,11 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
+ 			 * that page.
+ 			 */
+ 			ret = -ENOTBLK;
+-			break;
+ 		}
+ 
++		if (ret)
++			break;
++
+ 		cond_resched();
+ 	}
+ 
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 48aee9846329..e3791f268489 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -59,6 +59,7 @@
+ #include "props.h"
+ #include "sysfs.h"
+ #include "qgroup.h"
++#include "tree-log.h"
+ 
+ #ifdef CONFIG_64BIT
+ /* If we have a 32-bit userspace and 64-bit kernel, then the UAPI
+@@ -1656,7 +1657,7 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
+ 
+ 		src_inode = file_inode(src.file);
+ 		if (src_inode->i_sb != file_inode(file)->i_sb) {
+-			btrfs_info(BTRFS_I(src_inode)->root->fs_info,
++			btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
+ 				   "Snapshot src from another FS");
+ 			ret = -EXDEV;
+ 		} else if (!inode_owner_or_capable(src_inode)) {
+@@ -2097,8 +2098,6 @@ static noinline int search_ioctl(struct inode *inode,
+ 		key.offset = (u64)-1;
+ 		root = btrfs_read_fs_root_no_name(info, &key);
+ 		if (IS_ERR(root)) {
+-			btrfs_err(info, "could not find root %llu",
+-			       sk->tree_id);
+ 			btrfs_free_path(path);
+ 			return -ENOENT;
+ 		}
+@@ -2476,6 +2475,8 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
+ 	trans->block_rsv = &block_rsv;
+ 	trans->bytes_reserved = block_rsv.size;
+ 
++	btrfs_record_snapshot_destroy(trans, dir);
++
+ 	ret = btrfs_unlink_subvol(trans, root, dir,
+ 				dest->root_key.objectid,
+ 				dentry->d_name.name,
+@@ -3068,6 +3069,9 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
+ 		ret = extent_same_check_offsets(src, loff, &len, olen);
+ 		if (ret)
+ 			goto out_unlock;
++		ret = extent_same_check_offsets(src, dst_loff, &len, olen);
++		if (ret)
++			goto out_unlock;
+ 
+ 		/*
+ 		 * Single inode case wants the same checks, except we
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 5279fdae7142..7173360eea7a 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -1842,8 +1842,10 @@ out:
+ }
+ 
+ /*
+- * copy the acounting information between qgroups. This is necessary when a
+- * snapshot or a subvolume is created
++ * Copy the acounting information between qgroups. This is necessary
++ * when a snapshot or a subvolume is created. Throwing an error will
++ * cause a transaction abort so we take extra care here to only error
++ * when a readonly fs is a reasonable outcome.
+  */
+ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
+ 			 struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
+@@ -1873,15 +1875,15 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
+ 		       2 * inherit->num_excl_copies;
+ 		for (i = 0; i < nums; ++i) {
+ 			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
+-			if (!srcgroup) {
+-				ret = -EINVAL;
+-				goto out;
+-			}
+ 
+-			if ((srcgroup->qgroupid >> 48) <= (objectid >> 48)) {
+-				ret = -EINVAL;
+-				goto out;
+-			}
++			/*
++			 * Zero out invalid groups so we can ignore
++			 * them later.
++			 */
++			if (!srcgroup ||
++			    ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
++				*i_qgroups = 0ULL;
++
+ 			++i_qgroups;
+ 		}
+ 	}
+@@ -1916,17 +1918,19 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
+ 	 */
+ 	if (inherit) {
+ 		i_qgroups = (u64 *)(inherit + 1);
+-		for (i = 0; i < inherit->num_qgroups; ++i) {
++		for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
++			if (*i_qgroups == 0)
++				continue;
+ 			ret = add_qgroup_relation_item(trans, quota_root,
+ 						       objectid, *i_qgroups);
+-			if (ret)
++			if (ret && ret != -EEXIST)
+ 				goto out;
+ 			ret = add_qgroup_relation_item(trans, quota_root,
+ 						       *i_qgroups, objectid);
+-			if (ret)
++			if (ret && ret != -EEXIST)
+ 				goto out;
+-			++i_qgroups;
+ 		}
++		ret = 0;
+ 	}
+ 
+ 
+@@ -1987,17 +1991,22 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
+ 
+ 	i_qgroups = (u64 *)(inherit + 1);
+ 	for (i = 0; i < inherit->num_qgroups; ++i) {
+-		ret = add_relation_rb(quota_root->fs_info, objectid,
+-				      *i_qgroups);
+-		if (ret)
+-			goto unlock;
++		if (*i_qgroups) {
++			ret = add_relation_rb(quota_root->fs_info, objectid,
++					      *i_qgroups);
++			if (ret)
++				goto unlock;
++		}
+ 		++i_qgroups;
+ 	}
+ 
+-	for (i = 0; i <  inherit->num_ref_copies; ++i) {
++	for (i = 0; i <  inherit->num_ref_copies; ++i, i_qgroups += 2) {
+ 		struct btrfs_qgroup *src;
+ 		struct btrfs_qgroup *dst;
+ 
++		if (!i_qgroups[0] || !i_qgroups[1])
++			continue;
++
+ 		src = find_qgroup_rb(fs_info, i_qgroups[0]);
+ 		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
+ 
+@@ -2008,12 +2017,14 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
+ 
+ 		dst->rfer = src->rfer - level_size;
+ 		dst->rfer_cmpr = src->rfer_cmpr - level_size;
+-		i_qgroups += 2;
+ 	}
+-	for (i = 0; i <  inherit->num_excl_copies; ++i) {
++	for (i = 0; i <  inherit->num_excl_copies; ++i, i_qgroups += 2) {
+ 		struct btrfs_qgroup *src;
+ 		struct btrfs_qgroup *dst;
+ 
++		if (!i_qgroups[0] || !i_qgroups[1])
++			continue;
++
+ 		src = find_qgroup_rb(fs_info, i_qgroups[0]);
+ 		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
+ 
+@@ -2024,7 +2035,6 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
+ 
+ 		dst->excl = src->excl + level_size;
+ 		dst->excl_cmpr = src->excl_cmpr + level_size;
+-		i_qgroups += 2;
+ 	}
+ 
+ unlock:
+diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
+index 619f92963e27..49b3fb73ffbf 100644
+--- a/fs/btrfs/reada.c
++++ b/fs/btrfs/reada.c
+@@ -265,7 +265,7 @@ static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
+ 	spin_unlock(&fs_info->reada_lock);
+ 
+ 	if (ret == 1) {
+-		if (logical >= zone->start && logical < zone->end)
++		if (logical >= zone->start && logical <= zone->end)
+ 			return zone;
+ 		spin_lock(&fs_info->reada_lock);
+ 		kref_put(&zone->refcnt, reada_zone_release);
+@@ -679,7 +679,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
+ 	 */
+ 	ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
+ 				     dev->reada_next >> PAGE_CACHE_SHIFT, 1);
+-	if (ret == 0 || re->logical >= dev->reada_curr_zone->end) {
++	if (ret == 0 || re->logical > dev->reada_curr_zone->end) {
+ 		ret = reada_pick_zone(dev);
+ 		if (!ret) {
+ 			spin_unlock(&fs_info->reada_lock);
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index 2bd0011450df..5c806f0d443d 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -1850,6 +1850,7 @@ again:
+ 			eb = read_tree_block(dest, old_bytenr, old_ptr_gen);
+ 			if (IS_ERR(eb)) {
+ 				ret = PTR_ERR(eb);
++				break;
+ 			} else if (!extent_buffer_uptodate(eb)) {
+ 				ret = -EIO;
+ 				free_extent_buffer(eb);
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 849a30aa117d..58ae0a2ce65c 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -4621,7 +4621,22 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
+ 
+ 	mutex_lock(&BTRFS_I(inode)->log_mutex);
+ 
+-	btrfs_get_logged_extents(inode, &logged_list, start, end);
++	/*
++	 * Collect ordered extents only if we are logging data. This is to
++	 * ensure a subsequent request to log this inode in LOG_INODE_ALL mode
++	 * will process the ordered extents if they still exists at the time,
++	 * because when we collect them we test and set for the flag
++	 * BTRFS_ORDERED_LOGGED to prevent multiple log requests to process the
++	 * same ordered extents. The consequence for the LOG_INODE_ALL log mode
++	 * not processing the ordered extents is that we end up logging the
++	 * corresponding file extent items, based on the extent maps in the
++	 * inode's extent_map_tree's modified_list, without logging the
++	 * respective checksums (since the may still be only attached to the
++	 * ordered extents and have not been inserted in the csum tree by
++	 * btrfs_finish_ordered_io() yet).
++	 */
++	if (inode_only == LOG_INODE_ALL)
++		btrfs_get_logged_extents(inode, &logged_list, start, end);
+ 
+ 	/*
+ 	 * a brute force approach to making sure we get the most uptodate
+@@ -4909,6 +4924,42 @@ out_unlock:
+ }
+ 
+ /*
++ * Check if we must fallback to a transaction commit when logging an inode.
++ * This must be called after logging the inode and is used only in the context
++ * when fsyncing an inode requires the need to log some other inode - in which
++ * case we can't lock the i_mutex of each other inode we need to log as that
++ * can lead to deadlocks with concurrent fsync against other inodes (as we can
++ * log inodes up or down in the hierarchy) or rename operations for example. So
++ * we take the log_mutex of the inode after we have logged it and then check for
++ * its last_unlink_trans value - this is safe because any task setting
++ * last_unlink_trans must take the log_mutex and it must do this before it does
++ * the actual unlink operation, so if we do this check before a concurrent task
++ * sets last_unlink_trans it means we've logged a consistent version/state of
++ * all the inode items, otherwise we are not sure and must do a transaction
++ * commit (the concurrent task migth have only updated last_unlink_trans before
++ * we logged the inode or it might have also done the unlink).
++ */
++static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
++					  struct inode *inode)
++{
++	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
++	bool ret = false;
++
++	mutex_lock(&BTRFS_I(inode)->log_mutex);
++	if (BTRFS_I(inode)->last_unlink_trans > fs_info->last_trans_committed) {
++		/*
++		 * Make sure any commits to the log are forced to be full
++		 * commits.
++		 */
++		btrfs_set_log_full_commit(fs_info, trans);
++		ret = true;
++	}
++	mutex_unlock(&BTRFS_I(inode)->log_mutex);
++
++	return ret;
++}
++
++/*
+  * follow the dentry parent pointers up the chain and see if any
+  * of the directories in it require a full commit before they can
+  * be logged.  Returns zero if nothing special needs to be done or 1 if
+@@ -4921,7 +4972,6 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
+ 					       u64 last_committed)
+ {
+ 	int ret = 0;
+-	struct btrfs_root *root;
+ 	struct dentry *old_parent = NULL;
+ 	struct inode *orig_inode = inode;
+ 
+@@ -4953,14 +5003,7 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
+ 			BTRFS_I(inode)->logged_trans = trans->transid;
+ 		smp_mb();
+ 
+-		if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
+-			root = BTRFS_I(inode)->root;
+-
+-			/*
+-			 * make sure any commits to the log are forced
+-			 * to be full commits
+-			 */
+-			btrfs_set_log_full_commit(root->fs_info, trans);
++		if (btrfs_must_commit_transaction(trans, inode)) {
+ 			ret = 1;
+ 			break;
+ 		}
+@@ -5119,6 +5162,9 @@ process_leaf:
+ 			btrfs_release_path(path);
+ 			ret = btrfs_log_inode(trans, root, di_inode,
+ 					      log_mode, 0, LLONG_MAX, ctx);
++			if (!ret &&
++			    btrfs_must_commit_transaction(trans, di_inode))
++				ret = 1;
+ 			iput(di_inode);
+ 			if (ret)
+ 				goto next_dir_inode;
+@@ -5233,6 +5279,9 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
+ 
+ 			ret = btrfs_log_inode(trans, root, dir_inode,
+ 					      LOG_INODE_ALL, 0, LLONG_MAX, ctx);
++			if (!ret &&
++			    btrfs_must_commit_transaction(trans, dir_inode))
++				ret = 1;
+ 			iput(dir_inode);
+ 			if (ret)
+ 				goto out;
+@@ -5584,6 +5633,9 @@ error:
+  * They revolve around files there were unlinked from the directory, and
+  * this function updates the parent directory so that a full commit is
+  * properly done if it is fsync'd later after the unlinks are done.
++ *
++ * Must be called before the unlink operations (updates to the subvolume tree,
++ * inodes, etc) are done.
+  */
+ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
+ 			     struct inode *dir, struct inode *inode,
+@@ -5599,8 +5651,11 @@ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
+ 	 * into the file.  When the file is logged we check it and
+ 	 * don't log the parents if the file is fully on disk.
+ 	 */
+-	if (S_ISREG(inode->i_mode))
++	if (S_ISREG(inode->i_mode)) {
++		mutex_lock(&BTRFS_I(inode)->log_mutex);
+ 		BTRFS_I(inode)->last_unlink_trans = trans->transid;
++		mutex_unlock(&BTRFS_I(inode)->log_mutex);
++	}
+ 
+ 	/*
+ 	 * if this directory was already logged any new
+@@ -5631,7 +5686,29 @@ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
+ 	return;
+ 
+ record:
++	mutex_lock(&BTRFS_I(dir)->log_mutex);
++	BTRFS_I(dir)->last_unlink_trans = trans->transid;
++	mutex_unlock(&BTRFS_I(dir)->log_mutex);
++}
++
++/*
++ * Make sure that if someone attempts to fsync the parent directory of a deleted
++ * snapshot, it ends up triggering a transaction commit. This is to guarantee
++ * that after replaying the log tree of the parent directory's root we will not
++ * see the snapshot anymore and at log replay time we will not see any log tree
++ * corresponding to the deleted snapshot's root, which could lead to replaying
++ * it after replaying the log tree of the parent directory (which would replay
++ * the snapshot delete operation).
++ *
++ * Must be called before the actual snapshot destroy operation (updates to the
++ * parent root and tree of tree roots trees, etc) are done.
++ */
++void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
++				   struct inode *dir)
++{
++	mutex_lock(&BTRFS_I(dir)->log_mutex);
+ 	BTRFS_I(dir)->last_unlink_trans = trans->transid;
++	mutex_unlock(&BTRFS_I(dir)->log_mutex);
+ }
+ 
+ /*
+diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
+index 6916a781ea02..a9f1b75d080d 100644
+--- a/fs/btrfs/tree-log.h
++++ b/fs/btrfs/tree-log.h
+@@ -79,6 +79,8 @@ int btrfs_pin_log_trans(struct btrfs_root *root);
+ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
+ 			     struct inode *dir, struct inode *inode,
+ 			     int for_rename);
++void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
++				   struct inode *dir);
+ int btrfs_log_new_name(struct btrfs_trans_handle *trans,
+ 			struct inode *inode, struct inode *old_dir,
+ 			struct dentry *parent);
+diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
+index 5384ceb35b1c..98b3eb7d8eaf 100644
+--- a/fs/isofs/rock.c
++++ b/fs/isofs/rock.c
+@@ -203,6 +203,8 @@ int get_rock_ridge_filename(struct iso_directory_record *de,
+ 	int retnamlen = 0;
+ 	int truncate = 0;
+ 	int ret = 0;
++	char *p;
++	int len;
+ 
+ 	if (!ISOFS_SB(inode->i_sb)->s_rock)
+ 		return 0;
+@@ -267,12 +269,17 @@ repeat:
+ 					rr->u.NM.flags);
+ 				break;
+ 			}
+-			if ((strlen(retname) + rr->len - 5) >= 254) {
++			len = rr->len - 5;
++			if (retnamlen + len >= 254) {
+ 				truncate = 1;
+ 				break;
+ 			}
+-			strncat(retname, rr->u.NM.name, rr->len - 5);
+-			retnamlen += rr->len - 5;
++			p = memchr(rr->u.NM.name, '\0', len);
++			if (unlikely(p))
++				len = p - rr->u.NM.name;
++			memcpy(retname + retnamlen, rr->u.NM.name, len);
++			retnamlen += len;
++			retname[retnamlen] = '\0';
+ 			break;
+ 		case SIG('R', 'E'):
+ 			kfree(rs.buffer);
+diff --git a/fs/namei.c b/fs/namei.c
+index 9c590e0f66e9..7824bfb89ada 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -2968,22 +2968,10 @@ no_open:
+ 		dentry = lookup_real(dir, dentry, nd->flags);
+ 		if (IS_ERR(dentry))
+ 			return PTR_ERR(dentry);
+-
+-		if (create_error) {
+-			int open_flag = op->open_flag;
+-
+-			error = create_error;
+-			if ((open_flag & O_EXCL)) {
+-				if (!dentry->d_inode)
+-					goto out;
+-			} else if (!dentry->d_inode) {
+-				goto out;
+-			} else if ((open_flag & O_TRUNC) &&
+-				   d_is_reg(dentry)) {
+-				goto out;
+-			}
+-			/* will fail later, go on to get the right error */
+-		}
++	}
++	if (create_error && !dentry->d_inode) {
++		error = create_error;
++		goto out;
+ 	}
+ looked_up:
+ 	path->dentry = dentry;
+@@ -4258,7 +4246,11 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ 	bool new_is_dir = false;
+ 	unsigned max_links = new_dir->i_sb->s_max_links;
+ 
+-	if (source == target)
++	/*
++	 * Check source == target.
++	 * On overlayfs need to look at underlying inodes.
++	 */
++	if (vfs_select_inode(old_dentry, 0) == vfs_select_inode(new_dentry, 0))
+ 		return 0;
+ 
+ 	error = may_delete(old_dir, old_dentry, is_dir);
+diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
+index 0cdf497c91ef..2162434728c0 100644
+--- a/fs/ocfs2/acl.c
++++ b/fs/ocfs2/acl.c
+@@ -322,3 +322,90 @@ struct posix_acl *ocfs2_iop_get_acl(struct inode *inode, int type)
+ 	brelse(di_bh);
+ 	return acl;
+ }
++
++int ocfs2_acl_chmod(struct inode *inode, struct buffer_head *bh)
++{
++	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
++	struct posix_acl *acl;
++	int ret;
++
++	if (S_ISLNK(inode->i_mode))
++		return -EOPNOTSUPP;
++
++	if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
++		return 0;
++
++	acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh);
++	if (IS_ERR(acl) || !acl)
++		return PTR_ERR(acl);
++	ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
++	if (ret)
++		return ret;
++	ret = ocfs2_set_acl(NULL, inode, NULL, ACL_TYPE_ACCESS,
++			    acl, NULL, NULL);
++	posix_acl_release(acl);
++	return ret;
++}
++
++/*
++ * Initialize the ACLs of a new inode. If parent directory has default ACL,
++ * then clone to new inode. Called from ocfs2_mknod.
++ */
++int ocfs2_init_acl(handle_t *handle,
++		   struct inode *inode,
++		   struct inode *dir,
++		   struct buffer_head *di_bh,
++		   struct buffer_head *dir_bh,
++		   struct ocfs2_alloc_context *meta_ac,
++		   struct ocfs2_alloc_context *data_ac)
++{
++	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
++	struct posix_acl *acl = NULL;
++	int ret = 0, ret2;
++	umode_t mode;
++
++	if (!S_ISLNK(inode->i_mode)) {
++		if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
++			acl = ocfs2_get_acl_nolock(dir, ACL_TYPE_DEFAULT,
++						   dir_bh);
++			if (IS_ERR(acl))
++				return PTR_ERR(acl);
++		}
++		if (!acl) {
++			mode = inode->i_mode & ~current_umask();
++			ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
++			if (ret) {
++				mlog_errno(ret);
++				goto cleanup;
++			}
++		}
++	}
++	if ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) && acl) {
++		if (S_ISDIR(inode->i_mode)) {
++			ret = ocfs2_set_acl(handle, inode, di_bh,
++					    ACL_TYPE_DEFAULT, acl,
++					    meta_ac, data_ac);
++			if (ret)
++				goto cleanup;
++		}
++		mode = inode->i_mode;
++		ret = __posix_acl_create(&acl, GFP_NOFS, &mode);
++		if (ret < 0)
++			return ret;
++
++		ret2 = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
++		if (ret2) {
++			mlog_errno(ret2);
++			ret = ret2;
++			goto cleanup;
++		}
++		if (ret > 0) {
++			ret = ocfs2_set_acl(handle, inode,
++					    di_bh, ACL_TYPE_ACCESS,
++					    acl, meta_ac, data_ac);
++		}
++	}
++cleanup:
++	posix_acl_release(acl);
++	return ret;
++}
+diff --git a/fs/ocfs2/acl.h b/fs/ocfs2/acl.h
+index 3fce68d08625..2783a75b3999 100644
+--- a/fs/ocfs2/acl.h
++++ b/fs/ocfs2/acl.h
+@@ -35,5 +35,10 @@ int ocfs2_set_acl(handle_t *handle,
+ 			 struct posix_acl *acl,
+ 			 struct ocfs2_alloc_context *meta_ac,
+ 			 struct ocfs2_alloc_context *data_ac);
++extern int ocfs2_acl_chmod(struct inode *, struct buffer_head *);
++extern int ocfs2_init_acl(handle_t *, struct inode *, struct inode *,
++			  struct buffer_head *, struct buffer_head *,
++			  struct ocfs2_alloc_context *,
++			  struct ocfs2_alloc_context *);
+ 
+ #endif /* OCFS2_ACL_H */
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index 7cb38fdca229..3d60fda1cb09 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -1268,20 +1268,20 @@ bail_unlock_rw:
+ 	if (size_change)
+ 		ocfs2_rw_unlock(inode, 1);
+ bail:
+-	brelse(bh);
+ 
+ 	/* Release quota pointers in case we acquired them */
+ 	for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++)
+ 		dqput(transfer_to[qtype]);
+ 
+ 	if (!status && attr->ia_valid & ATTR_MODE) {
+-		status = posix_acl_chmod(inode, inode->i_mode);
++		status = ocfs2_acl_chmod(inode, bh);
+ 		if (status < 0)
+ 			mlog_errno(status);
+ 	}
+ 	if (inode_locked)
+ 		ocfs2_inode_unlock(inode, 1);
+ 
++	brelse(bh);
+ 	return status;
+ }
+ 
+diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
+index 6b3e87189a64..a8f1225e6d9b 100644
+--- a/fs/ocfs2/namei.c
++++ b/fs/ocfs2/namei.c
+@@ -259,7 +259,6 @@ static int ocfs2_mknod(struct inode *dir,
+ 	struct ocfs2_dir_lookup_result lookup = { NULL, };
+ 	sigset_t oldset;
+ 	int did_block_signals = 0;
+-	struct posix_acl *default_acl = NULL, *acl = NULL;
+ 	struct ocfs2_dentry_lock *dl = NULL;
+ 
+ 	trace_ocfs2_mknod(dir, dentry, dentry->d_name.len, dentry->d_name.name,
+@@ -367,12 +366,6 @@ static int ocfs2_mknod(struct inode *dir,
+ 		goto leave;
+ 	}
+ 
+-	status = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
+-	if (status) {
+-		mlog_errno(status);
+-		goto leave;
+-	}
+-
+ 	handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
+ 							    S_ISDIR(mode),
+ 							    xattr_credits));
+@@ -421,16 +414,8 @@ static int ocfs2_mknod(struct inode *dir,
+ 		inc_nlink(dir);
+ 	}
+ 
+-	if (default_acl) {
+-		status = ocfs2_set_acl(handle, inode, new_fe_bh,
+-				       ACL_TYPE_DEFAULT, default_acl,
+-				       meta_ac, data_ac);
+-	}
+-	if (!status && acl) {
+-		status = ocfs2_set_acl(handle, inode, new_fe_bh,
+-				       ACL_TYPE_ACCESS, acl,
+-				       meta_ac, data_ac);
+-	}
++	status = ocfs2_init_acl(handle, inode, dir, new_fe_bh, parent_fe_bh,
++			 meta_ac, data_ac);
+ 
+ 	if (status < 0) {
+ 		mlog_errno(status);
+@@ -472,10 +457,6 @@ static int ocfs2_mknod(struct inode *dir,
+ 	d_instantiate(dentry, inode);
+ 	status = 0;
+ leave:
+-	if (default_acl)
+-		posix_acl_release(default_acl);
+-	if (acl)
+-		posix_acl_release(acl);
+ 	if (status < 0 && did_quota_inode)
+ 		dquot_free_inode(inode);
+ 	if (handle)
+diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
+index 3eff031aaf26..9919964c5b3b 100644
+--- a/fs/ocfs2/refcounttree.c
++++ b/fs/ocfs2/refcounttree.c
+@@ -4248,20 +4248,12 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
+ 	struct inode *inode = d_inode(old_dentry);
+ 	struct buffer_head *old_bh = NULL;
+ 	struct inode *new_orphan_inode = NULL;
+-	struct posix_acl *default_acl, *acl;
+-	umode_t mode;
+ 
+ 	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
+ 		return -EOPNOTSUPP;
+ 
+-	mode = inode->i_mode;
+-	error = posix_acl_create(dir, &mode, &default_acl, &acl);
+-	if (error) {
+-		mlog_errno(error);
+-		return error;
+-	}
+ 
+-	error = ocfs2_create_inode_in_orphan(dir, mode,
++	error = ocfs2_create_inode_in_orphan(dir, inode->i_mode,
+ 					     &new_orphan_inode);
+ 	if (error) {
+ 		mlog_errno(error);
+@@ -4300,16 +4292,11 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
+ 	/* If the security isn't preserved, we need to re-initialize them. */
+ 	if (!preserve) {
+ 		error = ocfs2_init_security_and_acl(dir, new_orphan_inode,
+-						    &new_dentry->d_name,
+-						    default_acl, acl);
++						    &new_dentry->d_name);
+ 		if (error)
+ 			mlog_errno(error);
+ 	}
+ out:
+-	if (default_acl)
+-		posix_acl_release(default_acl);
+-	if (acl)
+-		posix_acl_release(acl);
+ 	if (!error) {
+ 		error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode,
+ 						       new_dentry);
+diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
+index 7d3d979f57d9..f19b7381a998 100644
+--- a/fs/ocfs2/xattr.c
++++ b/fs/ocfs2/xattr.c
+@@ -7216,12 +7216,10 @@ out:
+  */
+ int ocfs2_init_security_and_acl(struct inode *dir,
+ 				struct inode *inode,
+-				const struct qstr *qstr,
+-				struct posix_acl *default_acl,
+-				struct posix_acl *acl)
++				const struct qstr *qstr)
+ {
+-	struct buffer_head *dir_bh = NULL;
+ 	int ret = 0;
++	struct buffer_head *dir_bh = NULL;
+ 
+ 	ret = ocfs2_init_security_get(inode, dir, qstr, NULL);
+ 	if (ret) {
+@@ -7234,11 +7232,9 @@ int ocfs2_init_security_and_acl(struct inode *dir,
+ 		mlog_errno(ret);
+ 		goto leave;
+ 	}
+-
+-	if (!ret && default_acl)
+-		ret = ocfs2_iop_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
+-	if (!ret && acl)
+-		ret = ocfs2_iop_set_acl(inode, acl, ACL_TYPE_ACCESS);
++	ret = ocfs2_init_acl(NULL, inode, dir, NULL, dir_bh, NULL, NULL);
++	if (ret)
++		mlog_errno(ret);
+ 
+ 	ocfs2_inode_unlock(dir, 0);
+ 	brelse(dir_bh);
+diff --git a/fs/ocfs2/xattr.h b/fs/ocfs2/xattr.h
+index f10d5b93c366..1633cc15ea1f 100644
+--- a/fs/ocfs2/xattr.h
++++ b/fs/ocfs2/xattr.h
+@@ -94,7 +94,5 @@ int ocfs2_reflink_xattrs(struct inode *old_inode,
+ 			 bool preserve_security);
+ int ocfs2_init_security_and_acl(struct inode *dir,
+ 				struct inode *inode,
+-				const struct qstr *qstr,
+-				struct posix_acl *default_acl,
+-				struct posix_acl *acl);
++				const struct qstr *qstr);
+ #endif /* OCFS2_XATTR_H */
+diff --git a/fs/open.c b/fs/open.c
+index 17cb6b1dab75..081d3d6df74b 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -840,16 +840,12 @@ EXPORT_SYMBOL(file_path);
+ int vfs_open(const struct path *path, struct file *file,
+ 	     const struct cred *cred)
+ {
+-	struct dentry *dentry = path->dentry;
+-	struct inode *inode = dentry->d_inode;
++	struct inode *inode = vfs_select_inode(path->dentry, file->f_flags);
+ 
+-	file->f_path = *path;
+-	if (dentry->d_flags & DCACHE_OP_SELECT_INODE) {
+-		inode = dentry->d_op->d_select_inode(dentry, file->f_flags);
+-		if (IS_ERR(inode))
+-			return PTR_ERR(inode);
+-	}
++	if (IS_ERR(inode))
++		return PTR_ERR(inode);
+ 
++	file->f_path = *path;
+ 	return do_dentry_open(file, inode, NULL, cred);
+ }
+ 
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 83d1926c61e4..67bc2da5d233 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -165,12 +165,13 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl);
+ void bpf_register_map_type(struct bpf_map_type_list *tl);
+ 
+ struct bpf_prog *bpf_prog_get(u32 ufd);
++struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog);
+ void bpf_prog_put(struct bpf_prog *prog);
+ void bpf_prog_put_rcu(struct bpf_prog *prog);
+ 
+ struct bpf_map *bpf_map_get_with_uref(u32 ufd);
+ struct bpf_map *__bpf_map_get(struct fd f);
+-void bpf_map_inc(struct bpf_map *map, bool uref);
++struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref);
+ void bpf_map_put_with_uref(struct bpf_map *map);
+ void bpf_map_put(struct bpf_map *map);
+ 
+diff --git a/include/linux/dcache.h b/include/linux/dcache.h
+index 03dda7ba73ac..96c1a2da92d7 100644
+--- a/include/linux/dcache.h
++++ b/include/linux/dcache.h
+@@ -592,4 +592,16 @@ static inline struct dentry *d_real(struct dentry *dentry)
+ 		return dentry;
+ }
+ 
++static inline struct inode *vfs_select_inode(struct dentry *dentry,
++					     unsigned open_flags)
++{
++	struct inode *inode = d_inode(dentry);
++
++	if (inode && unlikely(dentry->d_flags & DCACHE_OP_SELECT_INODE))
++		inode = dentry->d_op->d_select_inode(dentry, open_flags);
++
++	return inode;
++}
++
++
+ #endif	/* __LINUX_DCACHE_H */
+diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h
+index b288965e8101..2c14eeca46f0 100644
+--- a/include/linux/mfd/samsung/s2mps11.h
++++ b/include/linux/mfd/samsung/s2mps11.h
+@@ -173,10 +173,12 @@ enum s2mps11_regulators {
+ 
+ #define S2MPS11_LDO_VSEL_MASK	0x3F
+ #define S2MPS11_BUCK_VSEL_MASK	0xFF
++#define S2MPS11_BUCK9_VSEL_MASK	0x1F
+ #define S2MPS11_ENABLE_MASK	(0x03 << S2MPS11_ENABLE_SHIFT)
+ #define S2MPS11_ENABLE_SHIFT	0x06
+ #define S2MPS11_LDO_N_VOLTAGES	(S2MPS11_LDO_VSEL_MASK + 1)
+ #define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1)
++#define S2MPS11_BUCK9_N_VOLTAGES (S2MPS11_BUCK9_VSEL_MASK + 1)
+ #define S2MPS11_RAMP_DELAY	25000		/* uV/us */
+ 
+ #define S2MPS11_CTRL1_PWRHOLD_MASK	BIT(4)
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index 1e3006dcf35d..46dd88e7071b 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -813,9 +813,9 @@ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+ 				 enum mlx5_port_status *status);
+ 
+-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
+-void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
+-void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
++int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
++void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);
++void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
+ 			      u8 port);
+ 
+ int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
+diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
+index 123771003e68..a3f3c71124d3 100644
+--- a/include/linux/mlx5/vport.h
++++ b/include/linux/mlx5/vport.h
+@@ -45,6 +45,8 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
+ 				     u16 vport, u8 *addr);
+ int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
+ 				      u16 vport, u8 *addr);
++int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
++int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
+ int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
+ 					   u64 *system_image_guid);
+ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index a6c240e885c0..e4e59f9d82f1 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -456,11 +456,20 @@ static inline int page_mapcount(struct page *page)
+ 
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ int total_mapcount(struct page *page);
++int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
+ #else
+ static inline int total_mapcount(struct page *page)
+ {
+ 	return page_mapcount(page);
+ }
++static inline int page_trans_huge_mapcount(struct page *page,
++					   int *total_mapcount)
++{
++	int mapcount = page_mapcount(page);
++	if (total_mapcount)
++		*total_mapcount = mapcount;
++	return mapcount;
++}
+ #endif
+ 
+ static inline int page_count(struct page *page)
+diff --git a/include/linux/net.h b/include/linux/net.h
+index 0b4ac7da583a..25ef630f1bd6 100644
+--- a/include/linux/net.h
++++ b/include/linux/net.h
+@@ -245,7 +245,15 @@ do {								\
+ 	net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
+ #define net_info_ratelimited(fmt, ...)				\
+ 	net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
+-#if defined(DEBUG)
++#if defined(CONFIG_DYNAMIC_DEBUG)
++#define net_dbg_ratelimited(fmt, ...)					\
++do {									\
++	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);			\
++	if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) &&	\
++	    net_ratelimit())						\
++		__dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__);	\
++} while (0)
++#elif defined(DEBUG)
+ #define net_dbg_ratelimited(fmt, ...)				\
+ 	net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
+ #else
+diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
+index 1c33dd7da4a7..4ae95f7e8597 100644
+--- a/include/linux/rculist_nulls.h
++++ b/include/linux/rculist_nulls.h
+@@ -98,6 +98,45 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
+ 	if (!is_a_nulls(first))
+ 		first->pprev = &n->next;
+ }
++
++/**
++ * hlist_nulls_add_tail_rcu
++ * @n: the element to add to the hash list.
++ * @h: the list to add to.
++ *
++ * Description:
++ * Adds the specified element to the end of the specified hlist_nulls,
++ * while permitting racing traversals.  NOTE: tail insertion requires
++ * list traversal.
++ *
++ * The caller must take whatever precautions are necessary
++ * (such as holding appropriate locks) to avoid racing
++ * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
++ * or hlist_nulls_del_rcu(), running on this same list.
++ * However, it is perfectly legal to run concurrently with
++ * the _rcu list-traversal primitives, such as
++ * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
++ * problems on Alpha CPUs.  Regardless of the type of CPU, the
++ * list-traversal primitive must be guarded by rcu_read_lock().
++ */
++static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
++					struct hlist_nulls_head *h)
++{
++	struct hlist_nulls_node *i, *last = NULL;
++
++	for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i);
++	     i = hlist_nulls_next_rcu(i))
++		last = i;
++
++	if (last) {
++		n->next = last->next;
++		n->pprev = &last->next;
++		rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
++	} else {
++		hlist_nulls_add_head_rcu(n, h);
++	}
++}
++
+ /**
+  * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
+  * @tpos:	the type * to use as a loop cursor.
+diff --git a/include/linux/swap.h b/include/linux/swap.h
+index 5fa4aa4ddd05..b974a2106dd7 100644
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -418,7 +418,7 @@ extern sector_t swapdev_block(int, pgoff_t);
+ extern int page_swapcount(struct page *);
+ extern int swp_swapcount(swp_entry_t entry);
+ extern struct swap_info_struct *page_swap_info(struct page *);
+-extern int reuse_swap_page(struct page *);
++extern bool reuse_swap_page(struct page *, int *);
+ extern int try_to_free_swap(struct page *);
+ struct backing_dev_info;
+ 
+@@ -513,8 +513,8 @@ static inline int swp_swapcount(swp_entry_t entry)
+ 	return 0;
+ }
+ 
+-#define reuse_swap_page(page) \
+-	(!PageTransCompound(page) && page_mapcount(page) == 1)
++#define reuse_swap_page(page, total_mapcount) \
++	(page_trans_huge_mapcount(page, total_mapcount) == 1)
+ 
+ static inline int try_to_free_swap(struct page *page)
+ {
+diff --git a/include/net/codel.h b/include/net/codel.h
+index 267e70210061..d168aca115cc 100644
+--- a/include/net/codel.h
++++ b/include/net/codel.h
+@@ -162,12 +162,14 @@ struct codel_vars {
+  * struct codel_stats - contains codel shared variables and stats
+  * @maxpacket:	largest packet we've seen so far
+  * @drop_count:	temp count of dropped packets in dequeue()
++ * @drop_len:	bytes of dropped packets in dequeue()
+  * ecn_mark:	number of packets we ECN marked instead of dropping
+  * ce_mark:	number of packets CE marked because sojourn time was above ce_threshold
+  */
+ struct codel_stats {
+ 	u32		maxpacket;
+ 	u32		drop_count;
++	u32		drop_len;
+ 	u32		ecn_mark;
+ 	u32		ce_mark;
+ };
+@@ -308,6 +310,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
+ 								  vars->rec_inv_sqrt);
+ 					goto end;
+ 				}
++				stats->drop_len += qdisc_pkt_len(skb);
+ 				qdisc_drop(skb, sch);
+ 				stats->drop_count++;
+ 				skb = dequeue_func(vars, sch);
+@@ -330,6 +333,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
+ 		if (params->ecn && INET_ECN_set_ce(skb)) {
+ 			stats->ecn_mark++;
+ 		} else {
++			stats->drop_len += qdisc_pkt_len(skb);
+ 			qdisc_drop(skb, sch);
+ 			stats->drop_count++;
+ 
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 636a362a0e03..e5bba897d206 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -396,7 +396,8 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
+ 			      struct Qdisc *qdisc);
+ void qdisc_reset(struct Qdisc *qdisc);
+ void qdisc_destroy(struct Qdisc *qdisc);
+-void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
++void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
++			       unsigned int len);
+ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
+ 			  const struct Qdisc_ops *ops);
+ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
+@@ -707,6 +708,23 @@ static inline void qdisc_reset_queue(struct Qdisc *sch)
+ 	sch->qstats.backlog = 0;
+ }
+ 
++static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
++					  struct Qdisc **pold)
++{
++	struct Qdisc *old;
++
++	sch_tree_lock(sch);
++	old = *pold;
++	*pold = new;
++	if (old != NULL) {
++		qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
++		qdisc_reset(old);
++	}
++	sch_tree_unlock(sch);
++
++	return old;
++}
++
+ static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
+ 					      struct sk_buff_head *list)
+ {
+diff --git a/include/net/sock.h b/include/net/sock.h
+index f5ea148853e2..3c688ca3456d 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -630,7 +630,11 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
+ 
+ static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
+ {
+-	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
++	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
++	    sk->sk_family == AF_INET6)
++		hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
++	else
++		hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
+ }
+ 
+ static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
+diff --git a/include/uapi/linux/if.h b/include/uapi/linux/if.h
+index 9cf2394f0bcf..752f5dc040a5 100644
+--- a/include/uapi/linux/if.h
++++ b/include/uapi/linux/if.h
+@@ -19,14 +19,20 @@
+ #ifndef _LINUX_IF_H
+ #define _LINUX_IF_H
+ 
++#include <linux/libc-compat.h>          /* for compatibility with glibc */
+ #include <linux/types.h>		/* for "__kernel_caddr_t" et al	*/
+ #include <linux/socket.h>		/* for "struct sockaddr" et al	*/
+ #include <linux/compiler.h>		/* for "__user" et al           */
+ 
++#if __UAPI_DEF_IF_IFNAMSIZ
+ #define	IFNAMSIZ	16
++#endif /* __UAPI_DEF_IF_IFNAMSIZ */
+ #define	IFALIASZ	256
+ #include <linux/hdlc/ioctl.h>
+ 
++/* For glibc compatibility. An empty enum does not compile. */
++#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && \
++    __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0
+ /**
+  * enum net_device_flags - &struct net_device flags
+  *
+@@ -68,6 +74,8 @@
+  * @IFF_ECHO: echo sent packets. Volatile.
+  */
+ enum net_device_flags {
++/* for compatibility with glibc net/if.h */
++#if __UAPI_DEF_IF_NET_DEVICE_FLAGS
+ 	IFF_UP				= 1<<0,  /* sysfs */
+ 	IFF_BROADCAST			= 1<<1,  /* volatile */
+ 	IFF_DEBUG			= 1<<2,  /* sysfs */
+@@ -84,11 +92,17 @@ enum net_device_flags {
+ 	IFF_PORTSEL			= 1<<13, /* sysfs */
+ 	IFF_AUTOMEDIA			= 1<<14, /* sysfs */
+ 	IFF_DYNAMIC			= 1<<15, /* sysfs */
++#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS */
++#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
+ 	IFF_LOWER_UP			= 1<<16, /* volatile */
+ 	IFF_DORMANT			= 1<<17, /* volatile */
+ 	IFF_ECHO			= 1<<18, /* volatile */
++#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
+ };
++#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0 */
+ 
++/* for compatibility with glibc net/if.h */
++#if __UAPI_DEF_IF_NET_DEVICE_FLAGS
+ #define IFF_UP				IFF_UP
+ #define IFF_BROADCAST			IFF_BROADCAST
+ #define IFF_DEBUG			IFF_DEBUG
+@@ -105,9 +119,13 @@ enum net_device_flags {
+ #define IFF_PORTSEL			IFF_PORTSEL
+ #define IFF_AUTOMEDIA			IFF_AUTOMEDIA
+ #define IFF_DYNAMIC			IFF_DYNAMIC
++#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS */
++
++#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
+ #define IFF_LOWER_UP			IFF_LOWER_UP
+ #define IFF_DORMANT			IFF_DORMANT
+ #define IFF_ECHO			IFF_ECHO
++#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
+ 
+ #define IFF_VOLATILE	(IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|IFF_ECHO|\
+ 		IFF_MASTER|IFF_SLAVE|IFF_RUNNING|IFF_LOWER_UP|IFF_DORMANT)
+@@ -166,6 +184,8 @@ enum {
+  *	being very small might be worth keeping for clean configuration.
+  */
+ 
++/* for compatibility with glibc net/if.h */
++#if __UAPI_DEF_IF_IFMAP
+ struct ifmap {
+ 	unsigned long mem_start;
+ 	unsigned long mem_end;
+@@ -175,6 +195,7 @@ struct ifmap {
+ 	unsigned char port;
+ 	/* 3 bytes spare */
+ };
++#endif /* __UAPI_DEF_IF_IFMAP */
+ 
+ struct if_settings {
+ 	unsigned int type;	/* Type of physical device or protocol */
+@@ -200,6 +221,8 @@ struct if_settings {
+  * remainder may be interface specific.
+  */
+ 
++/* for compatibility with glibc net/if.h */
++#if __UAPI_DEF_IF_IFREQ
+ struct ifreq {
+ #define IFHWADDRLEN	6
+ 	union
+@@ -223,6 +246,7 @@ struct ifreq {
+ 		struct	if_settings ifru_settings;
+ 	} ifr_ifru;
+ };
++#endif /* __UAPI_DEF_IF_IFREQ */
+ 
+ #define ifr_name	ifr_ifrn.ifrn_name	/* interface name 	*/
+ #define ifr_hwaddr	ifr_ifru.ifru_hwaddr	/* MAC address 		*/
+@@ -249,6 +273,8 @@ struct ifreq {
+  * must know all networks accessible).
+  */
+ 
++/* for compatibility with glibc net/if.h */
++#if __UAPI_DEF_IF_IFCONF
+ struct ifconf  {
+ 	int	ifc_len;			/* size of buffer	*/
+ 	union {
+@@ -256,6 +282,8 @@ struct ifconf  {
+ 		struct ifreq __user *ifcu_req;
+ 	} ifc_ifcu;
+ };
++#endif /* __UAPI_DEF_IF_IFCONF */
++
+ #define	ifc_buf	ifc_ifcu.ifcu_buf		/* buffer address	*/
+ #define	ifc_req	ifc_ifcu.ifcu_req		/* array of structures	*/
+ 
+diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
+index 7d024ceb075d..d5e38c73377c 100644
+--- a/include/uapi/linux/libc-compat.h
++++ b/include/uapi/linux/libc-compat.h
+@@ -51,6 +51,40 @@
+ /* We have included glibc headers... */
+ #if defined(__GLIBC__)
+ 
++/* Coordinate with glibc net/if.h header. */
++#if defined(_NET_IF_H)
++
++/* GLIBC headers included first so don't define anything
++ * that would already be defined. */
++
++#define __UAPI_DEF_IF_IFCONF 0
++#define __UAPI_DEF_IF_IFMAP 0
++#define __UAPI_DEF_IF_IFNAMSIZ 0
++#define __UAPI_DEF_IF_IFREQ 0
++/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
++#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 0
++/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
++#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
++#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
++#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
++
++#else /* _NET_IF_H */
++
++/* Linux headers included first, and we must define everything
++ * we need. The expectation is that glibc will check the
++ * __UAPI_DEF_* defines and adjust appropriately. */
++
++#define __UAPI_DEF_IF_IFCONF 1
++#define __UAPI_DEF_IF_IFMAP 1
++#define __UAPI_DEF_IF_IFNAMSIZ 1
++#define __UAPI_DEF_IF_IFREQ 1
++/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
++#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
++/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
++#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
++
++#endif /* _NET_IF_H */
++
+ /* Coordinate with glibc netinet/in.h header. */
+ #if defined(_NETINET_IN_H)
+ 
+@@ -117,6 +151,16 @@
+  * that we need. */
+ #else /* !defined(__GLIBC__) */
+ 
++/* Definitions for if.h */
++#define __UAPI_DEF_IF_IFCONF 1
++#define __UAPI_DEF_IF_IFMAP 1
++#define __UAPI_DEF_IF_IFNAMSIZ 1
++#define __UAPI_DEF_IF_IFREQ 1
++/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
++#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
++/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
++#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
++
+ /* Definitions for in.h */
+ #define __UAPI_DEF_IN_ADDR		1
+ #define __UAPI_DEF_IN_IPPROTO		1
+diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
+index f2ece3c174a5..8f94ca1860cf 100644
+--- a/kernel/bpf/inode.c
++++ b/kernel/bpf/inode.c
+@@ -31,10 +31,10 @@ static void *bpf_any_get(void *raw, enum bpf_type type)
+ {
+ 	switch (type) {
+ 	case BPF_TYPE_PROG:
+-		atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt);
++		raw = bpf_prog_inc(raw);
+ 		break;
+ 	case BPF_TYPE_MAP:
+-		bpf_map_inc(raw, true);
++		raw = bpf_map_inc(raw, true);
+ 		break;
+ 	default:
+ 		WARN_ON_ONCE(1);
+@@ -297,7 +297,8 @@ static void *bpf_obj_do_get(const struct filename *pathname,
+ 		goto out;
+ 
+ 	raw = bpf_any_get(inode->i_private, *type);
+-	touch_atime(&path);
++	if (!IS_ERR(raw))
++		touch_atime(&path);
+ 
+ 	path_put(&path);
+ 	return raw;
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 637397059f76..aa5f39772ac4 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -201,11 +201,18 @@ struct bpf_map *__bpf_map_get(struct fd f)
+ 	return f.file->private_data;
+ }
+ 
+-void bpf_map_inc(struct bpf_map *map, bool uref)
++/* prog's and map's refcnt limit */
++#define BPF_MAX_REFCNT 32768
++
++struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
+ {
+-	atomic_inc(&map->refcnt);
++	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
++		atomic_dec(&map->refcnt);
++		return ERR_PTR(-EBUSY);
++	}
+ 	if (uref)
+ 		atomic_inc(&map->usercnt);
++	return map;
+ }
+ 
+ struct bpf_map *bpf_map_get_with_uref(u32 ufd)
+@@ -217,7 +224,7 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd)
+ 	if (IS_ERR(map))
+ 		return map;
+ 
+-	bpf_map_inc(map, true);
++	map = bpf_map_inc(map, true);
+ 	fdput(f);
+ 
+ 	return map;
+@@ -600,6 +607,15 @@ static struct bpf_prog *__bpf_prog_get(struct fd f)
+ 	return f.file->private_data;
+ }
+ 
++struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
++{
++	if (atomic_inc_return(&prog->aux->refcnt) > BPF_MAX_REFCNT) {
++		atomic_dec(&prog->aux->refcnt);
++		return ERR_PTR(-EBUSY);
++	}
++	return prog;
++}
++
+ /* called by sockets/tracing/seccomp before attaching program to an event
+  * pairs with bpf_prog_put()
+  */
+@@ -612,7 +628,7 @@ struct bpf_prog *bpf_prog_get(u32 ufd)
+ 	if (IS_ERR(prog))
+ 		return prog;
+ 
+-	atomic_inc(&prog->aux->refcnt);
++	prog = bpf_prog_inc(prog);
+ 	fdput(f);
+ 
+ 	return prog;
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 2e7f7ab739e4..2cbfba78d3db 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -239,15 +239,6 @@ static const char * const reg_type_str[] = {
+ 	[CONST_IMM]		= "imm",
+ };
+ 
+-static const struct {
+-	int map_type;
+-	int func_id;
+-} func_limit[] = {
+-	{BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call},
+-	{BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read},
+-	{BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_output},
+-};
+-
+ static void print_verifier_state(struct verifier_env *env)
+ {
+ 	enum bpf_reg_type t;
+@@ -898,24 +889,44 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
+ 
+ static int check_map_func_compatibility(struct bpf_map *map, int func_id)
+ {
+-	bool bool_map, bool_func;
+-	int i;
+-
+ 	if (!map)
+ 		return 0;
+ 
+-	for (i = 0; i < ARRAY_SIZE(func_limit); i++) {
+-		bool_map = (map->map_type == func_limit[i].map_type);
+-		bool_func = (func_id == func_limit[i].func_id);
+-		/* only when map & func pair match it can continue.
+-		 * don't allow any other map type to be passed into
+-		 * the special func;
+-		 */
+-		if (bool_func && bool_map != bool_func)
+-			return -EINVAL;
++	/* We need a two way check, first is from map perspective ... */
++	switch (map->map_type) {
++	case BPF_MAP_TYPE_PROG_ARRAY:
++		if (func_id != BPF_FUNC_tail_call)
++			goto error;
++		break;
++	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
++		if (func_id != BPF_FUNC_perf_event_read &&
++		    func_id != BPF_FUNC_perf_event_output)
++			goto error;
++		break;
++	default:
++		break;
++	}
++
++	/* ... and second from the function itself. */
++	switch (func_id) {
++	case BPF_FUNC_tail_call:
++		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
++			goto error;
++		break;
++	case BPF_FUNC_perf_event_read:
++	case BPF_FUNC_perf_event_output:
++		if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
++			goto error;
++		break;
++	default:
++		break;
+ 	}
+ 
+ 	return 0;
++error:
++	verbose("cannot pass map_type %d into func %d\n",
++		map->map_type, func_id);
++	return -EINVAL;
+ }
+ 
+ static int check_call(struct verifier_env *env, int func_id)
+@@ -1348,6 +1359,7 @@ static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
+ 	}
+ 
+ 	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
++	    BPF_SIZE(insn->code) == BPF_DW ||
+ 	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
+ 		verbose("BPF_LD_ABS uses reserved fields\n");
+ 		return -EINVAL;
+@@ -2003,7 +2015,6 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
+ 			if (IS_ERR(map)) {
+ 				verbose("fd %d is not pointing to valid bpf_map\n",
+ 					insn->imm);
+-				fdput(f);
+ 				return PTR_ERR(map);
+ 			}
+ 
+@@ -2023,15 +2034,18 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
+ 				return -E2BIG;
+ 			}
+ 
+-			/* remember this map */
+-			env->used_maps[env->used_map_cnt++] = map;
+-
+ 			/* hold the map. If the program is rejected by verifier,
+ 			 * the map will be released by release_maps() or it
+ 			 * will be used by the valid program until it's unloaded
+ 			 * and all maps are released in free_bpf_prog_info()
+ 			 */
+-			bpf_map_inc(map, false);
++			map = bpf_map_inc(map, false);
++			if (IS_ERR(map)) {
++				fdput(f);
++				return PTR_ERR(map);
++			}
++			env->used_maps[env->used_map_cnt++] = map;
++
+ 			fdput(f);
+ next_insn:
+ 			insn++;
+diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
+index 1faad2cfdb9e..287201a5d12f 100644
+--- a/kernel/events/ring_buffer.c
++++ b/kernel/events/ring_buffer.c
+@@ -347,6 +347,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
+ 			 bool truncated)
+ {
+ 	struct ring_buffer *rb = handle->rb;
++	bool wakeup = truncated;
+ 	unsigned long aux_head;
+ 	u64 flags = 0;
+ 
+@@ -375,9 +376,16 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
+ 	aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);
+ 
+ 	if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
+-		perf_output_wakeup(handle);
++		wakeup = true;
+ 		local_add(rb->aux_watermark, &rb->aux_wakeup);
+ 	}
++
++	if (wakeup) {
++		if (truncated)
++			handle->event->pending_disable = 1;
++		perf_output_wakeup(handle);
++	}
++
+ 	handle->event = NULL;
+ 
+ 	local_set(&rb->aux_nest, 0);
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 9e82d0450fad..f48c80e4ba75 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -4556,6 +4556,17 @@ static void rebind_workers(struct worker_pool *pool)
+ 						  pool->attrs->cpumask) < 0);
+ 
+ 	spin_lock_irq(&pool->lock);
++
++	/*
++	 * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
++	 * w/o preceding DOWN_PREPARE.  Work around it.  CPU hotplug is
++	 * being reworked and this can go away in time.
++	 */
++	if (!(pool->flags & POOL_DISASSOCIATED)) {
++		spin_unlock_irq(&pool->lock);
++		return;
++	}
++
+ 	pool->flags &= ~POOL_DISASSOCIATED;
+ 
+ 	for_each_pool_worker(worker, pool) {
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index a7db0a2db1ab..a82a87b3f9c6 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1257,15 +1257,9 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ 	VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
+ 	/*
+ 	 * We can only reuse the page if nobody else maps the huge page or it's
+-	 * part. We can do it by checking page_mapcount() on each sub-page, but
+-	 * it's expensive.
+-	 * The cheaper way is to check page_count() to be equal 1: every
+-	 * mapcount takes page reference reference, so this way we can
+-	 * guarantee, that the PMD is the only mapping.
+-	 * This can give false negative if somebody pinned the page, but that's
+-	 * fine.
++	 * part.
+ 	 */
+-	if (page_mapcount(page) == 1 && page_count(page) == 1) {
++	if (page_trans_huge_mapcount(page, NULL) == 1) {
+ 		pmd_t entry;
+ 		entry = pmd_mkyoung(orig_pmd);
+ 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+@@ -2038,7 +2032,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
+ 		if (pte_write(pteval)) {
+ 			writable = true;
+ 		} else {
+-			if (PageSwapCache(page) && !reuse_swap_page(page)) {
++			if (PageSwapCache(page) &&
++			    !reuse_swap_page(page, NULL)) {
+ 				unlock_page(page);
+ 				result = SCAN_SWAP_CACHE_PAGE;
+ 				goto out;
+@@ -3338,6 +3333,64 @@ int total_mapcount(struct page *page)
+ }
+ 
+ /*
++ * This calculates accurately how many mappings a transparent hugepage
++ * has (unlike page_mapcount() which isn't fully accurate). This full
++ * accuracy is primarily needed to know if copy-on-write faults can
++ * reuse the page and change the mapping to read-write instead of
++ * copying them. At the same time this returns the total_mapcount too.
++ *
++ * The function returns the highest mapcount any one of the subpages
++ * has. If the return value is one, even if different processes are
++ * mapping different subpages of the transparent hugepage, they can
++ * all reuse it, because each process is reusing a different subpage.
++ *
++ * The total_mapcount is instead counting all virtual mappings of the
++ * subpages. If the total_mapcount is equal to "one", it tells the
++ * caller all mappings belong to the same "mm" and in turn the
++ * anon_vma of the transparent hugepage can become the vma->anon_vma
++ * local one as no other process may be mapping any of the subpages.
++ *
++ * It would be more accurate to replace page_mapcount() with
++ * page_trans_huge_mapcount(), however we only use
++ * page_trans_huge_mapcount() in the copy-on-write faults where we
++ * need full accuracy to avoid breaking page pinning, because
++ * page_trans_huge_mapcount() is slower than page_mapcount().
++ */
++int page_trans_huge_mapcount(struct page *page, int *total_mapcount)
++{
++	int i, ret, _total_mapcount, mapcount;
++
++	/* hugetlbfs shouldn't call it */
++	VM_BUG_ON_PAGE(PageHuge(page), page);
++
++	if (likely(!PageTransCompound(page))) {
++		mapcount = atomic_read(&page->_mapcount) + 1;
++		if (total_mapcount)
++			*total_mapcount = mapcount;
++		return mapcount;
++	}
++
++	page = compound_head(page);
++
++	_total_mapcount = ret = 0;
++	for (i = 0; i < HPAGE_PMD_NR; i++) {
++		mapcount = atomic_read(&page[i]._mapcount) + 1;
++		ret = max(ret, mapcount);
++		_total_mapcount += mapcount;
++	}
++	if (PageDoubleMap(page)) {
++		ret -= 1;
++		_total_mapcount -= HPAGE_PMD_NR;
++	}
++	mapcount = compound_mapcount(page);
++	ret += mapcount;
++	_total_mapcount += mapcount;
++	if (total_mapcount)
++		*total_mapcount = _total_mapcount;
++	return ret;
++}
++
++/*
+  * This function splits huge page into normal pages. @page can point to any
+  * subpage of huge page to split. Split doesn't change the position of @page.
+  *
+diff --git a/mm/memory.c b/mm/memory.c
+index 3345dcf862cf..c1aa0e4b4096 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2357,6 +2357,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ 	 * not dirty accountable.
+ 	 */
+ 	if (PageAnon(old_page) && !PageKsm(old_page)) {
++		int total_mapcount;
+ 		if (!trylock_page(old_page)) {
+ 			page_cache_get(old_page);
+ 			pte_unmap_unlock(page_table, ptl);
+@@ -2371,13 +2372,18 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ 			}
+ 			page_cache_release(old_page);
+ 		}
+-		if (reuse_swap_page(old_page)) {
+-			/*
+-			 * The page is all ours.  Move it to our anon_vma so
+-			 * the rmap code will not search our parent or siblings.
+-			 * Protected against the rmap code by the page lock.
+-			 */
+-			page_move_anon_rmap(old_page, vma, address);
++		if (reuse_swap_page(old_page, &total_mapcount)) {
++			if (total_mapcount == 1) {
++				/*
++				 * The page is all ours. Move it to
++				 * our anon_vma so the rmap code will
++				 * not search our parent or siblings.
++				 * Protected against the rmap code by
++				 * the page lock.
++				 */
++				page_move_anon_rmap(compound_head(old_page),
++						    vma, address);
++			}
+ 			unlock_page(old_page);
+ 			return wp_page_reuse(mm, vma, address, page_table, ptl,
+ 					     orig_pte, old_page, 0, 0);
+@@ -2602,7 +2608,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ 	inc_mm_counter_fast(mm, MM_ANONPAGES);
+ 	dec_mm_counter_fast(mm, MM_SWAPENTS);
+ 	pte = mk_pte(page, vma->vm_page_prot);
+-	if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
++	if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
+ 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
+ 		flags &= ~FAULT_FLAG_WRITE;
+ 		ret |= VM_FAULT_WRITE;
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index d2c37365e2d6..954fd8f72b79 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -916,18 +916,19 @@ out:
+  * to it.  And as a side-effect, free up its swap: because the old content
+  * on disk will never be read, and seeking back there to write new content
+  * later would only waste time away from clustering.
++ *
++ * NOTE: total_mapcount should not be relied upon by the caller if
++ * reuse_swap_page() returns false, but it may be always overwritten
++ * (see the other implementation for CONFIG_SWAP=n).
+  */
+-int reuse_swap_page(struct page *page)
++bool reuse_swap_page(struct page *page, int *total_mapcount)
+ {
+ 	int count;
+ 
+ 	VM_BUG_ON_PAGE(!PageLocked(page), page);
+ 	if (unlikely(PageKsm(page)))
+-		return 0;
+-	/* The page is part of THP and cannot be reused */
+-	if (PageTransCompound(page))
+-		return 0;
+-	count = page_mapcount(page);
++		return false;
++	count = page_trans_huge_mapcount(page, total_mapcount);
+ 	if (count <= 1 && PageSwapCache(page)) {
+ 		count += page_swapcount(page);
+ 		if (count == 1 && !PageWriteback(page)) {
+diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
+index 2d7c4c11fc63..336149ffd07d 100644
+--- a/mm/zsmalloc.c
++++ b/mm/zsmalloc.c
+@@ -1732,10 +1732,13 @@ static struct page *isolate_source_page(struct size_class *class)
+ static unsigned long zs_can_compact(struct size_class *class)
+ {
+ 	unsigned long obj_wasted;
++	unsigned long obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
++	unsigned long obj_used = zs_stat_get(class, OBJ_USED);
+ 
+-	obj_wasted = zs_stat_get(class, OBJ_ALLOCATED) -
+-		zs_stat_get(class, OBJ_USED);
++	if (obj_allocated <= obj_used)
++		return 0;
+ 
++	obj_wasted = obj_allocated - obj_used;
+ 	obj_wasted /= get_maxobj_per_zspage(class->size,
+ 			class->pages_per_zspage);
+ 
+diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
+index 263b4de4de57..60a3dbfca8a1 100644
+--- a/net/bridge/br_ioctl.c
++++ b/net/bridge/br_ioctl.c
+@@ -21,18 +21,19 @@
+ #include <asm/uaccess.h>
+ #include "br_private.h"
+ 
+-/* called with RTNL */
+ static int get_bridge_ifindices(struct net *net, int *indices, int num)
+ {
+ 	struct net_device *dev;
+ 	int i = 0;
+ 
+-	for_each_netdev(net, dev) {
++	rcu_read_lock();
++	for_each_netdev_rcu(net, dev) {
+ 		if (i >= num)
+ 			break;
+ 		if (dev->priv_flags & IFF_EBRIDGE)
+ 			indices[i++] = dev->ifindex;
+ 	}
++	rcu_read_unlock();
+ 
+ 	return i;
+ }
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index 03661d97463c..ea9893743a0f 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -1270,6 +1270,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
+ 	struct br_ip saddr;
+ 	unsigned long max_delay;
+ 	unsigned long now = jiffies;
++	unsigned int offset = skb_transport_offset(skb);
+ 	__be32 group;
+ 	int err = 0;
+ 
+@@ -1280,14 +1281,14 @@ static int br_ip4_multicast_query(struct net_bridge *br,
+ 
+ 	group = ih->group;
+ 
+-	if (skb->len == sizeof(*ih)) {
++	if (skb->len == offset + sizeof(*ih)) {
+ 		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
+ 
+ 		if (!max_delay) {
+ 			max_delay = 10 * HZ;
+ 			group = 0;
+ 		}
+-	} else if (skb->len >= sizeof(*ih3)) {
++	} else if (skb->len >= offset + sizeof(*ih3)) {
+ 		ih3 = igmpv3_query_hdr(skb);
+ 		if (ih3->nsrcs)
+ 			goto out;
+@@ -1348,6 +1349,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
+ 	struct br_ip saddr;
+ 	unsigned long max_delay;
+ 	unsigned long now = jiffies;
++	unsigned int offset = skb_transport_offset(skb);
+ 	const struct in6_addr *group = NULL;
+ 	bool is_general_query;
+ 	int err = 0;
+@@ -1357,8 +1359,8 @@ static int br_ip6_multicast_query(struct net_bridge *br,
+ 	    (port && port->state == BR_STATE_DISABLED))
+ 		goto out;
+ 
+-	if (skb->len == sizeof(*mld)) {
+-		if (!pskb_may_pull(skb, sizeof(*mld))) {
++	if (skb->len == offset + sizeof(*mld)) {
++		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
+ 			err = -EINVAL;
+ 			goto out;
+ 		}
+@@ -1367,7 +1369,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
+ 		if (max_delay)
+ 			group = &mld->mld_mca;
+ 	} else {
+-		if (!pskb_may_pull(skb, sizeof(*mld2q))) {
++		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
+ 			err = -EINVAL;
+ 			goto out;
+ 		}
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 215e6137f6ff..482c3717a45e 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -1176,14 +1176,16 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
+ 
+ static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
+ {
+-	struct rtnl_link_ifmap map = {
+-		.mem_start   = dev->mem_start,
+-		.mem_end     = dev->mem_end,
+-		.base_addr   = dev->base_addr,
+-		.irq         = dev->irq,
+-		.dma         = dev->dma,
+-		.port        = dev->if_port,
+-	};
++	struct rtnl_link_ifmap map;
++
++	memset(&map, 0, sizeof(map));
++	map.mem_start   = dev->mem_start;
++	map.mem_end     = dev->mem_end;
++	map.base_addr   = dev->base_addr;
++	map.irq         = dev->irq;
++	map.dma         = dev->dma;
++	map.port        = dev->if_port;
++
+ 	if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
+ 		return -EMSGSIZE;
+ 
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 8616d1147c93..9835d9a8a7a4 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -4427,15 +4427,16 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
+ 		__skb_push(skb, offset);
+ 		err = __vlan_insert_tag(skb, skb->vlan_proto,
+ 					skb_vlan_tag_get(skb));
+-		if (err)
++		if (err) {
++			__skb_pull(skb, offset);
+ 			return err;
++		}
++
+ 		skb->protocol = skb->vlan_proto;
+ 		skb->mac_len += VLAN_HLEN;
+-		__skb_pull(skb, offset);
+ 
+-		if (skb->ip_summed == CHECKSUM_COMPLETE)
+-			skb->csum = csum_add(skb->csum, csum_partial(skb->data
+-					+ (2 * ETH_ALEN), VLAN_HLEN, 0));
++		skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
++		__skb_pull(skb, offset);
+ 	}
+ 	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
+ 	return 0;
+diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
+index 607a14f20d88..b1dc096d22f8 100644
+--- a/net/decnet/dn_route.c
++++ b/net/decnet/dn_route.c
+@@ -1034,10 +1034,13 @@ source_ok:
+ 	if (!fld.daddr) {
+ 		fld.daddr = fld.saddr;
+ 
+-		err = -EADDRNOTAVAIL;
+ 		if (dev_out)
+ 			dev_put(dev_out);
++		err = -EINVAL;
+ 		dev_out = init_net.loopback_dev;
++		if (!dev_out->dn_ptr)
++			goto out;
++		err = -EADDRNOTAVAIL;
+ 		dev_hold(dev_out);
+ 		if (!fld.daddr) {
+ 			fld.daddr =
+@@ -1110,6 +1113,8 @@ source_ok:
+ 		if (dev_out == NULL)
+ 			goto out;
+ 		dn_db = rcu_dereference_raw(dev_out->dn_ptr);
++		if (!dn_db)
++			goto e_inval;
+ 		/* Possible improvement - check all devices for local addr */
+ 		if (dn_dev_islocal(dev_out, fld.daddr)) {
+ 			dev_put(dev_out);
+@@ -1151,6 +1156,8 @@ select_source:
+ 			dev_put(dev_out);
+ 		dev_out = init_net.loopback_dev;
+ 		dev_hold(dev_out);
++		if (!dev_out->dn_ptr)
++			goto e_inval;
+ 		fld.flowidn_oif = dev_out->ifindex;
+ 		if (res.fi)
+ 			dn_fib_info_put(res.fi);
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index 8a9246deccfe..63566ec54794 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -904,7 +904,11 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
+ 	if (ifa->ifa_flags & IFA_F_SECONDARY) {
+ 		prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
+ 		if (!prim) {
+-			pr_warn("%s: bug: prim == NULL\n", __func__);
++			/* if the device has been deleted, we don't perform
++			 * address promotion
++			 */
++			if (!in_dev->dead)
++				pr_warn("%s: bug: prim == NULL\n", __func__);
+ 			return;
+ 		}
+ 		if (iprim && iprim != prim) {
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index d97268e8ff10..2b68418c7198 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -975,6 +975,8 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
+ 			val = 65535 - 40;
+ 		if (type == RTAX_MTU && val > 65535 - 15)
+ 			val = 65535 - 15;
++		if (type == RTAX_HOPLIMIT && val > 255)
++			val = 255;
+ 		if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
+ 			return -EINVAL;
+ 		fi->fib_metrics[type - 1] = val;
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index 41ba68de46d8..d0c1e7766214 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -179,6 +179,7 @@ static __be16 tnl_flags_to_gre_flags(__be16 tflags)
+ 	return flags;
+ }
+ 
++/* Fills in tpi and returns header length to be pulled. */
+ static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
+ 			    bool *csum_err)
+ {
+@@ -238,7 +239,7 @@ static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
+ 				return -EINVAL;
+ 		}
+ 	}
+-	return iptunnel_pull_header(skb, hdr_len, tpi->proto);
++	return hdr_len;
+ }
+ 
+ static void ipgre_err(struct sk_buff *skb, u32 info,
+@@ -341,7 +342,7 @@ static void gre_err(struct sk_buff *skb, u32 info)
+ 	struct tnl_ptk_info tpi;
+ 	bool csum_err = false;
+ 
+-	if (parse_gre_header(skb, &tpi, &csum_err)) {
++	if (parse_gre_header(skb, &tpi, &csum_err) < 0) {
+ 		if (!csum_err)		/* ignore csum errors. */
+ 			return;
+ 	}
+@@ -419,6 +420,7 @@ static int gre_rcv(struct sk_buff *skb)
+ {
+ 	struct tnl_ptk_info tpi;
+ 	bool csum_err = false;
++	int hdr_len;
+ 
+ #ifdef CONFIG_NET_IPGRE_BROADCAST
+ 	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
+@@ -428,7 +430,10 @@ static int gre_rcv(struct sk_buff *skb)
+ 	}
+ #endif
+ 
+-	if (parse_gre_header(skb, &tpi, &csum_err) < 0)
++	hdr_len = parse_gre_header(skb, &tpi, &csum_err);
++	if (hdr_len < 0)
++		goto drop;
++	if (iptunnel_pull_header(skb, hdr_len, tpi.proto) < 0)
+ 		goto drop;
+ 
+ 	if (ipgre_rcv(skb, &tpi) == PACKET_RCVD)
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 02c62299d717..b050cf980a57 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2045,6 +2045,18 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
+ 		 */
+ 		if (fi && res->prefixlen < 4)
+ 			fi = NULL;
++	} else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
++		   (orig_oif != dev_out->ifindex)) {
++		/* For local routes that require a particular output interface
++		 * we do not want to cache the result.  Caching the result
++		 * causes incorrect behaviour when there are multiple source
++		 * addresses on the interface, the end result being that if the
++		 * intended recipient is waiting on that interface for the
++		 * packet he won't receive it because it will be delivered on
++		 * the loopback interface and the IP_PKTINFO ipi_ifindex will
++		 * be set to the loopback interface as well.
++		 */
++		fi = NULL;
+ 	}
+ 
+ 	fnhe = NULL;
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index fda379cd600d..b6876f2f4cf2 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2625,8 +2625,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
+ 	 */
+ 	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
+ 		     skb_headroom(skb) >= 0xFFFF)) {
+-		struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
+-						   GFP_ATOMIC);
++		struct sk_buff *nskb;
++
++		skb_mstamp_get(&skb->skb_mstamp);
++		nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
+ 		err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
+ 			     -ENOBUFS;
+ 	} else {
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index eb8933bc0e6e..56218ff388c7 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -339,8 +339,13 @@ found:
+ 
+ 		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
+ 		spin_lock(&hslot2->lock);
+-		hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
+-					 &hslot2->head);
++		if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
++			sk->sk_family == AF_INET6)
++			hlist_nulls_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
++						 &hslot2->head);
++		else
++			hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
++						 &hslot2->head);
+ 		hslot2->count++;
+ 		spin_unlock(&hslot2->lock);
+ 	}
+diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
+index 2ae3c4fd8aab..41f18de5dcc2 100644
+--- a/net/ipv6/ila/ila_lwt.c
++++ b/net/ipv6/ila/ila_lwt.c
+@@ -120,8 +120,7 @@ nla_put_failure:
+ 
+ static int ila_encap_nlsize(struct lwtunnel_state *lwtstate)
+ {
+-	/* No encapsulation overhead */
+-	return 0;
++	return nla_total_size(sizeof(u64)); /* ILA_ATTR_LOCATOR */
+ }
+ 
+ static int ila_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
+diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
+index 18f3498a6c80..e2ea31175ef9 100644
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -496,10 +496,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
+ 	IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
+ 
+ 	/* Yes, and fold redundant checksum back. 8) */
+-	if (head->ip_summed == CHECKSUM_COMPLETE)
+-		head->csum = csum_partial(skb_network_header(head),
+-					  skb_network_header_len(head),
+-					  head->csum);
++	skb_postpush_rcsum(head, skb_network_header(head),
++			   skb_network_header_len(head));
+ 
+ 	rcu_read_lock();
+ 	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index ed446639219c..18e29e2f8877 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1737,6 +1737,8 @@ static int ip6_convert_metrics(struct mx6_config *mxc,
+ 		} else {
+ 			val = nla_get_u32(nla);
+ 		}
++		if (type == RTAX_HOPLIMIT && val > 255)
++			val = 255;
+ 		if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
+ 			goto err;
+ 
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index 8dab4e569571..bb8edb9ef506 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -626,6 +626,7 @@ static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb)
+ 	if (llc->cmsg_flags & LLC_CMSG_PKTINFO) {
+ 		struct llc_pktinfo info;
+ 
++		memset(&info, 0, sizeof(info));
+ 		info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex;
+ 		llc_pdu_decode_dsap(skb, &info.lpi_sap);
+ 		llc_pdu_decode_da(skb, info.lpi_mac);
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index f60b4fdeeb8c..6b94f0bc11b8 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -1780,6 +1780,7 @@ void nf_conntrack_init_end(void)
+ 
+ int nf_conntrack_init_net(struct net *net)
+ {
++	static atomic64_t unique_id;
+ 	int ret = -ENOMEM;
+ 	int cpu;
+ 
+@@ -1802,7 +1803,8 @@ int nf_conntrack_init_net(struct net *net)
+ 	if (!net->ct.stat)
+ 		goto err_pcpu_lists;
+ 
+-	net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
++	net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%llu",
++				(u64)atomic64_inc_return(&unique_id));
+ 	if (!net->ct.slabname)
+ 		goto err_slabname;
+ 
+diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
+index 2d59df521915..879185fe183f 100644
+--- a/net/openvswitch/actions.c
++++ b/net/openvswitch/actions.c
+@@ -158,9 +158,7 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
+ 	new_mpls_lse = (__be32 *)skb_mpls_header(skb);
+ 	*new_mpls_lse = mpls->mpls_lse;
+ 
+-	if (skb->ip_summed == CHECKSUM_COMPLETE)
+-		skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse,
+-							     MPLS_HLEN, 0));
++	skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);
+ 
+ 	hdr = eth_hdr(skb);
+ 	hdr->h_proto = mpls->mpls_ethertype;
+@@ -280,7 +278,7 @@ static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
+ 	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
+ 			       mask->eth_dst);
+ 
+-	ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
++	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
+ 
+ 	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
+ 	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
+@@ -463,7 +461,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
+ 		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);
+ 
+ 		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
+-			set_ipv6_addr(skb, key->ipv6_proto, saddr, masked,
++			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
+ 				      true);
+ 			memcpy(&flow_key->ipv6.addr.src, masked,
+ 			       sizeof(flow_key->ipv6.addr.src));
+@@ -485,7 +483,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
+ 							     NULL, &flags)
+ 					       != NEXTHDR_ROUTING);
+ 
+-			set_ipv6_addr(skb, key->ipv6_proto, daddr, masked,
++			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
+ 				      recalc_csum);
+ 			memcpy(&flow_key->ipv6.addr.dst, masked,
+ 			       sizeof(flow_key->ipv6.addr.dst));
+@@ -639,7 +637,7 @@ static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *sk
+ 	/* Reconstruct the MAC header.  */
+ 	skb_push(skb, data->l2_len);
+ 	memcpy(skb->data, &data->l2_data, data->l2_len);
+-	ovs_skb_postpush_rcsum(skb, skb->data, data->l2_len);
++	skb_postpush_rcsum(skb, skb->data, data->l2_len);
+ 	skb_reset_mac_header(skb);
+ 
+ 	ovs_vport_send(vport, skb);
+diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
+index ee6ff8ffc12d..9e5b9fc805fb 100644
+--- a/net/openvswitch/conntrack.c
++++ b/net/openvswitch/conntrack.c
+@@ -320,6 +320,7 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key,
+ 	} else if (key->eth.type == htons(ETH_P_IPV6)) {
+ 		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
+ 
++		skb_orphan(skb);
+ 		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
+ 		err = nf_ct_frag6_gather(net, skb, user);
+ 		if (err)
+diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
+index 6a6adf314363..4e3972344aa6 100644
+--- a/net/openvswitch/vport-netdev.c
++++ b/net/openvswitch/vport-netdev.c
+@@ -58,7 +58,7 @@ static void netdev_port_receive(struct sk_buff *skb)
+ 		return;
+ 
+ 	skb_push(skb, ETH_HLEN);
+-	ovs_skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
++	skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
+ 	ovs_vport_receive(vport, skb, skb_tunnel_info(skb));
+ 	return;
+ error:
+diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
+index c10899cb9040..f01f28a567ad 100644
+--- a/net/openvswitch/vport.h
++++ b/net/openvswitch/vport.h
+@@ -185,13 +185,6 @@ static inline struct vport *vport_from_priv(void *priv)
+ int ovs_vport_receive(struct vport *, struct sk_buff *,
+ 		      const struct ip_tunnel_info *);
+ 
+-static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
+-				      const void *start, unsigned int len)
+-{
+-	if (skb->ip_summed == CHECKSUM_COMPLETE)
+-		skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
+-}
+-
+ static inline const char *ovs_vport_name(struct vport *vport)
+ {
+ 	return vport->dev->name;
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index da1ae0e13cb5..9cc7b512b472 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -3436,6 +3436,7 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
+ 	i->ifindex = mreq->mr_ifindex;
+ 	i->alen = mreq->mr_alen;
+ 	memcpy(i->addr, mreq->mr_address, i->alen);
++	memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
+ 	i->count = 1;
+ 	i->next = po->mclist;
+ 	po->mclist = i;
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index af1acf009866..95b560f0b253 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -744,14 +744,15 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
+ 	return 0;
+ }
+ 
+-void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
++void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
++			       unsigned int len)
+ {
+ 	const struct Qdisc_class_ops *cops;
+ 	unsigned long cl;
+ 	u32 parentid;
+ 	int drops;
+ 
+-	if (n == 0)
++	if (n == 0 && len == 0)
+ 		return;
+ 	drops = max_t(int, n, 0);
+ 	rcu_read_lock();
+@@ -774,11 +775,12 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
+ 			cops->put(sch, cl);
+ 		}
+ 		sch->q.qlen -= n;
++		sch->qstats.backlog -= len;
+ 		__qdisc_qstats_drop(sch, drops);
+ 	}
+ 	rcu_read_unlock();
+ }
+-EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
++EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
+ 
+ static void notify_and_destroy(struct net *net, struct sk_buff *skb,
+ 			       struct nlmsghdr *n, u32 clid,
+diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
+index c538d9e4a8f6..baafddf229ce 100644
+--- a/net/sched/sch_cbq.c
++++ b/net/sched/sch_cbq.c
+@@ -1624,13 +1624,8 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ 			new->reshape_fail = cbq_reshape_fail;
+ #endif
+ 	}
+-	sch_tree_lock(sch);
+-	*old = cl->q;
+-	cl->q = new;
+-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+-	qdisc_reset(*old);
+-	sch_tree_unlock(sch);
+ 
++	*old = qdisc_replace(sch, new, &cl->q);
+ 	return 0;
+ }
+ 
+@@ -1914,7 +1909,7 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
+ {
+ 	struct cbq_sched_data *q = qdisc_priv(sch);
+ 	struct cbq_class *cl = (struct cbq_class *)arg;
+-	unsigned int qlen;
++	unsigned int qlen, backlog;
+ 
+ 	if (cl->filters || cl->children || cl == &q->link)
+ 		return -EBUSY;
+@@ -1922,8 +1917,9 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
+ 	sch_tree_lock(sch);
+ 
+ 	qlen = cl->q->q.qlen;
++	backlog = cl->q->qstats.backlog;
+ 	qdisc_reset(cl->q);
+-	qdisc_tree_decrease_qlen(cl->q, qlen);
++	qdisc_tree_reduce_backlog(cl->q, qlen, backlog);
+ 
+ 	if (cl->next_alive)
+ 		cbq_deactivate_class(cl);
+diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
+index 5ffb8b8337c7..0a08c860eee4 100644
+--- a/net/sched/sch_choke.c
++++ b/net/sched/sch_choke.c
+@@ -128,8 +128,8 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
+ 		choke_zap_tail_holes(q);
+ 
+ 	qdisc_qstats_backlog_dec(sch, skb);
++	qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
+ 	qdisc_drop(skb, sch);
+-	qdisc_tree_decrease_qlen(sch, 1);
+ 	--sch->q.qlen;
+ }
+ 
+@@ -456,6 +456,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
+ 		old = q->tab;
+ 		if (old) {
+ 			unsigned int oqlen = sch->q.qlen, tail = 0;
++			unsigned dropped = 0;
+ 
+ 			while (q->head != q->tail) {
+ 				struct sk_buff *skb = q->tab[q->head];
+@@ -467,11 +468,12 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
+ 					ntab[tail++] = skb;
+ 					continue;
+ 				}
++				dropped += qdisc_pkt_len(skb);
+ 				qdisc_qstats_backlog_dec(sch, skb);
+ 				--sch->q.qlen;
+ 				qdisc_drop(skb, sch);
+ 			}
+-			qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
++			qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
+ 			q->head = 0;
+ 			q->tail = tail;
+ 		}
+diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
+index 535007d5f0b5..9b7e2980ee5c 100644
+--- a/net/sched/sch_codel.c
++++ b/net/sched/sch_codel.c
+@@ -79,12 +79,13 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
+ 
+ 	skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
+ 
+-	/* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
++	/* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
+ 	 * or HTB crashes. Defer it for next round.
+ 	 */
+ 	if (q->stats.drop_count && sch->q.qlen) {
+-		qdisc_tree_decrease_qlen(sch, q->stats.drop_count);
++		qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
+ 		q->stats.drop_count = 0;
++		q->stats.drop_len = 0;
+ 	}
+ 	if (skb)
+ 		qdisc_bstats_update(sch, skb);
+@@ -116,7 +117,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
+ {
+ 	struct codel_sched_data *q = qdisc_priv(sch);
+ 	struct nlattr *tb[TCA_CODEL_MAX + 1];
+-	unsigned int qlen;
++	unsigned int qlen, dropped = 0;
+ 	int err;
+ 
+ 	if (!opt)
+@@ -156,10 +157,11 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
+ 	while (sch->q.qlen > sch->limit) {
+ 		struct sk_buff *skb = __skb_dequeue(&sch->q);
+ 
++		dropped += qdisc_pkt_len(skb);
+ 		qdisc_qstats_backlog_dec(sch, skb);
+ 		qdisc_drop(skb, sch);
+ 	}
+-	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
++	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
+ 
+ 	sch_tree_unlock(sch);
+ 	return 0;
+diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
+index a1cd778240cd..a63e879e8975 100644
+--- a/net/sched/sch_drr.c
++++ b/net/sched/sch_drr.c
+@@ -53,9 +53,10 @@ static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
+ static void drr_purge_queue(struct drr_class *cl)
+ {
+ 	unsigned int len = cl->qdisc->q.qlen;
++	unsigned int backlog = cl->qdisc->qstats.backlog;
+ 
+ 	qdisc_reset(cl->qdisc);
+-	qdisc_tree_decrease_qlen(cl->qdisc, len);
++	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
+ }
+ 
+ static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
+@@ -226,11 +227,7 @@ static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
+ 			new = &noop_qdisc;
+ 	}
+ 
+-	sch_tree_lock(sch);
+-	drr_purge_queue(cl);
+-	*old = cl->qdisc;
+-	cl->qdisc = new;
+-	sch_tree_unlock(sch);
++	*old = qdisc_replace(sch, new, &cl->qdisc);
+ 	return 0;
+ }
+ 
+diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
+index f357f34d02d2..d0dff0cd8186 100644
+--- a/net/sched/sch_dsmark.c
++++ b/net/sched/sch_dsmark.c
+@@ -73,13 +73,7 @@ static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
+ 			new = &noop_qdisc;
+ 	}
+ 
+-	sch_tree_lock(sch);
+-	*old = p->q;
+-	p->q = new;
+-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+-	qdisc_reset(*old);
+-	sch_tree_unlock(sch);
+-
++	*old = qdisc_replace(sch, new, &p->q);
+ 	return 0;
+ }
+ 
+@@ -264,6 +258,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 		return err;
+ 	}
+ 
++	qdisc_qstats_backlog_inc(sch, skb);
+ 	sch->q.qlen++;
+ 
+ 	return NET_XMIT_SUCCESS;
+@@ -286,6 +281,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
+ 		return NULL;
+ 
+ 	qdisc_bstats_update(sch, skb);
++	qdisc_qstats_backlog_dec(sch, skb);
+ 	sch->q.qlen--;
+ 
+ 	index = skb->tc_index & (p->indices - 1);
+@@ -401,6 +397,7 @@ static void dsmark_reset(struct Qdisc *sch)
+ 
+ 	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
+ 	qdisc_reset(p->q);
++	sch->qstats.backlog = 0;
+ 	sch->q.qlen = 0;
+ }
+ 
+diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
+index 109b2322778f..3c6a47d66a04 100644
+--- a/net/sched/sch_fq.c
++++ b/net/sched/sch_fq.c
+@@ -662,6 +662,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
+ 	struct fq_sched_data *q = qdisc_priv(sch);
+ 	struct nlattr *tb[TCA_FQ_MAX + 1];
+ 	int err, drop_count = 0;
++	unsigned drop_len = 0;
+ 	u32 fq_log;
+ 
+ 	if (!opt)
+@@ -736,10 +737,11 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
+ 
+ 		if (!skb)
+ 			break;
++		drop_len += qdisc_pkt_len(skb);
+ 		kfree_skb(skb);
+ 		drop_count++;
+ 	}
+-	qdisc_tree_decrease_qlen(sch, drop_count);
++	qdisc_tree_reduce_backlog(sch, drop_count, drop_len);
+ 
+ 	sch_tree_unlock(sch);
+ 	return err;
+diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
+index 4c834e93dafb..d3fc8f9dd3d4 100644
+--- a/net/sched/sch_fq_codel.c
++++ b/net/sched/sch_fq_codel.c
+@@ -175,7 +175,7 @@ static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch)
+ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ {
+ 	struct fq_codel_sched_data *q = qdisc_priv(sch);
+-	unsigned int idx;
++	unsigned int idx, prev_backlog;
+ 	struct fq_codel_flow *flow;
+ 	int uninitialized_var(ret);
+ 
+@@ -203,6 +203,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 	if (++sch->q.qlen <= sch->limit)
+ 		return NET_XMIT_SUCCESS;
+ 
++	prev_backlog = sch->qstats.backlog;
+ 	q->drop_overlimit++;
+ 	/* Return Congestion Notification only if we dropped a packet
+ 	 * from this flow.
+@@ -211,7 +212,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 		return NET_XMIT_CN;
+ 
+ 	/* As we dropped a packet, better let upper stack know this */
+-	qdisc_tree_decrease_qlen(sch, 1);
++	qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
+ 	return NET_XMIT_SUCCESS;
+ }
+ 
+@@ -241,6 +242,7 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
+ 	struct fq_codel_flow *flow;
+ 	struct list_head *head;
+ 	u32 prev_drop_count, prev_ecn_mark;
++	unsigned int prev_backlog;
+ 
+ begin:
+ 	head = &q->new_flows;
+@@ -259,6 +261,7 @@ begin:
+ 
+ 	prev_drop_count = q->cstats.drop_count;
+ 	prev_ecn_mark = q->cstats.ecn_mark;
++	prev_backlog = sch->qstats.backlog;
+ 
+ 	skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
+ 			    dequeue);
+@@ -276,12 +279,14 @@ begin:
+ 	}
+ 	qdisc_bstats_update(sch, skb);
+ 	flow->deficit -= qdisc_pkt_len(skb);
+-	/* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
++	/* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
+ 	 * or HTB crashes. Defer it for next round.
+ 	 */
+ 	if (q->cstats.drop_count && sch->q.qlen) {
+-		qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
++		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
++					  q->cstats.drop_len);
+ 		q->cstats.drop_count = 0;
++		q->cstats.drop_len = 0;
+ 	}
+ 	return skb;
+ }
+@@ -372,11 +377,13 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
+ 	while (sch->q.qlen > sch->limit) {
+ 		struct sk_buff *skb = fq_codel_dequeue(sch);
+ 
++		q->cstats.drop_len += qdisc_pkt_len(skb);
+ 		kfree_skb(skb);
+ 		q->cstats.drop_count++;
+ 	}
+-	qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
++	qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
+ 	q->cstats.drop_count = 0;
++	q->cstats.drop_len = 0;
+ 
+ 	sch_tree_unlock(sch);
+ 	return 0;
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index 16bc83b2842a..aa4725038f94 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -159,12 +159,15 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
+ 	if (validate)
+ 		skb = validate_xmit_skb_list(skb, dev);
+ 
+-	if (skb) {
++	if (likely(skb)) {
+ 		HARD_TX_LOCK(dev, txq, smp_processor_id());
+ 		if (!netif_xmit_frozen_or_stopped(txq))
+ 			skb = dev_hard_start_xmit(skb, dev, txq, &ret);
+ 
+ 		HARD_TX_UNLOCK(dev, txq);
++	} else {
++		spin_lock(root_lock);
++		return qdisc_qlen(q);
+ 	}
+ 	spin_lock(root_lock);
+ 
+diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
+index b7ebe2c87586..d783d7cc3348 100644
+--- a/net/sched/sch_hfsc.c
++++ b/net/sched/sch_hfsc.c
+@@ -895,9 +895,10 @@ static void
+ hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
+ {
+ 	unsigned int len = cl->qdisc->q.qlen;
++	unsigned int backlog = cl->qdisc->qstats.backlog;
+ 
+ 	qdisc_reset(cl->qdisc);
+-	qdisc_tree_decrease_qlen(cl->qdisc, len);
++	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
+ }
+ 
+ static void
+@@ -1215,11 +1216,7 @@ hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ 			new = &noop_qdisc;
+ 	}
+ 
+-	sch_tree_lock(sch);
+-	hfsc_purge_queue(sch, cl);
+-	*old = cl->qdisc;
+-	cl->qdisc = new;
+-	sch_tree_unlock(sch);
++	*old = qdisc_replace(sch, new, &cl->qdisc);
+ 	return 0;
+ }
+ 
+diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
+index 86b04e31e60b..13d6f83ec491 100644
+--- a/net/sched/sch_hhf.c
++++ b/net/sched/sch_hhf.c
+@@ -382,6 +382,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 	struct hhf_sched_data *q = qdisc_priv(sch);
+ 	enum wdrr_bucket_idx idx;
+ 	struct wdrr_bucket *bucket;
++	unsigned int prev_backlog;
+ 
+ 	idx = hhf_classify(skb, sch);
+ 
+@@ -409,6 +410,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 	if (++sch->q.qlen <= sch->limit)
+ 		return NET_XMIT_SUCCESS;
+ 
++	prev_backlog = sch->qstats.backlog;
+ 	q->drop_overlimit++;
+ 	/* Return Congestion Notification only if we dropped a packet from this
+ 	 * bucket.
+@@ -417,7 +419,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 		return NET_XMIT_CN;
+ 
+ 	/* As we dropped a packet, better let upper stack know this. */
+-	qdisc_tree_decrease_qlen(sch, 1);
++	qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
+ 	return NET_XMIT_SUCCESS;
+ }
+ 
+@@ -527,7 +529,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
+ {
+ 	struct hhf_sched_data *q = qdisc_priv(sch);
+ 	struct nlattr *tb[TCA_HHF_MAX + 1];
+-	unsigned int qlen;
++	unsigned int qlen, prev_backlog;
+ 	int err;
+ 	u64 non_hh_quantum;
+ 	u32 new_quantum = q->quantum;
+@@ -577,12 +579,14 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
+ 	}
+ 
+ 	qlen = sch->q.qlen;
++	prev_backlog = sch->qstats.backlog;
+ 	while (sch->q.qlen > sch->limit) {
+ 		struct sk_buff *skb = hhf_dequeue(sch);
+ 
+ 		kfree_skb(skb);
+ 	}
+-	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
++	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
++				  prev_backlog - sch->qstats.backlog);
+ 
+ 	sch_tree_unlock(sch);
+ 	return 0;
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index 15ccd7f8fb2a..87b02ed3d5f2 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -600,6 +600,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 		htb_activate(q, cl);
+ 	}
+ 
++	qdisc_qstats_backlog_inc(sch, skb);
+ 	sch->q.qlen++;
+ 	return NET_XMIT_SUCCESS;
+ }
+@@ -889,6 +890,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
+ ok:
+ 		qdisc_bstats_update(sch, skb);
+ 		qdisc_unthrottled(sch);
++		qdisc_qstats_backlog_dec(sch, skb);
+ 		sch->q.qlen--;
+ 		return skb;
+ 	}
+@@ -955,6 +957,7 @@ static unsigned int htb_drop(struct Qdisc *sch)
+ 			unsigned int len;
+ 			if (cl->un.leaf.q->ops->drop &&
+ 			    (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
++				sch->qstats.backlog -= len;
+ 				sch->q.qlen--;
+ 				if (!cl->un.leaf.q->q.qlen)
+ 					htb_deactivate(q, cl);
+@@ -984,12 +987,12 @@ static void htb_reset(struct Qdisc *sch)
+ 			}
+ 			cl->prio_activity = 0;
+ 			cl->cmode = HTB_CAN_SEND;
+-
+ 		}
+ 	}
+ 	qdisc_watchdog_cancel(&q->watchdog);
+ 	__skb_queue_purge(&q->direct_queue);
+ 	sch->q.qlen = 0;
++	sch->qstats.backlog = 0;
+ 	memset(q->hlevel, 0, sizeof(q->hlevel));
+ 	memset(q->row_mask, 0, sizeof(q->row_mask));
+ 	for (i = 0; i < TC_HTB_NUMPRIO; i++)
+@@ -1163,14 +1166,7 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ 				     cl->common.classid)) == NULL)
+ 		return -ENOBUFS;
+ 
+-	sch_tree_lock(sch);
+-	*old = cl->un.leaf.q;
+-	cl->un.leaf.q = new;
+-	if (*old != NULL) {
+-		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+-		qdisc_reset(*old);
+-	}
+-	sch_tree_unlock(sch);
++	*old = qdisc_replace(sch, new, &cl->un.leaf.q);
+ 	return 0;
+ }
+ 
+@@ -1272,7 +1268,6 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
+ {
+ 	struct htb_sched *q = qdisc_priv(sch);
+ 	struct htb_class *cl = (struct htb_class *)arg;
+-	unsigned int qlen;
+ 	struct Qdisc *new_q = NULL;
+ 	int last_child = 0;
+ 
+@@ -1292,9 +1287,11 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
+ 	sch_tree_lock(sch);
+ 
+ 	if (!cl->level) {
+-		qlen = cl->un.leaf.q->q.qlen;
++		unsigned int qlen = cl->un.leaf.q->q.qlen;
++		unsigned int backlog = cl->un.leaf.q->qstats.backlog;
++
+ 		qdisc_reset(cl->un.leaf.q);
+-		qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
++		qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog);
+ 	}
+ 
+ 	/* delete from hash and active; remainder in destroy_class */
+@@ -1428,10 +1425,11 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
+ 		sch_tree_lock(sch);
+ 		if (parent && !parent->level) {
+ 			unsigned int qlen = parent->un.leaf.q->q.qlen;
++			unsigned int backlog = parent->un.leaf.q->qstats.backlog;
+ 
+ 			/* turn parent into inner node */
+ 			qdisc_reset(parent->un.leaf.q);
+-			qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
++			qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog);
+ 			qdisc_destroy(parent->un.leaf.q);
+ 			if (parent->prio_activity)
+ 				htb_deactivate(q, parent);
+diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
+index 4e904ca0af9d..bcdd54bb101c 100644
+--- a/net/sched/sch_multiq.c
++++ b/net/sched/sch_multiq.c
+@@ -218,7 +218,8 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
+ 		if (q->queues[i] != &noop_qdisc) {
+ 			struct Qdisc *child = q->queues[i];
+ 			q->queues[i] = &noop_qdisc;
+-			qdisc_tree_decrease_qlen(child, child->q.qlen);
++			qdisc_tree_reduce_backlog(child, child->q.qlen,
++						  child->qstats.backlog);
+ 			qdisc_destroy(child);
+ 		}
+ 	}
+@@ -238,8 +239,9 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
+ 				q->queues[i] = child;
+ 
+ 				if (old != &noop_qdisc) {
+-					qdisc_tree_decrease_qlen(old,
+-								 old->q.qlen);
++					qdisc_tree_reduce_backlog(old,
++								  old->q.qlen,
++								  old->qstats.backlog);
+ 					qdisc_destroy(old);
+ 				}
+ 				sch_tree_unlock(sch);
+@@ -303,13 +305,7 @@ static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ 	if (new == NULL)
+ 		new = &noop_qdisc;
+ 
+-	sch_tree_lock(sch);
+-	*old = q->queues[band];
+-	q->queues[band] = new;
+-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+-	qdisc_reset(*old);
+-	sch_tree_unlock(sch);
+-
++	*old = qdisc_replace(sch, new, &q->queues[band]);
+ 	return 0;
+ }
+ 
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index 5abd1d9de989..4befe97a9034 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -395,6 +395,25 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+ 	sch->q.qlen++;
+ }
+ 
++/* netem can't properly corrupt a megapacket (like we get from GSO), so instead
++ * when we statistically choose to corrupt one, we instead segment it, returning
++ * the first packet to be corrupted, and re-enqueue the remaining frames
++ */
++static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
++{
++	struct sk_buff *segs;
++	netdev_features_t features = netif_skb_features(skb);
++
++	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
++
++	if (IS_ERR_OR_NULL(segs)) {
++		qdisc_reshape_fail(skb, sch);
++		return NULL;
++	}
++	consume_skb(skb);
++	return segs;
++}
++
+ /*
+  * Insert one skb into qdisc.
+  * Note: parent depends on return value to account for queue length.
+@@ -407,7 +426,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 	/* We don't fill cb now as skb_unshare() may invalidate it */
+ 	struct netem_skb_cb *cb;
+ 	struct sk_buff *skb2;
++	struct sk_buff *segs = NULL;
++	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
++	int nb = 0;
+ 	int count = 1;
++	int rc = NET_XMIT_SUCCESS;
+ 
+ 	/* Random duplication */
+ 	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
+@@ -453,10 +476,23 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 	 * do it now in software before we mangle it.
+ 	 */
+ 	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
++		if (skb_is_gso(skb)) {
++			segs = netem_segment(skb, sch);
++			if (!segs)
++				return NET_XMIT_DROP;
++		} else {
++			segs = skb;
++		}
++
++		skb = segs;
++		segs = segs->next;
++
+ 		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
+ 		    (skb->ip_summed == CHECKSUM_PARTIAL &&
+-		     skb_checksum_help(skb)))
+-			return qdisc_drop(skb, sch);
++		     skb_checksum_help(skb))) {
++			rc = qdisc_drop(skb, sch);
++			goto finish_segs;
++		}
+ 
+ 		skb->data[prandom_u32() % skb_headlen(skb)] ^=
+ 			1<<(prandom_u32() % 8);
+@@ -516,6 +552,27 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 		sch->qstats.requeues++;
+ 	}
+ 
++finish_segs:
++	if (segs) {
++		while (segs) {
++			skb2 = segs->next;
++			segs->next = NULL;
++			qdisc_skb_cb(segs)->pkt_len = segs->len;
++			last_len = segs->len;
++			rc = qdisc_enqueue(segs, sch);
++			if (rc != NET_XMIT_SUCCESS) {
++				if (net_xmit_drop_count(rc))
++					qdisc_qstats_drop(sch);
++			} else {
++				nb++;
++				len += last_len;
++			}
++			segs = skb2;
++		}
++		sch->q.qlen += nb;
++		if (nb > 1)
++			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
++	}
+ 	return NET_XMIT_SUCCESS;
+ }
+ 
+@@ -598,7 +655,8 @@ deliver:
+ 				if (unlikely(err != NET_XMIT_SUCCESS)) {
+ 					if (net_xmit_drop_count(err)) {
+ 						qdisc_qstats_drop(sch);
+-						qdisc_tree_decrease_qlen(sch, 1);
++						qdisc_tree_reduce_backlog(sch, 1,
++									  qdisc_pkt_len(skb));
+ 					}
+ 				}
+ 				goto tfifo_dequeue;
+@@ -1037,15 +1095,7 @@ static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ {
+ 	struct netem_sched_data *q = qdisc_priv(sch);
+ 
+-	sch_tree_lock(sch);
+-	*old = q->qdisc;
+-	q->qdisc = new;
+-	if (*old) {
+-		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+-		qdisc_reset(*old);
+-	}
+-	sch_tree_unlock(sch);
+-
++	*old = qdisc_replace(sch, new, &q->qdisc);
+ 	return 0;
+ }
+ 
+diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
+index b783a446d884..71ae3b9629f9 100644
+--- a/net/sched/sch_pie.c
++++ b/net/sched/sch_pie.c
+@@ -183,7 +183,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
+ {
+ 	struct pie_sched_data *q = qdisc_priv(sch);
+ 	struct nlattr *tb[TCA_PIE_MAX + 1];
+-	unsigned int qlen;
++	unsigned int qlen, dropped = 0;
+ 	int err;
+ 
+ 	if (!opt)
+@@ -232,10 +232,11 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
+ 	while (sch->q.qlen > sch->limit) {
+ 		struct sk_buff *skb = __skb_dequeue(&sch->q);
+ 
++		dropped += qdisc_pkt_len(skb);
+ 		qdisc_qstats_backlog_dec(sch, skb);
+ 		qdisc_drop(skb, sch);
+ 	}
+-	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
++	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
+ 
+ 	sch_tree_unlock(sch);
+ 	return 0;
+diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
+index ba6487f2741f..fee1b15506b2 100644
+--- a/net/sched/sch_prio.c
++++ b/net/sched/sch_prio.c
+@@ -191,7 +191,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
+ 		struct Qdisc *child = q->queues[i];
+ 		q->queues[i] = &noop_qdisc;
+ 		if (child != &noop_qdisc) {
+-			qdisc_tree_decrease_qlen(child, child->q.qlen);
++			qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog);
+ 			qdisc_destroy(child);
+ 		}
+ 	}
+@@ -210,8 +210,9 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
+ 				q->queues[i] = child;
+ 
+ 				if (old != &noop_qdisc) {
+-					qdisc_tree_decrease_qlen(old,
+-								 old->q.qlen);
++					qdisc_tree_reduce_backlog(old,
++								  old->q.qlen,
++								  old->qstats.backlog);
+ 					qdisc_destroy(old);
+ 				}
+ 				sch_tree_unlock(sch);
+@@ -268,13 +269,7 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ 	if (new == NULL)
+ 		new = &noop_qdisc;
+ 
+-	sch_tree_lock(sch);
+-	*old = q->queues[band];
+-	q->queues[band] = new;
+-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+-	qdisc_reset(*old);
+-	sch_tree_unlock(sch);
+-
++	*old = qdisc_replace(sch, new, &q->queues[band]);
+ 	return 0;
+ }
+ 
+diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
+index 3dc3a6e56052..8d2d8d953432 100644
+--- a/net/sched/sch_qfq.c
++++ b/net/sched/sch_qfq.c
+@@ -220,9 +220,10 @@ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
+ static void qfq_purge_queue(struct qfq_class *cl)
+ {
+ 	unsigned int len = cl->qdisc->q.qlen;
++	unsigned int backlog = cl->qdisc->qstats.backlog;
+ 
+ 	qdisc_reset(cl->qdisc);
+-	qdisc_tree_decrease_qlen(cl->qdisc, len);
++	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
+ }
+ 
+ static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
+@@ -617,11 +618,7 @@ static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
+ 			new = &noop_qdisc;
+ 	}
+ 
+-	sch_tree_lock(sch);
+-	qfq_purge_queue(cl);
+-	*old = cl->qdisc;
+-	cl->qdisc = new;
+-	sch_tree_unlock(sch);
++	*old = qdisc_replace(sch, new, &cl->qdisc);
+ 	return 0;
+ }
+ 
+diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
+index 6c0534cc7758..8c0508c0e287 100644
+--- a/net/sched/sch_red.c
++++ b/net/sched/sch_red.c
+@@ -210,7 +210,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
+ 	q->flags = ctl->flags;
+ 	q->limit = ctl->limit;
+ 	if (child) {
+-		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
++		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
++					  q->qdisc->qstats.backlog);
+ 		qdisc_destroy(q->qdisc);
+ 		q->qdisc = child;
+ 	}
+@@ -313,12 +314,7 @@ static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ 	if (new == NULL)
+ 		new = &noop_qdisc;
+ 
+-	sch_tree_lock(sch);
+-	*old = q->qdisc;
+-	q->qdisc = new;
+-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+-	qdisc_reset(*old);
+-	sch_tree_unlock(sch);
++	*old = qdisc_replace(sch, new, &q->qdisc);
+ 	return 0;
+ }
+ 
+diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
+index 5bbb6332ec57..c69611640fa5 100644
+--- a/net/sched/sch_sfb.c
++++ b/net/sched/sch_sfb.c
+@@ -510,7 +510,8 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
+ 
+ 	sch_tree_lock(sch);
+ 
+-	qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
++	qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
++				  q->qdisc->qstats.backlog);
+ 	qdisc_destroy(q->qdisc);
+ 	q->qdisc = child;
+ 
+@@ -606,12 +607,7 @@ static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ 	if (new == NULL)
+ 		new = &noop_qdisc;
+ 
+-	sch_tree_lock(sch);
+-	*old = q->qdisc;
+-	q->qdisc = new;
+-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+-	qdisc_reset(*old);
+-	sch_tree_unlock(sch);
++	*old = qdisc_replace(sch, new, &q->qdisc);
+ 	return 0;
+ }
+ 
+diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
+index 3abab534eb5c..498f0a2cb47f 100644
+--- a/net/sched/sch_sfq.c
++++ b/net/sched/sch_sfq.c
+@@ -346,7 +346,7 @@ static int
+ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ {
+ 	struct sfq_sched_data *q = qdisc_priv(sch);
+-	unsigned int hash;
++	unsigned int hash, dropped;
+ 	sfq_index x, qlen;
+ 	struct sfq_slot *slot;
+ 	int uninitialized_var(ret);
+@@ -461,7 +461,7 @@ enqueue:
+ 		return NET_XMIT_SUCCESS;
+ 
+ 	qlen = slot->qlen;
+-	sfq_drop(sch);
++	dropped = sfq_drop(sch);
+ 	/* Return Congestion Notification only if we dropped a packet
+ 	 * from this flow.
+ 	 */
+@@ -469,7 +469,7 @@ enqueue:
+ 		return NET_XMIT_CN;
+ 
+ 	/* As we dropped a packet, better let upper stack know this */
+-	qdisc_tree_decrease_qlen(sch, 1);
++	qdisc_tree_reduce_backlog(sch, 1, dropped);
+ 	return NET_XMIT_SUCCESS;
+ }
+ 
+@@ -537,6 +537,7 @@ static void sfq_rehash(struct Qdisc *sch)
+ 	struct sfq_slot *slot;
+ 	struct sk_buff_head list;
+ 	int dropped = 0;
++	unsigned int drop_len = 0;
+ 
+ 	__skb_queue_head_init(&list);
+ 
+@@ -565,6 +566,7 @@ static void sfq_rehash(struct Qdisc *sch)
+ 			if (x >= SFQ_MAX_FLOWS) {
+ drop:
+ 				qdisc_qstats_backlog_dec(sch, skb);
++				drop_len += qdisc_pkt_len(skb);
+ 				kfree_skb(skb);
+ 				dropped++;
+ 				continue;
+@@ -594,7 +596,7 @@ drop:
+ 		}
+ 	}
+ 	sch->q.qlen -= dropped;
+-	qdisc_tree_decrease_qlen(sch, dropped);
++	qdisc_tree_reduce_backlog(sch, dropped, drop_len);
+ }
+ 
+ static void sfq_perturbation(unsigned long arg)
+@@ -618,7 +620,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
+ 	struct sfq_sched_data *q = qdisc_priv(sch);
+ 	struct tc_sfq_qopt *ctl = nla_data(opt);
+ 	struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
+-	unsigned int qlen;
++	unsigned int qlen, dropped = 0;
+ 	struct red_parms *p = NULL;
+ 
+ 	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
+@@ -667,8 +669,8 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
+ 
+ 	qlen = sch->q.qlen;
+ 	while (sch->q.qlen > q->limit)
+-		sfq_drop(sch);
+-	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
++		dropped += sfq_drop(sch);
++	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
+ 
+ 	del_timer(&q->perturb_timer);
+ 	if (q->perturb_period) {
+diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
+index a4afde14e865..c2fbde742f37 100644
+--- a/net/sched/sch_tbf.c
++++ b/net/sched/sch_tbf.c
+@@ -160,6 +160,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
+ 	struct tbf_sched_data *q = qdisc_priv(sch);
+ 	struct sk_buff *segs, *nskb;
+ 	netdev_features_t features = netif_skb_features(skb);
++	unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
+ 	int ret, nb;
+ 
+ 	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+@@ -172,6 +173,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
+ 		nskb = segs->next;
+ 		segs->next = NULL;
+ 		qdisc_skb_cb(segs)->pkt_len = segs->len;
++		len += segs->len;
+ 		ret = qdisc_enqueue(segs, q->qdisc);
+ 		if (ret != NET_XMIT_SUCCESS) {
+ 			if (net_xmit_drop_count(ret))
+@@ -183,7 +185,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
+ 	}
+ 	sch->q.qlen += nb;
+ 	if (nb > 1)
+-		qdisc_tree_decrease_qlen(sch, 1 - nb);
++		qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
+ 	consume_skb(skb);
+ 	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
+ }
+@@ -399,7 +401,8 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
+ 
+ 	sch_tree_lock(sch);
+ 	if (child) {
+-		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
++		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
++					  q->qdisc->qstats.backlog);
+ 		qdisc_destroy(q->qdisc);
+ 		q->qdisc = child;
+ 	}
+@@ -502,13 +505,7 @@ static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ 	if (new == NULL)
+ 		new = &noop_qdisc;
+ 
+-	sch_tree_lock(sch);
+-	*old = q->qdisc;
+-	q->qdisc = new;
+-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+-	qdisc_reset(*old);
+-	sch_tree_unlock(sch);
+-
++	*old = qdisc_replace(sch, new, &q->qdisc);
+ 	return 0;
+ }
+ 
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index bbe65dcb9738..c93e67beaea7 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -1789,27 +1789,8 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ 	else if (sk->sk_shutdown & RCV_SHUTDOWN)
+ 		err = 0;
+ 
+-	if (copied > 0) {
+-		/* We only do these additional bookkeeping/notification steps
+-		 * if we actually copied something out of the queue pair
+-		 * instead of just peeking ahead.
+-		 */
+-
+-		if (!(flags & MSG_PEEK)) {
+-			/* If the other side has shutdown for sending and there
+-			 * is nothing more to read, then modify the socket
+-			 * state.
+-			 */
+-			if (vsk->peer_shutdown & SEND_SHUTDOWN) {
+-				if (vsock_stream_has_data(vsk) <= 0) {
+-					sk->sk_state = SS_UNCONNECTED;
+-					sock_set_flag(sk, SOCK_DONE);
+-					sk->sk_state_change(sk);
+-				}
+-			}
+-		}
++	if (copied > 0)
+ 		err = copied;
+-	}
+ 
+ out:
+ 	release_sock(sk);
+diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
+index 7ecd04c21360..997ff7b2509b 100644
+--- a/net/x25/x25_facilities.c
++++ b/net/x25/x25_facilities.c
+@@ -277,6 +277,7 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
+ 
+ 	memset(&theirs, 0, sizeof(theirs));
+ 	memcpy(new, ours, sizeof(*new));
++	memset(dte, 0, sizeof(*dte));
+ 
+ 	len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask);
+ 	if (len < 0)
+diff --git a/samples/bpf/trace_output_kern.c b/samples/bpf/trace_output_kern.c
+index 8d8d1ec429eb..9b96f4fb8cea 100644
+--- a/samples/bpf/trace_output_kern.c
++++ b/samples/bpf/trace_output_kern.c
+@@ -18,7 +18,6 @@ int bpf_prog1(struct pt_regs *ctx)
+ 		u64 cookie;
+ 	} data;
+ 
+-	memset(&data, 0, sizeof(data));
+ 	data.pid = bpf_get_current_pid_tgid();
+ 	data.cookie = 0x12345678;
+ 
+diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c
+index 64e0d1d81ca5..9739fce9e032 100644
+--- a/sound/pci/hda/hda_sysfs.c
++++ b/sound/pci/hda/hda_sysfs.c
+@@ -141,14 +141,6 @@ static int reconfig_codec(struct hda_codec *codec)
+ 	err = snd_hda_codec_configure(codec);
+ 	if (err < 0)
+ 		goto error;
+-	/* rebuild PCMs */
+-	err = snd_hda_codec_build_pcms(codec);
+-	if (err < 0)
+-		goto error;
+-	/* rebuild mixers */
+-	err = snd_hda_codec_build_controls(codec);
+-	if (err < 0)
+-		goto error;
+ 	err = snd_card_register(codec->card);
+  error:
+ 	snd_hda_power_down(codec);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index ac4490a96863..4918ffa5ba68 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6426,6 +6426,7 @@ enum {
+ 	ALC668_FIXUP_DELL_DISABLE_AAMIX,
+ 	ALC668_FIXUP_DELL_XPS13,
+ 	ALC662_FIXUP_ASUS_Nx50,
++	ALC668_FIXUP_ASUS_Nx51,
+ };
+ 
+ static const struct hda_fixup alc662_fixups[] = {
+@@ -6672,6 +6673,15 @@ static const struct hda_fixup alc662_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC662_FIXUP_BASS_1A
+ 	},
++	[ALC668_FIXUP_ASUS_Nx51] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{0x1a, 0x90170151}, /* bass speaker */
++			{}
++		},
++		.chained = true,
++		.chain_id = ALC662_FIXUP_BASS_CHMAP,
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+@@ -6694,11 +6704,14 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
++	SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
+ 	SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
+ 	SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
+ 	SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50),
+ 	SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
+ 	SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
++	SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
++	SND_PCI_QUIRK(0x1043, 0x17bd, "ASUS N751", ALC668_FIXUP_ASUS_Nx51),
+ 	SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16),
+ 	SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
+ 	SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 001fb4dc0722..db11ecf0b74d 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1138,8 +1138,11 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+ 	case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
+ 	case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
+ 	case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
++	case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
+ 	case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
++	case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
+ 	case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
++	case USB_ID(0x1de7, 0x0114): /* Phoenix Audio MT202pcs */
+ 	case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
+ 		return true;
+ 	}
+diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
+index 0144b3d1bb77..88cccea3ca99 100644
+--- a/tools/lib/traceevent/parse-filter.c
++++ b/tools/lib/traceevent/parse-filter.c
+@@ -1164,11 +1164,11 @@ process_filter(struct event_format *event, struct filter_arg **parg,
+ 		current_op = current_exp;
+ 
+ 	ret = collapse_tree(current_op, parg, error_str);
++	/* collapse_tree() may free current_op, and updates parg accordingly */
++	current_op = NULL;
+ 	if (ret < 0)
+ 		goto fail;
+ 
+-	*parg = current_op;
+-
+ 	free(token);
+ 	return 0;
+ 
+diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
+index ec722346e6ff..16892a7ca27e 100644
+--- a/tools/perf/util/sort.c
++++ b/tools/perf/util/sort.c
+@@ -2272,6 +2272,9 @@ static char *prefix_if_not_in(const char *pre, char *str)
+ 
+ static char *setup_overhead(char *keys)
+ {
++	if (sort__mode == SORT_MODE__DIFF)
++		return keys;
++
+ 	keys = prefix_if_not_in("overhead", keys);
+ 
+ 	if (symbol_conf.cumulate_callchain)


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [gentoo-commits] proj/linux-patches:4.5 commit in: /
@ 2016-06-02 19:08 Mike Pagano
  0 siblings, 0 replies; 12+ messages in thread
From: Mike Pagano @ 2016-06-02 19:08 UTC (permalink / raw
  To: gentoo-commits

commit:     afed3274f4b915ecdbdb505c7255ab11f78a22d7
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jun  2 19:08:04 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jun  2 19:08:04 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=afed3274

Linux patch 4.5.6

 0000_README            |     4 +
 1005_linux-4.5.6.patch | 32192 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 32196 insertions(+)

diff --git a/0000_README b/0000_README
index f936e50..9a3ebeb 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch:  1004_linux-4.5.5.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.5.5
 
+Patch:  1005_linux-4.5.6.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.5.6
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1005_linux-4.5.6.patch b/1005_linux-4.5.6.patch
new file mode 100644
index 0000000..339ef1c
--- /dev/null
+++ b/1005_linux-4.5.6.patch
@@ -0,0 +1,32192 @@
+diff --git a/Documentation/cgroup-v2.txt b/Documentation/cgroup-v2.txt
+index ff49cf901148..81eb378210c6 100644
+--- a/Documentation/cgroup-v2.txt
++++ b/Documentation/cgroup-v2.txt
+@@ -1368,6 +1368,12 @@ system than killing the group.  Otherwise, memory.max is there to
+ limit this type of spillover and ultimately contain buggy or even
+ malicious applications.
+ 
++Setting the original memory.limit_in_bytes below the current usage was
++subject to a race condition, where concurrent charges could cause the
++limit setting to fail. memory.max on the other hand will first set the
++limit to prevent new charges, and then reclaim and OOM kill until the
++new limit is met - or the task writing to memory.max is killed.
++
+ The combined memory+swap accounting and limiting is replaced by real
+ control over swap space.
+ 
+diff --git a/Documentation/devicetree/bindings/ata/ahci-platform.txt b/Documentation/devicetree/bindings/ata/ahci-platform.txt
+index c2340eeeb97f..c000832a7fb9 100644
+--- a/Documentation/devicetree/bindings/ata/ahci-platform.txt
++++ b/Documentation/devicetree/bindings/ata/ahci-platform.txt
+@@ -30,6 +30,10 @@ Optional properties:
+ - target-supply     : regulator for SATA target power
+ - phys              : reference to the SATA PHY node
+ - phy-names         : must be "sata-phy"
++- ports-implemented : Mask that indicates which ports that the HBA supports
++		      are available for software to use. Useful if PORTS_IMPL
++		      is not programmed by the BIOS, which is true with
++		      some embedded SOC's.
+ 
+ Required properties when using sub-nodes:
+ - #address-cells    : number of cells to encode an address
+diff --git a/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt
+index 08a4a32c8eb0..0326154c7925 100644
+--- a/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt
++++ b/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt
+@@ -134,12 +134,12 @@ mfio80		ddr_debug, mips_trace_data, mips_debug
+ mfio81		dreq0, mips_trace_data, eth_debug
+ mfio82		dreq1, mips_trace_data, eth_debug
+ mfio83		mips_pll_lock, mips_trace_data, usb_debug
+-mfio84		sys_pll_lock, mips_trace_data, usb_debug
+-mfio85		wifi_pll_lock, mips_trace_data, sdhost_debug
+-mfio86		bt_pll_lock, mips_trace_data, sdhost_debug
+-mfio87		rpu_v_pll_lock, dreq2, socif_debug
+-mfio88		rpu_l_pll_lock, dreq3, socif_debug
+-mfio89		audio_pll_lock, dreq4, dreq5
++mfio84		audio_pll_lock, mips_trace_data, usb_debug
++mfio85		rpu_v_pll_lock, mips_trace_data, sdhost_debug
++mfio86		rpu_l_pll_lock, mips_trace_data, sdhost_debug
++mfio87		sys_pll_lock, dreq2, socif_debug
++mfio88		wifi_pll_lock, dreq3, socif_debug
++mfio89		bt_pll_lock, dreq4, dreq5
+ tck
+ trstn
+ tdi
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 9a53c929f017..21e4b4861331 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -4016,6 +4016,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ 					sector if the number is odd);
+ 				i = IGNORE_DEVICE (don't bind to this
+ 					device);
++				j = NO_REPORT_LUNS (don't use report luns
++					command, uas only);
+ 				l = NOT_LOCKABLE (don't try to lock and
+ 					unlock ejectable media);
+ 				m = MAX_SECTORS_64 (don't transfer more
+diff --git a/Documentation/serial/tty.txt b/Documentation/serial/tty.txt
+index bc3842dc323a..e2dea3dc4307 100644
+--- a/Documentation/serial/tty.txt
++++ b/Documentation/serial/tty.txt
+@@ -213,9 +213,6 @@ TTY_IO_ERROR		If set, causes all subsequent userspace read/write
+ 
+ TTY_OTHER_CLOSED	Device is a pty and the other side has closed.
+ 
+-TTY_OTHER_DONE		Device is a pty and the other side has closed and
+-			all pending input processing has been completed.
+-
+ TTY_NO_WRITE_SPLIT	Prevent driver from splitting up writes into
+ 			smaller chunks.
+ 
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 6ee06ea47be4..77e4c10b4c06 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -228,13 +228,13 @@ F:	kernel/sys_ni.c
+ 
+ ABIT UGURU 1,2 HARDWARE MONITOR DRIVER
+ M:	Hans de Goede <hdegoede@redhat.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	drivers/hwmon/abituguru.c
+ 
+ ABIT UGURU 3 HARDWARE MONITOR DRIVER
+ M:	Alistair John Strachan <alistair@devzero.co.uk>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	drivers/hwmon/abituguru3.c
+ 
+@@ -386,14 +386,14 @@ F:	Documentation/devicetree/bindings/net/ieee802154/adf7242.txt
+ 
+ ADM1025 HARDWARE MONITOR DRIVER
+ M:	Jean Delvare <jdelvare@suse.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/adm1025
+ F:	drivers/hwmon/adm1025.c
+ 
+ ADM1029 HARDWARE MONITOR DRIVER
+ M:	Corentin Labbe <clabbe.montjoie@gmail.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	drivers/hwmon/adm1029.c
+ 
+@@ -438,7 +438,7 @@ F:	drivers/video/backlight/adp8860_bl.c
+ 
+ ADS1015 HARDWARE MONITOR DRIVER
+ M:	Dirk Eibach <eibach@gdsys.de>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/ads1015
+ F:	drivers/hwmon/ads1015.c
+@@ -451,7 +451,7 @@ F:	drivers/macintosh/therm_adt746x.c
+ 
+ ADT7475 HARDWARE MONITOR DRIVER
+ M:	Jean Delvare <jdelvare@suse.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/adt7475
+ F:	drivers/hwmon/adt7475.c
+@@ -628,7 +628,7 @@ F:	include/linux/ccp.h
+ 
+ AMD FAM15H PROCESSOR POWER MONITORING DRIVER
+ M:	Huang Rui <ray.huang@amd.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Supported
+ F:	Documentation/hwmon/fam15h_power
+ F:	drivers/hwmon/fam15h_power.c
+@@ -786,7 +786,7 @@ F:	drivers/input/mouse/bcm5974.c
+ 
+ APPLE SMC DRIVER
+ M:	Henrik Rydberg <rydberg@bitmath.org>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Odd fixes
+ F:	drivers/hwmon/applesmc.c
+ 
+@@ -1825,7 +1825,7 @@ F:	include/media/i2c/as3645a.h
+ 
+ ASC7621 HARDWARE MONITOR DRIVER
+ M:	George Joseph <george.joseph@fairview5.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/asc7621
+ F:	drivers/hwmon/asc7621.c
+@@ -1918,7 +1918,7 @@ F:	drivers/net/wireless/ath/carl9170/
+ 
+ ATK0110 HWMON DRIVER
+ M:	Luca Tettamanti <kronos.it@gmail.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	drivers/hwmon/asus_atk0110.c
+ 
+@@ -3037,7 +3037,7 @@ F:	mm/swap_cgroup.c
+ 
+ CORETEMP HARDWARE MONITORING DRIVER
+ M:	Fenghua Yu <fenghua.yu@intel.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/coretemp
+ F:	drivers/hwmon/coretemp.c
+@@ -3625,7 +3625,7 @@ T:	git git://git.infradead.org/users/vkoul/slave-dma.git
+ 
+ DME1737 HARDWARE MONITOR DRIVER
+ M:	Juerg Haefliger <juergh@gmail.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/dme1737
+ F:	drivers/hwmon/dme1737.c
+@@ -4163,8 +4163,8 @@ F:	Documentation/efi-stub.txt
+ F:	arch/ia64/kernel/efi.c
+ F:	arch/x86/boot/compressed/eboot.[ch]
+ F:	arch/x86/include/asm/efi.h
+-F:	arch/x86/platform/efi/*
+-F:	drivers/firmware/efi/*
++F:	arch/x86/platform/efi/
++F:	drivers/firmware/efi/
+ F:	include/linux/efi*.h
+ 
+ EFI VARIABLE FILESYSTEM
+@@ -4322,7 +4322,7 @@ F:	include/video/exynos_mipi*
+ 
+ F71805F HARDWARE MONITORING DRIVER
+ M:	Jean Delvare <jdelvare@suse.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/f71805f
+ F:	drivers/hwmon/f71805f.c
+@@ -4401,7 +4401,7 @@ F:	fs/*
+ 
+ FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER
+ M:	Riku Voipio <riku.voipio@iki.fi>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	drivers/hwmon/f75375s.c
+ F:	include/linux/f75375s.h
+@@ -4958,8 +4958,8 @@ F:	drivers/media/usb/hackrf/
+ HARDWARE MONITORING
+ M:	Jean Delvare <jdelvare@suse.com>
+ M:	Guenter Roeck <linux@roeck-us.net>
+-L:	lm-sensors@lm-sensors.org
+-W:	http://www.lm-sensors.org/
++L:	linux-hwmon@vger.kernel.org
++W:	http://hwmon.wiki.kernel.org/
+ T:	quilt http://jdelvare.nerim.net/devel/linux/jdelvare-hwmon/
+ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
+ S:	Maintained
+@@ -5484,7 +5484,7 @@ F:	drivers/usb/atm/ueagle-atm.c
+ 
+ INA209 HARDWARE MONITOR DRIVER
+ M:	Guenter Roeck <linux@roeck-us.net>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/ina209
+ F:	Documentation/devicetree/bindings/i2c/ina209.txt
+@@ -5492,7 +5492,7 @@ F:	drivers/hwmon/ina209.c
+ 
+ INA2XX HARDWARE MONITOR DRIVER
+ M:	Guenter Roeck <linux@roeck-us.net>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/ina2xx
+ F:	drivers/hwmon/ina2xx.c
+@@ -5985,7 +5985,7 @@ F:	drivers/isdn/hardware/eicon/
+ 
+ IT87 HARDWARE MONITORING DRIVER
+ M:	Jean Delvare <jdelvare@suse.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/it87
+ F:	drivers/hwmon/it87.c
+@@ -6021,7 +6021,7 @@ F:	drivers/media/dvb-frontends/ix2505v*
+ 
+ JC42.4 TEMPERATURE SENSOR DRIVER
+ M:	Guenter Roeck <linux@roeck-us.net>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	drivers/hwmon/jc42.c
+ F:	Documentation/hwmon/jc42
+@@ -6071,14 +6071,14 @@ F:	drivers/tty/serial/jsm/
+ 
+ K10TEMP HARDWARE MONITORING DRIVER
+ M:	Clemens Ladisch <clemens@ladisch.de>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/k10temp
+ F:	drivers/hwmon/k10temp.c
+ 
+ K8TEMP HARDWARE MONITORING DRIVER
+ M:	Rudolf Marek <r.marek@assembler.cz>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/k8temp
+ F:	drivers/hwmon/k8temp.c
+@@ -6605,27 +6605,27 @@ F:	net/llc/
+ 
+ LM73 HARDWARE MONITOR DRIVER
+ M:	Guillaume Ligneul <guillaume.ligneul@gmail.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	drivers/hwmon/lm73.c
+ 
+ LM78 HARDWARE MONITOR DRIVER
+ M:	Jean Delvare <jdelvare@suse.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/lm78
+ F:	drivers/hwmon/lm78.c
+ 
+ LM83 HARDWARE MONITOR DRIVER
+ M:	Jean Delvare <jdelvare@suse.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/lm83
+ F:	drivers/hwmon/lm83.c
+ 
+ LM90 HARDWARE MONITOR DRIVER
+ M:	Jean Delvare <jdelvare@suse.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/lm90
+ F:	Documentation/devicetree/bindings/hwmon/lm90.txt
+@@ -6633,7 +6633,7 @@ F:	drivers/hwmon/lm90.c
+ 
+ LM95234 HARDWARE MONITOR DRIVER
+ M:	Guenter Roeck <linux@roeck-us.net>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/lm95234
+ F:	drivers/hwmon/lm95234.c
+@@ -6700,7 +6700,7 @@ F:	drivers/scsi/sym53c8xx_2/
+ 
+ LTC4261 HARDWARE MONITOR DRIVER
+ M:	Guenter Roeck <linux@roeck-us.net>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/ltc4261
+ F:	drivers/hwmon/ltc4261.c
+@@ -6870,28 +6870,28 @@ F:	include/uapi/linux/matroxfb.h
+ 
+ MAX16065 HARDWARE MONITOR DRIVER
+ M:	Guenter Roeck <linux@roeck-us.net>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/max16065
+ F:	drivers/hwmon/max16065.c
+ 
+ MAX20751 HARDWARE MONITOR DRIVER
+ M:	Guenter Roeck <linux@roeck-us.net>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/max20751
+ F:	drivers/hwmon/max20751.c
+ 
+ MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER
+ M:	"Hans J. Koch" <hjk@hansjkoch.de>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/max6650
+ F:	drivers/hwmon/max6650.c
+ 
+ MAX6697 HARDWARE MONITOR DRIVER
+ M:	Guenter Roeck <linux@roeck-us.net>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/max6697
+ F:	Documentation/devicetree/bindings/i2c/max6697.txt
+@@ -7455,7 +7455,7 @@ F:	drivers/scsi/NCR_D700.*
+ 
+ NCT6775 HARDWARE MONITOR DRIVER
+ M:	Guenter Roeck <linux@roeck-us.net>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/nct6775
+ F:	drivers/hwmon/nct6775.c
+@@ -8235,7 +8235,7 @@ F:	drivers/video/logo/logo_parisc*
+ 
+ PC87360 HARDWARE MONITORING DRIVER
+ M:	Jim Cromie <jim.cromie@gmail.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/pc87360
+ F:	drivers/hwmon/pc87360.c
+@@ -8247,7 +8247,7 @@ F:	drivers/char/pc8736x_gpio.c
+ 
+ PC87427 HARDWARE MONITORING DRIVER
+ M:	Jean Delvare <jdelvare@suse.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/pc87427
+ F:	drivers/hwmon/pc87427.c
+@@ -8601,8 +8601,8 @@ F:	drivers/rtc/rtc-puv3.c
+ 
+ PMBUS HARDWARE MONITORING DRIVERS
+ M:	Guenter Roeck <linux@roeck-us.net>
+-L:	lm-sensors@lm-sensors.org
+-W:	http://www.lm-sensors.org/
++L:	linux-hwmon@vger.kernel.org
++W:	http://hwmon.wiki.kernel.org/
+ W:	http://www.roeck-us.net/linux/drivers/
+ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
+ S:	Maintained
+@@ -8807,7 +8807,7 @@ F:	drivers/media/usb/pwc/*
+ 
+ PWM FAN DRIVER
+ M:	Kamil Debski <k.debski@samsung.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Supported
+ F:	Documentation/devicetree/bindings/hwmon/pwm-fan.txt
+ F:	Documentation/hwmon/pwm-fan
+@@ -10113,28 +10113,28 @@ F:	Documentation/devicetree/bindings/media/i2c/nokia,smia.txt
+ 
+ SMM665 HARDWARE MONITOR DRIVER
+ M:	Guenter Roeck <linux@roeck-us.net>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/smm665
+ F:	drivers/hwmon/smm665.c
+ 
+ SMSC EMC2103 HARDWARE MONITOR DRIVER
+ M:	Steve Glendinning <steve.glendinning@shawell.net>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/emc2103
+ F:	drivers/hwmon/emc2103.c
+ 
+ SMSC SCH5627 HARDWARE MONITOR DRIVER
+ M:	Hans de Goede <hdegoede@redhat.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Supported
+ F:	Documentation/hwmon/sch5627
+ F:	drivers/hwmon/sch5627.c
+ 
+ SMSC47B397 HARDWARE MONITOR DRIVER
+ M:	Jean Delvare <jdelvare@suse.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/smsc47b397
+ F:	drivers/hwmon/smsc47b397.c
+@@ -11067,7 +11067,7 @@ F:	include/linux/mmc/sh_mobile_sdhi.h
+ 
+ TMP401 HARDWARE MONITOR DRIVER
+ M:	Guenter Roeck <linux@roeck-us.net>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/tmp401
+ F:	drivers/hwmon/tmp401.c
+@@ -11812,14 +11812,14 @@ F:	Documentation/networking/vrf.txt
+ 
+ VT1211 HARDWARE MONITOR DRIVER
+ M:	Juerg Haefliger <juergh@gmail.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/vt1211
+ F:	drivers/hwmon/vt1211.c
+ 
+ VT8231 HARDWARE MONITOR DRIVER
+ M:	Roger Lucas <vt8231@hiddenengine.co.uk>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	drivers/hwmon/vt8231.c
+ 
+@@ -11838,21 +11838,21 @@ F:	drivers/w1/
+ 
+ W83791D HARDWARE MONITORING DRIVER
+ M:	Marc Hulsman <m.hulsman@tudelft.nl>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/w83791d
+ F:	drivers/hwmon/w83791d.c
+ 
+ W83793 HARDWARE MONITORING DRIVER
+ M:	Rudolf Marek <r.marek@assembler.cz>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/hwmon/w83793
+ F:	drivers/hwmon/w83793.c
+ 
+ W83795 HARDWARE MONITORING DRIVER
+ M:	Jean Delvare <jdelvare@suse.com>
+-L:	lm-sensors@lm-sensors.org
++L:	linux-hwmon@vger.kernel.org
+ S:	Maintained
+ F:	drivers/hwmon/w83795.c
+ 
+diff --git a/Makefile b/Makefile
+index 7b3ecdcdc6c1..07a1786f695a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 5
+-SUBLEVEL = 0
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+ 
+@@ -688,9 +688,10 @@ KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
+ KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
+ else
+ 
+-# This warning generated too much noise in a regular build.
+-# Use make W=1 to enable this warning (see scripts/Makefile.build)
++# These warnings generated too much noise in a regular build.
++# Use make W=1 to enable them (see scripts/Makefile.build)
+ KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
++KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
+ endif
+ 
+ ifdef CONFIG_FRAME_POINTER
+diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
+index 44a578c10732..ab5d5701e11d 100644
+--- a/arch/arc/boot/dts/axs10x_mb.dtsi
++++ b/arch/arc/boot/dts/axs10x_mb.dtsi
+@@ -47,6 +47,14 @@
+ 			clocks = <&apbclk>;
+ 			clock-names = "stmmaceth";
+ 			max-speed = <100>;
++			mdio0 {
++				#address-cells = <1>;
++				#size-cells = <0>;
++				compatible = "snps,dwmac-mdio";
++				phy1: ethernet-phy@1 {
++					reg = <1>;
++				};
++			};
+ 		};
+ 
+ 		ehci@0x40000 {
+diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
+index 57c1f33844d4..0352fb8d21b9 100644
+--- a/arch/arc/include/asm/bitops.h
++++ b/arch/arc/include/asm/bitops.h
+@@ -35,21 +35,6 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
+ 									\
+ 	m += nr >> 5;							\
+ 									\
+-	/*								\
+-	 * ARC ISA micro-optimization:					\
+-	 *								\
+-	 * Instructions dealing with bitpos only consider lower 5 bits	\
+-	 * e.g (x << 33) is handled like (x << 1) by ASL instruction	\
+-	 *  (mem pointer still needs adjustment to point to next word)	\
+-	 *								\
+-	 * Hence the masking to clamp @nr arg can be elided in general.	\
+-	 *								\
+-	 * However if @nr is a constant (above assumed in a register),	\
+-	 * and greater than 31, gcc can optimize away (x << 33) to 0,	\
+-	 * as overflow, given the 32-bit ISA. Thus masking needs to be	\
+-	 * done for const @nr, but no code is generated due to gcc	\
+-	 * const prop.							\
+-	 */								\
+ 	nr &= 0x1f;							\
+ 									\
+ 	__asm__ __volatile__(						\
+diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
+index 694ece8a0243..cb69299a492e 100644
+--- a/arch/arc/include/asm/io.h
++++ b/arch/arc/include/asm/io.h
+@@ -13,6 +13,15 @@
+ #include <asm/byteorder.h>
+ #include <asm/page.h>
+ 
++#ifdef CONFIG_ISA_ARCV2
++#include <asm/barrier.h>
++#define __iormb()		rmb()
++#define __iowmb()		wmb()
++#else
++#define __iormb()		do { } while (0)
++#define __iowmb()		do { } while (0)
++#endif
++
+ extern void __iomem *ioremap(unsigned long physaddr, unsigned long size);
+ extern void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
+ 				  unsigned long flags);
+@@ -22,6 +31,15 @@ extern void iounmap(const void __iomem *addr);
+ #define ioremap_wc(phy, sz)		ioremap(phy, sz)
+ #define ioremap_wt(phy, sz)		ioremap(phy, sz)
+ 
++/*
++ * io{read,write}{16,32}be() macros
++ */
++#define ioread16be(p)		({ u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
++#define ioread32be(p)		({ u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
++
++#define iowrite16be(v,p)	({ __iowmb(); __raw_writew((__force u16)cpu_to_be16(v), p); })
++#define iowrite32be(v,p)	({ __iowmb(); __raw_writel((__force u32)cpu_to_be32(v), p); })
++
+ /* Change struct page to physical address */
+ #define page_to_phys(page)		(page_to_pfn(page) << PAGE_SHIFT)
+ 
+@@ -99,15 +117,6 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
+ 
+ }
+ 
+-#ifdef CONFIG_ISA_ARCV2
+-#include <asm/barrier.h>
+-#define __iormb()		rmb()
+-#define __iowmb()		wmb()
+-#else
+-#define __iormb()		do { } while (0)
+-#define __iowmb()		do { } while (0)
+-#endif
+-
+ /*
+  * MMIO can also get buffered/optimized in micro-arch, so barriers needed
+  * Based on ARM model for the typical use case
+@@ -129,15 +138,23 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
+ #define writel(v,c)		({ __iowmb(); writel_relaxed(v,c); })
+ 
+ /*
+- * Relaxed API for drivers which can handle any ordering themselves
++ * Relaxed API for drivers which can handle barrier ordering themselves
++ *
++ * Also these are defined to perform little endian accesses.
++ * To provide the typical device register semantics of fixed endian,
++ * swap the byte order for Big Endian
++ *
++ * http://lkml.kernel.org/r/201603100845.30602.arnd@arndb.de
+  */
+ #define readb_relaxed(c)	__raw_readb(c)
+-#define readw_relaxed(c)	__raw_readw(c)
+-#define readl_relaxed(c)	__raw_readl(c)
++#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
++					__raw_readw(c)); __r; })
++#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
++					__raw_readl(c)); __r; })
+ 
+ #define writeb_relaxed(v,c)	__raw_writeb(v,c)
+-#define writew_relaxed(v,c)	__raw_writew(v,c)
+-#define writel_relaxed(v,c)	__raw_writel(v,c)
++#define writew_relaxed(v,c)	__raw_writew((__force u16) cpu_to_le16(v),c)
++#define writel_relaxed(v,c)	__raw_writel((__force u32) cpu_to_le32(v),c)
+ 
+ #include <asm-generic/io.h>
+ 
+diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
+index 1fafaad516ba..97471d62d5e4 100644
+--- a/arch/arm/boot/dts/am33xx.dtsi
++++ b/arch/arm/boot/dts/am33xx.dtsi
+@@ -860,7 +860,7 @@
+ 			ti,no-idle-on-init;
+ 			reg = <0x50000000 0x2000>;
+ 			interrupts = <100>;
+-			dmas = <&edma 52>;
++			dmas = <&edma 52 0>;
+ 			dma-names = "rxtx";
+ 			gpmc,num-cs = <7>;
+ 			gpmc,num-waitpins = <2>;
+diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
+index 92068fbf8b57..6bd38a28e26c 100644
+--- a/arch/arm/boot/dts/am4372.dtsi
++++ b/arch/arm/boot/dts/am4372.dtsi
+@@ -207,7 +207,7 @@
+ 			ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 5>,
+ 				   <&edma_tptc2 0>;
+ 
+-			ti,edma-memcpy-channels = <32 33>;
++			ti,edma-memcpy-channels = <58 59>;
+ 		};
+ 
+ 		edma_tptc0: tptc@49800000 {
+@@ -884,7 +884,7 @@
+ 		gpmc: gpmc@50000000 {
+ 			compatible = "ti,am3352-gpmc";
+ 			ti,hwmods = "gpmc";
+-			dmas = <&edma 52>;
++			dmas = <&edma 52 0>;
+ 			dma-names = "rxtx";
+ 			clocks = <&l3s_gclk>;
+ 			clock-names = "fck";
+diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
+index d580e2b70f9a..637dc5dbc8ac 100644
+--- a/arch/arm/boot/dts/am43x-epos-evm.dts
++++ b/arch/arm/boot/dts/am43x-epos-evm.dts
+@@ -792,3 +792,8 @@
+ 	tx-num-evt = <32>;
+ 	rx-num-evt = <32>;
+ };
++
++&synctimer_32kclk {
++	assigned-clocks = <&mux_synctimer32k_ck>;
++	assigned-clock-parents = <&clkdiv32k_ick>;
++};
+diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi
+index 7ccce7529b0c..cc952cf8ec30 100644
+--- a/arch/arm/boot/dts/armada-375.dtsi
++++ b/arch/arm/boot/dts/armada-375.dtsi
+@@ -529,7 +529,7 @@
+ 			};
+ 
+ 			sata@a0000 {
+-				compatible = "marvell,orion-sata";
++				compatible = "marvell,armada-370-sata";
+ 				reg = <0xa0000 0x5000>;
+ 				interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ 				clocks = <&gateclk 14>, <&gateclk 20>;
+diff --git a/arch/arm/boot/dts/armada-385-linksys.dtsi b/arch/arm/boot/dts/armada-385-linksys.dtsi
+index 3710755c6d76..85d2c377c332 100644
+--- a/arch/arm/boot/dts/armada-385-linksys.dtsi
++++ b/arch/arm/boot/dts/armada-385-linksys.dtsi
+@@ -117,7 +117,7 @@
+ 			};
+ 
+ 			/* USB part of the eSATA/USB 2.0 port */
+-			usb@50000 {
++			usb@58000 {
+ 				status = "okay";
+ 			};
+ 
+diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+index ff888d21c786..f3e2b96c06a3 100644
+--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
++++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+@@ -303,6 +303,7 @@
+ 		regulator-name = "mmc0-card-supply";
+ 		regulator-min-microvolt = <3300000>;
+ 		regulator-max-microvolt = <3300000>;
++		regulator-always-on;
+ 	};
+ 
+ 	gpio_keys {
+diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
+index 569026e8f96c..da84e65b56ef 100644
+--- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts
++++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
+@@ -268,5 +268,6 @@
+ 		regulator-min-microvolt = <3300000>;
+ 		regulator-max-microvolt = <3300000>;
+ 		vin-supply = <&vcc_3v3_reg>;
++		regulator-always-on;
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
+index 0827d594b1f0..cd0cd5fd09a3 100644
+--- a/arch/arm/boot/dts/at91sam9x5.dtsi
++++ b/arch/arm/boot/dts/at91sam9x5.dtsi
+@@ -106,7 +106,7 @@
+ 
+ 			pmc: pmc@fffffc00 {
+ 				compatible = "atmel,at91sam9x5-pmc", "syscon";
+-				reg = <0xfffffc00 0x100>;
++				reg = <0xfffffc00 0x200>;
+ 				interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+ 				interrupt-controller;
+ 				#address-cells = <1>;
+diff --git a/arch/arm/boot/dts/pxa3xx.dtsi b/arch/arm/boot/dts/pxa3xx.dtsi
+index cf6998a0804d..564341af7e97 100644
+--- a/arch/arm/boot/dts/pxa3xx.dtsi
++++ b/arch/arm/boot/dts/pxa3xx.dtsi
+@@ -30,7 +30,7 @@
+ 			reg = <0x43100000 90>;
+ 			interrupts = <45>;
+ 			clocks = <&clks CLK_NAND>;
+-			dmas = <&pdma 97>;
++			dmas = <&pdma 97 3>;
+ 			dma-names = "data";
+ 			#address-cells = <1>;
+ 			#size-cells = <1>;	
+diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
+index ed521e85e208..e8bc7e8bedd2 100644
+--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
++++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
+@@ -665,7 +665,7 @@
+ 		};
+ 
+ 		sata0: sata@29000000 {
+-			compatible		= "generic-ahci";
++			compatible		= "qcom,apq8064-ahci", "generic-ahci";
+ 			status			= "disabled";
+ 			reg			= <0x29000000 0x180>;
+ 			interrupts		= <GIC_SPI 209 IRQ_TYPE_NONE>;
+@@ -687,6 +687,7 @@
+ 
+ 			phys			= <&sata_phy0>;
+ 			phy-names		= "sata-phy";
++			ports-implemented	= <0x1>;
+ 		};
+ 
+ 		/* Temporary fixed regulator */
+diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
+index 7d0cba6f1cc5..c86ea8aac203 100644
+--- a/arch/arm/kernel/setup.c
++++ b/arch/arm/kernel/setup.c
+@@ -430,11 +430,13 @@ static void __init patch_aeabi_idiv(void)
+ 	pr_info("CPU: div instructions available: patching division code\n");
+ 
+ 	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
++	asm ("" : "+g" (fn_addr));
+ 	((u32 *)fn_addr)[0] = udiv_instruction();
+ 	((u32 *)fn_addr)[1] = bx_lr_instruction();
+ 	flush_icache_range(fn_addr, fn_addr + 8);
+ 
+ 	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
++	asm ("" : "+g" (fn_addr));
+ 	((u32 *)fn_addr)[0] = sdiv_instruction();
+ 	((u32 *)fn_addr)[1] = bx_lr_instruction();
+ 	flush_icache_range(fn_addr, fn_addr + 8);
+diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
+index aba61fd3697a..88323ffb1ee0 100644
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -886,11 +886,14 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
+ 	VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
+ 
+ 	old_pmd = *pmd;
+-	kvm_set_pmd(pmd, *new_pmd);
+-	if (pmd_present(old_pmd))
++	if (pmd_present(old_pmd)) {
++		pmd_clear(pmd);
+ 		kvm_tlb_flush_vmid_ipa(kvm, addr);
+-	else
++	} else {
+ 		get_page(virt_to_page(pmd));
++	}
++
++	kvm_set_pmd(pmd, *new_pmd);
+ 	return 0;
+ }
+ 
+@@ -939,12 +942,14 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+ 
+ 	/* Create 2nd stage page table mapping - Level 3 */
+ 	old_pte = *pte;
+-	kvm_set_pte(pte, *new_pte);
+-	if (pte_present(old_pte))
++	if (pte_present(old_pte)) {
++		kvm_set_pte(pte, __pte(0));
+ 		kvm_tlb_flush_vmid_ipa(kvm, addr);
+-	else
++	} else {
+ 		get_page(virt_to_page(pte));
++	}
+ 
++	kvm_set_pte(pte, *new_pte);
+ 	return 0;
+ }
+ 
+diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
+index 47905a50e075..318394ed5c7a 100644
+--- a/arch/arm/mach-cns3xxx/pcie.c
++++ b/arch/arm/mach-cns3xxx/pcie.c
+@@ -220,13 +220,13 @@ static void cns3xxx_write_config(struct cns3xxx_pcie *cnspci,
+ 	u32 mask = (0x1ull << (size * 8)) - 1;
+ 	int shift = (where % 4) * 8;
+ 
+-	v = readl_relaxed(base + (where & 0xffc));
++	v = readl_relaxed(base);
+ 
+ 	v &= ~(mask << shift);
+ 	v |= (val & mask) << shift;
+ 
+-	writel_relaxed(v, base + (where & 0xffc));
+-	readl_relaxed(base + (where & 0xffc));
++	writel_relaxed(v, base);
++	readl_relaxed(base);
+ }
+ 
+ static void __init cns3xxx_pcie_hw_init(struct cns3xxx_pcie *cnspci)
+diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
+index 652a0bb11578..5189bcecad12 100644
+--- a/arch/arm/mach-exynos/Kconfig
++++ b/arch/arm/mach-exynos/Kconfig
+@@ -27,6 +27,7 @@ menuconfig ARCH_EXYNOS
+ 	select S5P_DEV_MFC
+ 	select SRAM
+ 	select THERMAL
++	select THERMAL_OF
+ 	select MFD_SYSCON
+ 	select CLKSRC_EXYNOS_MCT
+ 	select POWER_RESET
+diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
+index 7c21760f590f..875a2bab64f6 100644
+--- a/arch/arm/mach-exynos/pm_domains.c
++++ b/arch/arm/mach-exynos/pm_domains.c
+@@ -92,7 +92,7 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
+ 			if (IS_ERR(pd->clk[i]))
+ 				break;
+ 
+-			if (IS_ERR(pd->clk[i]))
++			if (IS_ERR(pd->pclk[i]))
+ 				continue; /* Skip on first power up */
+ 			if (clk_set_parent(pd->clk[i], pd->pclk[i]))
+ 				pr_err("%s: error setting parent to clock%d\n",
+diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
+index aa7b379e2661..2a3db0bd9e15 100644
+--- a/arch/arm/mach-omap2/cpuidle34xx.c
++++ b/arch/arm/mach-omap2/cpuidle34xx.c
+@@ -34,6 +34,7 @@
+ #include "pm.h"
+ #include "control.h"
+ #include "common.h"
++#include "soc.h"
+ 
+ /* Mach specific information to be recorded in the C-state driver_data */
+ struct omap3_idle_statedata {
+@@ -315,6 +316,69 @@ static struct cpuidle_driver omap3_idle_driver = {
+ 	.safe_state_index = 0,
+ };
+ 
++/*
++ * Numbers based on measurements made in October 2009 for PM optimized kernel
++ * with CPU freq enabled on device Nokia N900. Assumes OPP2 (main idle OPP,
++ * and worst case latencies).
++ */
++static struct cpuidle_driver omap3430_idle_driver = {
++	.name             = "omap3430_idle",
++	.owner            = THIS_MODULE,
++	.states = {
++		{
++			.enter		  = omap3_enter_idle_bm,
++			.exit_latency	  = 110 + 162,
++			.target_residency = 5,
++			.name		  = "C1",
++			.desc		  = "MPU ON + CORE ON",
++		},
++		{
++			.enter		  = omap3_enter_idle_bm,
++			.exit_latency	  = 106 + 180,
++			.target_residency = 309,
++			.name		  = "C2",
++			.desc		  = "MPU ON + CORE ON",
++		},
++		{
++			.enter		  = omap3_enter_idle_bm,
++			.exit_latency	  = 107 + 410,
++			.target_residency = 46057,
++			.name		  = "C3",
++			.desc		  = "MPU RET + CORE ON",
++		},
++		{
++			.enter		  = omap3_enter_idle_bm,
++			.exit_latency	  = 121 + 3374,
++			.target_residency = 46057,
++			.name		  = "C4",
++			.desc		  = "MPU OFF + CORE ON",
++		},
++		{
++			.enter		  = omap3_enter_idle_bm,
++			.exit_latency	  = 855 + 1146,
++			.target_residency = 46057,
++			.name		  = "C5",
++			.desc		  = "MPU RET + CORE RET",
++		},
++		{
++			.enter		  = omap3_enter_idle_bm,
++			.exit_latency	  = 7580 + 4134,
++			.target_residency = 484329,
++			.name		  = "C6",
++			.desc		  = "MPU OFF + CORE RET",
++		},
++		{
++			.enter		  = omap3_enter_idle_bm,
++			.exit_latency	  = 7505 + 15274,
++			.target_residency = 484329,
++			.name		  = "C7",
++			.desc		  = "MPU OFF + CORE OFF",
++		},
++	},
++	.state_count = ARRAY_SIZE(omap3_idle_data),
++	.safe_state_index = 0,
++};
++
+ /* Public functions */
+ 
+ /**
+@@ -333,5 +397,8 @@ int __init omap3_idle_init(void)
+ 	if (!mpu_pd || !core_pd || !per_pd || !cam_pd)
+ 		return -ENODEV;
+ 
+-	return cpuidle_register(&omap3_idle_driver, NULL);
++	if (cpu_is_omap3430())
++		return cpuidle_register(&omap3430_idle_driver, NULL);
++	else
++		return cpuidle_register(&omap3_idle_driver, NULL);
+ }
+diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
+index 3c87e40650cf..9821be6dfd5e 100644
+--- a/arch/arm/mach-omap2/io.c
++++ b/arch/arm/mach-omap2/io.c
+@@ -368,6 +368,7 @@ void __init omap5_map_io(void)
+ void __init dra7xx_map_io(void)
+ {
+ 	iotable_init(dra7xx_io_desc, ARRAY_SIZE(dra7xx_io_desc));
++	omap_barriers_init();
+ }
+ #endif
+ /*
+diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
+index b6d62e4cdfdd..2af6ff63e3b4 100644
+--- a/arch/arm/mach-omap2/omap_hwmod.c
++++ b/arch/arm/mach-omap2/omap_hwmod.c
+@@ -1416,9 +1416,7 @@ static void _enable_sysc(struct omap_hwmod *oh)
+ 	    (sf & SYSC_HAS_CLOCKACTIVITY))
+ 		_set_clockactivity(oh, oh->class->sysc->clockact, &v);
+ 
+-	/* If the cached value is the same as the new value, skip the write */
+-	if (oh->_sysc_cache != v)
+-		_write_sysconfig(v, oh);
++	_write_sysconfig(v, oh);
+ 
+ 	/*
+ 	 * Set the autoidle bit only after setting the smartidle bit
+@@ -1481,7 +1479,9 @@ static void _idle_sysc(struct omap_hwmod *oh)
+ 		_set_master_standbymode(oh, idlemode, &v);
+ 	}
+ 
+-	_write_sysconfig(v, oh);
++	/* If the cached value is the same as the new value, skip the write */
++	if (oh->_sysc_cache != v)
++		_write_sysconfig(v, oh);
+ }
+ 
+ /**
+diff --git a/arch/arm/mach-prima2/Kconfig b/arch/arm/mach-prima2/Kconfig
+index f998eb1c698e..0cf4426183cf 100644
+--- a/arch/arm/mach-prima2/Kconfig
++++ b/arch/arm/mach-prima2/Kconfig
+@@ -2,6 +2,7 @@ menuconfig ARCH_SIRF
+ 	bool "CSR SiRF"
+ 	depends on ARCH_MULTI_V7
+ 	select ARCH_HAS_RESET_CONTROLLER
++	select RESET_CONTROLLER
+ 	select ARCH_REQUIRE_GPIOLIB
+ 	select GENERIC_IRQ_CHIP
+ 	select NO_IOPORT_MAP
+diff --git a/arch/arm/mach-socfpga/headsmp.S b/arch/arm/mach-socfpga/headsmp.S
+index 5d94b7a2fb10..c160fa3007e9 100644
+--- a/arch/arm/mach-socfpga/headsmp.S
++++ b/arch/arm/mach-socfpga/headsmp.S
+@@ -13,6 +13,7 @@
+ #include <asm/assembler.h>
+ 
+ 	.arch	armv7-a
++	.arm
+ 
+ ENTRY(secondary_trampoline)
+ 	/* CPU1 will always fetch from 0x0 when it is brought out of reset.
+diff --git a/arch/arm64/include/asm/opcodes.h b/arch/arm64/include/asm/opcodes.h
+index 4e603ea36ad3..123f45d92cd1 100644
+--- a/arch/arm64/include/asm/opcodes.h
++++ b/arch/arm64/include/asm/opcodes.h
+@@ -1 +1,5 @@
++#ifdef CONFIG_CPU_BIG_ENDIAN
++#define CONFIG_CPU_ENDIAN_BE8 CONFIG_CPU_BIG_ENDIAN
++#endif
++
+ #include <../../arm/include/asm/opcodes.h>
+diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
+index 5c25b831273d..9786f770088d 100644
+--- a/arch/arm64/include/asm/pgtable-hwdef.h
++++ b/arch/arm64/include/asm/pgtable-hwdef.h
+@@ -133,7 +133,6 @@
+  * Section
+  */
+ #define PMD_SECT_VALID		(_AT(pmdval_t, 1) << 0)
+-#define PMD_SECT_PROT_NONE	(_AT(pmdval_t, 1) << 58)
+ #define PMD_SECT_USER		(_AT(pmdval_t, 1) << 6)		/* AP[1] */
+ #define PMD_SECT_RDONLY		(_AT(pmdval_t, 1) << 7)		/* AP[2] */
+ #define PMD_SECT_S		(_AT(pmdval_t, 3) << 8)
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index 819aff5d593f..137d3bf88e2a 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -279,7 +279,7 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
+ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ 			      pte_t *ptep, pte_t pte)
+ {
+-	if (pte_valid(pte)) {
++	if (pte_present(pte)) {
+ 		if (pte_sw_dirty(pte) && pte_write(pte))
+ 			pte_val(pte) &= ~PTE_RDONLY;
+ 		else
+@@ -356,6 +356,7 @@ static inline pgprot_t mk_sect_prot(pgprot_t prot)
+ #define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
+ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+ 
++#define pmd_present(pmd)	pte_present(pmd_pte(pmd))
+ #define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
+ #define pmd_young(pmd)		pte_young(pmd_pte(pmd))
+ #define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
+@@ -364,7 +365,7 @@ static inline pgprot_t mk_sect_prot(pgprot_t prot)
+ #define pmd_mkclean(pmd)       pte_pmd(pte_mkclean(pmd_pte(pmd)))
+ #define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
+ #define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
+-#define pmd_mknotpresent(pmd)	(__pmd(pmd_val(pmd) & ~PMD_TYPE_MASK))
++#define pmd_mknotpresent(pmd)	(__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))
+ 
+ #define __HAVE_ARCH_PMD_WRITE
+ #define pmd_write(pmd)		pte_write(pmd_pte(pmd))
+@@ -403,7 +404,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ 				     unsigned long size, pgprot_t vma_prot);
+ 
+ #define pmd_none(pmd)		(!pmd_val(pmd))
+-#define pmd_present(pmd)	(pmd_val(pmd))
+ 
+ #define pmd_bad(pmd)		(!(pmd_val(pmd) & 2))
+ 
+@@ -547,6 +547,21 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+ }
+ 
+ #ifdef CONFIG_ARM64_HW_AFDBM
++#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
++extern int ptep_set_access_flags(struct vm_area_struct *vma,
++				 unsigned long address, pte_t *ptep,
++				 pte_t entry, int dirty);
++
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
++static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
++					unsigned long address, pmd_t *pmdp,
++					pmd_t entry, int dirty)
++{
++	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
++}
++#endif
++
+ /*
+  * Atomic pte/pmd modifications.
+  */
+@@ -599,9 +614,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+ }
+ 
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+-#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
+-static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+-				       unsigned long address, pmd_t *pmdp)
++#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
++static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
++					    unsigned long address, pmd_t *pmdp)
+ {
+ 	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
+ }
+@@ -649,6 +664,7 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
+  *	bits 0-1:	present (must be zero)
+  *	bits 2-7:	swap type
+  *	bits 8-57:	swap offset
++ *	bit  58:	PTE_PROT_NONE (must be zero)
+  */
+ #define __SWP_TYPE_SHIFT	2
+ #define __SWP_TYPE_BITS		6
+diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
+index 212ae6361d8b..a5f234039616 100644
+--- a/arch/arm64/kernel/cpuinfo.c
++++ b/arch/arm64/kernel/cpuinfo.c
+@@ -85,7 +85,8 @@ static const char *const compat_hwcap_str[] = {
+ 	"idivt",
+ 	"vfpd32",
+ 	"lpae",
+-	"evtstrm"
++	"evtstrm",
++	NULL
+ };
+ 
+ static const char *const compat_hwcap2_str[] = {
+diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
+index c536c9e307b9..0931155f6406 100644
+--- a/arch/arm64/kernel/debug-monitors.c
++++ b/arch/arm64/kernel/debug-monitors.c
+@@ -186,20 +186,21 @@ static void clear_regs_spsr_ss(struct pt_regs *regs)
+ 
+ /* EL1 Single Step Handler hooks */
+ static LIST_HEAD(step_hook);
+-static DEFINE_RWLOCK(step_hook_lock);
++static DEFINE_SPINLOCK(step_hook_lock);
+ 
+ void register_step_hook(struct step_hook *hook)
+ {
+-	write_lock(&step_hook_lock);
+-	list_add(&hook->node, &step_hook);
+-	write_unlock(&step_hook_lock);
++	spin_lock(&step_hook_lock);
++	list_add_rcu(&hook->node, &step_hook);
++	spin_unlock(&step_hook_lock);
+ }
+ 
+ void unregister_step_hook(struct step_hook *hook)
+ {
+-	write_lock(&step_hook_lock);
+-	list_del(&hook->node);
+-	write_unlock(&step_hook_lock);
++	spin_lock(&step_hook_lock);
++	list_del_rcu(&hook->node);
++	spin_unlock(&step_hook_lock);
++	synchronize_rcu();
+ }
+ 
+ /*
+@@ -213,15 +214,15 @@ static int call_step_hook(struct pt_regs *regs, unsigned int esr)
+ 	struct step_hook *hook;
+ 	int retval = DBG_HOOK_ERROR;
+ 
+-	read_lock(&step_hook_lock);
++	rcu_read_lock();
+ 
+-	list_for_each_entry(hook, &step_hook, node)	{
++	list_for_each_entry_rcu(hook, &step_hook, node)	{
+ 		retval = hook->fn(regs, esr);
+ 		if (retval == DBG_HOOK_HANDLED)
+ 			break;
+ 	}
+ 
+-	read_unlock(&step_hook_lock);
++	rcu_read_unlock();
+ 
+ 	return retval;
+ }
+diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
+index 4d1ac81870d2..e9e0e6db73f6 100644
+--- a/arch/arm64/kvm/inject_fault.c
++++ b/arch/arm64/kvm/inject_fault.c
+@@ -162,7 +162,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
+ 		esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);
+ 
+ 	if (!is_iabt)
+-		esr |= ESR_ELx_EC_DABT_LOW;
++		esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;
+ 
+ 	vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_ELx_FSC_EXTABT;
+ }
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index abe2a9542b3a..a26e3acea6a9 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -81,6 +81,56 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
+ 	printk("\n");
+ }
+ 
++#ifdef CONFIG_ARM64_HW_AFDBM
++/*
++ * This function sets the access flags (dirty, accessed), as well as write
++ * permission, and only to a more permissive setting.
++ *
++ * It needs to cope with hardware update of the accessed/dirty state by other
++ * agents in the system and can safely skip the __sync_icache_dcache() call as,
++ * like set_pte_at(), the PTE is never changed from no-exec to exec here.
++ *
++ * Returns whether or not the PTE actually changed.
++ */
++int ptep_set_access_flags(struct vm_area_struct *vma,
++			  unsigned long address, pte_t *ptep,
++			  pte_t entry, int dirty)
++{
++	pteval_t old_pteval;
++	unsigned int tmp;
++
++	if (pte_same(*ptep, entry))
++		return 0;
++
++	/* only preserve the access flags and write permission */
++	pte_val(entry) &= PTE_AF | PTE_WRITE | PTE_DIRTY;
++
++	/*
++	 * PTE_RDONLY is cleared by default in the asm below, so set it in
++	 * back if necessary (read-only or clean PTE).
++	 */
++	if (!pte_write(entry) || !dirty)
++		pte_val(entry) |= PTE_RDONLY;
++
++	/*
++	 * Setting the flags must be done atomically to avoid racing with the
++	 * hardware update of the access/dirty state.
++	 */
++	asm volatile("//	ptep_set_access_flags\n"
++	"	prfm	pstl1strm, %2\n"
++	"1:	ldxr	%0, %2\n"
++	"	and	%0, %0, %3		// clear PTE_RDONLY\n"
++	"	orr	%0, %0, %4		// set flags\n"
++	"	stxr	%w1, %0, %2\n"
++	"	cbnz	%w1, 1b\n"
++	: "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
++	: "L" (~PTE_RDONLY), "r" (pte_val(entry)));
++
++	flush_tlb_fix_spurious_fault(vma, address);
++	return 1;
++}
++#endif
++
+ /*
+  * The kernel tried to access some page that wasn't present.
+  */
+diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
+index a865d2a04f75..5de673ac9cb1 100644
+--- a/arch/ia64/include/asm/io.h
++++ b/arch/ia64/include/asm/io.h
+@@ -433,6 +433,7 @@ static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned lo
+ 	return ioremap(phys_addr, size);
+ }
+ #define ioremap_cache ioremap_cache
++#define ioremap_uc ioremap_nocache
+ 
+ 
+ /*
+diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c
+index bdeed9d13c6f..433c4b9a9f0a 100644
+--- a/arch/mips/alchemy/devboards/db1000.c
++++ b/arch/mips/alchemy/devboards/db1000.c
+@@ -503,15 +503,15 @@ int __init db1000_dev_setup(void)
+ 	if (board == BCSR_WHOAMI_DB1500) {
+ 		c0 = AU1500_GPIO2_INT;
+ 		c1 = AU1500_GPIO5_INT;
+-		d0 = AU1500_GPIO0_INT;
+-		d1 = AU1500_GPIO3_INT;
++		d0 = 0;	/* GPIO number, NOT irq! */
++		d1 = 3; /* GPIO number, NOT irq! */
+ 		s0 = AU1500_GPIO1_INT;
+ 		s1 = AU1500_GPIO4_INT;
+ 	} else if (board == BCSR_WHOAMI_DB1100) {
+ 		c0 = AU1100_GPIO2_INT;
+ 		c1 = AU1100_GPIO5_INT;
+-		d0 = AU1100_GPIO0_INT;
+-		d1 = AU1100_GPIO3_INT;
++		d0 = 0; /* GPIO number, NOT irq! */
++		d1 = 3; /* GPIO number, NOT irq! */
+ 		s0 = AU1100_GPIO1_INT;
+ 		s1 = AU1100_GPIO4_INT;
+ 
+@@ -545,15 +545,15 @@ int __init db1000_dev_setup(void)
+ 	} else if (board == BCSR_WHOAMI_DB1000) {
+ 		c0 = AU1000_GPIO2_INT;
+ 		c1 = AU1000_GPIO5_INT;
+-		d0 = AU1000_GPIO0_INT;
+-		d1 = AU1000_GPIO3_INT;
++		d0 = 0; /* GPIO number, NOT irq! */
++		d1 = 3; /* GPIO number, NOT irq! */
+ 		s0 = AU1000_GPIO1_INT;
+ 		s1 = AU1000_GPIO4_INT;
+ 		platform_add_devices(db1000_devs, ARRAY_SIZE(db1000_devs));
+ 	} else if ((board == BCSR_WHOAMI_PB1500) ||
+ 		   (board == BCSR_WHOAMI_PB1500R2)) {
+ 		c0 = AU1500_GPIO203_INT;
+-		d0 = AU1500_GPIO201_INT;
++		d0 = 1; /* GPIO number, NOT irq! */
+ 		s0 = AU1500_GPIO202_INT;
+ 		twosocks = 0;
+ 		flashsize = 64;
+@@ -566,7 +566,7 @@ int __init db1000_dev_setup(void)
+ 		 */
+ 	} else if (board == BCSR_WHOAMI_PB1100) {
+ 		c0 = AU1100_GPIO11_INT;
+-		d0 = AU1100_GPIO9_INT;
++		d0 = 9; /* GPIO number, NOT irq! */
+ 		s0 = AU1100_GPIO10_INT;
+ 		twosocks = 0;
+ 		flashsize = 64;
+@@ -583,7 +583,6 @@ int __init db1000_dev_setup(void)
+ 	} else
+ 		return 0; /* unknown board, no further dev setup to do */
+ 
+-	irq_set_irq_type(d0, IRQ_TYPE_EDGE_BOTH);
+ 	irq_set_irq_type(c0, IRQ_TYPE_LEVEL_LOW);
+ 	irq_set_irq_type(s0, IRQ_TYPE_LEVEL_LOW);
+ 
+@@ -597,7 +596,6 @@ int __init db1000_dev_setup(void)
+ 		c0, d0, /*s0*/0, 0, 0);
+ 
+ 	if (twosocks) {
+-		irq_set_irq_type(d1, IRQ_TYPE_EDGE_BOTH);
+ 		irq_set_irq_type(c1, IRQ_TYPE_LEVEL_LOW);
+ 		irq_set_irq_type(s1, IRQ_TYPE_LEVEL_LOW);
+ 
+diff --git a/arch/mips/alchemy/devboards/db1550.c b/arch/mips/alchemy/devboards/db1550.c
+index b518f029f5e7..1c01d6eadb08 100644
+--- a/arch/mips/alchemy/devboards/db1550.c
++++ b/arch/mips/alchemy/devboards/db1550.c
+@@ -514,7 +514,7 @@ static void __init db1550_devices(void)
+ 		AU1000_PCMCIA_MEM_PHYS_ADDR  + 0x000400000 - 1,
+ 		AU1000_PCMCIA_IO_PHYS_ADDR,
+ 		AU1000_PCMCIA_IO_PHYS_ADDR   + 0x000010000 - 1,
+-		AU1550_GPIO3_INT, AU1550_GPIO0_INT,
++		AU1550_GPIO3_INT, 0,
+ 		/*AU1550_GPIO21_INT*/0, 0, 0);
+ 
+ 	db1x_register_pcmcia_socket(
+@@ -524,7 +524,7 @@ static void __init db1550_devices(void)
+ 		AU1000_PCMCIA_MEM_PHYS_ADDR  + 0x004400000 - 1,
+ 		AU1000_PCMCIA_IO_PHYS_ADDR   + 0x004000000,
+ 		AU1000_PCMCIA_IO_PHYS_ADDR   + 0x004010000 - 1,
+-		AU1550_GPIO5_INT, AU1550_GPIO1_INT,
++		AU1550_GPIO5_INT, 1,
+ 		/*AU1550_GPIO22_INT*/0, 0, 1);
+ 
+ 	platform_device_register(&db1550_nand_dev);
+diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
+index f6b12790716c..942b8f6bf35b 100644
+--- a/arch/mips/include/asm/kvm_host.h
++++ b/arch/mips/include/asm/kvm_host.h
+@@ -747,7 +747,7 @@ extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
+ 
+ uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu);
+ void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count);
+-void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare);
++void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack);
+ void kvm_mips_init_count(struct kvm_vcpu *vcpu);
+ int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
+ int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
+diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
+index 490cea569d57..5c62065cbf22 100644
+--- a/arch/mips/kernel/unaligned.c
++++ b/arch/mips/kernel/unaligned.c
+@@ -885,7 +885,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
+ {
+ 	union mips_instruction insn;
+ 	unsigned long value;
+-	unsigned int res;
++	unsigned int res, preempted;
+ 	unsigned long origpc;
+ 	unsigned long orig31;
+ 	void __user *fault_addr = NULL;
+@@ -1226,27 +1226,36 @@ static void emulate_load_store_insn(struct pt_regs *regs,
+ 			if (!access_ok(VERIFY_READ, addr, sizeof(*fpr)))
+ 				goto sigbus;
+ 
+-			/*
+-			 * Disable preemption to avoid a race between copying
+-			 * state from userland, migrating to another CPU and
+-			 * updating the hardware vector register below.
+-			 */
+-			preempt_disable();
+-
+-			res = __copy_from_user_inatomic(fpr, addr,
+-							sizeof(*fpr));
+-			if (res)
+-				goto fault;
+-
+-			/*
+-			 * Update the hardware register if it is in use by the
+-			 * task in this quantum, in order to avoid having to
+-			 * save & restore the whole vector context.
+-			 */
+-			if (test_thread_flag(TIF_USEDMSA))
+-				write_msa_wr(wd, fpr, df);
++			do {
++				/*
++				 * If we have live MSA context keep track of
++				 * whether we get preempted in order to avoid
++				 * the register context we load being clobbered
++				 * by the live context as it's saved during
++				 * preemption. If we don't have live context
++				 * then it can't be saved to clobber the value
++				 * we load.
++				 */
++				preempted = test_thread_flag(TIF_USEDMSA);
++
++				res = __copy_from_user_inatomic(fpr, addr,
++								sizeof(*fpr));
++				if (res)
++					goto fault;
+ 
+-			preempt_enable();
++				/*
++				 * Update the hardware register if it is in use
++				 * by the task in this quantum, in order to
++				 * avoid having to save & restore the whole
++				 * vector context.
++				 */
++				preempt_disable();
++				if (test_thread_flag(TIF_USEDMSA)) {
++					write_msa_wr(wd, fpr, df);
++					preempted = 0;
++				}
++				preempt_enable();
++			} while (preempted);
+ 			break;
+ 
+ 		case msa_st_op:
+diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
+index b37954cc880d..b8b7860ec1a8 100644
+--- a/arch/mips/kvm/emulate.c
++++ b/arch/mips/kvm/emulate.c
+@@ -302,12 +302,31 @@ static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
+  */
+ static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
+ {
+-	ktime_t expires;
++	struct mips_coproc *cop0 = vcpu->arch.cop0;
++	ktime_t expires, threshold;
++	uint32_t count, compare;
+ 	int running;
+ 
+-	/* Is the hrtimer pending? */
++	/* Calculate the biased and scaled guest CP0_Count */
++	count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
++	compare = kvm_read_c0_guest_compare(cop0);
++
++	/*
++	 * Find whether CP0_Count has reached the closest timer interrupt. If
++	 * not, we shouldn't inject it.
++	 */
++	if ((int32_t)(count - compare) < 0)
++		return count;
++
++	/*
++	 * The CP0_Count we're going to return has already reached the closest
++	 * timer interrupt. Quickly check if it really is a new interrupt by
++	 * looking at whether the interval until the hrtimer expiry time is
++	 * less than 1/4 of the timer period.
++	 */
+ 	expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
+-	if (ktime_compare(now, expires) >= 0) {
++	threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
++	if (ktime_before(expires, threshold)) {
+ 		/*
+ 		 * Cancel it while we handle it so there's no chance of
+ 		 * interference with the timeout handler.
+@@ -329,8 +348,7 @@ static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
+ 		}
+ 	}
+ 
+-	/* Return the biased and scaled guest CP0_Count */
+-	return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
++	return count;
+ }
+ 
+ /**
+@@ -420,32 +438,6 @@ static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
+ }
+ 
+ /**
+- * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer.
+- * @vcpu:	Virtual CPU.
+- *
+- * Recalculates and updates the expiry time of the hrtimer. This can be used
+- * after timer parameters have been altered which do not depend on the time that
+- * the change occurs (in those cases kvm_mips_freeze_hrtimer() and
+- * kvm_mips_resume_hrtimer() are used directly).
+- *
+- * It is guaranteed that no timer interrupts will be lost in the process.
+- *
+- * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
+- */
+-static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu)
+-{
+-	ktime_t now;
+-	uint32_t count;
+-
+-	/*
+-	 * freeze_hrtimer takes care of a timer interrupts <= count, and
+-	 * resume_hrtimer the hrtimer takes care of a timer interrupts > count.
+-	 */
+-	now = kvm_mips_freeze_hrtimer(vcpu, &count);
+-	kvm_mips_resume_hrtimer(vcpu, now, count);
+-}
+-
+-/**
+  * kvm_mips_write_count() - Modify the count and update timer.
+  * @vcpu:	Virtual CPU.
+  * @count:	Guest CP0_Count value to set.
+@@ -540,23 +532,42 @@ int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
+  * kvm_mips_write_compare() - Modify compare and update timer.
+  * @vcpu:	Virtual CPU.
+  * @compare:	New CP0_Compare value.
++ * @ack:	Whether to acknowledge timer interrupt.
+  *
+  * Update CP0_Compare to a new value and update the timeout.
++ * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
++ * any pending timer interrupt is preserved.
+  */
+-void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare)
++void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack)
+ {
+ 	struct mips_coproc *cop0 = vcpu->arch.cop0;
++	int dc;
++	u32 old_compare = kvm_read_c0_guest_compare(cop0);
++	ktime_t now;
++	uint32_t count;
+ 
+ 	/* if unchanged, must just be an ack */
+-	if (kvm_read_c0_guest_compare(cop0) == compare)
++	if (old_compare == compare) {
++		if (!ack)
++			return;
++		kvm_mips_callbacks->dequeue_timer_int(vcpu);
++		kvm_write_c0_guest_compare(cop0, compare);
+ 		return;
++	}
++
++	/* freeze_hrtimer() takes care of timer interrupts <= count */
++	dc = kvm_mips_count_disabled(vcpu);
++	if (!dc)
++		now = kvm_mips_freeze_hrtimer(vcpu, &count);
++
++	if (ack)
++		kvm_mips_callbacks->dequeue_timer_int(vcpu);
+ 
+-	/* Update compare */
+ 	kvm_write_c0_guest_compare(cop0, compare);
+ 
+-	/* Update timeout if count enabled */
+-	if (!kvm_mips_count_disabled(vcpu))
+-		kvm_mips_update_hrtimer(vcpu);
++	/* resume_hrtimer() takes care of timer interrupts > count */
++	if (!dc)
++		kvm_mips_resume_hrtimer(vcpu, now, count);
+ }
+ 
+ /**
+@@ -1095,9 +1106,9 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
+ 
+ 				/* If we are writing to COMPARE */
+ 				/* Clear pending timer interrupt, if any */
+-				kvm_mips_callbacks->dequeue_timer_int(vcpu);
+ 				kvm_mips_write_compare(vcpu,
+-						       vcpu->arch.gprs[rt]);
++						       vcpu->arch.gprs[rt],
++						       true);
+ 			} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
+ 				unsigned int old_val, val, change;
+ 
+diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
+index ad988000563f..1664589d4746 100644
+--- a/arch/mips/kvm/trap_emul.c
++++ b/arch/mips/kvm/trap_emul.c
+@@ -546,7 +546,7 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
+ 		kvm_mips_write_count(vcpu, v);
+ 		break;
+ 	case KVM_REG_MIPS_CP0_COMPARE:
+-		kvm_mips_write_compare(vcpu, v);
++		kvm_mips_write_compare(vcpu, v, false);
+ 		break;
+ 	case KVM_REG_MIPS_CP0_CAUSE:
+ 		/*
+diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
+index 14f655cf542e..86ed37671ef5 100644
+--- a/arch/parisc/Kconfig
++++ b/arch/parisc/Kconfig
+@@ -29,6 +29,7 @@ config PARISC
+ 	select TTY # Needed for pdc_cons.c
+ 	select HAVE_DEBUG_STACKOVERFLOW
+ 	select HAVE_ARCH_AUDITSYSCALL
++	select HAVE_ARCH_SECCOMP_FILTER
+ 	select ARCH_NO_COHERENT_DMA_MMAP
+ 
+ 	help
+diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h
+index 0448a2c8eafb..3387307cc33e 100644
+--- a/arch/parisc/include/asm/compat.h
++++ b/arch/parisc/include/asm/compat.h
+@@ -183,6 +183,13 @@ typedef struct compat_siginfo {
+ 			int _band;      /* POLL_IN, POLL_OUT, POLL_MSG */
+ 			int _fd;
+ 		} _sigpoll;
++
++		/* SIGSYS */
++		struct {
++			compat_uptr_t _call_addr; /* calling user insn */
++			int _syscall;	/* triggering system call number */
++			compat_uint_t _arch;	/* AUDIT_ARCH_* of syscall */
++		} _sigsys;
+ 	} _sifields;
+ } compat_siginfo_t;
+ 
+diff --git a/arch/parisc/include/asm/syscall.h b/arch/parisc/include/asm/syscall.h
+index a5eba95d87fe..637ce8d6f375 100644
+--- a/arch/parisc/include/asm/syscall.h
++++ b/arch/parisc/include/asm/syscall.h
+@@ -39,6 +39,19 @@ static inline void syscall_get_arguments(struct task_struct *tsk,
+ 	}
+ }
+ 
++static inline void syscall_set_return_value(struct task_struct *task,
++					    struct pt_regs *regs,
++					    int error, long val)
++{
++	regs->gr[28] = error ? error : val;
++}
++
++static inline void syscall_rollback(struct task_struct *task,
++				    struct pt_regs *regs)
++{
++	/* do nothing */
++}
++
+ static inline int syscall_get_arch(void)
+ {
+ 	int arch = AUDIT_ARCH_PARISC;
+diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
+index 0abdd4c607ed..1960b87c1c8b 100644
+--- a/arch/parisc/include/asm/uaccess.h
++++ b/arch/parisc/include/asm/uaccess.h
+@@ -76,6 +76,7 @@ struct exception_table_entry {
+  */
+ struct exception_data {
+ 	unsigned long fault_ip;
++	unsigned long fault_gp;
+ 	unsigned long fault_space;
+ 	unsigned long fault_addr;
+ };
+diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
+index d2f62570a7b1..78d30d2ea2d8 100644
+--- a/arch/parisc/kernel/asm-offsets.c
++++ b/arch/parisc/kernel/asm-offsets.c
+@@ -299,6 +299,7 @@ int main(void)
+ #endif
+ 	BLANK();
+ 	DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
++	DEFINE(EXCDATA_GP, offsetof(struct exception_data, fault_gp));
+ 	DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
+ 	DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr));
+ 	BLANK();
+diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
+index 568b2c61ea02..3cad8aadc69e 100644
+--- a/arch/parisc/kernel/parisc_ksyms.c
++++ b/arch/parisc/kernel/parisc_ksyms.c
+@@ -47,11 +47,11 @@ EXPORT_SYMBOL(__cmpxchg_u64);
+ EXPORT_SYMBOL(lclear_user);
+ EXPORT_SYMBOL(lstrnlen_user);
+ 
+-/* Global fixups */
+-extern void fixup_get_user_skip_1(void);
+-extern void fixup_get_user_skip_2(void);
+-extern void fixup_put_user_skip_1(void);
+-extern void fixup_put_user_skip_2(void);
++/* Global fixups - defined as int to avoid creation of function pointers */
++extern int fixup_get_user_skip_1;
++extern int fixup_get_user_skip_2;
++extern int fixup_put_user_skip_1;
++extern int fixup_put_user_skip_2;
+ EXPORT_SYMBOL(fixup_get_user_skip_1);
+ EXPORT_SYMBOL(fixup_get_user_skip_2);
+ EXPORT_SYMBOL(fixup_put_user_skip_1);
+diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
+index ce0b2b4075c7..8fb81a391599 100644
+--- a/arch/parisc/kernel/ptrace.c
++++ b/arch/parisc/kernel/ptrace.c
+@@ -270,7 +270,8 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+ long do_syscall_trace_enter(struct pt_regs *regs)
+ {
+ 	/* Do the secure computing check first. */
+-	secure_computing_strict(regs->gr[20]);
++	if (secure_computing() == -1)
++		return -1;
+ 
+ 	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+ 	    tracehook_report_syscall_entry(regs)) {
+@@ -296,7 +297,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
+ 			regs->gr[23] & 0xffffffff);
+ 
+ out:
+-	return regs->gr[20];
++	/*
++	 * Sign extend the syscall number to 64bit since it may have been
++	 * modified by a compat ptrace call
++	 */
++	return (int) ((u32) regs->gr[20]);
+ }
+ 
+ void do_syscall_trace_exit(struct pt_regs *regs)
+diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c
+index 984abbee71ca..c342b2e17492 100644
+--- a/arch/parisc/kernel/signal32.c
++++ b/arch/parisc/kernel/signal32.c
+@@ -371,6 +371,11 @@ copy_siginfo_to_user32 (compat_siginfo_t __user *to, const siginfo_t *from)
+ 			val = (compat_int_t)from->si_int;
+ 			err |= __put_user(val, &to->si_int);
+ 			break;
++		case __SI_SYS >> 16:
++			err |= __put_user(ptr_to_compat(from->si_call_addr), &to->si_call_addr);
++			err |= __put_user(from->si_syscall, &to->si_syscall);
++			err |= __put_user(from->si_arch, &to->si_arch);
++			break;
+ 		}
+ 	}
+ 	return err;
+diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
+index fbafa0d0e2bf..57b4836b7ecd 100644
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -329,6 +329,7 @@ tracesys_next:
+ 
+ 	ldo     -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1      /* get task ptr */
+ 	LDREG	TI_TASK(%r1), %r1
++	LDREG   TASK_PT_GR28(%r1), %r28		/* Restore return value */
+ 	LDREG   TASK_PT_GR26(%r1), %r26		/* Restore the users args */
+ 	LDREG   TASK_PT_GR25(%r1), %r25
+ 	LDREG   TASK_PT_GR24(%r1), %r24
+@@ -342,7 +343,8 @@ tracesys_next:
+ 	stw     %r21, -56(%r30)                 /* 6th argument */
+ #endif
+ 
+-	comiclr,>>=	__NR_Linux_syscalls, %r20, %r0
++	cmpib,COND(=),n -1,%r20,tracesys_exit /* seccomp may have returned -1 */
++	comiclr,>>	__NR_Linux_syscalls, %r20, %r0
+ 	b,n	.Ltracesys_nosys
+ 
+ 	LDREGX  %r20(%r19), %r19
+diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
+index 553b09855cfd..77e2262c97f6 100644
+--- a/arch/parisc/kernel/traps.c
++++ b/arch/parisc/kernel/traps.c
+@@ -798,6 +798,9 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
+ 
+ 	    if (fault_space == 0 && !faulthandler_disabled())
+ 	    {
++		/* Clean up and return if in exception table. */
++		if (fixup_exception(regs))
++			return;
+ 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
+ 		parisc_terminate("Kernel Fault", regs, code, fault_address);
+ 	    }
+diff --git a/arch/parisc/lib/fixup.S b/arch/parisc/lib/fixup.S
+index 536ef66bb94b..1052b747e011 100644
+--- a/arch/parisc/lib/fixup.S
++++ b/arch/parisc/lib/fixup.S
+@@ -26,6 +26,7 @@
+ 
+ #ifdef CONFIG_SMP
+ 	.macro  get_fault_ip t1 t2
++	loadgp
+ 	addil LT%__per_cpu_offset,%r27
+ 	LDREG RT%__per_cpu_offset(%r1),\t1
+ 	/* t2 = smp_processor_id() */
+@@ -40,14 +41,19 @@
+ 	LDREG RT%exception_data(%r1),\t1
+ 	/* t1 = this_cpu_ptr(&exception_data) */
+ 	add,l \t1,\t2,\t1
++	/* %r27 = t1->fault_gp - restore gp */
++	LDREG EXCDATA_GP(\t1), %r27
+ 	/* t1 = t1->fault_ip */
+ 	LDREG EXCDATA_IP(\t1), \t1
+ 	.endm
+ #else
+ 	.macro  get_fault_ip t1 t2
++	loadgp
+ 	/* t1 = this_cpu_ptr(&exception_data) */
+ 	addil LT%exception_data,%r27
+ 	LDREG RT%exception_data(%r1),\t2
++	/* %r27 = t2->fault_gp - restore gp */
++	LDREG EXCDATA_GP(\t2), %r27
+ 	/* t1 = t2->fault_ip */
+ 	LDREG EXCDATA_IP(\t2), \t1
+ 	.endm
+diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
+index a762864ec92e..f9064449908a 100644
+--- a/arch/parisc/mm/fault.c
++++ b/arch/parisc/mm/fault.c
+@@ -151,6 +151,7 @@ int fixup_exception(struct pt_regs *regs)
+ 		struct exception_data *d;
+ 		d = this_cpu_ptr(&exception_data);
+ 		d->fault_ip = regs->iaoq[0];
++		d->fault_gp = regs->gr[27];
+ 		d->fault_space = regs->isr;
+ 		d->fault_addr = regs->ior;
+ 
+diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
+index e4396a7d0f7c..4afe66aa1400 100644
+--- a/arch/powerpc/include/asm/word-at-a-time.h
++++ b/arch/powerpc/include/asm/word-at-a-time.h
+@@ -82,7 +82,7 @@ static inline unsigned long create_zero_mask(unsigned long bits)
+ 	    "andc	%1,%1,%2\n\t"
+ 	    "popcntd	%0,%1"
+ 		: "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask)
+-		: "r" (bits));
++		: "b" (bits));
+ 
+ 	return leading_zero_bits;
+ }
+diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
+index 8dde19962a5b..f63c96cd3608 100644
+--- a/arch/powerpc/include/uapi/asm/cputable.h
++++ b/arch/powerpc/include/uapi/asm/cputable.h
+@@ -31,6 +31,7 @@
+ #define PPC_FEATURE_PSERIES_PERFMON_COMPAT \
+ 					0x00000040
+ 
++/* Reserved - do not use		0x00000004 */
+ #define PPC_FEATURE_TRUE_LE		0x00000002
+ #define PPC_FEATURE_PPC_LE		0x00000001
+ 
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index 3c5736e52a14..54ed9c7663e6 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -854,7 +854,7 @@ void restore_tm_state(struct pt_regs *regs)
+ static inline void save_sprs(struct thread_struct *t)
+ {
+ #ifdef CONFIG_ALTIVEC
+-	if (cpu_has_feature(cpu_has_feature(CPU_FTR_ALTIVEC)))
++	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+ 		t->vrsave = mfspr(SPRN_VRSAVE);
+ #endif
+ #ifdef CONFIG_PPC_BOOK3S_64
+diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
+index 7030b035905d..a15fe1d4e84a 100644
+--- a/arch/powerpc/kernel/prom.c
++++ b/arch/powerpc/kernel/prom.c
+@@ -148,23 +148,25 @@ static struct ibm_pa_feature {
+ 	unsigned long	cpu_features;	/* CPU_FTR_xxx bit */
+ 	unsigned long	mmu_features;	/* MMU_FTR_xxx bit */
+ 	unsigned int	cpu_user_ftrs;	/* PPC_FEATURE_xxx bit */
++	unsigned int	cpu_user_ftrs2;	/* PPC_FEATURE2_xxx bit */
+ 	unsigned char	pabyte;		/* byte number in ibm,pa-features */
+ 	unsigned char	pabit;		/* bit number (big-endian) */
+ 	unsigned char	invert;		/* if 1, pa bit set => clear feature */
+ } ibm_pa_features[] __initdata = {
+-	{0, 0, PPC_FEATURE_HAS_MMU,	0, 0, 0},
+-	{0, 0, PPC_FEATURE_HAS_FPU,	0, 1, 0},
+-	{CPU_FTR_CTRL, 0, 0,		0, 3, 0},
+-	{CPU_FTR_NOEXECUTE, 0, 0,	0, 6, 0},
+-	{CPU_FTR_NODSISRALIGN, 0, 0,	1, 1, 1},
+-	{0, MMU_FTR_CI_LARGE_PAGE, 0,	1, 2, 0},
+-	{CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
++	{0, 0, PPC_FEATURE_HAS_MMU, 0,		0, 0, 0},
++	{0, 0, PPC_FEATURE_HAS_FPU, 0,		0, 1, 0},
++	{CPU_FTR_CTRL, 0, 0, 0,			0, 3, 0},
++	{CPU_FTR_NOEXECUTE, 0, 0, 0,		0, 6, 0},
++	{CPU_FTR_NODSISRALIGN, 0, 0, 0,		1, 1, 1},
++	{0, MMU_FTR_CI_LARGE_PAGE, 0, 0,		1, 2, 0},
++	{CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 0, 5, 0, 0},
+ 	/*
+-	 * If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n),
+-	 * we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP
+-	 * which is 0 if the kernel doesn't support TM.
++	 * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
++	 * we don't want to turn on TM here, so we use the *_COMP versions
++	 * which are 0 if the kernel doesn't support TM.
+ 	 */
+-	{CPU_FTR_TM_COMP, 0, 0,		22, 0, 0},
++	{CPU_FTR_TM_COMP, 0, 0,
++	 PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
+ };
+ 
+ static void __init scan_features(unsigned long node, const unsigned char *ftrs,
+@@ -195,10 +197,12 @@ static void __init scan_features(unsigned long node, const unsigned char *ftrs,
+ 		if (bit ^ fp->invert) {
+ 			cur_cpu_spec->cpu_features |= fp->cpu_features;
+ 			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
++			cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
+ 			cur_cpu_spec->mmu_features |= fp->mmu_features;
+ 		} else {
+ 			cur_cpu_spec->cpu_features &= ~fp->cpu_features;
+ 			cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
++			cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
+ 			cur_cpu_spec->mmu_features &= ~fp->mmu_features;
+ 		}
+ 	}
+diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
+index 744e24bcb85c..4a811ca7ac9d 100644
+--- a/arch/powerpc/mm/hugetlbpage.c
++++ b/arch/powerpc/mm/hugetlbpage.c
+@@ -414,13 +414,13 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
+ {
+ 	struct hugepd_freelist **batchp;
+ 
+-	batchp = this_cpu_ptr(&hugepd_freelist_cur);
++	batchp = &get_cpu_var(hugepd_freelist_cur);
+ 
+ 	if (atomic_read(&tlb->mm->mm_users) < 2 ||
+ 	    cpumask_equal(mm_cpumask(tlb->mm),
+ 			  cpumask_of(smp_processor_id()))) {
+ 		kmem_cache_free(hugepte_cache, hugepte);
+-        put_cpu_var(hugepd_freelist_cur);
++		put_cpu_var(hugepd_freelist_cur);
+ 		return;
+ 	}
+ 
+diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
+index c873e682b67f..6dafabb6ae1a 100644
+--- a/arch/s390/include/asm/pci.h
++++ b/arch/s390/include/asm/pci.h
+@@ -45,7 +45,8 @@ struct zpci_fmb {
+ 	u64 rpcit_ops;
+ 	u64 dma_rbytes;
+ 	u64 dma_wbytes;
+-} __packed __aligned(16);
++	u64 pad[2];
++} __packed __aligned(128);
+ 
+ enum zpci_state {
+ 	ZPCI_FN_STATE_RESERVED,
+diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
+index cd5a191381b9..c920b81be5bb 100644
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -1199,114 +1199,12 @@ cleanup_critical:
+ 	.quad	.Lpsw_idle_lpsw
+ 
+ .Lcleanup_save_fpu_regs:
+-	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
+-	bor	%r14
+-	clg	%r9,BASED(.Lcleanup_save_fpu_regs_done)
+-	jhe	5f
+-	clg	%r9,BASED(.Lcleanup_save_fpu_regs_fp)
+-	jhe	4f
+-	clg	%r9,BASED(.Lcleanup_save_fpu_regs_vx_high)
+-	jhe	3f
+-	clg	%r9,BASED(.Lcleanup_save_fpu_regs_vx_low)
+-	jhe	2f
+-	clg	%r9,BASED(.Lcleanup_save_fpu_fpc_end)
+-	jhe	1f
+-	lg	%r2,__LC_CURRENT
+-	aghi	%r2,__TASK_thread
+-0:	# Store floating-point controls
+-	stfpc	__THREAD_FPU_fpc(%r2)
+-1:	# Load register save area and check if VX is active
+-	lg	%r3,__THREAD_FPU_regs(%r2)
+-	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
+-	jz	4f			  # no VX -> store FP regs
+-2:	# Store vector registers (V0-V15)
+-	VSTM	%v0,%v15,0,%r3		  # vstm 0,15,0(3)
+-3:	# Store vector registers (V16-V31)
+-	VSTM	%v16,%v31,256,%r3	  # vstm 16,31,256(3)
+-	j	5f			  # -> done, set CIF_FPU flag
+-4:	# Store floating-point registers
+-	std	0,0(%r3)
+-	std	1,8(%r3)
+-	std	2,16(%r3)
+-	std	3,24(%r3)
+-	std	4,32(%r3)
+-	std	5,40(%r3)
+-	std	6,48(%r3)
+-	std	7,56(%r3)
+-	std	8,64(%r3)
+-	std	9,72(%r3)
+-	std	10,80(%r3)
+-	std	11,88(%r3)
+-	std	12,96(%r3)
+-	std	13,104(%r3)
+-	std	14,112(%r3)
+-	std	15,120(%r3)
+-5:	# Set CIF_FPU flag
+-	oi	__LC_CPU_FLAGS+7,_CIF_FPU
+-	lg	%r9,48(%r11)		# return from save_fpu_regs
++	larl	%r9,save_fpu_regs
+ 	br	%r14
+-.Lcleanup_save_fpu_fpc_end:
+-	.quad	.Lsave_fpu_regs_fpc_end
+-.Lcleanup_save_fpu_regs_vx_low:
+-	.quad	.Lsave_fpu_regs_vx_low
+-.Lcleanup_save_fpu_regs_vx_high:
+-	.quad	.Lsave_fpu_regs_vx_high
+-.Lcleanup_save_fpu_regs_fp:
+-	.quad	.Lsave_fpu_regs_fp
+-.Lcleanup_save_fpu_regs_done:
+-	.quad	.Lsave_fpu_regs_done
+ 
+ .Lcleanup_load_fpu_regs:
+-	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
+-	bnor	%r14
+-	clg	%r9,BASED(.Lcleanup_load_fpu_regs_done)
+-	jhe	1f
+-	clg	%r9,BASED(.Lcleanup_load_fpu_regs_fp)
+-	jhe	2f
+-	clg	%r9,BASED(.Lcleanup_load_fpu_regs_vx_high)
+-	jhe	3f
+-	clg	%r9,BASED(.Lcleanup_load_fpu_regs_vx)
+-	jhe	4f
+-	lg	%r4,__LC_CURRENT
+-	aghi	%r4,__TASK_thread
+-	lfpc	__THREAD_FPU_fpc(%r4)
+-	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
+-	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
+-	jz	2f				# -> no VX, load FP regs
+-4:	# Load V0 ..V15 registers
+-	VLM	%v0,%v15,0,%r4
+-3:	# Load V16..V31 registers
+-	VLM	%v16,%v31,256,%r4
+-	j	1f
+-2:	# Load floating-point registers
+-	ld	0,0(%r4)
+-	ld	1,8(%r4)
+-	ld	2,16(%r4)
+-	ld	3,24(%r4)
+-	ld	4,32(%r4)
+-	ld	5,40(%r4)
+-	ld	6,48(%r4)
+-	ld	7,56(%r4)
+-	ld	8,64(%r4)
+-	ld	9,72(%r4)
+-	ld	10,80(%r4)
+-	ld	11,88(%r4)
+-	ld	12,96(%r4)
+-	ld	13,104(%r4)
+-	ld	14,112(%r4)
+-	ld	15,120(%r4)
+-1:	# Clear CIF_FPU bit
+-	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU
+-	lg	%r9,48(%r11)		# return from load_fpu_regs
++	larl	%r9,load_fpu_regs
+ 	br	%r14
+-.Lcleanup_load_fpu_regs_vx:
+-	.quad	.Lload_fpu_regs_vx
+-.Lcleanup_load_fpu_regs_vx_high:
+-	.quad	.Lload_fpu_regs_vx_high
+-.Lcleanup_load_fpu_regs_fp:
+-	.quad	.Lload_fpu_regs_fp
+-.Lcleanup_load_fpu_regs_done:
+-	.quad	.Lload_fpu_regs_done
+ 
+ /*
+  * Integer constants
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index 9220db5c996a..93fc63ef6e95 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -327,6 +327,7 @@ static void __init setup_lowcore(void)
+ 		+ PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
+ 	lc->current_task = (unsigned long) init_thread_union.thread_info.task;
+ 	lc->thread_info = (unsigned long) &init_thread_union;
++	lc->lpp = LPP_MAGIC;
+ 	lc->machine_flags = S390_lowcore.machine_flags;
+ 	lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
+ 	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
+diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
+index 13dab0c1645c..3776aca22082 100644
+--- a/arch/s390/mm/gup.c
++++ b/arch/s390/mm/gup.c
+@@ -20,9 +20,9 @@
+ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
+ 		unsigned long end, int write, struct page **pages, int *nr)
+ {
++	struct page *head, *page;
+ 	unsigned long mask;
+ 	pte_t *ptep, pte;
+-	struct page *page;
+ 
+ 	mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
+ 
+@@ -37,12 +37,14 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
+ 			return 0;
+ 		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+ 		page = pte_page(pte);
+-		if (!page_cache_get_speculative(page))
++		head = compound_head(page);
++		if (!page_cache_get_speculative(head))
+ 			return 0;
+ 		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
+-			put_page(page);
++			put_page(head);
+ 			return 0;
+ 		}
++		VM_BUG_ON_PAGE(compound_head(page) != head, page);
+ 		pages[*nr] = page;
+ 		(*nr)++;
+ 
+diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
+index 8f19c8f9d660..8f75edc998ff 100644
+--- a/arch/s390/pci/pci.c
++++ b/arch/s390/pci/pci.c
+@@ -864,8 +864,11 @@ static inline int barsize(u8 size)
+ 
+ static int zpci_mem_init(void)
+ {
++	BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
++		     __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));
++
+ 	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
+-				16, 0, NULL);
++					   __alignof__(struct zpci_fmb), 0, NULL);
+ 	if (!zdev_fmb_cache)
+ 		goto error_fmb;
+ 
+diff --git a/arch/sh/mm/kmap.c b/arch/sh/mm/kmap.c
+index ec29e14ec5a8..bf25d7c79a2d 100644
+--- a/arch/sh/mm/kmap.c
++++ b/arch/sh/mm/kmap.c
+@@ -36,6 +36,7 @@ void *kmap_coherent(struct page *page, unsigned long addr)
+ 
+ 	BUG_ON(!test_bit(PG_dcache_clean, &page->flags));
+ 
++	preempt_disable();
+ 	pagefault_disable();
+ 
+ 	idx = FIX_CMAP_END -
+@@ -64,4 +65,5 @@ void kunmap_coherent(void *kvaddr)
+ 	}
+ 
+ 	pagefault_enable();
++	preempt_enable();
+ }
+diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
+index b821b13d343a..8a6b57108ac2 100644
+--- a/arch/um/drivers/mconsole_kern.c
++++ b/arch/um/drivers/mconsole_kern.c
+@@ -133,7 +133,7 @@ void mconsole_proc(struct mc_request *req)
+ 	ptr += strlen("proc");
+ 	ptr = skip_spaces(ptr);
+ 
+-	file = file_open_root(mnt->mnt_root, mnt, ptr, O_RDONLY);
++	file = file_open_root(mnt->mnt_root, mnt, ptr, O_RDONLY, 0);
+ 	if (IS_ERR(file)) {
+ 		mconsole_reply(req, "Failed to open file", 1, 0);
+ 		printk(KERN_ERR "open /proc/%s: %ld\n", ptr, PTR_ERR(file));
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index c46662f64c39..3bf45a0cd69e 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -1160,22 +1160,23 @@ config MICROCODE
+ 	bool "CPU microcode loading support"
+ 	default y
+ 	depends on CPU_SUP_AMD || CPU_SUP_INTEL
+-	depends on BLK_DEV_INITRD
+ 	select FW_LOADER
+ 	---help---
+-
+ 	  If you say Y here, you will be able to update the microcode on
+-	  certain Intel and AMD processors. The Intel support is for the
+-	  IA32 family, e.g. Pentium Pro, Pentium II, Pentium III, Pentium 4,
+-	  Xeon etc. The AMD support is for families 0x10 and later. You will
+-	  obviously need the actual microcode binary data itself which is not
+-	  shipped with the Linux kernel.
+-
+-	  This option selects the general module only, you need to select
+-	  at least one vendor specific module as well.
+-
+-	  To compile this driver as a module, choose M here: the module
+-	  will be called microcode.
++	  Intel and AMD processors. The Intel support is for the IA32 family,
++	  e.g. Pentium Pro, Pentium II, Pentium III, Pentium 4, Xeon etc. The
++	  AMD support is for families 0x10 and later. You will obviously need
++	  the actual microcode binary data itself which is not shipped with
++	  the Linux kernel.
++
++	  The preferred method to load microcode from a detached initrd is described
++	  in Documentation/x86/early-microcode.txt. For that you need to enable
++	  CONFIG_BLK_DEV_INITRD in order for the loader to be able to scan the
++	  initrd for microcode blobs.
++
++	  In addition, you can build-in the microcode into the kernel. For that you
++	  need to enable FIRMWARE_IN_KERNEL and add the vendor-supplied microcode
++	  to the CONFIG_EXTRA_FIRMWARE config option.
+ 
+ config MICROCODE_INTEL
+ 	bool "Intel microcode loading support"
+diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c
+index a841e9765bd6..8381c09d2870 100644
+--- a/arch/x86/crypto/sha-mb/sha1_mb.c
++++ b/arch/x86/crypto/sha-mb/sha1_mb.c
+@@ -453,10 +453,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
+ 
+ 			req = cast_mcryptd_ctx_to_req(req_ctx);
+ 			if (irqs_disabled())
+-				rctx->complete(&req->base, ret);
++				req_ctx->complete(&req->base, ret);
+ 			else {
+ 				local_bh_disable();
+-				rctx->complete(&req->base, ret);
++				req_ctx->complete(&req->base, ret);
+ 				local_bh_enable();
+ 			}
+ 		}
+diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
+index 03663740c866..1a4477cedc49 100644
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -268,6 +268,7 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
+ /* Called with IRQs disabled. */
+ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
+ {
++	struct thread_info *ti = pt_regs_to_thread_info(regs);
+ 	u32 cached_flags;
+ 
+ 	if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
+@@ -275,12 +276,22 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
+ 
+ 	lockdep_sys_exit();
+ 
+-	cached_flags =
+-		READ_ONCE(pt_regs_to_thread_info(regs)->flags);
++	cached_flags = READ_ONCE(ti->flags);
+ 
+ 	if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
+ 		exit_to_usermode_loop(regs, cached_flags);
+ 
++#ifdef CONFIG_COMPAT
++	/*
++	 * Compat syscalls set TS_COMPAT.  Make sure we clear it before
++	 * returning to user mode.  We need to clear it *after* signal
++	 * handling, because syscall restart has a fixup for compat
++	 * syscalls.  The fixup is exercised by the ptrace_syscall_32
++	 * selftest.
++	 */
++	ti->status &= ~TS_COMPAT;
++#endif
++
+ 	user_enter();
+ }
+ 
+@@ -332,14 +343,6 @@ __visible inline void syscall_return_slowpath(struct pt_regs *regs)
+ 	if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
+ 		syscall_slow_exit_work(regs, cached_flags);
+ 
+-#ifdef CONFIG_COMPAT
+-	/*
+-	 * Compat syscalls set TS_COMPAT.  Make sure we clear it before
+-	 * returning to user mode.
+-	 */
+-	ti->status &= ~TS_COMPAT;
+-#endif
+-
+ 	local_irq_disable();
+ 	prepare_exit_to_usermode(regs);
+ }
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index c80f6b6f3da2..e8c4fba52d3d 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -644,8 +644,8 @@ static inline void entering_irq(void)
+ 
+ static inline void entering_ack_irq(void)
+ {
+-	ack_APIC_irq();
+ 	entering_irq();
++	ack_APIC_irq();
+ }
+ 
+ static inline void ipi_entering_ack_irq(void)
+diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
+index f8a29d2c97b0..e6a8613fbfb0 100644
+--- a/arch/x86/include/asm/hugetlb.h
++++ b/arch/x86/include/asm/hugetlb.h
+@@ -4,6 +4,7 @@
+ #include <asm/page.h>
+ #include <asm-generic/hugetlb.h>
+ 
++#define hugepages_supported() cpu_has_pse
+ 
+ static inline int is_hugepage_only_range(struct mm_struct *mm,
+ 					 unsigned long addr,
+diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
+index 1815b736269d..84b3d194a958 100644
+--- a/arch/x86/include/asm/hw_irq.h
++++ b/arch/x86/include/asm/hw_irq.h
+@@ -141,6 +141,7 @@ struct irq_alloc_info {
+ struct irq_cfg {
+ 	unsigned int		dest_apicid;
+ 	u8			vector;
++	u8			old_vector;
+ };
+ 
+ extern struct irq_cfg *irq_cfg(unsigned int irq);
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 44adbb819041..f8dba2097c40 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -42,7 +42,7 @@
+ 
+ #define KVM_PIO_PAGE_OFFSET 1
+ #define KVM_COALESCED_MMIO_PAGE_OFFSET 2
+-#define KVM_HALT_POLL_NS_DEFAULT 500000
++#define KVM_HALT_POLL_NS_DEFAULT 400000
+ 
+ #define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS
+ 
+diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
+index 1e1b07a5a738..9d3a96c4da78 100644
+--- a/arch/x86/include/asm/microcode.h
++++ b/arch/x86/include/asm/microcode.h
+@@ -3,6 +3,7 @@
+ 
+ #include <asm/cpu.h>
+ #include <linux/earlycpio.h>
++#include <linux/initrd.h>
+ 
+ #define native_rdmsr(msr, val1, val2)			\
+ do {							\
+@@ -143,4 +144,29 @@ static inline void reload_early_microcode(void)			{ }
+ static inline bool
+ get_builtin_firmware(struct cpio_data *cd, const char *name)	{ return false; }
+ #endif
++
++static inline unsigned long get_initrd_start(void)
++{
++#ifdef CONFIG_BLK_DEV_INITRD
++	return initrd_start;
++#else
++	return 0;
++#endif
++}
++
++static inline unsigned long get_initrd_start_addr(void)
++{
++#ifdef CONFIG_BLK_DEV_INITRD
++#ifdef CONFIG_X86_32
++	unsigned long *initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
++
++	return (unsigned long)__pa_nodebug(*initrd_start_p);
++#else
++	return get_initrd_start();
++#endif
++#else /* CONFIG_BLK_DEV_INITRD */
++	return 0;
++#endif
++}
++
+ #endif /* _ASM_X86_MICROCODE_H */
+diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
+index 7bcb861a04e5..5a2ed3ed2f26 100644
+--- a/arch/x86/include/asm/perf_event.h
++++ b/arch/x86/include/asm/perf_event.h
+@@ -165,6 +165,7 @@ struct x86_pmu_capability {
+ #define GLOBAL_STATUS_ASIF				BIT_ULL(60)
+ #define GLOBAL_STATUS_COUNTERS_FROZEN			BIT_ULL(59)
+ #define GLOBAL_STATUS_LBRS_FROZEN			BIT_ULL(58)
++#define GLOBAL_STATUS_TRACE_TOPAPMI			BIT_ULL(55)
+ 
+ /*
+  * IBS cpuid feature detection
+diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
+index 8b2d4bea9962..39171b3646bb 100644
+--- a/arch/x86/include/asm/xen/hypervisor.h
++++ b/arch/x86/include/asm/xen/hypervisor.h
+@@ -62,4 +62,6 @@ void xen_arch_register_cpu(int num);
+ void xen_arch_unregister_cpu(int num);
+ #endif
+ 
++extern void xen_set_iopl_mask(unsigned mask);
++
+ #endif /* _ASM_X86_XEN_HYPERVISOR_H */
+diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
+index 3b670df4ba7b..ef495511f019 100644
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -213,6 +213,7 @@ update:
+ 	 */
+ 	cpumask_and(d->old_domain, d->old_domain, cpu_online_mask);
+ 	d->move_in_progress = !cpumask_empty(d->old_domain);
++	d->cfg.old_vector = d->move_in_progress ? d->cfg.vector : 0;
+ 	d->cfg.vector = vector;
+ 	cpumask_copy(d->domain, vector_cpumask);
+ success:
+@@ -255,7 +256,8 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
+ 	struct irq_desc *desc;
+ 	int cpu, vector;
+ 
+-	BUG_ON(!data->cfg.vector);
++	if (!data->cfg.vector)
++		return;
+ 
+ 	vector = data->cfg.vector;
+ 	for_each_cpu_and(cpu, data->domain, cpu_online_mask)
+@@ -655,46 +657,97 @@ void irq_complete_move(struct irq_cfg *cfg)
+ }
+ 
+ /*
+- * Called with @desc->lock held and interrupts disabled.
++ * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
+  */
+ void irq_force_complete_move(struct irq_desc *desc)
+ {
+ 	struct irq_data *irqdata = irq_desc_get_irq_data(desc);
+ 	struct apic_chip_data *data = apic_chip_data(irqdata);
+ 	struct irq_cfg *cfg = data ? &data->cfg : NULL;
++	unsigned int cpu;
+ 
+ 	if (!cfg)
+ 		return;
+ 
+-	__irq_complete_move(cfg, cfg->vector);
+-
+ 	/*
+ 	 * This is tricky. If the cleanup of @data->old_domain has not been
+ 	 * done yet, then the following setaffinity call will fail with
+ 	 * -EBUSY. This can leave the interrupt in a stale state.
+ 	 *
+-	 * The cleanup cannot make progress because we hold @desc->lock. So in
+-	 * case @data->old_domain is not yet cleaned up, we need to drop the
+-	 * lock and acquire it again. @desc cannot go away, because the
+-	 * hotplug code holds the sparse irq lock.
++	 * All CPUs are stuck in stop machine with interrupts disabled so
++	 * calling __irq_complete_move() would be completely pointless.
+ 	 */
+ 	raw_spin_lock(&vector_lock);
+-	/* Clean out all offline cpus (including ourself) first. */
++	/*
++	 * Clean out all offline cpus (including the outgoing one) from the
++	 * old_domain mask.
++	 */
+ 	cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
+-	while (!cpumask_empty(data->old_domain)) {
++
++	/*
++	 * If move_in_progress is cleared and the old_domain mask is empty,
++	 * then there is nothing to cleanup. fixup_irqs() will take care of
++	 * the stale vectors on the outgoing cpu.
++	 */
++	if (!data->move_in_progress && cpumask_empty(data->old_domain)) {
+ 		raw_spin_unlock(&vector_lock);
+-		raw_spin_unlock(&desc->lock);
+-		cpu_relax();
+-		raw_spin_lock(&desc->lock);
++		return;
++	}
++
++	/*
++	 * 1) The interrupt is in move_in_progress state. That means that we
++	 *    have not seen an interrupt since the io_apic was reprogrammed to
++	 *    the new vector.
++	 *
++	 * 2) The interrupt has fired on the new vector, but the cleanup IPIs
++	 *    have not been processed yet.
++	 */
++	if (data->move_in_progress) {
+ 		/*
+-		 * Reevaluate apic_chip_data. It might have been cleared after
+-		 * we dropped @desc->lock.
++		 * In theory there is a race:
++		 *
++		 * set_ioapic(new_vector) <-- Interrupt is raised before update
++		 *			      is effective, i.e. it's raised on
++		 *			      the old vector.
++		 *
++		 * So if the target cpu cannot handle that interrupt before
++		 * the old vector is cleaned up, we get a spurious interrupt
++		 * and in the worst case the ioapic irq line becomes stale.
++		 *
++		 * But in case of cpu hotplug this should be a non issue
++		 * because if the affinity update happens right before all
++		 * cpus rendevouz in stop machine, there is no way that the
++		 * interrupt can be blocked on the target cpu because all cpus
++		 * loops first with interrupts enabled in stop machine, so the
++		 * old vector is not yet cleaned up when the interrupt fires.
++		 *
++		 * So the only way to run into this issue is if the delivery
++		 * of the interrupt on the apic/system bus would be delayed
++		 * beyond the point where the target cpu disables interrupts
++		 * in stop machine. I doubt that it can happen, but at least
++		 * there is a theroretical chance. Virtualization might be
++		 * able to expose this, but AFAICT the IOAPIC emulation is not
++		 * as stupid as the real hardware.
++		 *
++		 * Anyway, there is nothing we can do about that at this point
++		 * w/o refactoring the whole fixup_irq() business completely.
++		 * We print at least the irq number and the old vector number,
++		 * so we have the necessary information when a problem in that
++		 * area arises.
+ 		 */
+-		data = apic_chip_data(irqdata);
+-		if (!data)
+-			return;
+-		raw_spin_lock(&vector_lock);
++		pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
++			irqdata->irq, cfg->old_vector);
+ 	}
++	/*
++	 * If old_domain is not empty, then other cpus still have the irq
++	 * descriptor set in their vector array. Clean it up.
++	 */
++	for_each_cpu(cpu, data->old_domain)
++		per_cpu(vector_irq, cpu)[cfg->old_vector] = VECTOR_UNUSED;
++
++	/* Cleanup the left overs of the (half finished) move */
++	cpumask_clear(data->old_domain);
++	data->move_in_progress = 0;
+ 	raw_spin_unlock(&vector_lock);
+ }
+ #endif
+diff --git a/arch/x86/kernel/cpu/mcheck/mce-genpool.c b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
+index 0a850100c594..2658e2af74ec 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce-genpool.c
++++ b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
+@@ -29,7 +29,7 @@ static char gen_pool_buf[MCE_POOLSZ];
+ void mce_gen_pool_process(void)
+ {
+ 	struct llist_node *head;
+-	struct mce_evt_llist *node;
++	struct mce_evt_llist *node, *tmp;
+ 	struct mce *mce;
+ 
+ 	head = llist_del_all(&mce_event_llist);
+@@ -37,7 +37,7 @@ void mce_gen_pool_process(void)
+ 		return;
+ 
+ 	head = llist_reverse_order(head);
+-	llist_for_each_entry(node, head, llnode) {
++	llist_for_each_entry_safe(node, tmp, head, llnode) {
+ 		mce = &node->mce;
+ 		atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
+ 		gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node));
+diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
+index 2c5aaf8c2e2f..05538582a809 100644
+--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
++++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
+@@ -385,6 +385,9 @@ static void intel_thermal_interrupt(void)
+ {
+ 	__u64 msr_val;
+ 
++	if (static_cpu_has(X86_FEATURE_HWP))
++		wrmsrl_safe(MSR_HWP_STATUS, 0);
++
+ 	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
+ 
+ 	/* Check for violation of core thermal thresholds*/
+diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
+index ee81c544ee0d..4f4735bd8698 100644
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -551,10 +551,14 @@ scan_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
+ 	cd.data = NULL;
+ 	cd.size = 0;
+ 
+-	cd = find_cpio_data(p, (void *)start, size, &offset);
+-	if (!cd.data) {
++	/* try built-in microcode if no initrd */
++	if (!size) {
+ 		if (!load_builtin_intel_microcode(&cd))
+ 			return UCODE_ERROR;
++	} else {
++		cd = find_cpio_data(p, (void *)start, size, &offset);
++		if (!cd.data)
++			return UCODE_ERROR;
+ 	}
+ 
+ 	return get_matching_model_microcode(0, start, cd.data, cd.size,
+@@ -690,7 +694,7 @@ int __init save_microcode_in_initrd_intel(void)
+ 	if (count == 0)
+ 		return ret;
+ 
+-	copy_initrd_ptrs(mc_saved, mc_saved_in_initrd, initrd_start, count);
++	copy_initrd_ptrs(mc_saved, mc_saved_in_initrd, get_initrd_start(), count);
+ 	ret = save_microcode(&mc_saved_data, mc_saved, count);
+ 	if (ret)
+ 		pr_err("Cannot save microcode patches from initrd.\n");
+@@ -728,16 +732,20 @@ void __init load_ucode_intel_bsp(void)
+ 	struct boot_params *p;
+ 
+ 	p	= (struct boot_params *)__pa_nodebug(&boot_params);
+-	start	= p->hdr.ramdisk_image;
+ 	size	= p->hdr.ramdisk_size;
+ 
+-	_load_ucode_intel_bsp(
+-			(struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
+-			(unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
+-			start, size);
++	/*
++	 * Set start only if we have an initrd image. We cannot use initrd_start
++	 * because it is not set that early yet.
++	 */
++	start	= (size ? p->hdr.ramdisk_image : 0);
++
++	_load_ucode_intel_bsp((struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
++			      (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
++			      start, size);
+ #else
+-	start	= boot_params.hdr.ramdisk_image + PAGE_OFFSET;
+ 	size	= boot_params.hdr.ramdisk_size;
++	start	= (size ? boot_params.hdr.ramdisk_image + PAGE_OFFSET : 0);
+ 
+ 	_load_ucode_intel_bsp(&mc_saved_data, mc_saved_in_initrd, start, size);
+ #endif
+@@ -748,20 +756,14 @@ void load_ucode_intel_ap(void)
+ 	struct mc_saved_data *mc_saved_data_p;
+ 	struct ucode_cpu_info uci;
+ 	unsigned long *mc_saved_in_initrd_p;
+-	unsigned long initrd_start_addr;
+ 	enum ucode_state ret;
+ #ifdef CONFIG_X86_32
+-	unsigned long *initrd_start_p;
+ 
+-	mc_saved_in_initrd_p =
+-		(unsigned long *)__pa_nodebug(mc_saved_in_initrd);
++	mc_saved_in_initrd_p = (unsigned long *)__pa_nodebug(mc_saved_in_initrd);
+ 	mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
+-	initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
+-	initrd_start_addr = (unsigned long)__pa_nodebug(*initrd_start_p);
+ #else
+-	mc_saved_data_p = &mc_saved_data;
+ 	mc_saved_in_initrd_p = mc_saved_in_initrd;
+-	initrd_start_addr = initrd_start;
++	mc_saved_data_p = &mc_saved_data;
+ #endif
+ 
+ 	/*
+@@ -773,7 +775,7 @@ void load_ucode_intel_ap(void)
+ 
+ 	collect_cpu_info_early(&uci);
+ 	ret = load_microcode(mc_saved_data_p, mc_saved_in_initrd_p,
+-			     initrd_start_addr, &uci);
++			     get_initrd_start_addr(), &uci);
+ 
+ 	if (ret != UCODE_OK)
+ 		return;
+diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
+index 1b443db2db50..6532f5b40646 100644
+--- a/arch/x86/kernel/cpu/perf_event.c
++++ b/arch/x86/kernel/cpu/perf_event.c
+@@ -596,6 +596,19 @@ void x86_pmu_disable_all(void)
+ 	}
+ }
+ 
++/*
++ * There may be PMI landing after enabled=0. The PMI hitting could be before or
++ * after disable_all.
++ *
++ * If PMI hits before disable_all, the PMU will be disabled in the NMI handler.
++ * It will not be re-enabled in the NMI handler again, because enabled=0. After
++ * handling the NMI, disable_all will be called, which will not change the
++ * state either. If PMI hits after disable_all, the PMU is already disabled
++ * before entering NMI handler. The NMI handler will not change the state
++ * either.
++ *
++ * So either situation is harmless.
++ */
+ static void x86_pmu_disable(struct pmu *pmu)
+ {
+ 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
+index 7bb61e32fb29..98be6d6d32fa 100644
+--- a/arch/x86/kernel/cpu/perf_event.h
++++ b/arch/x86/kernel/cpu/perf_event.h
+@@ -586,6 +586,7 @@ struct x86_pmu {
+ 			pebs_broken	:1,
+ 			pebs_prec_dist	:1;
+ 	int		pebs_record_size;
++	int		pebs_buffer_size;
+ 	void		(*drain_pebs)(struct pt_regs *regs);
+ 	struct event_constraint *pebs_constraints;
+ 	void		(*pebs_aliases)(struct perf_event *event);
+@@ -904,6 +905,8 @@ void intel_pmu_lbr_init_skl(void);
+ 
+ void intel_pmu_lbr_init_knl(void);
+ 
++void intel_pmu_pebs_data_source_nhm(void);
++
+ int intel_pmu_setup_lbr_filter(struct perf_event *event);
+ 
+ void intel_pt_interrupt(void);
+diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
+index fed2ab1f1065..760aec1e8f82 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel.c
++++ b/arch/x86/kernel/cpu/perf_event_intel.c
+@@ -1502,7 +1502,15 @@ static __initconst const u64 knl_hw_cache_extra_regs
+ };
+ 
+ /*
+- * Use from PMIs where the LBRs are already disabled.
++ * Used from PMIs where the LBRs are already disabled.
++ *
++ * This function could be called consecutively. It is required to remain in
++ * disabled state if called consecutively.
++ *
++ * During consecutive calls, the same disable value will be written to related
++ * registers, so the PMU state remains unchanged. hw.state in
++ * intel_bts_disable_local will remain PERF_HES_STOPPED too in consecutive
++ * calls.
+  */
+ static void __intel_pmu_disable_all(void)
+ {
+@@ -1884,6 +1892,16 @@ again:
+ 	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
+ 		handled++;
+ 		x86_pmu.drain_pebs(regs);
++		/*
++		 * There are cases where, even though, the PEBS ovfl bit is set
++		 * in GLOBAL_OVF_STATUS, the PEBS events may also have their
++		 * overflow bits set for their counters. We must clear them
++		 * here because they have been processed as exact samples in
++		 * the drain_pebs() routine. They must not be processed again
++		 * in the for_each_bit_set() loop for regular samples below.
++		 */
++		status &= ~cpuc->pebs_enabled;
++		status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
+ 	}
+ 
+ 	/*
+@@ -1929,7 +1947,10 @@ again:
+ 		goto again;
+ 
+ done:
+-	__intel_pmu_enable_all(0, true);
++	/* Only restore PMU state when it's active. See x86_pmu_disable(). */
++	if (cpuc->enabled)
++		__intel_pmu_enable_all(0, true);
++
+ 	/*
+ 	 * Only unmask the NMI after the overflow counters
+ 	 * have been reset. This avoids spurious NMIs on
+@@ -3396,6 +3417,7 @@ __init int intel_pmu_init(void)
+ 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
+ 			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
+ 
++		intel_pmu_pebs_data_source_nhm();
+ 		x86_add_quirk(intel_nehalem_quirk);
+ 
+ 		pr_cont("Nehalem events, ");
+@@ -3459,6 +3481,7 @@ __init int intel_pmu_init(void)
+ 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
+ 			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
+ 
++		intel_pmu_pebs_data_source_nhm();
+ 		pr_cont("Westmere events, ");
+ 		break;
+ 
+diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
+index 10602f0a438f..955140140fd4 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
+@@ -51,7 +51,8 @@ union intel_x86_pebs_dse {
+ #define OP_LH (P(OP, LOAD) | P(LVL, HIT))
+ #define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))
+ 
+-static const u64 pebs_data_source[] = {
++/* Version for Sandy Bridge and later */
++static u64 pebs_data_source[] = {
+ 	P(OP, LOAD) | P(LVL, MISS) | P(LVL, L3) | P(SNOOP, NA),/* 0x00:ukn L3 */
+ 	OP_LH | P(LVL, L1)  | P(SNOOP, NONE),	/* 0x01: L1 local */
+ 	OP_LH | P(LVL, LFB) | P(SNOOP, NONE),	/* 0x02: LFB hit */
+@@ -70,6 +71,14 @@ static const u64 pebs_data_source[] = {
+ 	OP_LH | P(LVL, UNC) | P(SNOOP, NONE), /* 0x0f: uncached */
+ };
+ 
++/* Patch up minor differences in the bits */
++void __init intel_pmu_pebs_data_source_nhm(void)
++{
++	pebs_data_source[0x05] = OP_LH | P(LVL, L3)  | P(SNOOP, HIT);
++	pebs_data_source[0x06] = OP_LH | P(LVL, L3)  | P(SNOOP, HITM);
++	pebs_data_source[0x07] = OP_LH | P(LVL, L3)  | P(SNOOP, HITM);
++}
++
+ static u64 precise_store_data(u64 status)
+ {
+ 	union intel_x86_pebs_dse dse;
+@@ -269,7 +278,7 @@ static int alloc_pebs_buffer(int cpu)
+ 	if (!x86_pmu.pebs)
+ 		return 0;
+ 
+-	buffer = kzalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL, node);
++	buffer = kzalloc_node(x86_pmu.pebs_buffer_size, GFP_KERNEL, node);
+ 	if (unlikely(!buffer))
+ 		return -ENOMEM;
+ 
+@@ -286,7 +295,7 @@ static int alloc_pebs_buffer(int cpu)
+ 		per_cpu(insn_buffer, cpu) = ibuffer;
+ 	}
+ 
+-	max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;
++	max = x86_pmu.pebs_buffer_size / x86_pmu.pebs_record_size;
+ 
+ 	ds->pebs_buffer_base = (u64)(unsigned long)buffer;
+ 	ds->pebs_index = ds->pebs_buffer_base;
+@@ -1319,6 +1328,7 @@ void __init intel_ds_init(void)
+ 
+ 	x86_pmu.bts  = boot_cpu_has(X86_FEATURE_BTS);
+ 	x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
++	x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
+ 	if (x86_pmu.pebs) {
+ 		char pebs_type = x86_pmu.intel_cap.pebs_trap ?  '+' : '-';
+ 		int format = x86_pmu.intel_cap.pebs_format;
+@@ -1327,6 +1337,14 @@ void __init intel_ds_init(void)
+ 		case 0:
+ 			printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
+ 			x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
++			/*
++			 * Using >PAGE_SIZE buffers makes the WRMSR to
++			 * PERF_GLOBAL_CTRL in intel_pmu_enable_all()
++			 * mysteriously hang on Core2.
++			 *
++			 * As a workaround, we don't do this.
++			 */
++			x86_pmu.pebs_buffer_size = PAGE_SIZE;
+ 			x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
+ 			break;
+ 
+diff --git a/arch/x86/kernel/cpu/perf_event_intel_pt.c b/arch/x86/kernel/cpu/perf_event_intel_pt.c
+index c0bbd1033b7c..a5286d0bbb43 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel_pt.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_pt.c
+@@ -695,6 +695,7 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
+ 
+ 	/* clear STOP and INT from current entry */
+ 	buf->topa_index[buf->stop_pos]->stop = 0;
++	buf->topa_index[buf->stop_pos]->intr = 0;
+ 	buf->topa_index[buf->intr_pos]->intr = 0;
+ 
+ 	/* how many pages till the STOP marker */
+@@ -719,6 +720,7 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
+ 	buf->intr_pos = idx;
+ 
+ 	buf->topa_index[buf->stop_pos]->stop = 1;
++	buf->topa_index[buf->stop_pos]->intr = 1;
+ 	buf->topa_index[buf->intr_pos]->intr = 1;
+ 
+ 	return 0;
+diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
+index 33acb884ccf1..4547b2cca71b 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
+@@ -2875,11 +2875,13 @@ static struct intel_uncore_type bdx_uncore_sbox = {
+ 	.format_group		= &hswep_uncore_sbox_format_group,
+ };
+ 
++#define BDX_MSR_UNCORE_SBOX	3
++
+ static struct intel_uncore_type *bdx_msr_uncores[] = {
+ 	&bdx_uncore_ubox,
+ 	&bdx_uncore_cbox,
+-	&bdx_uncore_sbox,
+ 	&hswep_uncore_pcu,
++	&bdx_uncore_sbox,
+ 	NULL,
+ };
+ 
+@@ -2888,6 +2890,10 @@ void bdx_uncore_cpu_init(void)
+ 	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
+ 		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
+ 	uncore_msr_uncores = bdx_msr_uncores;
++
++	/* BDX-DE doesn't have SBOX */
++	if (boot_cpu_data.x86_model == 86)
++		uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
+ }
+ 
+ static struct intel_uncore_type bdx_uncore_ha = {
+diff --git a/arch/x86/kernel/cpu/perf_event_knc.c b/arch/x86/kernel/cpu/perf_event_knc.c
+index 5b0c232d1ee6..b931095e86d4 100644
+--- a/arch/x86/kernel/cpu/perf_event_knc.c
++++ b/arch/x86/kernel/cpu/perf_event_knc.c
+@@ -263,7 +263,9 @@ again:
+ 		goto again;
+ 
+ done:
+-	knc_pmu_enable_all(0);
++	/* Only restore PMU state when it's active. See x86_pmu_disable(). */
++	if (cpuc->enabled)
++		knc_pmu_enable_all(0);
+ 
+ 	return handled;
+ }
+diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
+index 37dae792dbbe..589b3193f102 100644
+--- a/arch/x86/kernel/ioport.c
++++ b/arch/x86/kernel/ioport.c
+@@ -96,9 +96,14 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+ SYSCALL_DEFINE1(iopl, unsigned int, level)
+ {
+ 	struct pt_regs *regs = current_pt_regs();
+-	unsigned int old = (regs->flags >> 12) & 3;
+ 	struct thread_struct *t = &current->thread;
+ 
++	/*
++	 * Careful: the IOPL bits in regs->flags are undefined under Xen PV
++	 * and changing them has no effect.
++	 */
++	unsigned int old = t->iopl >> X86_EFLAGS_IOPL_BIT;
++
+ 	if (level > 3)
+ 		return -EINVAL;
+ 	/* Trying to gain more privileges? */
+@@ -106,8 +111,9 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
+ 		if (!capable(CAP_SYS_RAWIO))
+ 			return -EPERM;
+ 	}
+-	regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
+-	t->iopl = level << 12;
++	regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) |
++		(level << X86_EFLAGS_IOPL_BIT);
++	t->iopl = level << X86_EFLAGS_IOPL_BIT;
+ 	set_iopl_mask(t->iopl);
+ 
+ 	return 0;
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index b9d99e0f82c4..9f751876066f 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -48,6 +48,7 @@
+ #include <asm/syscalls.h>
+ #include <asm/debugreg.h>
+ #include <asm/switch_to.h>
++#include <asm/xen/hypervisor.h>
+ 
+ asmlinkage extern void ret_from_fork(void);
+ 
+@@ -411,6 +412,17 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ 		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
+ 		__switch_to_xtra(prev_p, next_p, tss);
+ 
++#ifdef CONFIG_XEN
++	/*
++	 * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
++	 * current_pt_regs()->flags may not match the current task's
++	 * intended IOPL.  We need to switch it manually.
++	 */
++	if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
++		     prev->iopl != next->iopl))
++		xen_set_iopl_mask(next->iopl);
++#endif
++
+ 	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
+ 		/*
+ 		 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
+diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c
+index b285d4e8c68e..5da924bbf0a0 100644
+--- a/arch/x86/kernel/sysfb_efi.c
++++ b/arch/x86/kernel/sysfb_efi.c
+@@ -106,14 +106,24 @@ static int __init efifb_set_system(const struct dmi_system_id *id)
+ 					continue;
+ 				for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ 					resource_size_t start, end;
++					unsigned long flags;
++
++					flags = pci_resource_flags(dev, i);
++					if (!(flags & IORESOURCE_MEM))
++						continue;
++
++					if (flags & IORESOURCE_UNSET)
++						continue;
++
++					if (pci_resource_len(dev, i) == 0)
++						continue;
+ 
+ 					start = pci_resource_start(dev, i);
+-					if (start == 0)
+-						break;
+ 					end = pci_resource_end(dev, i);
+ 					if (screen_info.lfb_base >= start &&
+ 					    screen_info.lfb_base < end) {
+ 						found_bar = 1;
++						break;
+ 					}
+ 				}
+ 			}
+diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
+index 92ae6acac8a7..6aa0f4d9eea6 100644
+--- a/arch/x86/kernel/tsc_msr.c
++++ b/arch/x86/kernel/tsc_msr.c
+@@ -92,7 +92,7 @@ unsigned long try_msr_calibrate_tsc(void)
+ 
+ 	if (freq_desc_tables[cpu_index].msr_plat) {
+ 		rdmsr(MSR_PLATFORM_INFO, lo, hi);
+-		ratio = (lo >> 8) & 0x1f;
++		ratio = (lo >> 8) & 0xff;
+ 	} else {
+ 		rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
+ 		ratio = (hi >> 8) & 0x1f;
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 6525e926f566..2e1fd586b895 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -509,6 +509,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+ 			do_cpuid_1_ent(&entry[i], function, idx);
+ 			if (idx == 1) {
+ 				entry[i].eax &= kvm_supported_word10_x86_features;
++				cpuid_mask(&entry[i].eax, 10);
+ 				entry[i].ebx = 0;
+ 				if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
+ 					entry[i].ebx =
+diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
+index b0ea42b78ccd..ab5318727579 100644
+--- a/arch/x86/kvm/i8254.c
++++ b/arch/x86/kvm/i8254.c
+@@ -245,7 +245,7 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
+ 		 * PIC is being reset.  Handle it gracefully here
+ 		 */
+ 		atomic_inc(&ps->pending);
+-	else if (value > 0)
++	else if (value > 0 && ps->reinject)
+ 		/* in this case, we had multiple outstanding pit interrupts
+ 		 * that we needed to inject.  Reinject
+ 		 */
+@@ -288,7 +288,9 @@ static void pit_do_work(struct kthread_work *work)
+ 	 * last one has been acked.
+ 	 */
+ 	spin_lock(&ps->inject_lock);
+-	if (ps->irq_ack) {
++	if (!ps->reinject)
++		inject = 1;
++	else if (ps->irq_ack) {
+ 		ps->irq_ack = 0;
+ 		inject = 1;
+ 	}
+@@ -317,10 +319,10 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
+ 	struct kvm_kpit_state *ps = container_of(data, struct kvm_kpit_state, timer);
+ 	struct kvm_pit *pt = ps->kvm->arch.vpit;
+ 
+-	if (ps->reinject || !atomic_read(&ps->pending)) {
++	if (ps->reinject)
+ 		atomic_inc(&ps->pending);
+-		queue_kthread_work(&pt->worker, &pt->expired);
+-	}
++
++	queue_kthread_work(&pt->worker, &pt->expired);
+ 
+ 	if (ps->is_periodic) {
+ 		hrtimer_add_expires_ns(&ps->timer, ps->period);
+diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
+index 3f8c732117ec..c146f3c262c3 100644
+--- a/arch/x86/kvm/mtrr.c
++++ b/arch/x86/kvm/mtrr.c
+@@ -44,8 +44,6 @@ static bool msr_mtrr_valid(unsigned msr)
+ 	case MSR_MTRRdefType:
+ 	case MSR_IA32_CR_PAT:
+ 		return true;
+-	case 0x2f8:
+-		return true;
+ 	}
+ 	return false;
+ }
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 9bd8f44baded..60946a5d3812 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -2702,8 +2702,15 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
+ 	} else
+ 		vmx->nested.nested_vmx_ept_caps = 0;
+ 
++	/*
++	 * Old versions of KVM use the single-context version without
++	 * checking for support, so declare that it is supported even
++	 * though it is treated as global context.  The alternative is
++	 * not failing the single-context invvpid, and it is worse.
++	 */
+ 	if (enable_vpid)
+ 		vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
++				VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |
+ 				VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
+ 	else
+ 		vmx->nested.nested_vmx_vpid_caps = 0;
+@@ -5014,8 +5021,8 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
+ 		vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
+ 
+ 	cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
+-	vmx_set_cr0(vcpu, cr0); /* enter rmode */
+ 	vmx->vcpu.arch.cr0 = cr0;
++	vmx_set_cr0(vcpu, cr0); /* enter rmode */
+ 	vmx_set_cr4(vcpu, 0);
+ 	vmx_set_efer(vcpu, 0);
+ 	vmx_fpu_activate(vcpu);
+@@ -7398,6 +7405,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
+ 	if (!(types & (1UL << type))) {
+ 		nested_vmx_failValid(vcpu,
+ 				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
++		skip_emulated_instruction(vcpu);
+ 		return 1;
+ 	}
+ 
+@@ -7456,6 +7464,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
+ 	if (!(types & (1UL << type))) {
+ 		nested_vmx_failValid(vcpu,
+ 			VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
++		skip_emulated_instruction(vcpu);
+ 		return 1;
+ 	}
+ 
+@@ -7472,12 +7481,17 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
+ 	}
+ 
+ 	switch (type) {
++	case VMX_VPID_EXTENT_SINGLE_CONTEXT:
++		/*
++		 * Old versions of KVM use the single-context version so we
++		 * have to support it; just treat it the same as all-context.
++		 */
+ 	case VMX_VPID_EXTENT_ALL_CONTEXT:
+ 		__vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
+ 		nested_vmx_succeed(vcpu);
+ 		break;
+ 	default:
+-		/* Trap single context invalidation invvpid calls */
++		/* Trap individual address invalidation invvpid calls */
+ 		BUG_ON(1);
+ 		break;
+ 	}
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index eaf6ee8c28b8..ac4963c38aa3 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -697,7 +697,6 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
+ 		if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
+ 			return 1;
+ 	}
+-	kvm_put_guest_xcr0(vcpu);
+ 	vcpu->arch.xcr0 = xcr0;
+ 
+ 	if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
+@@ -2752,6 +2751,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ 	}
+ 
+ 	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
++	vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
+ }
+ 
+ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+@@ -6073,12 +6073,10 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
+ 	}
+ 
+ 	/* try to inject new event if pending */
+-	if (vcpu->arch.nmi_pending) {
+-		if (kvm_x86_ops->nmi_allowed(vcpu)) {
+-			--vcpu->arch.nmi_pending;
+-			vcpu->arch.nmi_injected = true;
+-			kvm_x86_ops->set_nmi(vcpu);
+-		}
++	if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
++		--vcpu->arch.nmi_pending;
++		vcpu->arch.nmi_injected = true;
++		kvm_x86_ops->set_nmi(vcpu);
+ 	} else if (kvm_cpu_has_injectable_intr(vcpu)) {
+ 		/*
+ 		 * Because interrupts can be injected asynchronously, we are
+@@ -6547,10 +6545,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ 		if (inject_pending_event(vcpu, req_int_win) != 0)
+ 			req_immediate_exit = true;
+ 		/* enable NMI/IRQ window open exits if needed */
+-		else if (vcpu->arch.nmi_pending)
+-			kvm_x86_ops->enable_nmi_window(vcpu);
+-		else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
+-			kvm_x86_ops->enable_irq_window(vcpu);
++		else {
++			if (vcpu->arch.nmi_pending)
++				kvm_x86_ops->enable_nmi_window(vcpu);
++			if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
++				kvm_x86_ops->enable_irq_window(vcpu);
++		}
+ 
+ 		if (kvm_lapic_enabled(vcpu)) {
+ 			update_cr8_intercept(vcpu);
+@@ -6568,8 +6568,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ 	kvm_x86_ops->prepare_guest_switch(vcpu);
+ 	if (vcpu->fpu_active)
+ 		kvm_load_guest_fpu(vcpu);
+-	kvm_load_guest_xcr0(vcpu);
+-
+ 	vcpu->mode = IN_GUEST_MODE;
+ 
+ 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+@@ -6592,6 +6590,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ 		goto cancel_injection;
+ 	}
+ 
++	kvm_load_guest_xcr0(vcpu);
++
+ 	if (req_immediate_exit)
+ 		smp_send_reschedule(vcpu->cpu);
+ 
+@@ -6641,6 +6641,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ 	vcpu->mode = OUTSIDE_GUEST_MODE;
+ 	smp_wmb();
+ 
++	kvm_put_guest_xcr0(vcpu);
++
+ 	/* Interrupt is enabled by handle_external_intr() */
+ 	kvm_x86_ops->handle_external_intr(vcpu);
+ 
+@@ -7288,7 +7290,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
+ 	 * and assume host would use all available bits.
+ 	 * Guest xcr0 would be loaded later.
+ 	 */
+-	kvm_put_guest_xcr0(vcpu);
+ 	vcpu->guest_fpu_loaded = 1;
+ 	__kernel_fpu_begin();
+ 	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
+@@ -7297,8 +7298,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
+ 
+ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
+ {
+-	kvm_put_guest_xcr0(vcpu);
+-
+ 	if (!vcpu->guest_fpu_loaded) {
+ 		vcpu->fpu_counter = 0;
+ 		return;
+diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
+index 637ab34ed632..ddb2244b06a1 100644
+--- a/arch/x86/mm/kmmio.c
++++ b/arch/x86/mm/kmmio.c
+@@ -33,7 +33,7 @@
+ struct kmmio_fault_page {
+ 	struct list_head list;
+ 	struct kmmio_fault_page *release_next;
+-	unsigned long page; /* location of the fault page */
++	unsigned long addr; /* the requested address */
+ 	pteval_t old_presence; /* page presence prior to arming */
+ 	bool armed;
+ 
+@@ -70,9 +70,16 @@ unsigned int kmmio_count;
+ static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
+ static LIST_HEAD(kmmio_probes);
+ 
+-static struct list_head *kmmio_page_list(unsigned long page)
++static struct list_head *kmmio_page_list(unsigned long addr)
+ {
+-	return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
++	unsigned int l;
++	pte_t *pte = lookup_address(addr, &l);
++
++	if (!pte)
++		return NULL;
++	addr &= page_level_mask(l);
++
++	return &kmmio_page_table[hash_long(addr, KMMIO_PAGE_HASH_BITS)];
+ }
+ 
+ /* Accessed per-cpu */
+@@ -98,15 +105,19 @@ static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
+ }
+ 
+ /* You must be holding RCU read lock. */
+-static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
++static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
+ {
+ 	struct list_head *head;
+ 	struct kmmio_fault_page *f;
++	unsigned int l;
++	pte_t *pte = lookup_address(addr, &l);
+ 
+-	page &= PAGE_MASK;
+-	head = kmmio_page_list(page);
++	if (!pte)
++		return NULL;
++	addr &= page_level_mask(l);
++	head = kmmio_page_list(addr);
+ 	list_for_each_entry_rcu(f, head, list) {
+-		if (f->page == page)
++		if (f->addr == addr)
+ 			return f;
+ 	}
+ 	return NULL;
+@@ -137,10 +148,10 @@ static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
+ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
+ {
+ 	unsigned int level;
+-	pte_t *pte = lookup_address(f->page, &level);
++	pte_t *pte = lookup_address(f->addr, &level);
+ 
+ 	if (!pte) {
+-		pr_err("no pte for page 0x%08lx\n", f->page);
++		pr_err("no pte for addr 0x%08lx\n", f->addr);
+ 		return -1;
+ 	}
+ 
+@@ -156,7 +167,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
+ 		return -1;
+ 	}
+ 
+-	__flush_tlb_one(f->page);
++	__flush_tlb_one(f->addr);
+ 	return 0;
+ }
+ 
+@@ -176,12 +187,12 @@ static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
+ 	int ret;
+ 	WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
+ 	if (f->armed) {
+-		pr_warning("double-arm: page 0x%08lx, ref %d, old %d\n",
+-			   f->page, f->count, !!f->old_presence);
++		pr_warning("double-arm: addr 0x%08lx, ref %d, old %d\n",
++			   f->addr, f->count, !!f->old_presence);
+ 	}
+ 	ret = clear_page_presence(f, true);
+-	WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming 0x%08lx failed.\n"),
+-		  f->page);
++	WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming at 0x%08lx failed.\n"),
++		  f->addr);
+ 	f->armed = true;
+ 	return ret;
+ }
+@@ -191,7 +202,7 @@ static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
+ {
+ 	int ret = clear_page_presence(f, false);
+ 	WARN_ONCE(ret < 0,
+-			KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
++			KERN_ERR "kmmio disarming at 0x%08lx failed.\n", f->addr);
+ 	f->armed = false;
+ }
+ 
+@@ -215,6 +226,12 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
+ 	struct kmmio_context *ctx;
+ 	struct kmmio_fault_page *faultpage;
+ 	int ret = 0; /* default to fault not handled */
++	unsigned long page_base = addr;
++	unsigned int l;
++	pte_t *pte = lookup_address(addr, &l);
++	if (!pte)
++		return -EINVAL;
++	page_base &= page_level_mask(l);
+ 
+ 	/*
+ 	 * Preemption is now disabled to prevent process switch during
+@@ -227,7 +244,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
+ 	preempt_disable();
+ 	rcu_read_lock();
+ 
+-	faultpage = get_kmmio_fault_page(addr);
++	faultpage = get_kmmio_fault_page(page_base);
+ 	if (!faultpage) {
+ 		/*
+ 		 * Either this page fault is not caused by kmmio, or
+@@ -239,7 +256,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
+ 
+ 	ctx = &get_cpu_var(kmmio_ctx);
+ 	if (ctx->active) {
+-		if (addr == ctx->addr) {
++		if (page_base == ctx->addr) {
+ 			/*
+ 			 * A second fault on the same page means some other
+ 			 * condition needs handling by do_page_fault(), the
+@@ -267,9 +284,9 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
+ 	ctx->active++;
+ 
+ 	ctx->fpage = faultpage;
+-	ctx->probe = get_kmmio_probe(addr);
++	ctx->probe = get_kmmio_probe(page_base);
+ 	ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
+-	ctx->addr = addr;
++	ctx->addr = page_base;
+ 
+ 	if (ctx->probe && ctx->probe->pre_handler)
+ 		ctx->probe->pre_handler(ctx->probe, regs, addr);
+@@ -354,12 +371,11 @@ out:
+ }
+ 
+ /* You must be holding kmmio_lock. */
+-static int add_kmmio_fault_page(unsigned long page)
++static int add_kmmio_fault_page(unsigned long addr)
+ {
+ 	struct kmmio_fault_page *f;
+ 
+-	page &= PAGE_MASK;
+-	f = get_kmmio_fault_page(page);
++	f = get_kmmio_fault_page(addr);
+ 	if (f) {
+ 		if (!f->count)
+ 			arm_kmmio_fault_page(f);
+@@ -372,26 +388,25 @@ static int add_kmmio_fault_page(unsigned long page)
+ 		return -1;
+ 
+ 	f->count = 1;
+-	f->page = page;
++	f->addr = addr;
+ 
+ 	if (arm_kmmio_fault_page(f)) {
+ 		kfree(f);
+ 		return -1;
+ 	}
+ 
+-	list_add_rcu(&f->list, kmmio_page_list(f->page));
++	list_add_rcu(&f->list, kmmio_page_list(f->addr));
+ 
+ 	return 0;
+ }
+ 
+ /* You must be holding kmmio_lock. */
+-static void release_kmmio_fault_page(unsigned long page,
++static void release_kmmio_fault_page(unsigned long addr,
+ 				struct kmmio_fault_page **release_list)
+ {
+ 	struct kmmio_fault_page *f;
+ 
+-	page &= PAGE_MASK;
+-	f = get_kmmio_fault_page(page);
++	f = get_kmmio_fault_page(addr);
+ 	if (!f)
+ 		return;
+ 
+@@ -420,18 +435,27 @@ int register_kmmio_probe(struct kmmio_probe *p)
+ 	int ret = 0;
+ 	unsigned long size = 0;
+ 	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
++	unsigned int l;
++	pte_t *pte;
+ 
+ 	spin_lock_irqsave(&kmmio_lock, flags);
+ 	if (get_kmmio_probe(p->addr)) {
+ 		ret = -EEXIST;
+ 		goto out;
+ 	}
++
++	pte = lookup_address(p->addr, &l);
++	if (!pte) {
++		ret = -EINVAL;
++		goto out;
++	}
++
+ 	kmmio_count++;
+ 	list_add_rcu(&p->list, &kmmio_probes);
+ 	while (size < size_lim) {
+ 		if (add_kmmio_fault_page(p->addr + size))
+ 			pr_err("Unable to set page fault.\n");
+-		size += PAGE_SIZE;
++		size += page_level_size(l);
+ 	}
+ out:
+ 	spin_unlock_irqrestore(&kmmio_lock, flags);
+@@ -506,11 +530,17 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
+ 	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
+ 	struct kmmio_fault_page *release_list = NULL;
+ 	struct kmmio_delayed_release *drelease;
++	unsigned int l;
++	pte_t *pte;
++
++	pte = lookup_address(p->addr, &l);
++	if (!pte)
++		return;
+ 
+ 	spin_lock_irqsave(&kmmio_lock, flags);
+ 	while (size < size_lim) {
+ 		release_kmmio_fault_page(p->addr + size, &release_list);
+-		size += PAGE_SIZE;
++		size += page_level_size(l);
+ 	}
+ 	list_del_rcu(&p->list);
+ 	kmmio_count--;
+diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
+index 8f4cc3dfac32..5fb6adaaa796 100644
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -106,8 +106,6 @@ static void flush_tlb_func(void *info)
+ 
+ 	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
+ 		return;
+-	if (!f->flush_end)
+-		f->flush_end = f->flush_start + PAGE_SIZE;
+ 
+ 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+ 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
+@@ -135,12 +133,20 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
+ 				 unsigned long end)
+ {
+ 	struct flush_tlb_info info;
++
++	if (end == 0)
++		end = start + PAGE_SIZE;
+ 	info.flush_mm = mm;
+ 	info.flush_start = start;
+ 	info.flush_end = end;
+ 
+ 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
+-	trace_tlb_flush(TLB_REMOTE_SEND_IPI, end - start);
++	if (end == TLB_FLUSH_ALL)
++		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
++	else
++		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
++				(end - start) >> PAGE_SHIFT);
++
+ 	if (is_uv_system()) {
+ 		unsigned int cpu;
+ 
+diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
+index e58565556703..0ae7e9fa348d 100644
+--- a/arch/x86/pci/fixup.c
++++ b/arch/x86/pci/fixup.c
+@@ -540,3 +540,10 @@ static void twinhead_reserve_killing_zone(struct pci_dev *dev)
+         }
+ }
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
++
++static void pci_bdwep_bar(struct pci_dev *dev)
++{
++	dev->non_compliant_bars = 1;
++}
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_bdwep_bar);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_bdwep_bar);
+diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
+index beac4dfdade6..349b8ce92bf2 100644
+--- a/arch/x86/pci/xen.c
++++ b/arch/x86/pci/xen.c
+@@ -491,8 +491,11 @@ int __init pci_xen_initial_domain(void)
+ #endif
+ 	__acpi_register_gsi = acpi_register_gsi_xen;
+ 	__acpi_unregister_gsi = NULL;
+-	/* Pre-allocate legacy irqs */
+-	for (irq = 0; irq < nr_legacy_irqs(); irq++) {
++	/*
++	 * Pre-allocate the legacy IRQs.  Use NR_LEGACY_IRQS here
++	 * because we don't have a PIC and thus nr_legacy_irqs() is zero.
++	 */
++	for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
+ 		int trigger, polarity;
+ 
+ 		if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index d09e4c9d7cc5..e3679db17545 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -961,7 +961,7 @@ static void xen_load_sp0(struct tss_struct *tss,
+ 	tss->x86_tss.sp0 = thread->sp0;
+ }
+ 
+-static void xen_set_iopl_mask(unsigned mask)
++void xen_set_iopl_mask(unsigned mask)
+ {
+ 	struct physdev_set_iopl set_iopl;
+ 
+diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
+index 9ed55649ac8e..05e1df943856 100644
+--- a/arch/xtensa/kernel/head.S
++++ b/arch/xtensa/kernel/head.S
+@@ -128,7 +128,7 @@ ENTRY(_startup)
+ 	wsr	a0, icountlevel
+ 
+ 	.set	_index, 0
+-	.rept	XCHAL_NUM_DBREAK - 1
++	.rept	XCHAL_NUM_DBREAK
+ 	wsr	a0, SREG_DBREAKC + _index
+ 	.set	_index, _index + 1
+ 	.endr
+diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
+index d75aa1476da7..1a804a2f9a5b 100644
+--- a/arch/xtensa/mm/cache.c
++++ b/arch/xtensa/mm/cache.c
+@@ -97,11 +97,11 @@ void clear_user_highpage(struct page *page, unsigned long vaddr)
+ 	unsigned long paddr;
+ 	void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);
+ 
+-	pagefault_disable();
++	preempt_disable();
+ 	kmap_invalidate_coherent(page, vaddr);
+ 	set_bit(PG_arch_1, &page->flags);
+ 	clear_page_alias(kvaddr, paddr);
+-	pagefault_enable();
++	preempt_enable();
+ }
+ 
+ void copy_user_highpage(struct page *dst, struct page *src,
+@@ -113,11 +113,11 @@ void copy_user_highpage(struct page *dst, struct page *src,
+ 	void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
+ 					  &src_paddr);
+ 
+-	pagefault_disable();
++	preempt_disable();
+ 	kmap_invalidate_coherent(dst, vaddr);
+ 	set_bit(PG_arch_1, &dst->flags);
+ 	copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
+-	pagefault_enable();
++	preempt_enable();
+ }
+ 
+ #endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
+diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
+index 70cb408bc20d..92d785fefb6d 100644
+--- a/arch/xtensa/platforms/iss/console.c
++++ b/arch/xtensa/platforms/iss/console.c
+@@ -100,21 +100,23 @@ static void rs_poll(unsigned long priv)
+ {
+ 	struct tty_port *port = (struct tty_port *)priv;
+ 	int i = 0;
++	int rd = 1;
+ 	unsigned char c;
+ 
+ 	spin_lock(&timer_lock);
+ 
+ 	while (simc_poll(0)) {
+-		simc_read(0, &c, 1);
++		rd = simc_read(0, &c, 1);
++		if (rd <= 0)
++			break;
+ 		tty_insert_flip_char(port, c, TTY_NORMAL);
+ 		i++;
+ 	}
+ 
+ 	if (i)
+ 		tty_flip_buffer_push(port);
+-
+-
+-	mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
++	if (rd)
++		mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
+ 	spin_unlock(&timer_lock);
+ }
+ 
+diff --git a/block/blk-core.c b/block/blk-core.c
+index b83d29755b5a..45f4d7efbf34 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -2198,7 +2198,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
+ 	if (q->mq_ops) {
+ 		if (blk_queue_io_stat(q))
+ 			blk_account_io_start(rq, true);
+-		blk_mq_insert_request(rq, false, true, true);
++		blk_mq_insert_request(rq, false, true, false);
+ 		return 0;
+ 	}
+ 
+diff --git a/block/partition-generic.c b/block/partition-generic.c
+index fefd01b496a0..cfcfe1b0ecbc 100644
+--- a/block/partition-generic.c
++++ b/block/partition-generic.c
+@@ -350,15 +350,20 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
+ 			goto out_del;
+ 	}
+ 
++	err = hd_ref_init(p);
++	if (err) {
++		if (flags & ADDPART_FLAG_WHOLEDISK)
++			goto out_remove_file;
++		goto out_del;
++	}
++
+ 	/* everything is up and running, commence */
+ 	rcu_assign_pointer(ptbl->part[partno], p);
+ 
+ 	/* suppress uevent if the disk suppresses it */
+ 	if (!dev_get_uevent_suppress(ddev))
+ 		kobject_uevent(&pdev->kobj, KOBJ_ADD);
+-
+-	if (!hd_ref_init(p))
+-		return p;
++	return p;
+ 
+ out_free_info:
+ 	free_part_info(p);
+@@ -367,6 +372,8 @@ out_free_stats:
+ out_free:
+ 	kfree(p);
+ 	return ERR_PTR(err);
++out_remove_file:
++	device_remove_file(pdev, &dev_attr_whole_disk);
+ out_del:
+ 	kobject_put(p->holder_dir);
+ 	device_del(pdev);
+diff --git a/crypto/ahash.c b/crypto/ahash.c
+index d19b52324cf5..dac1c24e9c3e 100644
+--- a/crypto/ahash.c
++++ b/crypto/ahash.c
+@@ -69,8 +69,9 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
+ 	struct scatterlist *sg;
+ 
+ 	sg = walk->sg;
+-	walk->pg = sg_page(sg);
+ 	walk->offset = sg->offset;
++	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
++	walk->offset = offset_in_page(walk->offset);
+ 	walk->entrylen = sg->length;
+ 
+ 	if (walk->entrylen > walk->total)
+diff --git a/crypto/asymmetric_keys/pkcs7_trust.c b/crypto/asymmetric_keys/pkcs7_trust.c
+index 90d6d47965b0..ecdb5a2ce085 100644
+--- a/crypto/asymmetric_keys/pkcs7_trust.c
++++ b/crypto/asymmetric_keys/pkcs7_trust.c
+@@ -178,6 +178,8 @@ int pkcs7_validate_trust(struct pkcs7_message *pkcs7,
+ 	int cached_ret = -ENOKEY;
+ 	int ret;
+ 
++	*_trusted = false;
++
+ 	for (p = pkcs7->certs; p; p = p->next)
+ 		p->seen = false;
+ 
+diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
+index 021d39c0ba75..13c4e5a5fe8c 100644
+--- a/crypto/asymmetric_keys/x509_cert_parser.c
++++ b/crypto/asymmetric_keys/x509_cert_parser.c
+@@ -494,7 +494,7 @@ int x509_decode_time(time64_t *_t,  size_t hdrlen,
+ 		     unsigned char tag,
+ 		     const unsigned char *value, size_t vlen)
+ {
+-	static const unsigned char month_lengths[] = { 31, 29, 31, 30, 31, 30,
++	static const unsigned char month_lengths[] = { 31, 28, 31, 30, 31, 30,
+ 						       31, 31, 30, 31, 30, 31 };
+ 	const unsigned char *p = value;
+ 	unsigned year, mon, day, hour, min, sec, mon_len;
+@@ -540,9 +540,9 @@ int x509_decode_time(time64_t *_t,  size_t hdrlen,
+ 		if (year % 4 == 0) {
+ 			mon_len = 29;
+ 			if (year % 100 == 0) {
+-				year /= 100;
+-				if (year % 4 != 0)
+-					mon_len = 28;
++				mon_len = 28;
++				if (year % 400 == 0)
++					mon_len = 29;
+ 			}
+ 		}
+ 	}
+diff --git a/crypto/keywrap.c b/crypto/keywrap.c
+index b1d106ce55f3..72014f963ba7 100644
+--- a/crypto/keywrap.c
++++ b/crypto/keywrap.c
+@@ -212,7 +212,7 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
+ 			  SEMIBSIZE))
+ 		ret = -EBADMSG;
+ 
+-	memzero_explicit(&block, sizeof(struct crypto_kw_block));
++	memzero_explicit(block, sizeof(struct crypto_kw_block));
+ 
+ 	return ret;
+ }
+@@ -297,7 +297,7 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
+ 	/* establish the IV for the caller to pick up */
+ 	memcpy(desc->info, block->A, SEMIBSIZE);
+ 
+-	memzero_explicit(&block, sizeof(struct crypto_kw_block));
++	memzero_explicit(block, sizeof(struct crypto_kw_block));
+ 
+ 	return 0;
+ }
+diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
+index 50f5c97e1087..0cbc5a5025c2 100644
+--- a/crypto/rsa-pkcs1pad.c
++++ b/crypto/rsa-pkcs1pad.c
+@@ -310,16 +310,16 @@ static int pkcs1pad_decrypt(struct akcipher_request *req)
+ 	req_ctx->child_req.src = req->src;
+ 	req_ctx->child_req.src_len = req->src_len;
+ 	req_ctx->child_req.dst = req_ctx->out_sg;
+-	req_ctx->child_req.dst_len = ctx->key_size - 1;
++	req_ctx->child_req.dst_len = ctx->key_size ;
+ 
+-	req_ctx->out_buf = kmalloc(ctx->key_size - 1,
++	req_ctx->out_buf = kmalloc(ctx->key_size,
+ 			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ 			GFP_KERNEL : GFP_ATOMIC);
+ 	if (!req_ctx->out_buf)
+ 		return -ENOMEM;
+ 
+ 	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
+-			ctx->key_size - 1, NULL);
++			    ctx->key_size, NULL);
+ 
+ 	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
+ 	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
+@@ -491,16 +491,16 @@ static int pkcs1pad_verify(struct akcipher_request *req)
+ 	req_ctx->child_req.src = req->src;
+ 	req_ctx->child_req.src_len = req->src_len;
+ 	req_ctx->child_req.dst = req_ctx->out_sg;
+-	req_ctx->child_req.dst_len = ctx->key_size - 1;
++	req_ctx->child_req.dst_len = ctx->key_size;
+ 
+-	req_ctx->out_buf = kmalloc(ctx->key_size - 1,
++	req_ctx->out_buf = kmalloc(ctx->key_size,
+ 			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ 			GFP_KERNEL : GFP_ATOMIC);
+ 	if (!req_ctx->out_buf)
+ 		return -ENOMEM;
+ 
+ 	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
+-			ctx->key_size - 1, NULL);
++			    ctx->key_size, NULL);
+ 
+ 	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
+ 	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
+diff --git a/crypto/testmgr.c b/crypto/testmgr.c
+index ae8c57fd8bc7..d4944318ca1f 100644
+--- a/crypto/testmgr.c
++++ b/crypto/testmgr.c
+@@ -1849,6 +1849,7 @@ static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,
+ static int do_test_rsa(struct crypto_akcipher *tfm,
+ 		       struct akcipher_testvec *vecs)
+ {
++	char *xbuf[XBUFSIZE];
+ 	struct akcipher_request *req;
+ 	void *outbuf_enc = NULL;
+ 	void *outbuf_dec = NULL;
+@@ -1857,9 +1858,12 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
+ 	int err = -ENOMEM;
+ 	struct scatterlist src, dst, src_tab[2];
+ 
++	if (testmgr_alloc_buf(xbuf))
++		return err;
++
+ 	req = akcipher_request_alloc(tfm, GFP_KERNEL);
+ 	if (!req)
+-		return err;
++		goto free_xbuf;
+ 
+ 	init_completion(&result.completion);
+ 
+@@ -1877,9 +1881,14 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
+ 	if (!outbuf_enc)
+ 		goto free_req;
+ 
++	if (WARN_ON(vecs->m_size > PAGE_SIZE))
++		goto free_all;
++
++	memcpy(xbuf[0], vecs->m, vecs->m_size);
++
+ 	sg_init_table(src_tab, 2);
+-	sg_set_buf(&src_tab[0], vecs->m, 8);
+-	sg_set_buf(&src_tab[1], vecs->m + 8, vecs->m_size - 8);
++	sg_set_buf(&src_tab[0], xbuf[0], 8);
++	sg_set_buf(&src_tab[1], xbuf[0] + 8, vecs->m_size - 8);
+ 	sg_init_one(&dst, outbuf_enc, out_len_max);
+ 	akcipher_request_set_crypt(req, src_tab, &dst, vecs->m_size,
+ 				   out_len_max);
+@@ -1898,7 +1907,7 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
+ 		goto free_all;
+ 	}
+ 	/* verify that encrypted message is equal to expected */
+-	if (memcmp(vecs->c, sg_virt(req->dst), vecs->c_size)) {
++	if (memcmp(vecs->c, outbuf_enc, vecs->c_size)) {
+ 		pr_err("alg: rsa: encrypt test failed. Invalid output\n");
+ 		err = -EINVAL;
+ 		goto free_all;
+@@ -1913,7 +1922,13 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
+ 		err = -ENOMEM;
+ 		goto free_all;
+ 	}
+-	sg_init_one(&src, vecs->c, vecs->c_size);
++
++	if (WARN_ON(vecs->c_size > PAGE_SIZE))
++		goto free_all;
++
++	memcpy(xbuf[0], vecs->c, vecs->c_size);
++
++	sg_init_one(&src, xbuf[0], vecs->c_size);
+ 	sg_init_one(&dst, outbuf_dec, out_len_max);
+ 	init_completion(&result.completion);
+ 	akcipher_request_set_crypt(req, &src, &dst, vecs->c_size, out_len_max);
+@@ -1940,6 +1955,8 @@ free_all:
+ 	kfree(outbuf_enc);
+ free_req:
+ 	akcipher_request_free(req);
++free_xbuf:
++	testmgr_free_buf(xbuf);
+ 	return err;
+ }
+ 
+diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
+index 6979186dbd4b..9f77943653fb 100644
+--- a/drivers/acpi/acpi_processor.c
++++ b/drivers/acpi/acpi_processor.c
+@@ -491,6 +491,58 @@ static void acpi_processor_remove(struct acpi_device *device)
+ }
+ #endif /* CONFIG_ACPI_HOTPLUG_CPU */
+ 
++#ifdef CONFIG_X86
++static bool acpi_hwp_native_thermal_lvt_set;
++static acpi_status __init acpi_hwp_native_thermal_lvt_osc(acpi_handle handle,
++							  u32 lvl,
++							  void *context,
++							  void **rv)
++{
++	u8 sb_uuid_str[] = "4077A616-290C-47BE-9EBD-D87058713953";
++	u32 capbuf[2];
++	struct acpi_osc_context osc_context = {
++		.uuid_str = sb_uuid_str,
++		.rev = 1,
++		.cap.length = 8,
++		.cap.pointer = capbuf,
++	};
++
++	if (acpi_hwp_native_thermal_lvt_set)
++		return AE_CTRL_TERMINATE;
++
++	capbuf[0] = 0x0000;
++	capbuf[1] = 0x1000; /* set bit 12 */
++
++	if (ACPI_SUCCESS(acpi_run_osc(handle, &osc_context))) {
++		if (osc_context.ret.pointer && osc_context.ret.length > 1) {
++			u32 *capbuf_ret = osc_context.ret.pointer;
++
++			if (capbuf_ret[1] & 0x1000) {
++				acpi_handle_info(handle,
++					"_OSC native thermal LVT Acked\n");
++				acpi_hwp_native_thermal_lvt_set = true;
++			}
++		}
++		kfree(osc_context.ret.pointer);
++	}
++
++	return AE_OK;
++}
++
++void __init acpi_early_processor_osc(void)
++{
++	if (boot_cpu_has(X86_FEATURE_HWP)) {
++		acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
++				    ACPI_UINT32_MAX,
++				    acpi_hwp_native_thermal_lvt_osc,
++				    NULL, NULL, NULL);
++		acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID,
++				 acpi_hwp_native_thermal_lvt_osc,
++				 NULL, NULL);
++	}
++}
++#endif
++
+ /*
+  * The following ACPI IDs are known to be suitable for representing as
+  * processor devices.
+diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
+index 6a72047aae1c..c3a052d43317 100644
+--- a/drivers/acpi/acpica/dsmethod.c
++++ b/drivers/acpi/acpica/dsmethod.c
+@@ -428,6 +428,9 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
+ 				obj_desc->method.mutex->mutex.
+ 				    original_sync_level =
+ 				    obj_desc->method.mutex->mutex.sync_level;
++
++				obj_desc->method.mutex->mutex.thread_id =
++				    acpi_os_get_thread_id();
+ 			}
+ 		}
+ 
+diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
+index bd75d46234a4..ddb436f86415 100644
+--- a/drivers/acpi/acpica/nsinit.c
++++ b/drivers/acpi/acpica/nsinit.c
+@@ -83,6 +83,8 @@ acpi_status acpi_ns_initialize_objects(void)
+ 
+ 	ACPI_FUNCTION_TRACE(ns_initialize_objects);
+ 
++	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
++			  "[Init] Completing Initialization of ACPI Objects\n"));
+ 	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ 			  "**** Starting initialization of namespace objects ****\n"));
+ 	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
+diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
+index 278666e39563..c37d47982fbe 100644
+--- a/drivers/acpi/acpica/tbxfload.c
++++ b/drivers/acpi/acpica/tbxfload.c
+@@ -83,6 +83,20 @@ acpi_status __init acpi_load_tables(void)
+ 				"While loading namespace from ACPI tables"));
+ 	}
+ 
++	if (!acpi_gbl_group_module_level_code) {
++		/*
++		 * Initialize the objects that remain uninitialized. This
++		 * runs the executable AML that may be part of the
++		 * declaration of these objects:
++		 * operation_regions, buffer_fields, Buffers, and Packages.
++		 */
++		status = acpi_ns_initialize_objects();
++		if (ACPI_FAILURE(status)) {
++			return_ACPI_STATUS(status);
++		}
++	}
++
++	acpi_gbl_reg_methods_enabled = TRUE;
+ 	return_ACPI_STATUS(status);
+ }
+ 
+diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
+index 721b87cce908..638fbd4ad72b 100644
+--- a/drivers/acpi/acpica/utxfinit.c
++++ b/drivers/acpi/acpica/utxfinit.c
+@@ -267,7 +267,6 @@ acpi_status __init acpi_initialize_objects(u32 flags)
+ 	 * initialized, even if they contain executable AML (see the call to
+ 	 * acpi_ns_initialize_objects below).
+ 	 */
+-	acpi_gbl_reg_methods_enabled = TRUE;
+ 	if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
+ 		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ 				  "[Init] Executing _REG OpRegion methods\n"));
+@@ -299,20 +298,18 @@ acpi_status __init acpi_initialize_objects(u32 flags)
+ 	 */
+ 	if (acpi_gbl_group_module_level_code) {
+ 		acpi_ns_exec_module_code_list();
+-	}
+ 
+-	/*
+-	 * Initialize the objects that remain uninitialized. This runs the
+-	 * executable AML that may be part of the declaration of these objects:
+-	 * operation_regions, buffer_fields, Buffers, and Packages.
+-	 */
+-	if (!(flags & ACPI_NO_OBJECT_INIT)) {
+-		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+-				  "[Init] Completing Initialization of ACPI Objects\n"));
+-
+-		status = acpi_ns_initialize_objects();
+-		if (ACPI_FAILURE(status)) {
+-			return_ACPI_STATUS(status);
++		/*
++		 * Initialize the objects that remain uninitialized. This
++		 * runs the executable AML that may be part of the
++		 * declaration of these objects:
++		 * operation_regions, buffer_fields, Buffers, and Packages.
++		 */
++		if (!(flags & ACPI_NO_OBJECT_INIT)) {
++			status = acpi_ns_initialize_objects();
++			if (ACPI_FAILURE(status)) {
++				return_ACPI_STATUS(status);
++			}
+ 		}
+ 	}
+ 
+diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
+index 891c42d1cd65..f9081b791b81 100644
+--- a/drivers/acpi/bus.c
++++ b/drivers/acpi/bus.c
+@@ -1005,6 +1005,9 @@ static int __init acpi_bus_init(void)
+ 		goto error1;
+ 	}
+ 
++	/* Set capability bits for _OSC under processor scope */
++	acpi_early_processor_osc();
++
+ 	/*
+ 	 * _OSC method may exist in module level code,
+ 	 * so it must be run after ACPI_FULL_INITIALIZATION
+diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
+index cd2c3d6d40e0..993fd31394c8 100644
+--- a/drivers/acpi/device_pm.c
++++ b/drivers/acpi/device_pm.c
+@@ -319,6 +319,7 @@ int acpi_device_fix_up_power(struct acpi_device *device)
+ 
+ 	return ret;
+ }
++EXPORT_SYMBOL_GPL(acpi_device_fix_up_power);
+ 
+ int acpi_device_update_power(struct acpi_device *device, int *state_p)
+ {
+diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
+index 1e6833a5cd44..6f41c73f82bb 100644
+--- a/drivers/acpi/internal.h
++++ b/drivers/acpi/internal.h
+@@ -138,6 +138,12 @@ void acpi_early_processor_set_pdc(void);
+ static inline void acpi_early_processor_set_pdc(void) {}
+ #endif
+ 
++#ifdef CONFIG_X86
++void acpi_early_processor_osc(void);
++#else
++static inline void acpi_early_processor_osc(void) {}
++#endif
++
+ /* --------------------------------------------------------------------------
+                                   Embedded Controller
+    -------------------------------------------------------------------------- */
+diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
+index 67da6fb72274..c45fdc49ff28 100644
+--- a/drivers/acpi/osl.c
++++ b/drivers/acpi/osl.c
+@@ -135,7 +135,7 @@ static struct osi_linux {
+ 	unsigned int	enable:1;
+ 	unsigned int	dmi:1;
+ 	unsigned int	cmdline:1;
+-	unsigned int	default_disabling:1;
++	u8		default_disabling;
+ } osi_linux = {0, 0, 0, 0};
+ 
+ static u32 acpi_osi_handler(acpi_string interface, u32 supported)
+@@ -1713,10 +1713,13 @@ void __init acpi_osi_setup(char *str)
+ 	if (*str == '!') {
+ 		str++;
+ 		if (*str == '\0') {
+-			osi_linux.default_disabling = 1;
++			/* Do not override acpi_osi=!* */
++			if (!osi_linux.default_disabling)
++				osi_linux.default_disabling =
++					ACPI_DISABLE_ALL_VENDOR_STRINGS;
+ 			return;
+ 		} else if (*str == '*') {
+-			acpi_update_interfaces(ACPI_DISABLE_ALL_STRINGS);
++			osi_linux.default_disabling = ACPI_DISABLE_ALL_STRINGS;
+ 			for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
+ 				osi = &osi_setup_entries[i];
+ 				osi->enable = false;
+@@ -1789,10 +1792,13 @@ static void __init acpi_osi_setup_late(void)
+ 	acpi_status status;
+ 
+ 	if (osi_linux.default_disabling) {
+-		status = acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
++		status = acpi_update_interfaces(osi_linux.default_disabling);
+ 
+ 		if (ACPI_SUCCESS(status))
+-			printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors\n");
++			printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors%s\n",
++				osi_linux.default_disabling ==
++				ACPI_DISABLE_ALL_STRINGS ?
++				" and feature groups" : "");
+ 	}
+ 
+ 	for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index d02fd53042a5..56241eb341f4 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -27,8 +27,20 @@
+ 
+ #ifdef CONFIG_X86
+ #define valid_IRQ(i) (((i) != 0) && ((i) != 2))
++static inline bool acpi_iospace_resource_valid(struct resource *res)
++{
++	/* On X86 IO space is limited to the [0 - 64K] IO port range */
++	return res->end < 0x10003;
++}
+ #else
+ #define valid_IRQ(i) (true)
++/*
++ * ACPI IO descriptors on arches other than X86 contain MMIO CPU physical
++ * addresses mapping IO space in CPU physical address space, IO space
++ * resources can be placed anywhere in the 64-bit physical address space.
++ */
++static inline bool
++acpi_iospace_resource_valid(struct resource *res) { return true; }
+ #endif
+ 
+ static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io)
+@@ -127,7 +139,7 @@ static void acpi_dev_ioresource_flags(struct resource *res, u64 len,
+ 	if (!acpi_dev_resource_len_valid(res->start, res->end, len, true))
+ 		res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
+ 
+-	if (res->end >= 0x10003)
++	if (!acpi_iospace_resource_valid(res))
+ 		res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
+ 
+ 	if (io_decode == ACPI_DECODE_16)
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index 9cb975200cac..f054cadf30d8 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -714,6 +714,7 @@ static int acpi_hibernation_enter(void)
+ 
+ static void acpi_hibernation_leave(void)
+ {
++	pm_set_resume_via_firmware();
+ 	/*
+ 	 * If ACPI is not enabled by the BIOS and the boot kernel, we need to
+ 	 * enable it here.
+diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
+index 04975b851c23..639adb1f8abd 100644
+--- a/drivers/ata/ahci_platform.c
++++ b/drivers/ata/ahci_platform.c
+@@ -51,6 +51,9 @@ static int ahci_probe(struct platform_device *pdev)
+ 	if (rc)
+ 		return rc;
+ 
++	of_property_read_u32(dev->of_node,
++			     "ports-implemented", &hpriv->force_port_map);
++
+ 	if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci"))
+ 		hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
+ 
+diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
+index 8e3f7faf00d3..73b19b277138 100644
+--- a/drivers/ata/ahci_xgene.c
++++ b/drivers/ata/ahci_xgene.c
+@@ -821,9 +821,9 @@ static int xgene_ahci_probe(struct platform_device *pdev)
+ 				dev_warn(&pdev->dev, "%s: Error reading device info. Assume version1\n",
+ 					__func__);
+ 				version = XGENE_AHCI_V1;
+-			}
+-			if (info->valid & ACPI_VALID_CID)
++			} else if (info->valid & ACPI_VALID_CID) {
+ 				version = XGENE_AHCI_V2;
++			}
+ 		}
+ 	}
+ #endif
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index 85ea5142a095..bb050ea26101 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -469,6 +469,7 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
+ 		dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
+ 			 port_map, hpriv->force_port_map);
+ 		port_map = hpriv->force_port_map;
++		hpriv->saved_port_map = port_map;
+ 	}
+ 
+ 	if (hpriv->mask_port_map) {
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 301b785f9f56..0caf92ae25f3 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -1378,7 +1378,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+ 	mutex_lock(&subdomain->lock);
+ 	mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+ 
+-	if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
++	if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
+ 		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
+ 			subdomain->name);
+ 		ret = -EBUSY;
+diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
+index cf351d3dab1c..0708f301ad97 100644
+--- a/drivers/base/power/opp/core.c
++++ b/drivers/base/power/opp/core.c
+@@ -844,8 +844,14 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
+ 	}
+ 
+ 	opp->u_volt = microvolt[0];
+-	opp->u_volt_min = microvolt[1];
+-	opp->u_volt_max = microvolt[2];
++
++	if (count == 1) {
++		opp->u_volt_min = opp->u_volt;
++		opp->u_volt_max = opp->u_volt;
++	} else {
++		opp->u_volt_min = microvolt[1];
++		opp->u_volt_max = microvolt[2];
++	}
+ 
+ 	/* Search for "opp-microamp-<name>" */
+ 	prop = NULL;
+diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
+index 7e58f6560399..4a36e415e938 100644
+--- a/drivers/base/regmap/regmap-spmi.c
++++ b/drivers/base/regmap/regmap-spmi.c
+@@ -142,7 +142,7 @@ static int regmap_spmi_ext_read(void *context,
+ 	while (val_size) {
+ 		len = min_t(size_t, val_size, 8);
+ 
+-		err = spmi_ext_register_readl(context, addr, val, val_size);
++		err = spmi_ext_register_readl(context, addr, val, len);
+ 		if (err)
+ 			goto err_out;
+ 
+diff --git a/drivers/block/brd.c b/drivers/block/brd.c
+index cb27190e9f39..f7ecc287d733 100644
+--- a/drivers/block/brd.c
++++ b/drivers/block/brd.c
+@@ -341,7 +341,7 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
+ 
+ 	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
+ 		if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) ||
+-		    bio->bi_iter.bi_size & PAGE_MASK)
++		    bio->bi_iter.bi_size & ~PAGE_MASK)
+ 			goto io_error;
+ 		discard_from_brd(brd, sector, bio->bi_iter.bi_size);
+ 		goto out;
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 423f4ca7d712..80cf8add46ff 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -488,6 +488,12 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
+ 	bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
+ 	iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
+ 		      bio_segments(bio), blk_rq_bytes(cmd->rq));
++	/*
++	 * This bio may be started from the middle of the 'bvec'
++	 * because of bio splitting, so offset from the bvec must
++	 * be passed to iov iterator
++	 */
++	iter.iov_offset = bio->bi_iter.bi_bvec_done;
+ 
+ 	cmd->iocb.ki_pos = pos;
+ 	cmd->iocb.ki_filp = file;
+diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
+index 9b180dbbd03c..1c330b61f05d 100644
+--- a/drivers/block/mtip32xx/mtip32xx.c
++++ b/drivers/block/mtip32xx/mtip32xx.c
+@@ -173,7 +173,13 @@ static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
+ {
+ 	struct request *rq;
+ 
++	if (mtip_check_surprise_removal(dd->pdev))
++		return NULL;
++
+ 	rq = blk_mq_alloc_request(dd->queue, 0, BLK_MQ_REQ_RESERVED);
++	if (IS_ERR(rq))
++		return NULL;
++
+ 	return blk_mq_rq_to_pdu(rq);
+ }
+ 
+@@ -233,15 +239,9 @@ static void mtip_async_complete(struct mtip_port *port,
+ 			"Command tag %d failed due to TFE\n", tag);
+ 	}
+ 
+-	/* Unmap the DMA scatter list entries */
+-	dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents, cmd->direction);
+-
+ 	rq = mtip_rq_from_tag(dd, tag);
+ 
+-	if (unlikely(cmd->unaligned))
+-		up(&port->cmd_slot_unal);
+-
+-	blk_mq_end_request(rq, status ? -EIO : 0);
++	blk_mq_complete_request(rq, status);
+ }
+ 
+ /*
+@@ -581,6 +581,8 @@ static void mtip_completion(struct mtip_port *port,
+ 		dev_warn(&port->dd->pdev->dev,
+ 			"Internal command %d completed with TFE\n", tag);
+ 
++	command->comp_func = NULL;
++	command->comp_data = NULL;
+ 	complete(waiting);
+ }
+ 
+@@ -618,8 +620,6 @@ static void mtip_handle_tfe(struct driver_data *dd)
+ 
+ 	port = dd->port;
+ 
+-	set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
+-
+ 	if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
+ 		cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
+ 		dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
+@@ -628,7 +628,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
+ 			cmd->comp_func(port, MTIP_TAG_INTERNAL,
+ 					cmd, PORT_IRQ_TF_ERR);
+ 		}
+-		goto handle_tfe_exit;
++		return;
+ 	}
+ 
+ 	/* clear the tag accumulator */
+@@ -701,7 +701,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
+ 			fail_reason = "thermal shutdown";
+ 		}
+ 		if (buf[288] == 0xBF) {
+-			set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
++			set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
+ 			dev_info(&dd->pdev->dev,
+ 				"Drive indicates rebuild has failed. Secure erase required.\n");
+ 			fail_all_ncq_cmds = 1;
+@@ -771,11 +771,6 @@ static void mtip_handle_tfe(struct driver_data *dd)
+ 		}
+ 	}
+ 	print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
+-
+-handle_tfe_exit:
+-	/* clear eh_active */
+-	clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
+-	wake_up_interruptible(&port->svc_wait);
+ }
+ 
+ /*
+@@ -1007,6 +1002,7 @@ static bool mtip_pause_ncq(struct mtip_port *port,
+ 			(fis->features == 0x27 || fis->features == 0x72 ||
+ 			 fis->features == 0x62 || fis->features == 0x26))) {
+ 		clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
++		clear_bit(MTIP_DDF_REBUILD_FAILED_BIT, &port->dd->dd_flag);
+ 		/* Com reset after secure erase or lowlevel format */
+ 		mtip_restart_port(port);
+ 		clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
+@@ -1021,12 +1017,14 @@ static bool mtip_pause_ncq(struct mtip_port *port,
+  *
+  * @port    Pointer to port data structure
+  * @timeout Max duration to wait (ms)
++ * @atomic  gfp_t flag to indicate blockable context or not
+  *
+  * return value
+  *	0	Success
+  *	-EBUSY  Commands still active
+  */
+-static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
++static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout,
++								gfp_t atomic)
+ {
+ 	unsigned long to;
+ 	unsigned int n;
+@@ -1037,16 +1035,21 @@ static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
+ 	to = jiffies + msecs_to_jiffies(timeout);
+ 	do {
+ 		if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) &&
+-			test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
++			test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags) &&
++			atomic == GFP_KERNEL) {
+ 			msleep(20);
+ 			continue; /* svc thd is actively issuing commands */
+ 		}
+ 
+-		msleep(100);
++		if (atomic == GFP_KERNEL)
++			msleep(100);
++		else {
++			cpu_relax();
++			udelay(100);
++		}
++
+ 		if (mtip_check_surprise_removal(port->dd->pdev))
+ 			goto err_fault;
+-		if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
+-			goto err_fault;
+ 
+ 		/*
+ 		 * Ignore s_active bit 0 of array element 0.
+@@ -1099,6 +1102,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
+ 	struct mtip_cmd *int_cmd;
+ 	struct driver_data *dd = port->dd;
+ 	int rv = 0;
++	unsigned long start;
+ 
+ 	/* Make sure the buffer is 8 byte aligned. This is asic specific. */
+ 	if (buffer & 0x00000007) {
+@@ -1107,6 +1111,10 @@ static int mtip_exec_internal_command(struct mtip_port *port,
+ 	}
+ 
+ 	int_cmd = mtip_get_int_command(dd);
++	if (!int_cmd) {
++		dbg_printk(MTIP_DRV_NAME "Unable to allocate tag for PIO cmd\n");
++		return -EFAULT;
++	}
+ 
+ 	set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
+ 
+@@ -1119,7 +1127,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
+ 		if (fis->command != ATA_CMD_STANDBYNOW1) {
+ 			/* wait for io to complete if non atomic */
+ 			if (mtip_quiesce_io(port,
+-					MTIP_QUIESCE_IO_TIMEOUT_MS) < 0) {
++				MTIP_QUIESCE_IO_TIMEOUT_MS, atomic) < 0) {
+ 				dev_warn(&dd->pdev->dev,
+ 					"Failed to quiesce IO\n");
+ 				mtip_put_int_command(dd, int_cmd);
+@@ -1162,6 +1170,8 @@ static int mtip_exec_internal_command(struct mtip_port *port,
+ 	/* Populate the command header */
+ 	int_cmd->command_header->byte_count = 0;
+ 
++	start = jiffies;
++
+ 	/* Issue the command to the hardware */
+ 	mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);
+ 
+@@ -1170,10 +1180,12 @@ static int mtip_exec_internal_command(struct mtip_port *port,
+ 		if ((rv = wait_for_completion_interruptible_timeout(
+ 				&wait,
+ 				msecs_to_jiffies(timeout))) <= 0) {
++
+ 			if (rv == -ERESTARTSYS) { /* interrupted */
+ 				dev_err(&dd->pdev->dev,
+-					"Internal command [%02X] was interrupted after %lu ms\n",
+-					fis->command, timeout);
++					"Internal command [%02X] was interrupted after %u ms\n",
++					fis->command,
++					jiffies_to_msecs(jiffies - start));
+ 				rv = -EINTR;
+ 				goto exec_ic_exit;
+ 			} else if (rv == 0) /* timeout */
+@@ -2890,6 +2902,42 @@ static int mtip_ftl_rebuild_poll(struct driver_data *dd)
+ 	return -EFAULT;
+ }
+ 
++static void mtip_softirq_done_fn(struct request *rq)
++{
++	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
++	struct driver_data *dd = rq->q->queuedata;
++
++	/* Unmap the DMA scatter list entries */
++	dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents,
++							cmd->direction);
++
++	if (unlikely(cmd->unaligned))
++		up(&dd->port->cmd_slot_unal);
++
++	blk_mq_end_request(rq, rq->errors);
++}
++
++static void mtip_abort_cmd(struct request *req, void *data,
++							bool reserved)
++{
++	struct driver_data *dd = data;
++
++	dbg_printk(MTIP_DRV_NAME " Aborting request, tag = %d\n", req->tag);
++
++	clear_bit(req->tag, dd->port->cmds_to_issue);
++	req->errors = -EIO;
++	mtip_softirq_done_fn(req);
++}
++
++static void mtip_queue_cmd(struct request *req, void *data,
++							bool reserved)
++{
++	struct driver_data *dd = data;
++
++	set_bit(req->tag, dd->port->cmds_to_issue);
++	blk_abort_request(req);
++}
++
+ /*
+  * service thread to issue queued commands
+  *
+@@ -2902,7 +2950,7 @@ static int mtip_ftl_rebuild_poll(struct driver_data *dd)
+ static int mtip_service_thread(void *data)
+ {
+ 	struct driver_data *dd = (struct driver_data *)data;
+-	unsigned long slot, slot_start, slot_wrap;
++	unsigned long slot, slot_start, slot_wrap, to;
+ 	unsigned int num_cmd_slots = dd->slot_groups * 32;
+ 	struct mtip_port *port = dd->port;
+ 
+@@ -2917,9 +2965,7 @@ static int mtip_service_thread(void *data)
+ 		 * is in progress nor error handling is active
+ 		 */
+ 		wait_event_interruptible(port->svc_wait, (port->flags) &&
+-			!(port->flags & MTIP_PF_PAUSE_IO));
+-
+-		set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
++			(port->flags & MTIP_PF_SVC_THD_WORK));
+ 
+ 		if (kthread_should_stop() ||
+ 			test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
+@@ -2929,6 +2975,8 @@ static int mtip_service_thread(void *data)
+ 				&dd->dd_flag)))
+ 			goto st_out;
+ 
++		set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
++
+ restart_eh:
+ 		/* Demux bits: start with error handling */
+ 		if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags)) {
+@@ -2939,6 +2987,32 @@ restart_eh:
+ 		if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags))
+ 			goto restart_eh;
+ 
++		if (test_bit(MTIP_PF_TO_ACTIVE_BIT, &port->flags)) {
++			to = jiffies + msecs_to_jiffies(5000);
++
++			do {
++				mdelay(100);
++			} while (atomic_read(&dd->irq_workers_active) != 0 &&
++				time_before(jiffies, to));
++
++			if (atomic_read(&dd->irq_workers_active) != 0)
++				dev_warn(&dd->pdev->dev,
++					"Completion workers still active!");
++
++			spin_lock(dd->queue->queue_lock);
++			blk_mq_all_tag_busy_iter(*dd->tags.tags,
++							mtip_queue_cmd, dd);
++			spin_unlock(dd->queue->queue_lock);
++
++			set_bit(MTIP_PF_ISSUE_CMDS_BIT, &dd->port->flags);
++
++			if (mtip_device_reset(dd))
++				blk_mq_all_tag_busy_iter(*dd->tags.tags,
++							mtip_abort_cmd, dd);
++
++			clear_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags);
++		}
++
+ 		if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
+ 			slot = 1;
+ 			/* used to restrict the loop to one iteration */
+@@ -2971,10 +3045,8 @@ restart_eh:
+ 		}
+ 
+ 		if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) {
+-			if (mtip_ftl_rebuild_poll(dd) < 0)
+-				set_bit(MTIP_DDF_REBUILD_FAILED_BIT,
+-							&dd->dd_flag);
+-			clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
++			if (mtip_ftl_rebuild_poll(dd) == 0)
++				clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
+ 		}
+ 	}
+ 
+@@ -3089,7 +3161,7 @@ static int mtip_hw_get_identify(struct driver_data *dd)
+ 		if (buf[288] == 0xBF) {
+ 			dev_info(&dd->pdev->dev,
+ 				"Drive indicates rebuild has failed.\n");
+-			/* TODO */
++			set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
+ 		}
+ 	}
+ 
+@@ -3263,20 +3335,25 @@ out1:
+ 	return rv;
+ }
+ 
+-static void mtip_standby_drive(struct driver_data *dd)
++static int mtip_standby_drive(struct driver_data *dd)
+ {
+-	if (dd->sr)
+-		return;
++	int rv = 0;
+ 
++	if (dd->sr || !dd->port)
++		return -ENODEV;
+ 	/*
+ 	 * Send standby immediate (E0h) to the drive so that it
+ 	 * saves its state.
+ 	 */
+ 	if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) &&
+-	    !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))
+-		if (mtip_standby_immediate(dd->port))
++	    !test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag) &&
++	    !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)) {
++		rv = mtip_standby_immediate(dd->port);
++		if (rv)
+ 			dev_warn(&dd->pdev->dev,
+ 				"STANDBY IMMEDIATE failed\n");
++	}
++	return rv;
+ }
+ 
+ /*
+@@ -3289,10 +3366,6 @@ static void mtip_standby_drive(struct driver_data *dd)
+  */
+ static int mtip_hw_exit(struct driver_data *dd)
+ {
+-	/*
+-	 * Send standby immediate (E0h) to the drive so that it
+-	 * saves its state.
+-	 */
+ 	if (!dd->sr) {
+ 		/* de-initialize the port. */
+ 		mtip_deinit_port(dd->port);
+@@ -3334,8 +3407,7 @@ static int mtip_hw_shutdown(struct driver_data *dd)
+ 	 * Send standby immediate (E0h) to the drive so that it
+ 	 * saves its state.
+ 	 */
+-	if (!dd->sr && dd->port)
+-		mtip_standby_immediate(dd->port);
++	mtip_standby_drive(dd);
+ 
+ 	return 0;
+ }
+@@ -3358,7 +3430,7 @@ static int mtip_hw_suspend(struct driver_data *dd)
+ 	 * Send standby immediate (E0h) to the drive
+ 	 * so that it saves its state.
+ 	 */
+-	if (mtip_standby_immediate(dd->port) != 0) {
++	if (mtip_standby_drive(dd) != 0) {
+ 		dev_err(&dd->pdev->dev,
+ 			"Failed standby-immediate command\n");
+ 		return -EFAULT;
+@@ -3596,6 +3668,28 @@ static int mtip_block_getgeo(struct block_device *dev,
+ 	return 0;
+ }
+ 
++static int mtip_block_open(struct block_device *dev, fmode_t mode)
++{
++	struct driver_data *dd;
++
++	if (dev && dev->bd_disk) {
++		dd = (struct driver_data *) dev->bd_disk->private_data;
++
++		if (dd) {
++			if (test_bit(MTIP_DDF_REMOVAL_BIT,
++							&dd->dd_flag)) {
++				return -ENODEV;
++			}
++			return 0;
++		}
++	}
++	return -ENODEV;
++}
++
++void mtip_block_release(struct gendisk *disk, fmode_t mode)
++{
++}
++
+ /*
+  * Block device operation function.
+  *
+@@ -3603,6 +3697,8 @@ static int mtip_block_getgeo(struct block_device *dev,
+  * layer.
+  */
+ static const struct block_device_operations mtip_block_ops = {
++	.open		= mtip_block_open,
++	.release	= mtip_block_release,
+ 	.ioctl		= mtip_block_ioctl,
+ #ifdef CONFIG_COMPAT
+ 	.compat_ioctl	= mtip_block_compat_ioctl,
+@@ -3664,10 +3760,9 @@ static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
+ 				rq_data_dir(rq))) {
+ 			return -ENODATA;
+ 		}
+-		if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)))
++		if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag) ||
++			test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)))
+ 			return -ENODATA;
+-		if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
+-			return -ENXIO;
+ 	}
+ 
+ 	if (rq->cmd_flags & REQ_DISCARD) {
+@@ -3779,11 +3874,33 @@ static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx,
+ 	return 0;
+ }
+ 
++static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req,
++								bool reserved)
++{
++	struct driver_data *dd = req->q->queuedata;
++	int ret = BLK_EH_RESET_TIMER;
++
++	if (reserved)
++		goto exit_handler;
++
++	if (test_bit(req->tag, dd->port->cmds_to_issue))
++		goto exit_handler;
++
++	if (test_and_set_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags))
++		goto exit_handler;
++
++	wake_up_interruptible(&dd->port->svc_wait);
++exit_handler:
++	return ret;
++}
++
+ static struct blk_mq_ops mtip_mq_ops = {
+ 	.queue_rq	= mtip_queue_rq,
+ 	.map_queue	= blk_mq_map_queue,
+ 	.init_request	= mtip_init_cmd,
+ 	.exit_request	= mtip_free_cmd,
++	.complete	= mtip_softirq_done_fn,
++	.timeout        = mtip_cmd_timeout,
+ };
+ 
+ /*
+@@ -3850,7 +3967,6 @@ static int mtip_block_initialize(struct driver_data *dd)
+ 
+ 	mtip_hw_debugfs_init(dd);
+ 
+-skip_create_disk:
+ 	memset(&dd->tags, 0, sizeof(dd->tags));
+ 	dd->tags.ops = &mtip_mq_ops;
+ 	dd->tags.nr_hw_queues = 1;
+@@ -3860,12 +3976,13 @@ skip_create_disk:
+ 	dd->tags.numa_node = dd->numa_node;
+ 	dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
+ 	dd->tags.driver_data = dd;
++	dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;
+ 
+ 	rv = blk_mq_alloc_tag_set(&dd->tags);
+ 	if (rv) {
+ 		dev_err(&dd->pdev->dev,
+ 			"Unable to allocate request queue\n");
+-		goto block_queue_alloc_init_error;
++		goto block_queue_alloc_tag_error;
+ 	}
+ 
+ 	/* Allocate the request queue. */
+@@ -3880,6 +3997,7 @@ skip_create_disk:
+ 	dd->disk->queue		= dd->queue;
+ 	dd->queue->queuedata	= dd;
+ 
++skip_create_disk:
+ 	/* Initialize the protocol layer. */
+ 	wait_for_rebuild = mtip_hw_get_identify(dd);
+ 	if (wait_for_rebuild < 0) {
+@@ -3976,8 +4094,9 @@ kthread_run_error:
+ read_capacity_error:
+ init_hw_cmds_error:
+ 	blk_cleanup_queue(dd->queue);
+-	blk_mq_free_tag_set(&dd->tags);
+ block_queue_alloc_init_error:
++	blk_mq_free_tag_set(&dd->tags);
++block_queue_alloc_tag_error:
+ 	mtip_hw_debugfs_exit(dd);
+ disk_index_error:
+ 	spin_lock(&rssd_index_lock);
+@@ -3994,6 +4113,22 @@ protocol_init_error:
+ 	return rv;
+ }
+ 
++static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
++{
++	struct driver_data *dd = (struct driver_data *)data;
++	struct mtip_cmd *cmd;
++
++	if (likely(!reserv))
++		blk_mq_complete_request(rq, -ENODEV);
++	else if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &dd->port->flags)) {
++
++		cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
++		if (cmd->comp_func)
++			cmd->comp_func(dd->port, MTIP_TAG_INTERNAL,
++					cmd, -ENODEV);
++	}
++}
++
+ /*
+  * Block layer deinitialization function.
+  *
+@@ -4025,12 +4160,23 @@ static int mtip_block_remove(struct driver_data *dd)
+ 		}
+ 	}
+ 
+-	if (!dd->sr)
+-		mtip_standby_drive(dd);
++	if (!dd->sr) {
++		/*
++		 * Explicitly wait here for IOs to quiesce,
++		 * as mtip_standby_drive usually won't wait for IOs.
++		 */
++		if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS,
++								GFP_KERNEL))
++			mtip_standby_drive(dd);
++	}
+ 	else
+ 		dev_info(&dd->pdev->dev, "device %s surprise removal\n",
+ 						dd->disk->disk_name);
+ 
++	blk_mq_freeze_queue_start(dd->queue);
++	blk_mq_stop_hw_queues(dd->queue);
++	blk_mq_all_tag_busy_iter(dd->tags.tags[0], mtip_no_dev_cleanup, dd);
++
+ 	/*
+ 	 * Delete our gendisk structure. This also removes the device
+ 	 * from /dev
+@@ -4040,7 +4186,8 @@ static int mtip_block_remove(struct driver_data *dd)
+ 		dd->bdev = NULL;
+ 	}
+ 	if (dd->disk) {
+-		del_gendisk(dd->disk);
++		if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
++			del_gendisk(dd->disk);
+ 		if (dd->disk->queue) {
+ 			blk_cleanup_queue(dd->queue);
+ 			blk_mq_free_tag_set(&dd->tags);
+@@ -4081,7 +4228,8 @@ static int mtip_block_shutdown(struct driver_data *dd)
+ 		dev_info(&dd->pdev->dev,
+ 			"Shutting down %s ...\n", dd->disk->disk_name);
+ 
+-		del_gendisk(dd->disk);
++		if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
++			del_gendisk(dd->disk);
+ 		if (dd->disk->queue) {
+ 			blk_cleanup_queue(dd->queue);
+ 			blk_mq_free_tag_set(&dd->tags);
+@@ -4426,7 +4574,7 @@ static void mtip_pci_remove(struct pci_dev *pdev)
+ 	struct driver_data *dd = pci_get_drvdata(pdev);
+ 	unsigned long flags, to;
+ 
+-	set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
++	set_bit(MTIP_DDF_REMOVAL_BIT, &dd->dd_flag);
+ 
+ 	spin_lock_irqsave(&dev_lock, flags);
+ 	list_del_init(&dd->online_list);
+@@ -4443,12 +4591,17 @@ static void mtip_pci_remove(struct pci_dev *pdev)
+ 	} while (atomic_read(&dd->irq_workers_active) != 0 &&
+ 		time_before(jiffies, to));
+ 
++	if (!dd->sr)
++		fsync_bdev(dd->bdev);
++
+ 	if (atomic_read(&dd->irq_workers_active) != 0) {
+ 		dev_warn(&dd->pdev->dev,
+ 			"Completion workers still active!\n");
+ 	}
+ 
+-	blk_mq_stop_hw_queues(dd->queue);
++	blk_set_queue_dying(dd->queue);
++	set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
++
+ 	/* Clean up the block layer. */
+ 	mtip_block_remove(dd);
+ 
+diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
+index 3274784008eb..7617888f7944 100644
+--- a/drivers/block/mtip32xx/mtip32xx.h
++++ b/drivers/block/mtip32xx/mtip32xx.h
+@@ -134,16 +134,24 @@ enum {
+ 	MTIP_PF_EH_ACTIVE_BIT       = 1, /* error handling */
+ 	MTIP_PF_SE_ACTIVE_BIT       = 2, /* secure erase */
+ 	MTIP_PF_DM_ACTIVE_BIT       = 3, /* download microcde */
++	MTIP_PF_TO_ACTIVE_BIT       = 9, /* timeout handling */
+ 	MTIP_PF_PAUSE_IO      =	((1 << MTIP_PF_IC_ACTIVE_BIT) |
+ 				(1 << MTIP_PF_EH_ACTIVE_BIT) |
+ 				(1 << MTIP_PF_SE_ACTIVE_BIT) |
+-				(1 << MTIP_PF_DM_ACTIVE_BIT)),
++				(1 << MTIP_PF_DM_ACTIVE_BIT) |
++				(1 << MTIP_PF_TO_ACTIVE_BIT)),
+ 
+ 	MTIP_PF_SVC_THD_ACTIVE_BIT  = 4,
+ 	MTIP_PF_ISSUE_CMDS_BIT      = 5,
+ 	MTIP_PF_REBUILD_BIT         = 6,
+ 	MTIP_PF_SVC_THD_STOP_BIT    = 8,
+ 
++	MTIP_PF_SVC_THD_WORK	= ((1 << MTIP_PF_EH_ACTIVE_BIT) |
++				  (1 << MTIP_PF_ISSUE_CMDS_BIT) |
++				  (1 << MTIP_PF_REBUILD_BIT) |
++				  (1 << MTIP_PF_SVC_THD_STOP_BIT) |
++				  (1 << MTIP_PF_TO_ACTIVE_BIT)),
++
+ 	/* below are bit numbers in 'dd_flag' defined in driver_data */
+ 	MTIP_DDF_SEC_LOCK_BIT	    = 0,
+ 	MTIP_DDF_REMOVE_PENDING_BIT = 1,
+@@ -153,6 +161,7 @@ enum {
+ 	MTIP_DDF_RESUME_BIT         = 6,
+ 	MTIP_DDF_INIT_DONE_BIT      = 7,
+ 	MTIP_DDF_REBUILD_FAILED_BIT = 8,
++	MTIP_DDF_REMOVAL_BIT	    = 9,
+ 
+ 	MTIP_DDF_STOP_IO      = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) |
+ 				(1 << MTIP_DDF_SEC_LOCK_BIT) |
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index e4c5cc107934..c65d41f4007a 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -618,8 +618,8 @@ static void nbd_request_handler(struct request_queue *q)
+ 			req, req->cmd_type);
+ 
+ 		if (unlikely(!nbd->sock)) {
+-			dev_err(disk_to_dev(nbd->disk),
+-				"Attempted send on closed socket\n");
++			dev_err_ratelimited(disk_to_dev(nbd->disk),
++					    "Attempted send on closed socket\n");
+ 			req->errors++;
+ 			nbd_end_request(nbd, req);
+ 			spin_lock_irq(q->queue_lock);
+diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
+index 64a7b5971b57..cab97593ba54 100644
+--- a/drivers/block/null_blk.c
++++ b/drivers/block/null_blk.c
+@@ -742,10 +742,11 @@ static int null_add_dev(void)
+ 
+ 	add_disk(disk);
+ 
++done:
+ 	mutex_lock(&lock);
+ 	list_add_tail(&nullb->list, &nullb_list);
+ 	mutex_unlock(&lock);
+-done:
++
+ 	return 0;
+ 
+ out_cleanup_lightnvm:
+diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
+index 562b5a4ca7b7..78a39f736c64 100644
+--- a/drivers/block/paride/pd.c
++++ b/drivers/block/paride/pd.c
+@@ -126,7 +126,7 @@
+ */
+ #include <linux/types.h>
+ 
+-static bool verbose = 0;
++static int verbose = 0;
+ static int major = PD_MAJOR;
+ static char *name = PD_NAME;
+ static int cluster = 64;
+@@ -161,7 +161,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};
+ static DEFINE_MUTEX(pd_mutex);
+ static DEFINE_SPINLOCK(pd_lock);
+ 
+-module_param(verbose, bool, 0);
++module_param(verbose, int, 0);
+ module_param(major, int, 0);
+ module_param(name, charp, 0);
+ module_param(cluster, int, 0);
+diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
+index 1740d75e8a32..216a94fed5b4 100644
+--- a/drivers/block/paride/pt.c
++++ b/drivers/block/paride/pt.c
+@@ -117,7 +117,7 @@
+ 
+ */
+ 
+-static bool verbose = 0;
++static int verbose = 0;
+ static int major = PT_MAJOR;
+ static char *name = PT_NAME;
+ static int disable = 0;
+@@ -152,7 +152,7 @@ static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
+ 
+ #include <asm/uaccess.h>
+ 
+-module_param(verbose, bool, 0);
++module_param(verbose, int, 0);
+ module_param(major, int, 0);
+ module_param(name, charp, 0);
+ module_param_array(drive0, int, NULL, 0);
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 4a876785b68c..9745cf9fcd3c 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -1955,7 +1955,7 @@ static struct ceph_osd_request *rbd_osd_req_create(
+ 
+ 	osdc = &rbd_dev->rbd_client->client->osdc;
+ 	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
+-					  GFP_ATOMIC);
++					  GFP_NOIO);
+ 	if (!osd_req)
+ 		return NULL;	/* ENOMEM */
+ 
+@@ -2004,7 +2004,7 @@ rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
+ 	rbd_dev = img_request->rbd_dev;
+ 	osdc = &rbd_dev->rbd_client->client->osdc;
+ 	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
+-						false, GFP_ATOMIC);
++						false, GFP_NOIO);
+ 	if (!osd_req)
+ 		return NULL;	/* ENOMEM */
+ 
+@@ -2506,7 +2506,7 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
+ 					bio_chain_clone_range(&bio_list,
+ 								&bio_offset,
+ 								clone_size,
+-								GFP_ATOMIC);
++								GFP_NOIO);
+ 			if (!obj_request->bio_list)
+ 				goto out_unwind;
+ 		} else if (type == OBJ_REQUEST_PAGES) {
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index fa893c3ec408..0beaa52df66b 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -82,6 +82,7 @@ static const struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x0489, 0xe05f) },
+ 	{ USB_DEVICE(0x0489, 0xe076) },
+ 	{ USB_DEVICE(0x0489, 0xe078) },
++	{ USB_DEVICE(0x0489, 0xe095) },
+ 	{ USB_DEVICE(0x04c5, 0x1330) },
+ 	{ USB_DEVICE(0x04CA, 0x3004) },
+ 	{ USB_DEVICE(0x04CA, 0x3005) },
+@@ -92,6 +93,7 @@ static const struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x04CA, 0x300d) },
+ 	{ USB_DEVICE(0x04CA, 0x300f) },
+ 	{ USB_DEVICE(0x04CA, 0x3010) },
++	{ USB_DEVICE(0x04CA, 0x3014) },
+ 	{ USB_DEVICE(0x0930, 0x0219) },
+ 	{ USB_DEVICE(0x0930, 0x021c) },
+ 	{ USB_DEVICE(0x0930, 0x0220) },
+@@ -113,10 +115,12 @@ static const struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3362) },
+ 	{ USB_DEVICE(0x13d3, 0x3375) },
+ 	{ USB_DEVICE(0x13d3, 0x3393) },
++	{ USB_DEVICE(0x13d3, 0x3395) },
+ 	{ USB_DEVICE(0x13d3, 0x3402) },
+ 	{ USB_DEVICE(0x13d3, 0x3408) },
+ 	{ USB_DEVICE(0x13d3, 0x3423) },
+ 	{ USB_DEVICE(0x13d3, 0x3432) },
++	{ USB_DEVICE(0x13d3, 0x3472) },
+ 	{ USB_DEVICE(0x13d3, 0x3474) },
+ 
+ 	/* Atheros AR5BBU12 with sflash firmware */
+@@ -144,6 +148,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+@@ -154,6 +159,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
+@@ -175,10 +181,12 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
+ 
+ 	/* Atheros AR5BBU22 with sflash firmware */
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index a191e318fab8..0d4e372e426d 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -196,6 +196,7 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+@@ -206,6 +207,7 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
+@@ -227,10 +229,12 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
+ 
+ 	/* Atheros AR5BBU12 with sflash firmware */
+diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
+index 80783dcb7f57..aba31210c802 100644
+--- a/drivers/bluetooth/hci_vhci.c
++++ b/drivers/bluetooth/hci_vhci.c
+@@ -50,6 +50,7 @@ struct vhci_data {
+ 	wait_queue_head_t read_wait;
+ 	struct sk_buff_head readq;
+ 
++	struct mutex open_mutex;
+ 	struct delayed_work open_timeout;
+ };
+ 
+@@ -87,12 +88,15 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
+ 	return 0;
+ }
+ 
+-static int vhci_create_device(struct vhci_data *data, __u8 opcode)
++static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
+ {
+ 	struct hci_dev *hdev;
+ 	struct sk_buff *skb;
+ 	__u8 dev_type;
+ 
++	if (data->hdev)
++		return -EBADFD;
++
+ 	/* bits 0-1 are dev_type (BR/EDR or AMP) */
+ 	dev_type = opcode & 0x03;
+ 
+@@ -151,6 +155,17 @@ static int vhci_create_device(struct vhci_data *data, __u8 opcode)
+ 	return 0;
+ }
+ 
++static int vhci_create_device(struct vhci_data *data, __u8 opcode)
++{
++	int err;
++
++	mutex_lock(&data->open_mutex);
++	err = __vhci_create_device(data, opcode);
++	mutex_unlock(&data->open_mutex);
++
++	return err;
++}
++
+ static inline ssize_t vhci_get_user(struct vhci_data *data,
+ 				    struct iov_iter *from)
+ {
+@@ -189,11 +204,6 @@ static inline ssize_t vhci_get_user(struct vhci_data *data,
+ 		break;
+ 
+ 	case HCI_VENDOR_PKT:
+-		if (data->hdev) {
+-			kfree_skb(skb);
+-			return -EBADFD;
+-		}
+-
+ 		cancel_delayed_work_sync(&data->open_timeout);
+ 
+ 		opcode = *((__u8 *) skb->data);
+@@ -320,6 +330,7 @@ static int vhci_open(struct inode *inode, struct file *file)
+ 	skb_queue_head_init(&data->readq);
+ 	init_waitqueue_head(&data->read_wait);
+ 
++	mutex_init(&data->open_mutex);
+ 	INIT_DELAYED_WORK(&data->open_timeout, vhci_open_timeout);
+ 
+ 	file->private_data = data;
+@@ -333,15 +344,18 @@ static int vhci_open(struct inode *inode, struct file *file)
+ static int vhci_release(struct inode *inode, struct file *file)
+ {
+ 	struct vhci_data *data = file->private_data;
+-	struct hci_dev *hdev = data->hdev;
++	struct hci_dev *hdev;
+ 
+ 	cancel_delayed_work_sync(&data->open_timeout);
+ 
++	hdev = data->hdev;
++
+ 	if (hdev) {
+ 		hci_unregister_dev(hdev);
+ 		hci_free_dev(hdev);
+ 	}
+ 
++	skb_queue_purge(&data->readq);
+ 	file->private_data = NULL;
+ 	kfree(data);
+ 
+diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
+index e98d15eaa799..1827fc4d15c1 100644
+--- a/drivers/bus/imx-weim.c
++++ b/drivers/bus/imx-weim.c
+@@ -150,7 +150,7 @@ static int __init weim_parse_dt(struct platform_device *pdev,
+ 			return ret;
+ 	}
+ 
+-	for_each_child_of_node(pdev->dev.of_node, child) {
++	for_each_available_child_of_node(pdev->dev.of_node, child) {
+ 		if (!child->name)
+ 			continue;
+ 
+diff --git a/drivers/bus/uniphier-system-bus.c b/drivers/bus/uniphier-system-bus.c
+index 834a2aeaf27a..350b7309c26d 100644
+--- a/drivers/bus/uniphier-system-bus.c
++++ b/drivers/bus/uniphier-system-bus.c
+@@ -108,7 +108,7 @@ static int uniphier_system_bus_check_overlap(
+ 
+ 	for (i = 0; i < ARRAY_SIZE(priv->bank); i++) {
+ 		for (j = i + 1; j < ARRAY_SIZE(priv->bank); j++) {
+-			if (priv->bank[i].end > priv->bank[j].base ||
++			if (priv->bank[i].end > priv->bank[j].base &&
+ 			    priv->bank[i].base < priv->bank[j].end) {
+ 				dev_err(priv->dev,
+ 					"region overlap between bank%d and bank%d\n",
+diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
+index 45cc39aabeee..252142524ff2 100644
+--- a/drivers/char/tpm/tpm-chip.c
++++ b/drivers/char/tpm/tpm-chip.c
+@@ -136,11 +136,13 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
+ 	chip->cdev.owner = chip->pdev->driver->owner;
+ 	chip->cdev.kobj.parent = &chip->dev.kobj;
+ 
++	devm_add_action(dev, (void (*)(void *)) put_device, &chip->dev);
++
+ 	return chip;
+ }
+ EXPORT_SYMBOL_GPL(tpmm_chip_alloc);
+ 
+-static int tpm_dev_add_device(struct tpm_chip *chip)
++static int tpm_add_char_device(struct tpm_chip *chip)
+ {
+ 	int rc;
+ 
+@@ -151,7 +153,6 @@ static int tpm_dev_add_device(struct tpm_chip *chip)
+ 			chip->devname, MAJOR(chip->dev.devt),
+ 			MINOR(chip->dev.devt), rc);
+ 
+-		device_unregister(&chip->dev);
+ 		return rc;
+ 	}
+ 
+@@ -162,16 +163,17 @@ static int tpm_dev_add_device(struct tpm_chip *chip)
+ 			chip->devname, MAJOR(chip->dev.devt),
+ 			MINOR(chip->dev.devt), rc);
+ 
++		cdev_del(&chip->cdev);
+ 		return rc;
+ 	}
+ 
+ 	return rc;
+ }
+ 
+-static void tpm_dev_del_device(struct tpm_chip *chip)
++static void tpm_del_char_device(struct tpm_chip *chip)
+ {
+ 	cdev_del(&chip->cdev);
+-	device_unregister(&chip->dev);
++	device_del(&chip->dev);
+ }
+ 
+ static int tpm1_chip_register(struct tpm_chip *chip)
+@@ -222,7 +224,7 @@ int tpm_chip_register(struct tpm_chip *chip)
+ 
+ 	tpm_add_ppi(chip);
+ 
+-	rc = tpm_dev_add_device(chip);
++	rc = tpm_add_char_device(chip);
+ 	if (rc)
+ 		goto out_err;
+ 
+@@ -274,6 +276,6 @@ void tpm_chip_unregister(struct tpm_chip *chip)
+ 		sysfs_remove_link(&chip->pdev->kobj, "ppi");
+ 
+ 	tpm1_chip_unregister(chip);
+-	tpm_dev_del_device(chip);
++	tpm_del_char_device(chip);
+ }
+ EXPORT_SYMBOL_GPL(tpm_chip_unregister);
+diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
+index 45a634016f95..b28e4da3d2cf 100644
+--- a/drivers/char/tpm/tpm2-cmd.c
++++ b/drivers/char/tpm/tpm2-cmd.c
+@@ -20,7 +20,11 @@
+ #include <keys/trusted-type.h>
+ 
+ enum tpm2_object_attributes {
+-	TPM2_ATTR_USER_WITH_AUTH	= BIT(6),
++	TPM2_OA_USER_WITH_AUTH		= BIT(6),
++};
++
++enum tpm2_session_attributes {
++	TPM2_SA_CONTINUE_SESSION	= BIT(0),
+ };
+ 
+ struct tpm2_startup_in {
+@@ -478,22 +482,18 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
+ 	tpm_buf_append_u8(&buf, payload->migratable);
+ 
+ 	/* public */
+-	if (options->policydigest)
+-		tpm_buf_append_u16(&buf, 14 + options->digest_len);
+-	else
+-		tpm_buf_append_u16(&buf, 14);
+-
++	tpm_buf_append_u16(&buf, 14 + options->policydigest_len);
+ 	tpm_buf_append_u16(&buf, TPM2_ALG_KEYEDHASH);
+ 	tpm_buf_append_u16(&buf, hash);
+ 
+ 	/* policy */
+-	if (options->policydigest) {
++	if (options->policydigest_len) {
+ 		tpm_buf_append_u32(&buf, 0);
+-		tpm_buf_append_u16(&buf, options->digest_len);
++		tpm_buf_append_u16(&buf, options->policydigest_len);
+ 		tpm_buf_append(&buf, options->policydigest,
+-			       options->digest_len);
++			       options->policydigest_len);
+ 	} else {
+-		tpm_buf_append_u32(&buf, TPM2_ATTR_USER_WITH_AUTH);
++		tpm_buf_append_u32(&buf, TPM2_OA_USER_WITH_AUTH);
+ 		tpm_buf_append_u16(&buf, 0);
+ 	}
+ 
+@@ -631,7 +631,7 @@ static int tpm2_unseal(struct tpm_chip *chip,
+ 			     options->policyhandle ?
+ 			     options->policyhandle : TPM2_RS_PW,
+ 			     NULL /* nonce */, 0,
+-			     0 /* session_attributes */,
++			     TPM2_SA_CONTINUE_SESSION,
+ 			     options->blobauth /* hmac */,
+ 			     TPM_DIGEST_SIZE);
+ 
+diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
+index 8342cf51ffdc..26bab5a2959f 100644
+--- a/drivers/char/tpm/tpm_crb.c
++++ b/drivers/char/tpm/tpm_crb.c
+@@ -302,11 +302,11 @@ static int crb_acpi_remove(struct acpi_device *device)
+ 	struct device *dev = &device->dev;
+ 	struct tpm_chip *chip = dev_get_drvdata(dev);
+ 
+-	tpm_chip_unregister(chip);
+-
+ 	if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ 		tpm2_shutdown(chip, TPM2_SU_CLEAR);
+ 
++	tpm_chip_unregister(chip);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
+index bd72fb04225e..4e6940acf639 100644
+--- a/drivers/char/tpm/tpm_eventlog.c
++++ b/drivers/char/tpm/tpm_eventlog.c
+@@ -232,7 +232,7 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
+ {
+ 	struct tcpa_event *event = v;
+ 	struct tcpa_event temp_event;
+-	char *tempPtr;
++	char *temp_ptr;
+ 	int i;
+ 
+ 	memcpy(&temp_event, event, sizeof(struct tcpa_event));
+@@ -242,10 +242,16 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
+ 	temp_event.event_type = do_endian_conversion(event->event_type);
+ 	temp_event.event_size = do_endian_conversion(event->event_size);
+ 
+-	tempPtr = (char *)&temp_event;
++	temp_ptr = (char *) &temp_event;
+ 
+-	for (i = 0; i < sizeof(struct tcpa_event) + temp_event.event_size; i++)
+-		seq_putc(m, tempPtr[i]);
++	for (i = 0; i < (sizeof(struct tcpa_event) - 1) ; i++)
++		seq_putc(m, temp_ptr[i]);
++
++	temp_ptr = (char *) v;
++
++	for (i = (sizeof(struct tcpa_event) - 1);
++	     i < (sizeof(struct tcpa_event) + temp_event.event_size); i++)
++		seq_putc(m, temp_ptr[i]);
+ 
+ 	return 0;
+ 
+diff --git a/drivers/clk/bcm/clk-bcm2835-aux.c b/drivers/clk/bcm/clk-bcm2835-aux.c
+index e4f89e28b5ec..3a177ade6e6c 100644
+--- a/drivers/clk/bcm/clk-bcm2835-aux.c
++++ b/drivers/clk/bcm/clk-bcm2835-aux.c
+@@ -38,8 +38,8 @@ static int bcm2835_aux_clk_probe(struct platform_device *pdev)
+ 
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	reg = devm_ioremap_resource(dev, res);
+-	if (!reg)
+-		return -ENODEV;
++	if (IS_ERR(reg))
++		return PTR_ERR(reg);
+ 
+ 	onecell = devm_kmalloc(dev, sizeof(*onecell), GFP_KERNEL);
+ 	if (!onecell)
+diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
+index 015e687ffabe..dd2856b5633c 100644
+--- a/drivers/clk/bcm/clk-bcm2835.c
++++ b/drivers/clk/bcm/clk-bcm2835.c
+@@ -1078,10 +1078,12 @@ static void bcm2835_pll_divider_off(struct clk_hw *hw)
+ 	struct bcm2835_cprman *cprman = divider->cprman;
+ 	const struct bcm2835_pll_divider_data *data = divider->data;
+ 
++	spin_lock(&cprman->regs_lock);
+ 	cprman_write(cprman, data->cm_reg,
+ 		     (cprman_read(cprman, data->cm_reg) &
+ 		      ~data->load_mask) | data->hold_mask);
+ 	cprman_write(cprman, data->a2w_reg, A2W_PLL_CHANNEL_DISABLE);
++	spin_unlock(&cprman->regs_lock);
+ }
+ 
+ static int bcm2835_pll_divider_on(struct clk_hw *hw)
+@@ -1090,12 +1092,14 @@ static int bcm2835_pll_divider_on(struct clk_hw *hw)
+ 	struct bcm2835_cprman *cprman = divider->cprman;
+ 	const struct bcm2835_pll_divider_data *data = divider->data;
+ 
++	spin_lock(&cprman->regs_lock);
+ 	cprman_write(cprman, data->a2w_reg,
+ 		     cprman_read(cprman, data->a2w_reg) &
+ 		     ~A2W_PLL_CHANNEL_DISABLE);
+ 
+ 	cprman_write(cprman, data->cm_reg,
+ 		     cprman_read(cprman, data->cm_reg) & ~data->hold_mask);
++	spin_unlock(&cprman->regs_lock);
+ 
+ 	return 0;
+ }
+@@ -1107,13 +1111,15 @@ static int bcm2835_pll_divider_set_rate(struct clk_hw *hw,
+ 	struct bcm2835_pll_divider *divider = bcm2835_pll_divider_from_hw(hw);
+ 	struct bcm2835_cprman *cprman = divider->cprman;
+ 	const struct bcm2835_pll_divider_data *data = divider->data;
+-	u32 cm;
+-	int ret;
++	u32 cm, div, max_div = 1 << A2W_PLL_DIV_BITS;
+ 
+-	ret = clk_divider_ops.set_rate(hw, rate, parent_rate);
+-	if (ret)
+-		return ret;
++	div = DIV_ROUND_UP_ULL(parent_rate, rate);
++
++	div = min(div, max_div);
++	if (div == max_div)
++		div = 0;
+ 
++	cprman_write(cprman, data->a2w_reg, div);
+ 	cm = cprman_read(cprman, data->cm_reg);
+ 	cprman_write(cprman, data->cm_reg, cm | data->load_mask);
+ 	cprman_write(cprman, data->cm_reg, cm & ~data->load_mask);
+diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
+index ded3ff4b91b9..aa1dacdaa39d 100644
+--- a/drivers/clk/clk-divider.c
++++ b/drivers/clk/clk-divider.c
+@@ -423,6 +423,12 @@ const struct clk_ops clk_divider_ops = {
+ };
+ EXPORT_SYMBOL_GPL(clk_divider_ops);
+ 
++const struct clk_ops clk_divider_ro_ops = {
++	.recalc_rate = clk_divider_recalc_rate,
++	.round_rate = clk_divider_round_rate,
++};
++EXPORT_SYMBOL_GPL(clk_divider_ro_ops);
++
+ static struct clk *_register_divider(struct device *dev, const char *name,
+ 		const char *parent_name, unsigned long flags,
+ 		void __iomem *reg, u8 shift, u8 width,
+@@ -446,7 +452,10 @@ static struct clk *_register_divider(struct device *dev, const char *name,
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	init.name = name;
+-	init.ops = &clk_divider_ops;
++	if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
++		init.ops = &clk_divider_ro_ops;
++	else
++		init.ops = &clk_divider_ops;
+ 	init.flags = flags | CLK_IS_BASIC;
+ 	init.parent_names = (parent_name ? &parent_name: NULL);
+ 	init.num_parents = (parent_name ? 1 : 0);
+diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
+index 10224b01b97c..b134a8b15e2c 100644
+--- a/drivers/clk/clk-xgene.c
++++ b/drivers/clk/clk-xgene.c
+@@ -351,8 +351,8 @@ static int xgene_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ 		/* Set new divider */
+ 		data = xgene_clk_read(pclk->param.divider_reg +
+ 				pclk->param.reg_divider_offset);
+-		data &= ~((1 << pclk->param.reg_divider_width) - 1)
+-				<< pclk->param.reg_divider_shift;
++		data &= ~(((1 << pclk->param.reg_divider_width) - 1)
++				<< pclk->param.reg_divider_shift);
+ 		data |= divider;
+ 		xgene_clk_write(data, pclk->param.divider_reg +
+ 					pclk->param.reg_divider_offset);
+diff --git a/drivers/clk/meson/clkc.c b/drivers/clk/meson/clkc.c
+index c83ae1367abc..d920d410b51d 100644
+--- a/drivers/clk/meson/clkc.c
++++ b/drivers/clk/meson/clkc.c
+@@ -198,7 +198,7 @@ meson_clk_register_fixed_rate(const struct clk_conf *clk_conf,
+ }
+ 
+ void __init meson_clk_register_clks(const struct clk_conf *clk_confs,
+-				    size_t nr_confs,
++				    unsigned int nr_confs,
+ 				    void __iomem *clk_base)
+ {
+ 	unsigned int i;
+diff --git a/drivers/clk/nxp/clk-lpc18xx-ccu.c b/drivers/clk/nxp/clk-lpc18xx-ccu.c
+index 13aabbb3acbe..558da89555af 100644
+--- a/drivers/clk/nxp/clk-lpc18xx-ccu.c
++++ b/drivers/clk/nxp/clk-lpc18xx-ccu.c
+@@ -222,7 +222,7 @@ static void lpc18xx_ccu_register_branch_gate_div(struct lpc18xx_clk_branch *bran
+ 		div->width = 1;
+ 
+ 		div_hw = &div->hw;
+-		div_ops = &clk_divider_ops;
++		div_ops = &clk_divider_ro_ops;
+ 	}
+ 
+ 	branch->gate.reg = branch->offset + reg_base;
+diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
+index 8cc9b2868b41..5f56d6aae31d 100644
+--- a/drivers/clk/qcom/gcc-msm8916.c
++++ b/drivers/clk/qcom/gcc-msm8916.c
+@@ -2346,6 +2346,7 @@ static struct clk_branch gcc_crypto_ahb_clk = {
+ 				"pcnoc_bfdcd_clk_src",
+ 			},
+ 			.num_parents = 1,
++			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -2381,6 +2382,7 @@ static struct clk_branch gcc_crypto_clk = {
+ 				"crypto_clk_src",
+ 			},
+ 			.num_parents = 1,
++			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
+index 983dd7dc89a7..0a0c1f533249 100644
+--- a/drivers/clk/qcom/gcc-msm8960.c
++++ b/drivers/clk/qcom/gcc-msm8960.c
+@@ -2753,7 +2753,7 @@ static struct clk_rcg ce3_src = {
+ 	},
+ 	.freq_tbl = clk_tbl_ce3,
+ 	.clkr = {
+-		.enable_reg = 0x2c08,
++		.enable_reg = 0x36c0,
+ 		.enable_mask = BIT(7),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "ce3_src",
+@@ -2769,7 +2769,7 @@ static struct clk_branch ce3_core_clk = {
+ 	.halt_reg = 0x2fdc,
+ 	.halt_bit = 5,
+ 	.clkr = {
+-		.enable_reg = 0x36c4,
++		.enable_reg = 0x36cc,
+ 		.enable_mask = BIT(4),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "ce3_core_clk",
+diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
+index 7f7444cbf6fc..05263571c223 100644
+--- a/drivers/clk/rockchip/clk-rk3188.c
++++ b/drivers/clk/rockchip/clk-rk3188.c
+@@ -748,6 +748,7 @@ static const char *const rk3188_critical_clocks[] __initconst = {
+ 	"hclk_peri",
+ 	"pclk_cpu",
+ 	"pclk_peri",
++	"hclk_cpubus"
+ };
+ 
+ static void __init rk3188_common_clk_init(struct device_node *np)
+diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
+index 981a50205339..97f49aab8d42 100644
+--- a/drivers/clk/rockchip/clk-rk3228.c
++++ b/drivers/clk/rockchip/clk-rk3228.c
+@@ -605,13 +605,13 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
+ 
+ 	/* PD_MMC */
+ 	MMC(SCLK_SDMMC_DRV,    "sdmmc_drv",    "sclk_sdmmc", RK3228_SDMMC_CON0, 1),
+-	MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK3228_SDMMC_CON1, 1),
++	MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK3228_SDMMC_CON1, 0),
+ 
+ 	MMC(SCLK_SDIO_DRV,     "sdio_drv",     "sclk_sdio",  RK3228_SDIO_CON0,  1),
+-	MMC(SCLK_SDIO_SAMPLE,  "sdio_sample",  "sclk_sdio",  RK3228_SDIO_CON1,  1),
++	MMC(SCLK_SDIO_SAMPLE,  "sdio_sample",  "sclk_sdio",  RK3228_SDIO_CON1,  0),
+ 
+ 	MMC(SCLK_EMMC_DRV,     "emmc_drv",     "sclk_emmc",  RK3228_EMMC_CON0,  1),
+-	MMC(SCLK_EMMC_SAMPLE,  "emmc_sample",  "sclk_emmc",  RK3228_EMMC_CON1,  1),
++	MMC(SCLK_EMMC_SAMPLE,  "emmc_sample",  "sclk_emmc",  RK3228_EMMC_CON1,  0),
+ };
+ 
+ static const char *const rk3228_critical_clocks[] __initconst = {
+diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
+index 21f3ea909fab..57acb625c8ff 100644
+--- a/drivers/clk/rockchip/clk-rk3368.c
++++ b/drivers/clk/rockchip/clk-rk3368.c
+@@ -165,7 +165,7 @@ static const struct rockchip_cpuclk_reg_data rk3368_cpuclkb_data = {
+ 	.core_reg = RK3368_CLKSEL_CON(0),
+ 	.div_core_shift = 0,
+ 	.div_core_mask = 0x1f,
+-	.mux_core_shift = 15,
++	.mux_core_shift = 7,
+ };
+ 
+ static const struct rockchip_cpuclk_reg_data rk3368_cpuclkl_data = {
+@@ -218,29 +218,29 @@ static const struct rockchip_cpuclk_reg_data rk3368_cpuclkl_data = {
+ 	}
+ 
+ static struct rockchip_cpuclk_rate_table rk3368_cpuclkb_rates[] __initdata = {
+-	RK3368_CPUCLKB_RATE(1512000000, 2, 6, 6),
+-	RK3368_CPUCLKB_RATE(1488000000, 2, 5, 5),
+-	RK3368_CPUCLKB_RATE(1416000000, 2, 5, 5),
+-	RK3368_CPUCLKB_RATE(1200000000, 2, 4, 4),
+-	RK3368_CPUCLKB_RATE(1008000000, 2, 4, 4),
+-	RK3368_CPUCLKB_RATE( 816000000, 2, 3, 3),
+-	RK3368_CPUCLKB_RATE( 696000000, 2, 3, 3),
+-	RK3368_CPUCLKB_RATE( 600000000, 2, 2, 2),
+-	RK3368_CPUCLKB_RATE( 408000000, 2, 2, 2),
+-	RK3368_CPUCLKB_RATE( 312000000, 2, 2, 2),
++	RK3368_CPUCLKB_RATE(1512000000, 1, 5, 5),
++	RK3368_CPUCLKB_RATE(1488000000, 1, 4, 4),
++	RK3368_CPUCLKB_RATE(1416000000, 1, 4, 4),
++	RK3368_CPUCLKB_RATE(1200000000, 1, 3, 3),
++	RK3368_CPUCLKB_RATE(1008000000, 1, 3, 3),
++	RK3368_CPUCLKB_RATE( 816000000, 1, 2, 2),
++	RK3368_CPUCLKB_RATE( 696000000, 1, 2, 2),
++	RK3368_CPUCLKB_RATE( 600000000, 1, 1, 1),
++	RK3368_CPUCLKB_RATE( 408000000, 1, 1, 1),
++	RK3368_CPUCLKB_RATE( 312000000, 1, 1, 1),
+ };
+ 
+ static struct rockchip_cpuclk_rate_table rk3368_cpuclkl_rates[] __initdata = {
+-	RK3368_CPUCLKL_RATE(1512000000, 2, 7, 7),
+-	RK3368_CPUCLKL_RATE(1488000000, 2, 6, 6),
+-	RK3368_CPUCLKL_RATE(1416000000, 2, 6, 6),
+-	RK3368_CPUCLKL_RATE(1200000000, 2, 5, 5),
+-	RK3368_CPUCLKL_RATE(1008000000, 2, 5, 5),
+-	RK3368_CPUCLKL_RATE( 816000000, 2, 4, 4),
+-	RK3368_CPUCLKL_RATE( 696000000, 2, 3, 3),
+-	RK3368_CPUCLKL_RATE( 600000000, 2, 3, 3),
+-	RK3368_CPUCLKL_RATE( 408000000, 2, 2, 2),
+-	RK3368_CPUCLKL_RATE( 312000000, 2, 2, 2),
++	RK3368_CPUCLKL_RATE(1512000000, 1, 6, 6),
++	RK3368_CPUCLKL_RATE(1488000000, 1, 5, 5),
++	RK3368_CPUCLKL_RATE(1416000000, 1, 5, 5),
++	RK3368_CPUCLKL_RATE(1200000000, 1, 4, 4),
++	RK3368_CPUCLKL_RATE(1008000000, 1, 4, 4),
++	RK3368_CPUCLKL_RATE( 816000000, 1, 3, 3),
++	RK3368_CPUCLKL_RATE( 696000000, 1, 2, 2),
++	RK3368_CPUCLKL_RATE( 600000000, 1, 2, 2),
++	RK3368_CPUCLKL_RATE( 408000000, 1, 1, 1),
++	RK3368_CPUCLKL_RATE( 312000000, 1, 1, 1),
+ };
+ 
+ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
+@@ -384,10 +384,10 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
+ 	 * Clock-Architecture Diagram 3
+ 	 */
+ 
+-	COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_usb_p, 0,
++	COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_npll_usb_p, 0,
+ 			RK3368_CLKSEL_CON(15), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ 			RK3368_CLKGATE_CON(4), 6, GFLAGS),
+-	COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_usb_p, 0,
++	COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_npll_usb_p, 0,
+ 			RK3368_CLKSEL_CON(15), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ 			RK3368_CLKGATE_CON(4), 7, GFLAGS),
+ 
+@@ -442,7 +442,7 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
+ 	GATE(SCLK_HDMI_HDCP, "sclk_hdmi_hdcp", "xin24m", 0,
+ 			RK3368_CLKGATE_CON(4), 13, GFLAGS),
+ 	GATE(SCLK_HDMI_CEC, "sclk_hdmi_cec", "xin32k", 0,
+-			RK3368_CLKGATE_CON(5), 12, GFLAGS),
++			RK3368_CLKGATE_CON(4), 12, GFLAGS),
+ 
+ 	COMPOSITE_NODIV(0, "vip_src", mux_pll_src_cpll_gpll_p, 0,
+ 			RK3368_CLKSEL_CON(21), 15, 1, MFLAGS,
+diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
+index d9a0b5d4d47f..226af5720c9e 100644
+--- a/drivers/clk/rockchip/clk.c
++++ b/drivers/clk/rockchip/clk.c
+@@ -70,7 +70,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
+ 	if (gate_offset >= 0) {
+ 		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ 		if (!gate)
+-			return ERR_PTR(-ENOMEM);
++			goto err_gate;
+ 
+ 		gate->flags = gate_flags;
+ 		gate->reg = base + gate_offset;
+@@ -82,7 +82,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
+ 	if (div_width > 0) {
+ 		div = kzalloc(sizeof(*div), GFP_KERNEL);
+ 		if (!div)
+-			return ERR_PTR(-ENOMEM);
++			goto err_div;
+ 
+ 		div->flags = div_flags;
+ 		div->reg = base + muxdiv_offset;
+@@ -90,7 +90,9 @@ static struct clk *rockchip_clk_register_branch(const char *name,
+ 		div->width = div_width;
+ 		div->lock = lock;
+ 		div->table = div_table;
+-		div_ops = &clk_divider_ops;
++		div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
++						? &clk_divider_ro_ops
++						: &clk_divider_ops;
+ 	}
+ 
+ 	clk = clk_register_composite(NULL, name, parent_names, num_parents,
+@@ -100,6 +102,11 @@ static struct clk *rockchip_clk_register_branch(const char *name,
+ 				     flags);
+ 
+ 	return clk;
++err_div:
++	kfree(gate);
++err_gate:
++	kfree(mux);
++	return ERR_PTR(-ENOMEM);
+ }
+ 
+ struct rockchip_clk_frac {
+diff --git a/drivers/clk/sunxi/clk-sun8i-apb0.c b/drivers/clk/sunxi/clk-sun8i-apb0.c
+index 7ba61103a6f5..2ea61debffc1 100644
+--- a/drivers/clk/sunxi/clk-sun8i-apb0.c
++++ b/drivers/clk/sunxi/clk-sun8i-apb0.c
+@@ -36,7 +36,7 @@ static struct clk *sun8i_a23_apb0_register(struct device_node *node,
+ 
+ 	/* The A23 APB0 clock is a standard 2 bit wide divider clock */
+ 	clk = clk_register_divider(NULL, clk_name, clk_parent, 0, reg,
+-				   0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
++				   0, 2, 0, NULL);
+ 	if (IS_ERR(clk))
+ 		return clk;
+ 
+diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c
+index e78755e0ef78..1fe1e8d970cf 100644
+--- a/drivers/clk/versatile/clk-sp810.c
++++ b/drivers/clk/versatile/clk-sp810.c
+@@ -92,6 +92,7 @@ static void __init clk_sp810_of_setup(struct device_node *node)
+ 	int num = ARRAY_SIZE(parent_names);
+ 	char name[12];
+ 	struct clk_init_data init;
++	static int instance;
+ 	int i;
+ 	bool deprecated;
+ 
+@@ -117,7 +118,7 @@ static void __init clk_sp810_of_setup(struct device_node *node)
+ 	deprecated = !of_find_property(node, "assigned-clock-parents", NULL);
+ 
+ 	for (i = 0; i < ARRAY_SIZE(sp810->timerclken); i++) {
+-		snprintf(name, ARRAY_SIZE(name), "timerclken%d", i);
++		snprintf(name, sizeof(name), "sp810_%d_%d", instance, i);
+ 
+ 		sp810->timerclken[i].sp810 = sp810;
+ 		sp810->timerclken[i].channel = i;
+@@ -138,5 +139,6 @@ static void __init clk_sp810_of_setup(struct device_node *node)
+ 	}
+ 
+ 	of_clk_add_provider(node, clk_sp810_timerclken_of_get, sp810);
++	instance++;
+ }
+ CLK_OF_DECLARE(sp810, "arm,sp810", clk_sp810_of_setup);
+diff --git a/drivers/clocksource/tango_xtal.c b/drivers/clocksource/tango_xtal.c
+index 2bcecafdeaea..c407c47a3232 100644
+--- a/drivers/clocksource/tango_xtal.c
++++ b/drivers/clocksource/tango_xtal.c
+@@ -42,7 +42,7 @@ static void __init tango_clocksource_init(struct device_node *np)
+ 
+ 	ret = clocksource_mmio_init(xtal_in_cnt, "tango-xtal", xtal_freq, 350,
+ 				    32, clocksource_mmio_readl_up);
+-	if (!ret) {
++	if (ret) {
+ 		pr_err("%s: registration failed\n", np->full_name);
+ 		return;
+ 	}
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index cd83d477e32d..e89512383c3c 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -673,6 +673,11 @@ static int core_get_max_pstate(void)
+ 			if (err)
+ 				goto skip_tar;
+ 
++			/* For level 1 and 2, bits[23:16] contain the ratio */
++			if (tdp_ctrl)
++				tdp_ratio >>= 16;
++
++			tdp_ratio &= 0xff; /* ratios are only 8 bits long */
+ 			if (tdp_ratio - 1 == tar) {
+ 				max_pstate = tar;
+ 				pr_debug("max_pstate=TAC %x\n", max_pstate);
+diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
+index a9c659f58974..04042038ec4b 100644
+--- a/drivers/cpufreq/sti-cpufreq.c
++++ b/drivers/cpufreq/sti-cpufreq.c
+@@ -259,6 +259,10 @@ static int sti_cpufreq_init(void)
+ {
+ 	int ret;
+ 
++	if ((!of_machine_is_compatible("st,stih407")) &&
++		(!of_machine_is_compatible("st,stih410")))
++		return -ENODEV;
++
+ 	ddata.cpu = get_cpu_device(0);
+ 	if (!ddata.cpu) {
+ 		dev_err(ddata.cpu, "Failed to get device for CPU0\n");
+diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
+index 545069d5fdfb..e342565e8715 100644
+--- a/drivers/cpuidle/cpuidle-arm.c
++++ b/drivers/cpuidle/cpuidle-arm.c
+@@ -50,7 +50,7 @@ static int arm_enter_idle_state(struct cpuidle_device *dev,
+ 		 * call the CPU ops suspend protocol with idle index as a
+ 		 * parameter.
+ 		 */
+-		arm_cpuidle_suspend(idx);
++		ret = arm_cpuidle_suspend(idx);
+ 
+ 		cpu_pm_exit();
+ 	}
+diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
+index 3eb3f1279fb7..7de007abe46e 100644
+--- a/drivers/crypto/atmel-aes.c
++++ b/drivers/crypto/atmel-aes.c
+@@ -2085,9 +2085,9 @@ static int atmel_aes_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
+-	if (!aes_dd->io_base) {
++	if (IS_ERR(aes_dd->io_base)) {
+ 		dev_err(dev, "can't ioremap\n");
+-		err = -ENOMEM;
++		err = PTR_ERR(aes_dd->io_base);
+ 		goto res_err;
+ 	}
+ 
+diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
+index 8bf9914d4d15..68d47a2da4a1 100644
+--- a/drivers/crypto/atmel-sha.c
++++ b/drivers/crypto/atmel-sha.c
+@@ -1404,9 +1404,9 @@ static int atmel_sha_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
+-	if (!sha_dd->io_base) {
++	if (IS_ERR(sha_dd->io_base)) {
+ 		dev_err(dev, "can't ioremap\n");
+-		err = -ENOMEM;
++		err = PTR_ERR(sha_dd->io_base);
+ 		goto res_err;
+ 	}
+ 
+diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
+index 2c7a628d0375..bf467d7be35c 100644
+--- a/drivers/crypto/atmel-tdes.c
++++ b/drivers/crypto/atmel-tdes.c
+@@ -1417,9 +1417,9 @@ static int atmel_tdes_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
+-	if (!tdes_dd->io_base) {
++	if (IS_ERR(tdes_dd->io_base)) {
+ 		dev_err(dev, "can't ioremap\n");
+-		err = -ENOMEM;
++		err = PTR_ERR(tdes_dd->io_base);
+ 		goto res_err;
+ 	}
+ 
+diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
+index f7e0d8d4c3da..8f50a02ff68d 100644
+--- a/drivers/crypto/caam/jr.c
++++ b/drivers/crypto/caam/jr.c
+@@ -248,7 +248,7 @@ static void caam_jr_dequeue(unsigned long devarg)
+ struct device *caam_jr_alloc(void)
+ {
+ 	struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL;
+-	struct device *dev = NULL;
++	struct device *dev = ERR_PTR(-ENODEV);
+ 	int min_tfm_cnt	= INT_MAX;
+ 	int tfm_cnt;
+ 
+diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+index d89f20c04266..60fc0fa26fd3 100644
+--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
++++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+@@ -220,6 +220,42 @@ static int ccp_aes_cmac_digest(struct ahash_request *req)
+ 	return ccp_aes_cmac_finup(req);
+ }
+ 
++static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
++{
++	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
++	struct ccp_aes_cmac_exp_ctx state;
++
++	/* Don't let anything leak to 'out' */
++	memset(&state, 0, sizeof(state));
++
++	state.null_msg = rctx->null_msg;
++	memcpy(state.iv, rctx->iv, sizeof(state.iv));
++	state.buf_count = rctx->buf_count;
++	memcpy(state.buf, rctx->buf, sizeof(state.buf));
++
++	/* 'out' may not be aligned so memcpy from local variable */
++	memcpy(out, &state, sizeof(state));
++
++	return 0;
++}
++
++static int ccp_aes_cmac_import(struct ahash_request *req, const void *in)
++{
++	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
++	struct ccp_aes_cmac_exp_ctx state;
++
++	/* 'in' may not be aligned so memcpy to local variable */
++	memcpy(&state, in, sizeof(state));
++
++	memset(rctx, 0, sizeof(*rctx));
++	rctx->null_msg = state.null_msg;
++	memcpy(rctx->iv, state.iv, sizeof(rctx->iv));
++	rctx->buf_count = state.buf_count;
++	memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
++
++	return 0;
++}
++
+ static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+ 			       unsigned int key_len)
+ {
+@@ -352,10 +388,13 @@ int ccp_register_aes_cmac_algs(struct list_head *head)
+ 	alg->final = ccp_aes_cmac_final;
+ 	alg->finup = ccp_aes_cmac_finup;
+ 	alg->digest = ccp_aes_cmac_digest;
++	alg->export = ccp_aes_cmac_export;
++	alg->import = ccp_aes_cmac_import;
+ 	alg->setkey = ccp_aes_cmac_setkey;
+ 
+ 	halg = &alg->halg;
+ 	halg->digestsize = AES_BLOCK_SIZE;
++	halg->statesize = sizeof(struct ccp_aes_cmac_exp_ctx);
+ 
+ 	base = &halg->base;
+ 	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
+diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
+index d14b3f28e010..ab9945f2cb7a 100644
+--- a/drivers/crypto/ccp/ccp-crypto-sha.c
++++ b/drivers/crypto/ccp/ccp-crypto-sha.c
+@@ -207,6 +207,46 @@ static int ccp_sha_digest(struct ahash_request *req)
+ 	return ccp_sha_finup(req);
+ }
+ 
++static int ccp_sha_export(struct ahash_request *req, void *out)
++{
++	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
++	struct ccp_sha_exp_ctx state;
++
++	/* Don't let anything leak to 'out' */
++	memset(&state, 0, sizeof(state));
++
++	state.type = rctx->type;
++	state.msg_bits = rctx->msg_bits;
++	state.first = rctx->first;
++	memcpy(state.ctx, rctx->ctx, sizeof(state.ctx));
++	state.buf_count = rctx->buf_count;
++	memcpy(state.buf, rctx->buf, sizeof(state.buf));
++
++	/* 'out' may not be aligned so memcpy from local variable */
++	memcpy(out, &state, sizeof(state));
++
++	return 0;
++}
++
++static int ccp_sha_import(struct ahash_request *req, const void *in)
++{
++	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
++	struct ccp_sha_exp_ctx state;
++
++	/* 'in' may not be aligned so memcpy to local variable */
++	memcpy(&state, in, sizeof(state));
++
++	memset(rctx, 0, sizeof(*rctx));
++	rctx->type = state.type;
++	rctx->msg_bits = state.msg_bits;
++	rctx->first = state.first;
++	memcpy(rctx->ctx, state.ctx, sizeof(rctx->ctx));
++	rctx->buf_count = state.buf_count;
++	memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
++
++	return 0;
++}
++
+ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
+ 			  unsigned int key_len)
+ {
+@@ -403,9 +443,12 @@ static int ccp_register_sha_alg(struct list_head *head,
+ 	alg->final = ccp_sha_final;
+ 	alg->finup = ccp_sha_finup;
+ 	alg->digest = ccp_sha_digest;
++	alg->export = ccp_sha_export;
++	alg->import = ccp_sha_import;
+ 
+ 	halg = &alg->halg;
+ 	halg->digestsize = def->digest_size;
++	halg->statesize = sizeof(struct ccp_sha_exp_ctx);
+ 
+ 	base = &halg->base;
+ 	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
+index 76a96f0f44c6..a326ec20bfa8 100644
+--- a/drivers/crypto/ccp/ccp-crypto.h
++++ b/drivers/crypto/ccp/ccp-crypto.h
+@@ -129,6 +129,15 @@ struct ccp_aes_cmac_req_ctx {
+ 	struct ccp_cmd cmd;
+ };
+ 
++struct ccp_aes_cmac_exp_ctx {
++	unsigned int null_msg;
++
++	u8 iv[AES_BLOCK_SIZE];
++
++	unsigned int buf_count;
++	u8 buf[AES_BLOCK_SIZE];
++};
++
+ /***** SHA related defines *****/
+ #define MAX_SHA_CONTEXT_SIZE	SHA256_DIGEST_SIZE
+ #define MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE
+@@ -171,6 +180,19 @@ struct ccp_sha_req_ctx {
+ 	struct ccp_cmd cmd;
+ };
+ 
++struct ccp_sha_exp_ctx {
++	enum ccp_sha_type type;
++
++	u64 msg_bits;
++
++	unsigned int first;
++
++	u8 ctx[MAX_SHA_CONTEXT_SIZE];
++
++	unsigned int buf_count;
++	u8 buf[MAX_SHA_BLOCK_SIZE];
++};
++
+ /***** Common Context Structure *****/
+ struct ccp_ctx {
+ 	int (*complete)(struct crypto_async_request *req, int ret);
+diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
+index c0656e7f37b5..80239ae69527 100644
+--- a/drivers/crypto/marvell/cesa.c
++++ b/drivers/crypto/marvell/cesa.c
+@@ -420,7 +420,7 @@ static int mv_cesa_probe(struct platform_device *pdev)
+ 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+ 	cesa->regs = devm_ioremap_resource(dev, res);
+ 	if (IS_ERR(cesa->regs))
+-		return -ENOMEM;
++		return PTR_ERR(cesa->regs);
+ 
+ 	ret = mv_cesa_dev_dma_init(cesa);
+ 	if (ret)
+diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
+index 0e82ce3c383e..976b01e58afb 100644
+--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
++++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
+@@ -236,6 +236,8 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+ 				 uint32_t vf_mask);
+ void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+ void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
++int adf_init_pf_wq(void);
++void adf_exit_pf_wq(void);
+ #else
+ static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
+ {
+@@ -253,5 +255,14 @@ static inline void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+ static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+ {
+ }
++
++static inline int adf_init_pf_wq(void)
++{
++	return 0;
++}
++
++static inline void adf_exit_pf_wq(void)
++{
++}
+ #endif
+ #endif
+diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+index 5c897e6e7994..3c3f948290ca 100644
+--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
++++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+@@ -462,12 +462,17 @@ static int __init adf_register_ctl_device_driver(void)
+ 	if (adf_init_aer())
+ 		goto err_aer;
+ 
++	if (adf_init_pf_wq())
++		goto err_pf_wq;
++
+ 	if (qat_crypto_register())
+ 		goto err_crypto_register;
+ 
+ 	return 0;
+ 
+ err_crypto_register:
++	adf_exit_pf_wq();
++err_pf_wq:
+ 	adf_exit_aer();
+ err_aer:
+ 	adf_chr_drv_destroy();
+@@ -480,6 +485,7 @@ static void __exit adf_unregister_ctl_device_driver(void)
+ {
+ 	adf_chr_drv_destroy();
+ 	adf_exit_aer();
++	adf_exit_pf_wq();
+ 	qat_crypto_unregister();
+ 	adf_clean_vf_map(false);
+ 	mutex_destroy(&adf_ctl_lock);
+diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
+index 1117a8b58280..38a0415e767d 100644
+--- a/drivers/crypto/qat/qat_common/adf_sriov.c
++++ b/drivers/crypto/qat/qat_common/adf_sriov.c
+@@ -119,11 +119,6 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
+ 	int i;
+ 	u32 reg;
+ 
+-	/* Workqueue for PF2VF responses */
+-	pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
+-	if (!pf2vf_resp_wq)
+-		return -ENOMEM;
+-
+ 	for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
+ 	     i++, vf_info++) {
+ 		/* This ptr will be populated when VFs will be created */
+@@ -216,11 +211,6 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
+ 
+ 	kfree(accel_dev->pf.vf_info);
+ 	accel_dev->pf.vf_info = NULL;
+-
+-	if (pf2vf_resp_wq) {
+-		destroy_workqueue(pf2vf_resp_wq);
+-		pf2vf_resp_wq = NULL;
+-	}
+ }
+ EXPORT_SYMBOL_GPL(adf_disable_sriov);
+ 
+@@ -304,3 +294,19 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
+ 	return numvfs;
+ }
+ EXPORT_SYMBOL_GPL(adf_sriov_configure);
++
++int __init adf_init_pf_wq(void)
++{
++	/* Workqueue for PF2VF responses */
++	pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
++
++	return !pf2vf_resp_wq ? -ENOMEM : 0;
++}
++
++void adf_exit_pf_wq(void)
++{
++	if (pf2vf_resp_wq) {
++		destroy_workqueue(pf2vf_resp_wq);
++		pf2vf_resp_wq = NULL;
++	}
++}
+diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+index a19ee127edca..e72fea737a0d 100644
+--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
++++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+@@ -35,6 +35,7 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
+ 	unsigned int todo;
+ 	struct sg_mapping_iter mi, mo;
+ 	unsigned int oi, oo; /* offset for in and out */
++	unsigned long flags;
+ 
+ 	if (areq->nbytes == 0)
+ 		return 0;
+@@ -49,7 +50,7 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
+ 		return -EINVAL;
+ 	}
+ 
+-	spin_lock_bh(&ss->slock);
++	spin_lock_irqsave(&ss->slock, flags);
+ 
+ 	for (i = 0; i < op->keylen; i += 4)
+ 		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
+@@ -117,7 +118,7 @@ release_ss:
+ 	sg_miter_stop(&mi);
+ 	sg_miter_stop(&mo);
+ 	writel(0, ss->base + SS_CTL);
+-	spin_unlock_bh(&ss->slock);
++	spin_unlock_irqrestore(&ss->slock, flags);
+ 	return err;
+ }
+ 
+@@ -149,6 +150,7 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
+ 	unsigned int ob = 0;	/* offset in buf */
+ 	unsigned int obo = 0;	/* offset in bufo*/
+ 	unsigned int obl = 0;	/* length of data in bufo */
++	unsigned long flags;
+ 
+ 	if (areq->nbytes == 0)
+ 		return 0;
+@@ -181,7 +183,7 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
+ 	if (no_chunk == 1)
+ 		return sun4i_ss_opti_poll(areq);
+ 
+-	spin_lock_bh(&ss->slock);
++	spin_lock_irqsave(&ss->slock, flags);
+ 
+ 	for (i = 0; i < op->keylen; i += 4)
+ 		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
+@@ -308,7 +310,7 @@ release_ss:
+ 	sg_miter_stop(&mi);
+ 	sg_miter_stop(&mo);
+ 	writel(0, ss->base + SS_CTL);
+-	spin_unlock_bh(&ss->slock);
++	spin_unlock_irqrestore(&ss->slock, flags);
+ 
+ 	return err;
+ }
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index a0d4a08313ae..b7ee8d30147d 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -63,6 +63,14 @@ static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
+ 		ptr->eptr = upper_32_bits(dma_addr);
+ }
+ 
++static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
++			     struct talitos_ptr *src_ptr, bool is_sec1)
++{
++	dst_ptr->ptr = src_ptr->ptr;
++	if (!is_sec1)
++		dst_ptr->eptr = src_ptr->eptr;
++}
++
+ static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
+ 			       bool is_sec1)
+ {
+@@ -827,6 +835,16 @@ struct talitos_ahash_req_ctx {
+ 	struct scatterlist *psrc;
+ };
+ 
++struct talitos_export_state {
++	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
++	u8 buf[HASH_MAX_BLOCK_SIZE];
++	unsigned int swinit;
++	unsigned int first;
++	unsigned int last;
++	unsigned int to_hash_later;
++	unsigned int nbuf;
++};
++
+ static int aead_setkey(struct crypto_aead *authenc,
+ 		       const u8 *key, unsigned int keylen)
+ {
+@@ -1083,21 +1101,20 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ 	sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
+ 			      (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
+ 							   : DMA_TO_DEVICE);
+-
+ 	/* hmac data */
+ 	desc->ptr[1].len = cpu_to_be16(areq->assoclen);
+ 	if (sg_count > 1 &&
+ 	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
+ 					 areq->assoclen,
+ 					 &edesc->link_tbl[tbl_off])) > 1) {
+-		tbl_off += ret;
+-
+ 		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
+ 			       sizeof(struct talitos_ptr), 0);
+ 		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
+ 
+ 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+ 					   edesc->dma_len, DMA_BIDIRECTIONAL);
++
++		tbl_off += ret;
+ 	} else {
+ 		to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
+ 		desc->ptr[1].j_extent = 0;
+@@ -1126,11 +1143,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ 	if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
+ 		sg_link_tbl_len += authsize;
+ 
+-	if (sg_count > 1 &&
+-	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
+-					 sg_link_tbl_len,
+-					 &edesc->link_tbl[tbl_off])) > 1) {
+-		tbl_off += ret;
++	if (sg_count == 1) {
++		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
++			       areq->assoclen, 0);
++	} else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
++						areq->assoclen, sg_link_tbl_len,
++						&edesc->link_tbl[tbl_off])) >
++		   1) {
+ 		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
+ 		to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
+ 					      tbl_off *
+@@ -1138,8 +1157,10 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+ 					   edesc->dma_len,
+ 					   DMA_BIDIRECTIONAL);
+-	} else
+-		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
++		tbl_off += ret;
++	} else {
++		copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
++	}
+ 
+ 	/* cipher out */
+ 	desc->ptr[5].len = cpu_to_be16(cryptlen);
+@@ -1151,11 +1172,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ 
+ 	edesc->icv_ool = false;
+ 
+-	if (sg_count > 1 &&
+-	    (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
++	if (sg_count == 1) {
++		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
++			       areq->assoclen, 0);
++	} else if ((sg_count =
++			sg_to_link_tbl_offset(areq->dst, sg_count,
+ 					      areq->assoclen, cryptlen,
+-					      &edesc->link_tbl[tbl_off])) >
+-	    1) {
++					      &edesc->link_tbl[tbl_off])) > 1) {
+ 		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
+ 
+ 		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
+@@ -1178,8 +1201,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ 					   edesc->dma_len, DMA_BIDIRECTIONAL);
+ 
+ 		edesc->icv_ool = true;
+-	} else
+-		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
++	} else {
++		copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
++	}
+ 
+ 	/* iv out */
+ 	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
+@@ -1967,6 +1991,46 @@ static int ahash_digest(struct ahash_request *areq)
+ 	return ahash_process_req(areq, areq->nbytes);
+ }
+ 
++static int ahash_export(struct ahash_request *areq, void *out)
++{
++	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
++	struct talitos_export_state *export = out;
++
++	memcpy(export->hw_context, req_ctx->hw_context,
++	       req_ctx->hw_context_size);
++	memcpy(export->buf, req_ctx->buf, req_ctx->nbuf);
++	export->swinit = req_ctx->swinit;
++	export->first = req_ctx->first;
++	export->last = req_ctx->last;
++	export->to_hash_later = req_ctx->to_hash_later;
++	export->nbuf = req_ctx->nbuf;
++
++	return 0;
++}
++
++static int ahash_import(struct ahash_request *areq, const void *in)
++{
++	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
++	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
++	const struct talitos_export_state *export = in;
++
++	memset(req_ctx, 0, sizeof(*req_ctx));
++	req_ctx->hw_context_size =
++		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
++			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
++			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
++	memcpy(req_ctx->hw_context, export->hw_context,
++	       req_ctx->hw_context_size);
++	memcpy(req_ctx->buf, export->buf, export->nbuf);
++	req_ctx->swinit = export->swinit;
++	req_ctx->first = export->first;
++	req_ctx->last = export->last;
++	req_ctx->to_hash_later = export->to_hash_later;
++	req_ctx->nbuf = export->nbuf;
++
++	return 0;
++}
++
+ struct keyhash_result {
+ 	struct completion completion;
+ 	int err;
+@@ -2444,6 +2508,7 @@ static struct talitos_alg_template driver_algs[] = {
+ 	{	.type = CRYPTO_ALG_TYPE_AHASH,
+ 		.alg.hash = {
+ 			.halg.digestsize = MD5_DIGEST_SIZE,
++			.halg.statesize = sizeof(struct talitos_export_state),
+ 			.halg.base = {
+ 				.cra_name = "md5",
+ 				.cra_driver_name = "md5-talitos",
+@@ -2459,6 +2524,7 @@ static struct talitos_alg_template driver_algs[] = {
+ 	{	.type = CRYPTO_ALG_TYPE_AHASH,
+ 		.alg.hash = {
+ 			.halg.digestsize = SHA1_DIGEST_SIZE,
++			.halg.statesize = sizeof(struct talitos_export_state),
+ 			.halg.base = {
+ 				.cra_name = "sha1",
+ 				.cra_driver_name = "sha1-talitos",
+@@ -2474,6 +2540,7 @@ static struct talitos_alg_template driver_algs[] = {
+ 	{	.type = CRYPTO_ALG_TYPE_AHASH,
+ 		.alg.hash = {
+ 			.halg.digestsize = SHA224_DIGEST_SIZE,
++			.halg.statesize = sizeof(struct talitos_export_state),
+ 			.halg.base = {
+ 				.cra_name = "sha224",
+ 				.cra_driver_name = "sha224-talitos",
+@@ -2489,6 +2556,7 @@ static struct talitos_alg_template driver_algs[] = {
+ 	{	.type = CRYPTO_ALG_TYPE_AHASH,
+ 		.alg.hash = {
+ 			.halg.digestsize = SHA256_DIGEST_SIZE,
++			.halg.statesize = sizeof(struct talitos_export_state),
+ 			.halg.base = {
+ 				.cra_name = "sha256",
+ 				.cra_driver_name = "sha256-talitos",
+@@ -2504,6 +2572,7 @@ static struct talitos_alg_template driver_algs[] = {
+ 	{	.type = CRYPTO_ALG_TYPE_AHASH,
+ 		.alg.hash = {
+ 			.halg.digestsize = SHA384_DIGEST_SIZE,
++			.halg.statesize = sizeof(struct talitos_export_state),
+ 			.halg.base = {
+ 				.cra_name = "sha384",
+ 				.cra_driver_name = "sha384-talitos",
+@@ -2519,6 +2588,7 @@ static struct talitos_alg_template driver_algs[] = {
+ 	{	.type = CRYPTO_ALG_TYPE_AHASH,
+ 		.alg.hash = {
+ 			.halg.digestsize = SHA512_DIGEST_SIZE,
++			.halg.statesize = sizeof(struct talitos_export_state),
+ 			.halg.base = {
+ 				.cra_name = "sha512",
+ 				.cra_driver_name = "sha512-talitos",
+@@ -2534,6 +2604,7 @@ static struct talitos_alg_template driver_algs[] = {
+ 	{	.type = CRYPTO_ALG_TYPE_AHASH,
+ 		.alg.hash = {
+ 			.halg.digestsize = MD5_DIGEST_SIZE,
++			.halg.statesize = sizeof(struct talitos_export_state),
+ 			.halg.base = {
+ 				.cra_name = "hmac(md5)",
+ 				.cra_driver_name = "hmac-md5-talitos",
+@@ -2549,6 +2620,7 @@ static struct talitos_alg_template driver_algs[] = {
+ 	{	.type = CRYPTO_ALG_TYPE_AHASH,
+ 		.alg.hash = {
+ 			.halg.digestsize = SHA1_DIGEST_SIZE,
++			.halg.statesize = sizeof(struct talitos_export_state),
+ 			.halg.base = {
+ 				.cra_name = "hmac(sha1)",
+ 				.cra_driver_name = "hmac-sha1-talitos",
+@@ -2564,6 +2636,7 @@ static struct talitos_alg_template driver_algs[] = {
+ 	{	.type = CRYPTO_ALG_TYPE_AHASH,
+ 		.alg.hash = {
+ 			.halg.digestsize = SHA224_DIGEST_SIZE,
++			.halg.statesize = sizeof(struct talitos_export_state),
+ 			.halg.base = {
+ 				.cra_name = "hmac(sha224)",
+ 				.cra_driver_name = "hmac-sha224-talitos",
+@@ -2579,6 +2652,7 @@ static struct talitos_alg_template driver_algs[] = {
+ 	{	.type = CRYPTO_ALG_TYPE_AHASH,
+ 		.alg.hash = {
+ 			.halg.digestsize = SHA256_DIGEST_SIZE,
++			.halg.statesize = sizeof(struct talitos_export_state),
+ 			.halg.base = {
+ 				.cra_name = "hmac(sha256)",
+ 				.cra_driver_name = "hmac-sha256-talitos",
+@@ -2594,6 +2668,7 @@ static struct talitos_alg_template driver_algs[] = {
+ 	{	.type = CRYPTO_ALG_TYPE_AHASH,
+ 		.alg.hash = {
+ 			.halg.digestsize = SHA384_DIGEST_SIZE,
++			.halg.statesize = sizeof(struct talitos_export_state),
+ 			.halg.base = {
+ 				.cra_name = "hmac(sha384)",
+ 				.cra_driver_name = "hmac-sha384-talitos",
+@@ -2609,6 +2684,7 @@ static struct talitos_alg_template driver_algs[] = {
+ 	{	.type = CRYPTO_ALG_TYPE_AHASH,
+ 		.alg.hash = {
+ 			.halg.digestsize = SHA512_DIGEST_SIZE,
++			.halg.statesize = sizeof(struct talitos_export_state),
+ 			.halg.base = {
+ 				.cra_name = "hmac(sha512)",
+ 				.cra_driver_name = "hmac-sha512-talitos",
+@@ -2629,21 +2705,11 @@ struct talitos_crypto_alg {
+ 	struct talitos_alg_template algt;
+ };
+ 
+-static int talitos_cra_init(struct crypto_tfm *tfm)
++static int talitos_init_common(struct talitos_ctx *ctx,
++			       struct talitos_crypto_alg *talitos_alg)
+ {
+-	struct crypto_alg *alg = tfm->__crt_alg;
+-	struct talitos_crypto_alg *talitos_alg;
+-	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+ 	struct talitos_private *priv;
+ 
+-	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
+-		talitos_alg = container_of(__crypto_ahash_alg(alg),
+-					   struct talitos_crypto_alg,
+-					   algt.alg.hash);
+-	else
+-		talitos_alg = container_of(alg, struct talitos_crypto_alg,
+-					   algt.alg.crypto);
+-
+ 	/* update context with ptr to dev */
+ 	ctx->dev = talitos_alg->dev;
+ 
+@@ -2661,10 +2727,33 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
+ 	return 0;
+ }
+ 
++static int talitos_cra_init(struct crypto_tfm *tfm)
++{
++	struct crypto_alg *alg = tfm->__crt_alg;
++	struct talitos_crypto_alg *talitos_alg;
++	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
++
++	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
++		talitos_alg = container_of(__crypto_ahash_alg(alg),
++					   struct talitos_crypto_alg,
++					   algt.alg.hash);
++	else
++		talitos_alg = container_of(alg, struct talitos_crypto_alg,
++					   algt.alg.crypto);
++
++	return talitos_init_common(ctx, talitos_alg);
++}
++
+ static int talitos_cra_init_aead(struct crypto_aead *tfm)
+ {
+-	talitos_cra_init(crypto_aead_tfm(tfm));
+-	return 0;
++	struct aead_alg *alg = crypto_aead_alg(tfm);
++	struct talitos_crypto_alg *talitos_alg;
++	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
++
++	talitos_alg = container_of(alg, struct talitos_crypto_alg,
++				   algt.alg.aead);
++
++	return talitos_init_common(ctx, talitos_alg);
+ }
+ 
+ static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
+@@ -2787,6 +2876,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
+ 		t_alg->algt.alg.hash.finup = ahash_finup;
+ 		t_alg->algt.alg.hash.digest = ahash_digest;
+ 		t_alg->algt.alg.hash.setkey = ahash_setkey;
++		t_alg->algt.alg.hash.import = ahash_import;
++		t_alg->algt.alg.hash.export = ahash_export;
+ 
+ 		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
+ 		    !strncmp(alg->cra_name, "hmac", 4)) {
+diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
+index 4c243c1ffc7f..790f7cadc1ed 100644
+--- a/drivers/crypto/ux500/cryp/cryp_core.c
++++ b/drivers/crypto/ux500/cryp/cryp_core.c
+@@ -1440,9 +1440,9 @@ static int ux500_cryp_probe(struct platform_device *pdev)
+ 
+ 	device_data->phybase = res->start;
+ 	device_data->base = devm_ioremap_resource(dev, res);
+-	if (!device_data->base) {
++	if (IS_ERR(device_data->base)) {
+ 		dev_err(dev, "[%s]: ioremap failed!", __func__);
+-		ret = -ENOMEM;
++		ret = PTR_ERR(device_data->base);
+ 		goto out;
+ 	}
+ 
+diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
+index d6fdc583ce5d..574e87c7f2b8 100644
+--- a/drivers/crypto/ux500/hash/hash_core.c
++++ b/drivers/crypto/ux500/hash/hash_core.c
+@@ -1659,9 +1659,9 @@ static int ux500_hash_probe(struct platform_device *pdev)
+ 
+ 	device_data->phybase = res->start;
+ 	device_data->base = devm_ioremap_resource(dev, res);
+-	if (!device_data->base) {
++	if (IS_ERR(device_data->base)) {
+ 		dev_err(dev, "%s: ioremap() failed!\n", __func__);
+-		ret = -ENOMEM;
++		ret = PTR_ERR(device_data->base);
+ 		goto out;
+ 	}
+ 	spin_lock_init(&device_data->ctx_lock);
+diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
+index 5ad0ec1f0e29..97199b3c25a2 100644
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -130,26 +130,14 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
+ static void dwc_initialize(struct dw_dma_chan *dwc)
+ {
+ 	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+-	struct dw_dma_slave *dws = dwc->chan.private;
+ 	u32 cfghi = DWC_CFGH_FIFO_MODE;
+ 	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
+ 
+ 	if (dwc->initialized == true)
+ 		return;
+ 
+-	if (dws) {
+-		/*
+-		 * We need controller-specific data to set up slave
+-		 * transfers.
+-		 */
+-		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
+-
+-		cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
+-		cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
+-	} else {
+-		cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
+-		cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
+-	}
++	cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
++	cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
+ 
+ 	channel_writel(dwc, CFG_LO, cfglo);
+ 	channel_writel(dwc, CFG_HI, cfghi);
+@@ -941,7 +929,7 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
+ 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ 	struct dw_dma_slave *dws = param;
+ 
+-	if (!dws || dws->dma_dev != chan->device->dev)
++	if (dws->dma_dev != chan->device->dev)
+ 		return false;
+ 
+ 	/* We have to copy data since dws can be temporary storage */
+@@ -1165,6 +1153,14 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
+ 	 * doesn't mean what you think it means), and status writeback.
+ 	 */
+ 
++	/*
++	 * We need controller-specific data to set up slave transfers.
++	 */
++	if (chan->private && !dw_dma_filter(chan, chan->private)) {
++		dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
++		return -EINVAL;
++	}
++
+ 	/* Enable controller here if needed */
+ 	if (!dw->in_use)
+ 		dw_dma_on(dw);
+@@ -1226,6 +1222,14 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
+ 	spin_lock_irqsave(&dwc->lock, flags);
+ 	list_splice_init(&dwc->free_list, &list);
+ 	dwc->descs_allocated = 0;
++
++	/* Clear custom channel configuration */
++	dwc->src_id = 0;
++	dwc->dst_id = 0;
++
++	dwc->src_master = 0;
++	dwc->dst_master = 0;
++
+ 	dwc->initialized = false;
+ 
+ 	/* Disable interrupts */
+diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
+index e3d7fcb69b4c..2dac314a2d7a 100644
+--- a/drivers/dma/edma.c
++++ b/drivers/dma/edma.c
+@@ -1563,32 +1563,6 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
+ 	return IRQ_HANDLED;
+ }
+ 
+-static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable)
+-{
+-	struct platform_device *tc_pdev;
+-	int ret;
+-
+-	if (!IS_ENABLED(CONFIG_OF) || !tc)
+-		return;
+-
+-	tc_pdev = of_find_device_by_node(tc->node);
+-	if (!tc_pdev) {
+-		pr_err("%s: TPTC device is not found\n", __func__);
+-		return;
+-	}
+-	if (!pm_runtime_enabled(&tc_pdev->dev))
+-		pm_runtime_enable(&tc_pdev->dev);
+-
+-	if (enable)
+-		ret = pm_runtime_get_sync(&tc_pdev->dev);
+-	else
+-		ret = pm_runtime_put_sync(&tc_pdev->dev);
+-
+-	if (ret < 0)
+-		pr_err("%s: pm_runtime_%s_sync() failed for %s\n", __func__,
+-		       enable ? "get" : "put", dev_name(&tc_pdev->dev));
+-}
+-
+ /* Alloc channel resources */
+ static int edma_alloc_chan_resources(struct dma_chan *chan)
+ {
+@@ -1625,8 +1599,6 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
+ 		EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
+ 		echan->hw_triggered ? "HW" : "SW");
+ 
+-	edma_tc_set_pm_state(echan->tc, true);
+-
+ 	return 0;
+ 
+ err_slot:
+@@ -1663,7 +1635,6 @@ static void edma_free_chan_resources(struct dma_chan *chan)
+ 		echan->alloced = false;
+ 	}
+ 
+-	edma_tc_set_pm_state(echan->tc, false);
+ 	echan->tc = NULL;
+ 	echan->hw_triggered = false;
+ 
+@@ -2408,10 +2379,8 @@ static int edma_pm_suspend(struct device *dev)
+ 	int i;
+ 
+ 	for (i = 0; i < ecc->num_channels; i++) {
+-		if (echan[i].alloced) {
++		if (echan[i].alloced)
+ 			edma_setup_interrupt(&echan[i], false);
+-			edma_tc_set_pm_state(echan[i].tc, false);
+-		}
+ 	}
+ 
+ 	return 0;
+@@ -2441,8 +2410,6 @@ static int edma_pm_resume(struct device *dev)
+ 
+ 			/* Set up channel -> slot mapping for the entry slot */
+ 			edma_set_chmap(&echan[i], echan[i].slot[0]);
+-
+-			edma_tc_set_pm_state(echan[i].tc, true);
+ 		}
+ 	}
+ 
+@@ -2466,7 +2433,8 @@ static struct platform_driver edma_driver = {
+ 
+ static int edma_tptc_probe(struct platform_device *pdev)
+ {
+-	return 0;
++	pm_runtime_enable(&pdev->dev);
++	return pm_runtime_get_sync(&pdev->dev);
+ }
+ 
+ static struct platform_driver edma_tptc_driver = {
+diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
+index eef145edb936..025d375fc3d7 100644
+--- a/drivers/dma/hsu/hsu.c
++++ b/drivers/dma/hsu/hsu.c
+@@ -135,7 +135,7 @@ static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
+ 	sr = hsu_chan_readl(hsuc, HSU_CH_SR);
+ 	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
+ 
+-	return sr;
++	return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
+ }
+ 
+ irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
+@@ -254,10 +254,13 @@ static void hsu_dma_issue_pending(struct dma_chan *chan)
+ static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
+ {
+ 	struct hsu_dma_desc *desc = hsuc->desc;
+-	size_t bytes = desc->length;
++	size_t bytes = 0;
+ 	int i;
+ 
+-	i = desc->active % HSU_DMA_CHAN_NR_DESC;
++	for (i = desc->active; i < desc->nents; i++)
++		bytes += desc->sg[i].len;
++
++	i = HSU_DMA_CHAN_NR_DESC - 1;
+ 	do {
+ 		bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
+ 	} while (--i >= 0);
+diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
+index 578a8ee8cd05..6b070c22b1df 100644
+--- a/drivers/dma/hsu/hsu.h
++++ b/drivers/dma/hsu/hsu.h
+@@ -41,6 +41,9 @@
+ #define HSU_CH_SR_DESCTO(x)	BIT(8 + (x))
+ #define HSU_CH_SR_DESCTO_ANY	(BIT(11) | BIT(10) | BIT(9) | BIT(8))
+ #define HSU_CH_SR_CHE		BIT(15)
++#define HSU_CH_SR_DESCE(x)	BIT(16 + (x))
++#define HSU_CH_SR_DESCE_ANY	(BIT(19) | BIT(18) | BIT(17) | BIT(16))
++#define HSU_CH_SR_CDESC_ANY	(BIT(31) | BIT(30))
+ 
+ /* Bits in HSU_CH_CR */
+ #define HSU_CH_CR_CHA		BIT(0)
+diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
+index 9794b073d7d7..a5ed9407c51b 100644
+--- a/drivers/dma/omap-dma.c
++++ b/drivers/dma/omap-dma.c
+@@ -48,6 +48,7 @@ struct omap_chan {
+ 	unsigned dma_sig;
+ 	bool cyclic;
+ 	bool paused;
++	bool running;
+ 
+ 	int dma_ch;
+ 	struct omap_desc *desc;
+@@ -294,6 +295,8 @@ static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
+ 
+ 	/* Enable channel */
+ 	omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
++
++	c->running = true;
+ }
+ 
+ static void omap_dma_stop(struct omap_chan *c)
+@@ -355,6 +358,8 @@ static void omap_dma_stop(struct omap_chan *c)
+ 
+ 		omap_dma_chan_write(c, CLNK_CTRL, val);
+ 	}
++
++	c->running = false;
+ }
+ 
+ static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
+@@ -673,15 +678,20 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
+ 	struct omap_chan *c = to_omap_dma_chan(chan);
+ 	struct virt_dma_desc *vd;
+ 	enum dma_status ret;
+-	uint32_t ccr;
+ 	unsigned long flags;
+ 
+-	ccr = omap_dma_chan_read(c, CCR);
+-	/* The channel is no longer active, handle the completion right away */
+-	if (!(ccr & CCR_ENABLE))
+-		omap_dma_callback(c->dma_ch, 0, c);
+-
+ 	ret = dma_cookie_status(chan, cookie, txstate);
++
++	if (!c->paused && c->running) {
++		uint32_t ccr = omap_dma_chan_read(c, CCR);
++		/*
++		 * The channel is no longer active, set the return value
++		 * accordingly
++		 */
++		if (!(ccr & CCR_ENABLE))
++			ret = DMA_COMPLETE;
++	}
++
+ 	if (ret == DMA_COMPLETE || !txstate)
+ 		return ret;
+ 
+diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
+index debca824bed6..77c1c44009d8 100644
+--- a/drivers/dma/pxa_dma.c
++++ b/drivers/dma/pxa_dma.c
+@@ -122,6 +122,7 @@ struct pxad_chan {
+ struct pxad_device {
+ 	struct dma_device		slave;
+ 	int				nr_chans;
++	int				nr_requestors;
+ 	void __iomem			*base;
+ 	struct pxad_phy			*phys;
+ 	spinlock_t			phy_lock;	/* Phy association */
+@@ -473,7 +474,7 @@ static void pxad_free_phy(struct pxad_chan *chan)
+ 		return;
+ 
+ 	/* clear the channel mapping in DRCMR */
+-	if (chan->drcmr <= DRCMR_CHLNUM) {
++	if (chan->drcmr <= pdev->nr_requestors) {
+ 		reg = pxad_drcmr(chan->drcmr);
+ 		writel_relaxed(0, chan->phy->base + reg);
+ 	}
+@@ -509,6 +510,7 @@ static bool is_running_chan_misaligned(struct pxad_chan *chan)
+ 
+ static void phy_enable(struct pxad_phy *phy, bool misaligned)
+ {
++	struct pxad_device *pdev;
+ 	u32 reg, dalgn;
+ 
+ 	if (!phy->vchan)
+@@ -518,7 +520,8 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned)
+ 		"%s(); phy=%p(%d) misaligned=%d\n", __func__,
+ 		phy, phy->idx, misaligned);
+ 
+-	if (phy->vchan->drcmr <= DRCMR_CHLNUM) {
++	pdev = to_pxad_dev(phy->vchan->vc.chan.device);
++	if (phy->vchan->drcmr <= pdev->nr_requestors) {
+ 		reg = pxad_drcmr(phy->vchan->drcmr);
+ 		writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+ 	}
+@@ -914,6 +917,7 @@ static void pxad_get_config(struct pxad_chan *chan,
+ {
+ 	u32 maxburst = 0, dev_addr = 0;
+ 	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
++	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
+ 
+ 	*dcmd = 0;
+ 	if (dir == DMA_DEV_TO_MEM) {
+@@ -922,7 +926,7 @@ static void pxad_get_config(struct pxad_chan *chan,
+ 		dev_addr = chan->cfg.src_addr;
+ 		*dev_src = dev_addr;
+ 		*dcmd |= PXA_DCMD_INCTRGADDR;
+-		if (chan->drcmr <= DRCMR_CHLNUM)
++		if (chan->drcmr <= pdev->nr_requestors)
+ 			*dcmd |= PXA_DCMD_FLOWSRC;
+ 	}
+ 	if (dir == DMA_MEM_TO_DEV) {
+@@ -931,7 +935,7 @@ static void pxad_get_config(struct pxad_chan *chan,
+ 		dev_addr = chan->cfg.dst_addr;
+ 		*dev_dst = dev_addr;
+ 		*dcmd |= PXA_DCMD_INCSRCADDR;
+-		if (chan->drcmr <= DRCMR_CHLNUM)
++		if (chan->drcmr <= pdev->nr_requestors)
+ 			*dcmd |= PXA_DCMD_FLOWTRG;
+ 	}
+ 	if (dir == DMA_MEM_TO_MEM)
+@@ -1341,13 +1345,15 @@ static struct dma_chan *pxad_dma_xlate(struct of_phandle_args *dma_spec,
+ 
+ static int pxad_init_dmadev(struct platform_device *op,
+ 			    struct pxad_device *pdev,
+-			    unsigned int nr_phy_chans)
++			    unsigned int nr_phy_chans,
++			    unsigned int nr_requestors)
+ {
+ 	int ret;
+ 	unsigned int i;
+ 	struct pxad_chan *c;
+ 
+ 	pdev->nr_chans = nr_phy_chans;
++	pdev->nr_requestors = nr_requestors;
+ 	INIT_LIST_HEAD(&pdev->slave.channels);
+ 	pdev->slave.device_alloc_chan_resources = pxad_alloc_chan_resources;
+ 	pdev->slave.device_free_chan_resources = pxad_free_chan_resources;
+@@ -1382,7 +1388,7 @@ static int pxad_probe(struct platform_device *op)
+ 	const struct of_device_id *of_id;
+ 	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
+ 	struct resource *iores;
+-	int ret, dma_channels = 0;
++	int ret, dma_channels = 0, nb_requestors = 0;
+ 	const enum dma_slave_buswidth widths =
+ 		DMA_SLAVE_BUSWIDTH_1_BYTE   | DMA_SLAVE_BUSWIDTH_2_BYTES |
+ 		DMA_SLAVE_BUSWIDTH_4_BYTES;
+@@ -1399,13 +1405,23 @@ static int pxad_probe(struct platform_device *op)
+ 		return PTR_ERR(pdev->base);
+ 
+ 	of_id = of_match_device(pxad_dt_ids, &op->dev);
+-	if (of_id)
++	if (of_id) {
+ 		of_property_read_u32(op->dev.of_node, "#dma-channels",
+ 				     &dma_channels);
+-	else if (pdata && pdata->dma_channels)
++		ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
++					   &nb_requestors);
++		if (ret) {
++			dev_warn(pdev->slave.dev,
++				 "#dma-requests set to default 32 as missing in OF: %d",
++				 ret);
++			nb_requestors = 32;
++		};
++	} else if (pdata && pdata->dma_channels) {
+ 		dma_channels = pdata->dma_channels;
+-	else
++		nb_requestors = pdata->nb_requestors;
++	} else {
+ 		dma_channels = 32;	/* default 32 channel */
++	}
+ 
+ 	dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
+ 	dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
+@@ -1423,7 +1439,7 @@ static int pxad_probe(struct platform_device *op)
+ 	pdev->slave.descriptor_reuse = true;
+ 
+ 	pdev->slave.dev = &op->dev;
+-	ret = pxad_init_dmadev(op, pdev, dma_channels);
++	ret = pxad_init_dmadev(op, pdev, dma_channels, nb_requestors);
+ 	if (ret) {
+ 		dev_err(pdev->slave.dev, "unable to register\n");
+ 		return ret;
+@@ -1442,7 +1458,8 @@ static int pxad_probe(struct platform_device *op)
+ 
+ 	platform_set_drvdata(op, pdev);
+ 	pxad_init_debugfs(pdev);
+-	dev_info(pdev->slave.dev, "initialized %d channels\n", dma_channels);
++	dev_info(pdev->slave.dev, "initialized %d channels on %d requestors\n",
++		 dma_channels, nb_requestors);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
+index 9eee13ef83a5..d87a47547ba5 100644
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -1452,7 +1452,7 @@ static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
+ 	u64 chan_off;
+ 	u64 dram_base		= get_dram_base(pvt, range);
+ 	u64 hole_off		= f10_dhar_offset(pvt);
+-	u64 dct_sel_base_off	= (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
++	u64 dct_sel_base_off	= (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
+ 
+ 	if (hi_rng) {
+ 		/*
+diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
+index 01087a38da22..792bdae2b91d 100644
+--- a/drivers/edac/i7core_edac.c
++++ b/drivers/edac/i7core_edac.c
+@@ -1866,7 +1866,7 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
+ 
+ 	i7_dev = get_i7core_dev(mce->socketid);
+ 	if (!i7_dev)
+-		return NOTIFY_BAD;
++		return NOTIFY_DONE;
+ 
+ 	mci = i7_dev->mci;
+ 	pvt = mci->pvt_info;
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index f5c6b97c8958..8bf745d2da7e 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -362,6 +362,7 @@ struct sbridge_pvt {
+ 
+ 	/* Memory type detection */
+ 	bool			is_mirrored, is_lockstep, is_close_pg;
++	bool			is_chan_hash;
+ 
+ 	/* Fifo double buffers */
+ 	struct mce		mce_entry[MCE_LOG_LEN];
+@@ -1060,6 +1061,20 @@ static inline u8 sad_pkg_ha(u8 pkg)
+ 	return (pkg >> 2) & 0x1;
+ }
+ 
++static int haswell_chan_hash(int idx, u64 addr)
++{
++	int i;
++
++	/*
++	 * XOR even bits from 12:26 to bit0 of idx,
++	 *     odd bits from 13:27 to bit1
++	 */
++	for (i = 12; i < 28; i += 2)
++		idx ^= (addr >> i) & 3;
++
++	return idx;
++}
++
+ /****************************************************************************
+ 			Memory check routines
+  ****************************************************************************/
+@@ -1616,6 +1631,10 @@ static int get_dimm_config(struct mem_ctl_info *mci)
+ 		KNL_MAX_CHANNELS : NUM_CHANNELS;
+ 	u64 knl_mc_sizes[KNL_MAX_CHANNELS];
+ 
++	if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
++		pci_read_config_dword(pvt->pci_ha0, HASWELL_HASYSDEFEATURE2, &reg);
++		pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
++	}
+ 	if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
+ 			pvt->info.type == KNIGHTS_LANDING)
+ 		pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
+@@ -1839,8 +1858,8 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
+ 		edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
+ 			 n_tads, gb, (mb*1000)/1024,
+ 			 ((u64)tmp_mb) << 20L,
+-			 (u32)TAD_SOCK(reg),
+-			 (u32)TAD_CH(reg),
++			 (u32)(1 << TAD_SOCK(reg)),
++			 (u32)TAD_CH(reg) + 1,
+ 			 (u32)TAD_TGT0(reg),
+ 			 (u32)TAD_TGT1(reg),
+ 			 (u32)TAD_TGT2(reg),
+@@ -2118,12 +2137,15 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
+ 	}
+ 
+ 	ch_way = TAD_CH(reg) + 1;
+-	sck_way = TAD_SOCK(reg) + 1;
++	sck_way = TAD_SOCK(reg);
+ 
+ 	if (ch_way == 3)
+ 		idx = addr >> 6;
+-	else
++	else {
+ 		idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
++		if (pvt->is_chan_hash)
++			idx = haswell_chan_hash(idx, addr);
++	}
+ 	idx = idx % ch_way;
+ 
+ 	/*
+@@ -2157,7 +2179,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
+ 		switch(ch_way) {
+ 		case 2:
+ 		case 4:
+-			sck_xch = 1 << sck_way * (ch_way >> 1);
++			sck_xch = (1 << sck_way) * (ch_way >> 1);
+ 			break;
+ 		default:
+ 			sprintf(msg, "Invalid mirror set. Can't decode addr");
+@@ -2175,7 +2197,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
+ 		 n_tads,
+ 		 addr,
+ 		 limit,
+-		 (u32)TAD_SOCK(reg),
++		 sck_way,
+ 		 ch_way,
+ 		 offset,
+ 		 idx,
+@@ -2190,18 +2212,12 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
+ 			offset, addr);
+ 		return -EINVAL;
+ 	}
+-	addr -= offset;
+-	/* Store the low bits [0:6] of the addr */
+-	ch_addr = addr & 0x7f;
+-	/* Remove socket wayness and remove 6 bits */
+-	addr >>= 6;
+-	addr = div_u64(addr, sck_xch);
+-#if 0
+-	/* Divide by channel way */
+-	addr = addr / ch_way;
+-#endif
+-	/* Recover the last 6 bits */
+-	ch_addr |= addr << 6;
++
++	ch_addr = addr - offset;
++	ch_addr >>= (6 + shiftup);
++	ch_addr /= sck_xch;
++	ch_addr <<= (6 + shiftup);
++	ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
+ 
+ 	/*
+ 	 * Step 3) Decode rank
+@@ -3152,7 +3168,7 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
+ 
+ 	mci = get_mci_for_node_id(mce->socketid);
+ 	if (!mci)
+-		return NOTIFY_BAD;
++		return NOTIFY_DONE;
+ 	pvt = mci->pvt_info;
+ 
+ 	/*
+diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
+index 74dfb7f4f277..d8cac4661cfe 100644
+--- a/drivers/extcon/extcon-max77843.c
++++ b/drivers/extcon/extcon-max77843.c
+@@ -803,7 +803,7 @@ static int max77843_muic_probe(struct platform_device *pdev)
+ 	/* Clear IRQ bits before request IRQs */
+ 	ret = regmap_bulk_read(max77843->regmap_muic,
+ 			MAX77843_MUIC_REG_INT1, info->status,
+-			MAX77843_MUIC_IRQ_NUM);
++			MAX77843_MUIC_STATUS_NUM);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "Failed to Clear IRQ bits\n");
+ 		goto err_muic_irq;
+diff --git a/drivers/firmware/broadcom/bcm47xx_nvram.c b/drivers/firmware/broadcom/bcm47xx_nvram.c
+index 0c2f0a61b0ea..0b631e5b5b84 100644
+--- a/drivers/firmware/broadcom/bcm47xx_nvram.c
++++ b/drivers/firmware/broadcom/bcm47xx_nvram.c
+@@ -94,15 +94,14 @@ static int nvram_find_and_copy(void __iomem *iobase, u32 lim)
+ 
+ found:
+ 	__ioread32_copy(nvram_buf, header, sizeof(*header) / 4);
+-	header = (struct nvram_header *)nvram_buf;
+-	nvram_len = header->len;
++	nvram_len = ((struct nvram_header *)(nvram_buf))->len;
+ 	if (nvram_len > size) {
+ 		pr_err("The nvram size according to the header seems to be bigger than the partition on flash\n");
+ 		nvram_len = size;
+ 	}
+ 	if (nvram_len >= NVRAM_SPACE) {
+ 		pr_err("nvram on flash (%i bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n",
+-		       header->len, NVRAM_SPACE - 1);
++		       nvram_len, NVRAM_SPACE - 1);
+ 		nvram_len = NVRAM_SPACE - 1;
+ 	}
+ 	/* proceed reading data after header */
+diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
+index 9e15d571b53c..a76c35fc0b92 100644
+--- a/drivers/firmware/efi/arm-init.c
++++ b/drivers/firmware/efi/arm-init.c
+@@ -203,7 +203,19 @@ void __init efi_init(void)
+ 
+ 	reserve_regions();
+ 	early_memunmap(memmap.map, params.mmap_size);
+-	memblock_mark_nomap(params.mmap & PAGE_MASK,
+-			    PAGE_ALIGN(params.mmap_size +
+-				       (params.mmap & ~PAGE_MASK)));
++
++	if (IS_ENABLED(CONFIG_ARM)) {
++		/*
++		 * ARM currently does not allow ioremap_cache() to be called on
++		 * memory regions that are covered by struct page. So remove the
++		 * UEFI memory map from the linear mapping.
++		 */
++		memblock_mark_nomap(params.mmap & PAGE_MASK,
++				    PAGE_ALIGN(params.mmap_size +
++					       (params.mmap & ~PAGE_MASK)));
++	} else {
++		memblock_reserve(params.mmap & PAGE_MASK,
++				 PAGE_ALIGN(params.mmap_size +
++					    (params.mmap & ~PAGE_MASK)));
++	}
+ }
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index 2cd37dad67a6..c51f3b2fe3c0 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -182,6 +182,7 @@ static int generic_ops_register(void)
+ {
+ 	generic_ops.get_variable = efi.get_variable;
+ 	generic_ops.set_variable = efi.set_variable;
++	generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
+ 	generic_ops.get_next_variable = efi.get_next_variable;
+ 	generic_ops.query_variable_store = efi_query_variable_store;
+ 
+diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
+index 7f2ea21c730d..6f182fd91a6d 100644
+--- a/drivers/firmware/efi/vars.c
++++ b/drivers/firmware/efi/vars.c
+@@ -202,29 +202,44 @@ static const struct variable_validate variable_validate[] = {
+ 	{ NULL_GUID, "", NULL },
+ };
+ 
++/*
++ * Check if @var_name matches the pattern given in @match_name.
++ *
++ * @var_name: an array of @len non-NUL characters.
++ * @match_name: a NUL-terminated pattern string, optionally ending in "*". A
++ *              final "*" character matches any trailing characters @var_name,
++ *              including the case when there are none left in @var_name.
++ * @match: on output, the number of non-wildcard characters in @match_name
++ *         that @var_name matches, regardless of the return value.
++ * @return: whether @var_name fully matches @match_name.
++ */
+ static bool
+ variable_matches(const char *var_name, size_t len, const char *match_name,
+ 		 int *match)
+ {
+ 	for (*match = 0; ; (*match)++) {
+ 		char c = match_name[*match];
+-		char u = var_name[*match];
+ 
+-		/* Wildcard in the matching name means we've matched */
+-		if (c == '*')
++		switch (c) {
++		case '*':
++			/* Wildcard in @match_name means we've matched. */
+ 			return true;
+ 
+-		/* Case sensitive match */
+-		if (!c && *match == len)
+-			return true;
++		case '\0':
++			/* @match_name has ended. Has @var_name too? */
++			return (*match == len);
+ 
+-		if (c != u)
++		default:
++			/*
++			 * We've reached a non-wildcard char in @match_name.
++			 * Continue only if there's an identical character in
++			 * @var_name.
++			 */
++			if (*match < len && c == var_name[*match])
++				continue;
+ 			return false;
+-
+-		if (!c)
+-			return true;
++		}
+ 	}
+-	return true;
+ }
+ 
+ bool
+diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
+index 23196c5fc17c..90c1511d731f 100644
+--- a/drivers/gpio/gpio-pca953x.c
++++ b/drivers/gpio/gpio-pca953x.c
+@@ -18,6 +18,7 @@
+ #include <linux/i2c.h>
+ #include <linux/platform_data/pca953x.h>
+ #include <linux/slab.h>
++#include <asm/unaligned.h>
+ #include <linux/of_platform.h>
+ #include <linux/acpi.h>
+ 
+@@ -159,7 +160,7 @@ static int pca953x_write_regs(struct pca953x_chip *chip, int reg, u8 *val)
+ 		switch (chip->chip_type) {
+ 		case PCA953X_TYPE:
+ 			ret = i2c_smbus_write_word_data(chip->client,
+-							reg << 1, (u16) *val);
++			    reg << 1, cpu_to_le16(get_unaligned((u16 *)val)));
+ 			break;
+ 		case PCA957X_TYPE:
+ 			ret = i2c_smbus_write_byte_data(chip->client, reg << 1,
+@@ -367,9 +368,11 @@ static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
+ 	memcpy(reg_val, chip->reg_output, NBANK(chip));
+ 	mutex_lock(&chip->i2c_lock);
+ 	for(bank=0; bank<NBANK(chip); bank++) {
+-		unsigned bankmask = mask[bank/4] >> ((bank % 4) * 8);
++		unsigned bankmask = mask[bank / sizeof(*mask)] >>
++				    ((bank % sizeof(*mask)) * 8);
+ 		if(bankmask) {
+-			unsigned bankval  = bits[bank/4] >> ((bank % 4) * 8);
++			unsigned bankval  = bits[bank / sizeof(*bits)] >>
++					    ((bank % sizeof(*bits)) * 8);
+ 			reg_val[bank] = (reg_val[bank] & ~bankmask) | bankval;
+ 		}
+ 	}
+diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
+index b2b7b78664b8..76ac906b4d78 100644
+--- a/drivers/gpio/gpio-pxa.c
++++ b/drivers/gpio/gpio-pxa.c
+@@ -283,8 +283,8 @@ static int pxa_gpio_direction_output(struct gpio_chip *chip,
+ 	writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));
+ 
+ 	ret = pinctrl_gpio_direction_output(chip->base + offset);
+-	if (!ret)
+-		return 0;
++	if (ret)
++		return ret;
+ 
+ 	spin_lock_irqsave(&gpio_lock, flags);
+ 
+diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
+index 540cbc88c7a2..cc4d9bd0839e 100644
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -977,7 +977,7 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
+ 		lookup = kmalloc(sizeof(*lookup), GFP_KERNEL);
+ 		if (lookup) {
+ 			lookup->adev = adev;
+-			lookup->con_id = con_id;
++			lookup->con_id = kstrdup(con_id, GFP_KERNEL);
+ 			list_add_tail(&lookup->node, &acpi_crs_lookup_list);
+ 		}
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 5e7770f9a415..ff299752d5e6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1619,6 +1619,7 @@ struct amdgpu_uvd {
+ 	struct amdgpu_bo	*vcpu_bo;
+ 	void			*cpu_addr;
+ 	uint64_t		gpu_addr;
++	unsigned		fw_version;
+ 	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
+ 	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
+ 	struct delayed_work	idle_work;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+index 7a4b101e10c6..75cb5b9b88f8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+@@ -816,10 +816,13 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
+ 	struct drm_device *ddev = adev->ddev;
+ 	struct drm_crtc *crtc;
+ 	uint32_t line_time_us, vblank_lines;
++	struct cgs_mode_info *mode_info;
+ 
+ 	if (info == NULL)
+ 		return -EINVAL;
+ 
++	mode_info = info->mode_info;
++
+ 	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
+ 		list_for_each_entry(crtc,
+ 				&ddev->mode_config.crtc_list, head) {
+@@ -828,7 +831,7 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
+ 				info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
+ 				info->display_count++;
+ 			}
+-			if (info->mode_info != NULL &&
++			if (mode_info != NULL &&
+ 				crtc->enabled && amdgpu_crtc->enabled &&
+ 				amdgpu_crtc->hw_mode.clock) {
+ 				line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
+@@ -836,10 +839,10 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
+ 				vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
+ 							amdgpu_crtc->hw_mode.crtc_vdisplay +
+ 							(amdgpu_crtc->v_border * 2);
+-				info->mode_info->vblank_time_us = vblank_lines * line_time_us;
+-				info->mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
+-				info->mode_info->ref_clock = adev->clock.spll.reference_freq;
+-				info->mode_info++;
++				mode_info->vblank_time_us = vblank_lines * line_time_us;
++				mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
++				mode_info->ref_clock = adev->clock.spll.reference_freq;
++				mode_info = NULL;
+ 			}
+ 		}
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index e23843f4d877..4488e82f87b0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -303,7 +303,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ 			fw_info.feature = adev->vce.fb_version;
+ 			break;
+ 		case AMDGPU_INFO_FW_UVD:
+-			fw_info.ver = 0;
++			fw_info.ver = adev->uvd.fw_version;
+ 			fw_info.feature = 0;
+ 			break;
+ 		case AMDGPU_INFO_FW_GMC:
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+index fdc1be8550da..3b2d75d96ea0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+@@ -53,7 +53,7 @@ struct amdgpu_hpd;
+ 
+ #define AMDGPU_MAX_HPD_PINS 6
+ #define AMDGPU_MAX_CRTCS 6
+-#define AMDGPU_MAX_AFMT_BLOCKS 7
++#define AMDGPU_MAX_AFMT_BLOCKS 9
+ 
+ enum amdgpu_rmx_type {
+ 	RMX_OFF,
+@@ -309,8 +309,8 @@ struct amdgpu_mode_info {
+ 	struct atom_context *atom_context;
+ 	struct card_info *atom_card_info;
+ 	bool mode_config_initialized;
+-	struct amdgpu_crtc *crtcs[6];
+-	struct amdgpu_afmt *afmt[7];
++	struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
++	struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
+ 	/* DVI-I properties */
+ 	struct drm_property *coherent_mode_property;
+ 	/* DAC enable load detect */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index b8fbbd7699e4..73628c7599e7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -540,6 +540,7 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
+ 	if (!metadata_size) {
+ 		if (bo->metadata_size) {
+ 			kfree(bo->metadata);
++			bo->metadata = NULL;
+ 			bo->metadata_size = 0;
+ 		}
+ 		return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index 53f987aeeacf..3b35ad83867c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -156,6 +156,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
+ 	DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
+ 		version_major, version_minor, family_id);
+ 
++	adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
++				(family_id << 8));
++
+ 	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
+ 		 +  AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
+ 	r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
+@@ -273,6 +276,8 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
+ 	memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
+ 		(adev->uvd.fw->size) - offset);
+ 
++	cancel_delayed_work_sync(&adev->uvd.idle_work);
++
+ 	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
+ 	size -= le32_to_cpu(hdr->ucode_size_bytes);
+ 	ptr = adev->uvd.cpu_addr;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+index a745eeeb5d82..bb0da76051a1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+@@ -220,6 +220,7 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
+ 	if (i == AMDGPU_MAX_VCE_HANDLES)
+ 		return 0;
+ 
++	cancel_delayed_work_sync(&adev->vce.idle_work);
+ 	/* TODO: suspending running encoding sessions isn't supported */
+ 	return -EINVAL;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+index bf731e9f643e..7f85c2c1d681 100644
+--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
++++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+@@ -276,8 +276,8 @@ static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector
+ 			}
+ 		}
+ 	} else {
+-		for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+-			for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
++		for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
++			for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+ 				max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+ 				if (max_pix_clock >= pix_clock) {
+ 					*dp_lanes = lane_num;
+diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+index 1e0bba29e167..1cd6de575305 100644
+--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
++++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+@@ -298,6 +298,10 @@ bool amdgpu_atombios_encoder_mode_fixup(struct drm_encoder *encoder,
+ 	    && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
+ 		adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+ 
++	/* vertical FP must be at least 1 */
++	if (mode->crtc_vsync_start == mode->crtc_vdisplay)
++		adjusted_mode->crtc_vsync_start++;
++
+ 	/* get the native mode for scaling */
+ 	if (amdgpu_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
+ 		amdgpu_panel_mode_fixup(encoder, adjusted_mode);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+index 06602df707f8..9b1c43005c80 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+@@ -3628,7 +3628,7 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ 					unsigned vm_id, uint64_t pd_addr)
+ {
+ 	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+-	uint32_t seq = ring->fence_drv.sync_seq;
++	uint32_t seq = ring->fence_drv.sync_seq[ring->idx];
+ 	uint64_t addr = ring->fence_drv.gpu_addr;
+ 
+ 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+index b8060795b27b..53964b14ff48 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+@@ -902,14 +902,6 @@ static int gmc_v7_0_early_init(void *handle)
+ 	gmc_v7_0_set_gart_funcs(adev);
+ 	gmc_v7_0_set_irq_funcs(adev);
+ 
+-	if (adev->flags & AMD_IS_APU) {
+-		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+-	} else {
+-		u32 tmp = RREG32(mmMC_SEQ_MISC0);
+-		tmp &= MC_SEQ_MISC0__MT__MASK;
+-		adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
+-	}
+-
+ 	return 0;
+ }
+ 
+@@ -930,6 +922,14 @@ static int gmc_v7_0_sw_init(void *handle)
+ 	if (r)
+ 		return r;
+ 
++	if (adev->flags & AMD_IS_APU) {
++		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
++	} else {
++		u32 tmp = RREG32(mmMC_SEQ_MISC0);
++		tmp &= MC_SEQ_MISC0__MT__MASK;
++		adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
++	}
++
+ 	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
+ 	if (r)
+ 		return r;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+index 3efd45546241..e59251f4a85d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+@@ -856,14 +856,6 @@ static int gmc_v8_0_early_init(void *handle)
+ 	gmc_v8_0_set_gart_funcs(adev);
+ 	gmc_v8_0_set_irq_funcs(adev);
+ 
+-	if (adev->flags & AMD_IS_APU) {
+-		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+-	} else {
+-		u32 tmp = RREG32(mmMC_SEQ_MISC0);
+-		tmp &= MC_SEQ_MISC0__MT__MASK;
+-		adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
+-	}
+-
+ 	return 0;
+ }
+ 
+@@ -874,6 +866,8 @@ static int gmc_v8_0_late_init(void *handle)
+ 	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+ }
+ 
++#define mmMC_SEQ_MISC0_FIJI 0xA71
++
+ static int gmc_v8_0_sw_init(void *handle)
+ {
+ 	int r;
+@@ -884,6 +878,19 @@ static int gmc_v8_0_sw_init(void *handle)
+ 	if (r)
+ 		return r;
+ 
++	if (adev->flags & AMD_IS_APU) {
++		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
++	} else {
++		u32 tmp;
++
++		if (adev->asic_type == CHIP_FIJI)
++			tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
++		else
++			tmp = RREG32(mmMC_SEQ_MISC0);
++		tmp &= MC_SEQ_MISC0__MT__MASK;
++		adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
++	}
++
+ 	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
+ 	if (r)
+ 		return r;
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+index 2cf50180cc51..b1c7a9b3631b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+@@ -32,8 +32,8 @@
+ #include "oss/oss_2_4_d.h"
+ #include "oss/oss_2_4_sh_mask.h"
+ 
+-#include "gmc/gmc_8_1_d.h"
+-#include "gmc/gmc_8_1_sh_mask.h"
++#include "gmc/gmc_7_1_d.h"
++#include "gmc/gmc_7_1_sh_mask.h"
+ 
+ #include "gca/gfx_8_0_d.h"
+ #include "gca/gfx_8_0_enum.h"
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+index c34c393e9aea..d5e19b5fbbfb 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+@@ -513,7 +513,7 @@ static int dbgdev_wave_control_set_registers(
+ 				union SQ_CMD_BITS *in_reg_sq_cmd,
+ 				union GRBM_GFX_INDEX_BITS *in_reg_gfx_index)
+ {
+-	int status;
++	int status = 0;
+ 	union SQ_CMD_BITS reg_sq_cmd;
+ 	union GRBM_GFX_INDEX_BITS reg_gfx_index;
+ 	struct HsaDbgWaveMsgAMDGen2 *pMsg;
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+index cf01177ca3b5..2ea012e88991 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+@@ -241,6 +241,11 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
+ 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ 					PHM_PlatformCaps_DynamicUVDState);
+ 
++	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++			PHM_PlatformCaps_UVDDPM);
++	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
++			PHM_PlatformCaps_VCEDPM);
++
+ 	cz_hwmgr->cc6_settings.cpu_cc6_disable = false;
+ 	cz_hwmgr->cc6_settings.cpu_pstate_disable = false;
+ 	cz_hwmgr->cc6_settings.nb_pstate_switch_disable = false;
+diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
+index 9535c5b60387..7e5a97204051 100644
+--- a/drivers/gpu/drm/drm_dp_helper.c
++++ b/drivers/gpu/drm/drm_dp_helper.c
+@@ -178,7 +178,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
+ {
+ 	struct drm_dp_aux_msg msg;
+ 	unsigned int retry;
+-	int err;
++	int err = 0;
+ 
+ 	memset(&msg, 0, sizeof(msg));
+ 	msg.address = offset;
+@@ -186,6 +186,8 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
+ 	msg.buffer = buffer;
+ 	msg.size = size;
+ 
++	mutex_lock(&aux->hw_mutex);
++
+ 	/*
+ 	 * The specification doesn't give any recommendation on how often to
+ 	 * retry native transactions. We used to retry 7 times like for
+@@ -194,25 +196,24 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
+ 	 */
+ 	for (retry = 0; retry < 32; retry++) {
+ 
+-		mutex_lock(&aux->hw_mutex);
+ 		err = aux->transfer(aux, &msg);
+-		mutex_unlock(&aux->hw_mutex);
+ 		if (err < 0) {
+ 			if (err == -EBUSY)
+ 				continue;
+ 
+-			return err;
++			goto unlock;
+ 		}
+ 
+ 
+ 		switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) {
+ 		case DP_AUX_NATIVE_REPLY_ACK:
+ 			if (err < size)
+-				return -EPROTO;
+-			return err;
++				err = -EPROTO;
++			goto unlock;
+ 
+ 		case DP_AUX_NATIVE_REPLY_NACK:
+-			return -EIO;
++			err = -EIO;
++			goto unlock;
+ 
+ 		case DP_AUX_NATIVE_REPLY_DEFER:
+ 			usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
+@@ -221,7 +222,11 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
+ 	}
+ 
+ 	DRM_DEBUG_KMS("too many retries, giving up\n");
+-	return -EIO;
++	err = -EIO;
++
++unlock:
++	mutex_unlock(&aux->hw_mutex);
++	return err;
+ }
+ 
+ /**
+@@ -543,9 +548,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+ 	int max_retries = max(7, drm_dp_i2c_retry_count(msg, dp_aux_i2c_speed_khz));
+ 
+ 	for (retry = 0, defer_i2c = 0; retry < (max_retries + defer_i2c); retry++) {
+-		mutex_lock(&aux->hw_mutex);
+ 		ret = aux->transfer(aux, msg);
+-		mutex_unlock(&aux->hw_mutex);
+ 		if (ret < 0) {
+ 			if (ret == -EBUSY)
+ 				continue;
+@@ -684,6 +687,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
+ 
+ 	memset(&msg, 0, sizeof(msg));
+ 
++	mutex_lock(&aux->hw_mutex);
++
+ 	for (i = 0; i < num; i++) {
+ 		msg.address = msgs[i].addr;
+ 		drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
+@@ -738,6 +743,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
+ 	msg.size = 0;
+ 	(void)drm_dp_i2c_do_msg(aux, &msg);
+ 
++	mutex_unlock(&aux->hw_mutex);
++
+ 	return err;
+ }
+ 
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 27fbd79d0daf..71ea0521ea96 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -1672,13 +1672,19 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
+ 	u8 sinks[DRM_DP_MAX_SDP_STREAMS];
+ 	int i;
+ 
++	port = drm_dp_get_validated_port_ref(mgr, port);
++	if (!port)
++		return -EINVAL;
++
+ 	port_num = port->port_num;
+ 	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+ 	if (!mstb) {
+ 		mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
+ 
+-		if (!mstb)
++		if (!mstb) {
++			drm_dp_put_port(port);
+ 			return -EINVAL;
++		}
+ 	}
+ 
+ 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+@@ -1707,6 +1713,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
+ 	kfree(txmsg);
+ fail_put:
+ 	drm_dp_put_mst_branch_device(mstb);
++	drm_dp_put_port(port);
+ 	return ret;
+ }
+ 
+@@ -1789,6 +1796,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
+ 		req_payload.start_slot = cur_slots;
+ 		if (mgr->proposed_vcpis[i]) {
+ 			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
++			port = drm_dp_get_validated_port_ref(mgr, port);
++			if (!port) {
++				mutex_unlock(&mgr->payload_lock);
++				return -EINVAL;
++			}
+ 			req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
+ 			req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
+ 		} else {
+@@ -1816,6 +1828,9 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
+ 			mgr->payloads[i].payload_state = req_payload.payload_state;
+ 		}
+ 		cur_slots += req_payload.num_slots;
++
++		if (port)
++			drm_dp_put_port(port);
+ 	}
+ 
+ 	for (i = 0; i < mgr->max_payloads; i++) {
+@@ -2121,6 +2136,8 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
+ 
+ 	if (mgr->mst_primary) {
+ 		int sret;
++		u8 guid[16];
++
+ 		sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
+ 		if (sret != DP_RECEIVER_CAP_SIZE) {
+ 			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+@@ -2135,6 +2152,16 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
+ 			ret = -1;
+ 			goto out_unlock;
+ 		}
++
++		/* Some hubs forget their guids after they resume */
++		sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
++		if (sret != 16) {
++			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
++			ret = -1;
++			goto out_unlock;
++		}
++		drm_dp_check_mstb_guid(mgr->mst_primary, guid);
++
+ 		ret = 0;
+ 	} else
+ 		ret = -1;
+diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
+index cf39ed3133d6..4d0bc2a8843c 100644
+--- a/drivers/gpu/drm/i915/i915_debugfs.c
++++ b/drivers/gpu/drm/i915/i915_debugfs.c
+@@ -2860,20 +2860,6 @@ static void intel_dp_info(struct seq_file *m,
+ 		intel_panel_info(m, &intel_connector->panel);
+ }
+ 
+-static void intel_dp_mst_info(struct seq_file *m,
+-			  struct intel_connector *intel_connector)
+-{
+-	struct intel_encoder *intel_encoder = intel_connector->encoder;
+-	struct intel_dp_mst_encoder *intel_mst =
+-		enc_to_mst(&intel_encoder->base);
+-	struct intel_digital_port *intel_dig_port = intel_mst->primary;
+-	struct intel_dp *intel_dp = &intel_dig_port->dp;
+-	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
+-					intel_connector->port);
+-
+-	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
+-}
+-
+ static void intel_hdmi_info(struct seq_file *m,
+ 			    struct intel_connector *intel_connector)
+ {
+@@ -2917,8 +2903,6 @@ static void intel_connector_info(struct seq_file *m,
+ 			intel_hdmi_info(m, intel_connector);
+ 		else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
+ 			intel_lvds_info(m, intel_connector);
+-		else if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
+-			intel_dp_mst_info(m, intel_connector);
+ 	}
+ 
+ 	seq_printf(m, "\tmodes:\n");
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index f357058c74d9..2e832fa07e09 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -797,7 +797,7 @@ static int i915_drm_resume(struct drm_device *dev)
+ static int i915_drm_resume_early(struct drm_device *dev)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+-	int ret = 0;
++	int ret;
+ 
+ 	/*
+ 	 * We have a resume ordering issue with the snd-hda driver also
+@@ -808,6 +808,36 @@ static int i915_drm_resume_early(struct drm_device *dev)
+ 	 * FIXME: This should be solved with a special hdmi sink device or
+ 	 * similar so that power domains can be employed.
+ 	 */
++
++	/*
++	 * Note that we need to set the power state explicitly, since we
++	 * powered off the device during freeze and the PCI core won't power
++	 * it back up for us during thaw. Powering off the device during
++	 * freeze is not a hard requirement though, and during the
++	 * suspend/resume phases the PCI core makes sure we get here with the
++	 * device powered on. So in case we change our freeze logic and keep
++	 * the device powered we can also remove the following set power state
++	 * call.
++	 */
++	ret = pci_set_power_state(dev->pdev, PCI_D0);
++	if (ret) {
++		DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
++		goto out;
++	}
++
++	/*
++	 * Note that pci_enable_device() first enables any parent bridge
++	 * device and only then sets the power state for this device. The
++	 * bridge enabling is a nop though, since bridge devices are resumed
++	 * first. The order of enabling power and enabling the device is
++	 * imposed by the PCI core as described above, so here we preserve the
++	 * same order for the freeze/thaw phases.
++	 *
++	 * TODO: eventually we should remove pci_disable_device() /
++	 * pci_enable_enable_device() from suspend/resume. Due to how they
++	 * depend on the device enable refcount we can't anyway depend on them
++	 * disabling/enabling the device.
++	 */
+ 	if (pci_enable_device(dev->pdev)) {
+ 		ret = -EIO;
+ 		goto out;
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 4897728713f6..5672b57e65d5 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -2898,7 +2898,14 @@ enum skl_disp_power_wells {
+ #define GEN6_RP_STATE_CAP	_MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998)
+ #define BXT_RP_STATE_CAP        _MMIO(0x138170)
+ 
+-#define INTERVAL_1_28_US(us)	(((us) * 100) >> 7)
++/*
++ * Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS
++ * 8300) freezing up around GPU hangs. Looks as if even
++ * scheduling/timer interrupts start misbehaving if the RPS
++ * EI/thresholds are "bad", leading to a very sluggish or even
++ * frozen machine.
++ */
++#define INTERVAL_1_28_US(us)	roundup(((us) * 100) >> 7, 25)
+ #define INTERVAL_1_33_US(us)	(((us) * 3)   >> 2)
+ #define INTERVAL_0_833_US(us)	(((us) * 6) / 5)
+ #define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
+@@ -7405,6 +7412,8 @@ enum skl_disp_power_wells {
+ #define  TRANS_CLK_SEL_DISABLED		(0x0<<29)
+ #define  TRANS_CLK_SEL_PORT(x)		(((x)+1)<<29)
+ 
++#define CDCLK_FREQ			_MMIO(0x46200)
++
+ #define _TRANSA_MSA_MISC		0x60410
+ #define _TRANSB_MSA_MISC		0x61410
+ #define _TRANSC_MSA_MISC		0x62410
+diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
+index 30f921421b0c..7d281b40064a 100644
+--- a/drivers/gpu/drm/i915/intel_audio.c
++++ b/drivers/gpu/drm/i915/intel_audio.c
+@@ -262,8 +262,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
+ 	tmp |= AUD_CONFIG_N_PROG_ENABLE;
+ 	tmp &= ~AUD_CONFIG_UPPER_N_MASK;
+ 	tmp &= ~AUD_CONFIG_LOWER_N_MASK;
+-	if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+-	    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DP_MST))
++	if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+ 		tmp |= AUD_CONFIG_N_VALUE_INDEX;
+ 	I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+ 
+@@ -476,8 +475,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
+ 	tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+ 	tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+ 	tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
+-	if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+-	    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DP_MST))
++	if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+ 		tmp |= AUD_CONFIG_N_VALUE_INDEX;
+ 	else
+ 		tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
+@@ -515,8 +513,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
+ 
+ 	/* ELD Conn_Type */
+ 	connector->eld[5] &= ~(3 << 2);
+-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+-	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DP_MST))
++	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+ 		connector->eld[5] |= (1 << 2);
+ 
+ 	connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
+diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
+index a7b4a524fadd..dbacc2901d47 100644
+--- a/drivers/gpu/drm/i915/intel_crt.c
++++ b/drivers/gpu/drm/i915/intel_crt.c
+@@ -255,8 +255,14 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
+ 		pipe_config->has_pch_encoder = true;
+ 
+ 	/* LPT FDI RX only supports 8bpc. */
+-	if (HAS_PCH_LPT(dev))
++	if (HAS_PCH_LPT(dev)) {
++		if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
++			DRM_DEBUG_KMS("LPT only supports 24bpp\n");
++			return false;
++		}
++
+ 		pipe_config->pipe_bpp = 24;
++	}
+ 
+ 	/* FDI must always be 2.7 GHz */
+ 	if (HAS_DDI(dev)) {
+diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
+index 647d85e77c2f..597cfb5ca847 100644
+--- a/drivers/gpu/drm/i915/intel_csr.c
++++ b/drivers/gpu/drm/i915/intel_csr.c
+@@ -177,7 +177,8 @@ static const struct stepping_info kbl_stepping_info[] = {
+ static const struct stepping_info skl_stepping_info[] = {
+ 	{'A', '0'}, {'B', '0'}, {'C', '0'},
+ 	{'D', '0'}, {'E', '0'}, {'F', '0'},
+-	{'G', '0'}, {'H', '0'}, {'I', '0'}
++	{'G', '0'}, {'H', '0'}, {'I', '0'},
++	{'J', '0'}, {'K', '0'}
+ };
+ 
+ static const struct stepping_info bxt_stepping_info[] = {
+diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
+index 084d5586585d..40511493914c 100644
+--- a/drivers/gpu/drm/i915/intel_ddi.c
++++ b/drivers/gpu/drm/i915/intel_ddi.c
+@@ -464,9 +464,17 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
+ 	} else if (IS_BROADWELL(dev)) {
+ 		ddi_translations_fdi = bdw_ddi_translations_fdi;
+ 		ddi_translations_dp = bdw_ddi_translations_dp;
+-		ddi_translations_edp = bdw_ddi_translations_edp;
++
++		if (dev_priv->edp_low_vswing) {
++			ddi_translations_edp = bdw_ddi_translations_edp;
++			n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
++		} else {
++			ddi_translations_edp = bdw_ddi_translations_dp;
++			n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
++		}
++
+ 		ddi_translations_hdmi = bdw_ddi_translations_hdmi;
+-		n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
++
+ 		n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
+ 		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+ 		hdmi_default_entry = 7;
+@@ -3157,23 +3165,6 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc)
+ 	I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+ }
+ 
+-bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
+-				 struct intel_crtc *intel_crtc)
+-{
+-	u32 temp;
+-
+-	if (intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
+-		temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+-
+-		intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
+-
+-		if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
+-			return true;
+-	}
+-
+-	return false;
+-}
+-
+ void intel_ddi_get_config(struct intel_encoder *encoder,
+ 			  struct intel_crtc_state *pipe_config)
+ {
+@@ -3234,8 +3225,11 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
+ 		break;
+ 	}
+ 
+-	pipe_config->has_audio =
+-		intel_ddi_is_audio_enabled(dev_priv, intel_crtc);
++	if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
++		temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
++		if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
++			pipe_config->has_audio = true;
++	}
+ 
+ 	if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp &&
+ 	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
+@@ -3260,12 +3254,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
+ 	intel_ddi_clock_get(encoder, pipe_config);
+ }
+ 
+-static void intel_ddi_destroy(struct drm_encoder *encoder)
+-{
+-	/* HDMI has nothing special to destroy, so we can go with this. */
+-	intel_dp_encoder_destroy(encoder);
+-}
+-
+ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
+ 				     struct intel_crtc_state *pipe_config)
+ {
+@@ -3284,7 +3272,8 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
+ }
+ 
+ static const struct drm_encoder_funcs intel_ddi_funcs = {
+-	.destroy = intel_ddi_destroy,
++	.reset = intel_dp_encoder_reset,
++	.destroy = intel_dp_encoder_destroy,
+ };
+ 
+ static struct intel_connector *
+@@ -3356,6 +3345,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
+ 	intel_encoder->post_disable = intel_ddi_post_disable;
+ 	intel_encoder->get_hw_state = intel_ddi_get_hw_state;
+ 	intel_encoder->get_config = intel_ddi_get_config;
++	intel_encoder->suspend = intel_dp_encoder_suspend;
+ 
+ 	intel_dig_port->port = port;
+ 	intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 46947fffd599..39b00b9daf2d 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -4455,7 +4455,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
+ 		      intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
+ 
+ 	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
+-		&state->scaler_state.scaler_id, DRM_ROTATE_0,
++		&state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
+ 		state->pipe_src_w, state->pipe_src_h,
+ 		adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
+ }
+@@ -9793,6 +9793,8 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
+ 	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
+ 	mutex_unlock(&dev_priv->rps.hw_lock);
+ 
++	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
++
+ 	intel_update_cdclk(dev);
+ 
+ 	WARN(cdclk != dev_priv->cdclk_freq,
+@@ -13429,6 +13431,9 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
+ 	}
+ 
+ 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
++		if (state->legacy_cursor_update)
++			continue;
++
+ 		ret = intel_crtc_wait_for_pending_flips(crtc);
+ 		if (ret)
+ 			return ret;
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index cdc2c15873dc..3cd4996c791c 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -4905,7 +4905,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+ 	kfree(intel_dig_port);
+ }
+ 
+-static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
++void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
+ {
+ 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ 
+@@ -4947,7 +4947,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
+ 	edp_panel_vdd_schedule_off(intel_dp);
+ }
+ 
+-static void intel_dp_encoder_reset(struct drm_encoder *encoder)
++void intel_dp_encoder_reset(struct drm_encoder *encoder)
+ {
+ 	struct intel_dp *intel_dp;
+ 
+diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
+index fa0dabf578dc..e8e6984b8053 100644
+--- a/drivers/gpu/drm/i915/intel_dp_mst.c
++++ b/drivers/gpu/drm/i915/intel_dp_mst.c
+@@ -78,8 +78,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
+ 		return false;
+ 	}
+ 
+-	if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, found->port))
+-		pipe_config->has_audio = true;
+ 	mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
+ 
+ 	pipe_config->pbn = mst_pbn;
+@@ -104,11 +102,6 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder)
+ 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ 	struct intel_dp *intel_dp = &intel_dig_port->dp;
+-	struct drm_device *dev = encoder->base.dev;
+-	struct drm_i915_private *dev_priv = dev->dev_private;
+-	struct drm_crtc *crtc = encoder->base.crtc;
+-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+-
+ 	int ret;
+ 
+ 	DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+@@ -119,10 +112,6 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder)
+ 	if (ret) {
+ 		DRM_ERROR("failed to update payload %d\n", ret);
+ 	}
+-	if (intel_crtc->config->has_audio) {
+-		intel_audio_codec_disable(encoder);
+-		intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
+-	}
+ }
+ 
+ static void intel_mst_post_disable_dp(struct intel_encoder *encoder)
+@@ -184,7 +173,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
+ 	intel_mst->port = found->port;
+ 
+ 	if (intel_dp->active_mst_links == 0) {
+-		intel_ddi_clk_select(encoder, intel_crtc->config);
++		intel_ddi_clk_select(&intel_dig_port->base, intel_crtc->config);
+ 
+ 		intel_dp_set_link_params(intel_dp, intel_crtc->config);
+ 
+@@ -219,7 +208,6 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder)
+ 	struct intel_dp *intel_dp = &intel_dig_port->dp;
+ 	struct drm_device *dev = intel_dig_port->base.base.dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+-	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ 	enum port port = intel_dig_port->port;
+ 	int ret;
+ 
+@@ -232,13 +220,6 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder)
+ 	ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
+ 
+ 	ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr);
+-
+-	if (crtc->config->has_audio) {
+-		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
+-				 pipe_name(crtc->pipe));
+-		intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
+-		intel_audio_codec_enable(encoder);
+-	}
+ }
+ 
+ static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
+@@ -264,9 +245,6 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
+ 
+ 	pipe_config->has_dp_encoder = true;
+ 
+-	pipe_config->has_audio =
+-		intel_ddi_is_audio_enabled(dev_priv, crtc);
+-
+ 	temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ 	if (temp & TRANS_DDI_PHSYNC)
+ 		flags |= DRM_MODE_FLAG_PHSYNC;
+@@ -499,6 +477,8 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+ 	struct intel_connector *intel_connector = to_intel_connector(connector);
+ 	struct drm_device *dev = connector->dev;
+ 
++	intel_connector->unregister(intel_connector);
++
+ 	/* need to nuke the connector */
+ 	drm_modeset_lock_all(dev);
+ 	if (connector->state->crtc) {
+@@ -512,11 +492,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+ 
+ 		WARN(ret, "Disabling mst crtc failed with %i\n", ret);
+ 	}
+-	drm_modeset_unlock_all(dev);
+-
+-	intel_connector->unregister(intel_connector);
+ 
+-	drm_modeset_lock_all(dev);
+ 	intel_connector_remove_from_fbdev(intel_connector);
+ 	drm_connector_cleanup(connector);
+ 	drm_modeset_unlock_all(dev);
+diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
+index df7f3cb66056..1ae61f488987 100644
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -1013,8 +1013,6 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
+ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
+ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
+ void intel_ddi_fdi_disable(struct drm_crtc *crtc);
+-bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
+-				 struct intel_crtc *intel_crtc);
+ void intel_ddi_get_config(struct intel_encoder *encoder,
+ 			  struct intel_crtc_state *pipe_config);
+ struct intel_encoder *
+@@ -1234,6 +1232,8 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
+ void intel_dp_start_link_train(struct intel_dp *intel_dp);
+ void intel_dp_stop_link_train(struct intel_dp *intel_dp);
+ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
++void intel_dp_encoder_reset(struct drm_encoder *encoder);
++void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
+ void intel_dp_encoder_destroy(struct drm_encoder *encoder);
+ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
+ bool intel_dp_compute_config(struct intel_encoder *encoder,
+diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
+index 616108c4bc3e..43fdae8ff3c3 100644
+--- a/drivers/gpu/drm/i915/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/intel_hdmi.c
+@@ -1407,8 +1407,16 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
+ 				hdmi_to_dig_port(intel_hdmi));
+ 	}
+ 
+-	if (!live_status)
+-		DRM_DEBUG_KMS("Live status not up!");
++	if (!live_status) {
++		DRM_DEBUG_KMS("HDMI live status down\n");
++		/*
++		 * Live status register is not reliable on all intel platforms.
++		 * So consider live_status only for certain platforms, for
++		 * others, read EDID to determine presence of sink.
++		 */
++		if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv))
++			live_status = true;
++	}
+ 
+ 	intel_hdmi_unset_edid(connector);
+ 
+diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
+index f1fa756c5d5d..cfd5f9fff2f4 100644
+--- a/drivers/gpu/drm/i915/intel_lrc.c
++++ b/drivers/gpu/drm/i915/intel_lrc.c
+@@ -781,11 +781,11 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
+ 		if (unlikely(total_bytes > remain_usable)) {
+ 			/*
+ 			 * The base request will fit but the reserved space
+-			 * falls off the end. So only need to to wait for the
+-			 * reserved size after flushing out the remainder.
++			 * falls off the end. So don't need an immediate wrap
++			 * and only need to effectively wait for the reserved
++			 * size space from the start of ringbuffer.
+ 			 */
+ 			wait_bytes = remain_actual + ringbuf->reserved_size;
+-			need_wrap = true;
+ 		} else if (total_bytes > ringbuf->space) {
+ 			/* No wrapping required, just waiting. */
+ 			wait_bytes = total_bytes;
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index b28c29f20e75..0eae3994e5e3 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -2281,6 +2281,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc *intel_crtc,
+ 		return PTR_ERR(cstate);
+ 
+ 	pipe_wm = &cstate->wm.optimal.ilk;
++	memset(pipe_wm, 0, sizeof(*pipe_wm));
+ 
+ 	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+ 		ps = drm_atomic_get_plane_state(state,
+@@ -3606,23 +3607,43 @@ static void skl_update_wm(struct drm_crtc *crtc)
+ 	dev_priv->wm.skl_hw = *results;
+ }
+ 
++static void ilk_compute_wm_config(struct drm_device *dev,
++				  struct intel_wm_config *config)
++{
++	struct intel_crtc *crtc;
++
++	/* Compute the currently _active_ config */
++	for_each_intel_crtc(dev, crtc) {
++		const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
++
++		if (!wm->pipe_enabled)
++			continue;
++
++		config->sprites_enabled |= wm->sprites_enabled;
++		config->sprites_scaled |= wm->sprites_scaled;
++		config->num_pipes_active++;
++	}
++}
++
+ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
+ {
+ 	struct drm_device *dev = dev_priv->dev;
+ 	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
+ 	struct ilk_wm_maximums max;
+-	struct intel_wm_config *config = &dev_priv->wm.config;
++	struct intel_wm_config config = {};
+ 	struct ilk_wm_values results = {};
+ 	enum intel_ddb_partitioning partitioning;
+ 
+-	ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_1_2, &max);
+-	ilk_wm_merge(dev, config, &max, &lp_wm_1_2);
++	ilk_compute_wm_config(dev, &config);
++
++	ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
++	ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
+ 
+ 	/* 5/6 split only in single pipe config on IVB+ */
+ 	if (INTEL_INFO(dev)->gen >= 7 &&
+-	    config->num_pipes_active == 1 && config->sprites_enabled) {
+-		ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_5_6, &max);
+-		ilk_wm_merge(dev, config, &max, &lp_wm_5_6);
++	    config.num_pipes_active == 1 && config.sprites_enabled) {
++		ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
++		ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
+ 
+ 		best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
+ 	} else {
+@@ -6544,6 +6565,12 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
+ 	misccpctl = I915_READ(GEN7_MISCCPCTL);
+ 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+ 	I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
++	/*
++	 * Wait at least 100 clocks before re-enabling clock gating. See
++	 * the definition of L3SQCREG1 in BSpec.
++	 */
++	POSTING_READ(GEN8_L3SQCREG1);
++	udelay(1);
+ 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+ 
+ 	/*
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index 40c6aff57256..549afa7bc75f 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -951,7 +951,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
+ 
+ 	/* WaForceContextSaveRestoreNonCoherent:skl,bxt */
+ 	tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
+-	if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) ||
++	if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
+ 	    IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
+ 		tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
+ 	WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
+@@ -1044,7 +1044,8 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
+ 		WA_SET_BIT_MASKED(HIZ_CHICKEN,
+ 				  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
+ 
+-	if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) {
++	/* This is tied to WaForceContextSaveRestoreNonCoherent */
++	if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
+ 		/*
+ 		 *Use Force Non-Coherent whenever executing a 3D context. This
+ 		 * is a workaround for a possible hang in the unlikely event
+@@ -1901,6 +1902,17 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
+ 	return 0;
+ }
+ 
++static void cleanup_phys_status_page(struct intel_engine_cs *ring)
++{
++	struct drm_i915_private *dev_priv = to_i915(ring->dev);
++
++	if (!dev_priv->status_page_dmah)
++		return;
++
++	drm_pci_free(ring->dev, dev_priv->status_page_dmah);
++	ring->status_page.page_addr = NULL;
++}
++
+ static void cleanup_status_page(struct intel_engine_cs *ring)
+ {
+ 	struct drm_i915_gem_object *obj;
+@@ -1917,9 +1929,9 @@ static void cleanup_status_page(struct intel_engine_cs *ring)
+ 
+ static int init_status_page(struct intel_engine_cs *ring)
+ {
+-	struct drm_i915_gem_object *obj;
++	struct drm_i915_gem_object *obj = ring->status_page.obj;
+ 
+-	if ((obj = ring->status_page.obj) == NULL) {
++	if (obj == NULL) {
+ 		unsigned flags;
+ 		int ret;
+ 
+@@ -2019,10 +2031,12 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+ {
+ 	struct drm_i915_private *dev_priv = to_i915(dev);
+ 	struct drm_i915_gem_object *obj = ringbuf->obj;
++	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
++	unsigned flags = PIN_OFFSET_BIAS | 4096;
+ 	int ret;
+ 
+ 	if (HAS_LLC(dev_priv) && !obj->stolen) {
+-		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0);
++		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
+ 		if (ret)
+ 			return ret;
+ 
+@@ -2038,7 +2052,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+ 			return -ENOMEM;
+ 		}
+ 	} else {
+-		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
++		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
++					    flags | PIN_MAPPABLE);
+ 		if (ret)
+ 			return ret;
+ 
+@@ -2164,7 +2179,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
+ 		if (ret)
+ 			goto error;
+ 	} else {
+-		BUG_ON(ring->id != RCS);
++		WARN_ON(ring->id != RCS);
+ 		ret = init_phys_status_page(ring);
+ 		if (ret)
+ 			goto error;
+@@ -2210,7 +2225,12 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
+ 	if (ring->cleanup)
+ 		ring->cleanup(ring);
+ 
+-	cleanup_status_page(ring);
++	if (I915_NEED_GFX_HWS(ring->dev)) {
++		cleanup_status_page(ring);
++	} else {
++		WARN_ON(ring->id != RCS);
++		cleanup_phys_status_page(ring);
++	}
+ 
+ 	i915_cmd_parser_fini_ring(ring);
+ 	i915_gem_batch_pool_fini(&ring->batch_pool);
+@@ -2373,11 +2393,11 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
+ 		if (unlikely(total_bytes > remain_usable)) {
+ 			/*
+ 			 * The base request will fit but the reserved space
+-			 * falls off the end. So only need to to wait for the
+-			 * reserved size after flushing out the remainder.
++			 * falls off the end. So don't need an immediate wrap
++			 * and only need to effectively wait for the reserved
++			 * size space from the start of ringbuffer.
+ 			 */
+ 			wait_bytes = remain_actual + ringbuf->reserved_size;
+-			need_wrap = true;
+ 		} else if (total_bytes > ringbuf->space) {
+ 			/* No wrapping required, just waiting. */
+ 			wait_bytes = total_bytes;
+diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
+index 277e60ae0e47..08961f7d151c 100644
+--- a/drivers/gpu/drm/i915/intel_uncore.c
++++ b/drivers/gpu/drm/i915/intel_uncore.c
+@@ -1155,7 +1155,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
+ 	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ 		dev_priv->uncore.funcs.force_wake_get =
+ 			fw_domains_get_with_thread_status;
+-		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
++		if (IS_HASWELL(dev))
++			dev_priv->uncore.funcs.force_wake_put =
++				fw_domains_put_with_fifo;
++		else
++			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+ 		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ 			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
+ 	} else if (IS_IVYBRIDGE(dev)) {
+diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
+index 3216e157a8a0..89da47234016 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
++++ b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
+@@ -131,7 +131,7 @@ nvkm_ramht_del(struct nvkm_ramht **pramht)
+ 	struct nvkm_ramht *ramht = *pramht;
+ 	if (ramht) {
+ 		nvkm_gpuobj_del(&ramht->gpuobj);
+-		kfree(*pramht);
++		vfree(*pramht);
+ 		*pramht = NULL;
+ 	}
+ }
+@@ -143,8 +143,8 @@ nvkm_ramht_new(struct nvkm_device *device, u32 size, u32 align,
+ 	struct nvkm_ramht *ramht;
+ 	int ret, i;
+ 
+-	if (!(ramht = *pramht = kzalloc(sizeof(*ramht) + (size >> 3) *
+-					sizeof(*ramht->data), GFP_KERNEL)))
++	if (!(ramht = *pramht = vzalloc(sizeof(*ramht) +
++					(size >> 3) * sizeof(*ramht->data))))
+ 		return -ENOMEM;
+ 
+ 	ramht->device = device;
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+index 1f81069edc58..332b5fe687fe 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+@@ -1807,6 +1807,8 @@ gf100_gr_init(struct gf100_gr *gr)
+ 
+ 	gf100_gr_mmio(gr, gr->func->mmio);
+ 
++	nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
++
+ 	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+ 	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
+ 		do {
+diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
+index 86276519b2ef..47e52647c9e5 100644
+--- a/drivers/gpu/drm/qxl/qxl_display.c
++++ b/drivers/gpu/drm/qxl/qxl_display.c
+@@ -375,10 +375,15 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
+ 
+ 	qxl_bo_kunmap(user_bo);
+ 
++	qcrtc->cur_x += qcrtc->hot_spot_x - hot_x;
++	qcrtc->cur_y += qcrtc->hot_spot_y - hot_y;
++	qcrtc->hot_spot_x = hot_x;
++	qcrtc->hot_spot_y = hot_y;
++
+ 	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+ 	cmd->type = QXL_CURSOR_SET;
+-	cmd->u.set.position.x = qcrtc->cur_x;
+-	cmd->u.set.position.y = qcrtc->cur_y;
++	cmd->u.set.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
++	cmd->u.set.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
+ 
+ 	cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
+ 
+@@ -441,8 +446,8 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
+ 
+ 	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+ 	cmd->type = QXL_CURSOR_MOVE;
+-	cmd->u.position.x = qcrtc->cur_x;
+-	cmd->u.position.y = qcrtc->cur_y;
++	cmd->u.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
++	cmd->u.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
+ 	qxl_release_unmap(qdev, release, &cmd->release_info);
+ 
+ 	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
+index 6e6b9b1519b8..3f3897eb458c 100644
+--- a/drivers/gpu/drm/qxl/qxl_drv.h
++++ b/drivers/gpu/drm/qxl/qxl_drv.h
+@@ -135,6 +135,8 @@ struct qxl_crtc {
+ 	int index;
+ 	int cur_x;
+ 	int cur_y;
++	int hot_spot_x;
++	int hot_spot_y;
+ };
+ 
+ struct qxl_output {
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index 801dd60ac192..7f52142d37d5 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -1740,6 +1740,7 @@ static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
+ static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
+ {
+ 	struct drm_device *dev = crtc->dev;
++	struct radeon_device *rdev = dev->dev_private;
+ 	struct drm_crtc *test_crtc;
+ 	struct radeon_crtc *test_radeon_crtc;
+ 
+@@ -1749,6 +1750,10 @@ static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
+ 		test_radeon_crtc = to_radeon_crtc(test_crtc);
+ 		if (test_radeon_crtc->encoder &&
+ 		    ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
++			/* PPLL2 is exclusive to UNIPHYA on DCE61 */
++			if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
++			    test_radeon_crtc->pll_id == ATOM_PPLL2)
++				continue;
+ 			/* for DP use the same PLL for all */
+ 			if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
+ 				return test_radeon_crtc->pll_id;
+@@ -1770,6 +1775,7 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
+ {
+ 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ 	struct drm_device *dev = crtc->dev;
++	struct radeon_device *rdev = dev->dev_private;
+ 	struct drm_crtc *test_crtc;
+ 	struct radeon_crtc *test_radeon_crtc;
+ 	u32 adjusted_clock, test_adjusted_clock;
+@@ -1785,6 +1791,10 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
+ 		test_radeon_crtc = to_radeon_crtc(test_crtc);
+ 		if (test_radeon_crtc->encoder &&
+ 		    !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
++			/* PPLL2 is exclusive to UNIPHYA on DCE61 */
++			if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
++			    test_radeon_crtc->pll_id == ATOM_PPLL2)
++				continue;
+ 			/* check if we are already driving this connector with another crtc */
+ 			if (test_radeon_crtc->connector == radeon_crtc->connector) {
+ 				/* if we are, return that pll */
+diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
+index 6af832545bc5..b5760851195c 100644
+--- a/drivers/gpu/drm/radeon/atombios_dp.c
++++ b/drivers/gpu/drm/radeon/atombios_dp.c
+@@ -326,8 +326,8 @@ int radeon_dp_get_dp_link_config(struct drm_connector *connector,
+ 			}
+ 		}
+ 	} else {
+-		for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+-			for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
++		for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
++			for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+ 				max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+ 				if (max_pix_clock >= pix_clock) {
+ 					*dp_lanes = lane_num;
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index 01b20e14a247..9de6503b10d8 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -310,6 +310,10 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
+ 	    && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
+ 		adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+ 
++	/* vertical FP must be at least 1 */
++	if (mode->crtc_vsync_start == mode->crtc_vdisplay)
++		adjusted_mode->crtc_vsync_start++;
++
+ 	/* get the native mode for scaling */
+ 	if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
+ 		radeon_panel_mode_fixup(encoder, adjusted_mode);
+@@ -892,8 +896,6 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
+ 			else
+ 				args.v1.ucLaneNum = 4;
+ 
+-			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
+-				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+ 			switch (radeon_encoder->encoder_id) {
+ 			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ 				args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
+@@ -910,6 +912,10 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
+ 				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
+ 			else
+ 				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
++
++			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
++				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
++
+ 			break;
+ 		case 2:
+ 		case 3:
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 2ad462896896..32491355a1d4 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -2608,10 +2608,152 @@ static void evergreen_agp_enable(struct radeon_device *rdev)
+ 	WREG32(VM_CONTEXT1_CNTL, 0);
+ }
+ 
++static const unsigned ni_dig_offsets[] =
++{
++	NI_DIG0_REGISTER_OFFSET,
++	NI_DIG1_REGISTER_OFFSET,
++	NI_DIG2_REGISTER_OFFSET,
++	NI_DIG3_REGISTER_OFFSET,
++	NI_DIG4_REGISTER_OFFSET,
++	NI_DIG5_REGISTER_OFFSET
++};
++
++static const unsigned ni_tx_offsets[] =
++{
++	NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
++	NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
++	NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
++	NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1,
++	NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1,
++	NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
++};
++
++static const unsigned evergreen_dp_offsets[] =
++{
++	EVERGREEN_DP0_REGISTER_OFFSET,
++	EVERGREEN_DP1_REGISTER_OFFSET,
++	EVERGREEN_DP2_REGISTER_OFFSET,
++	EVERGREEN_DP3_REGISTER_OFFSET,
++	EVERGREEN_DP4_REGISTER_OFFSET,
++	EVERGREEN_DP5_REGISTER_OFFSET
++};
++
++
++/*
++ * Assumption is that EVERGREEN_CRTC_MASTER_EN enable for requested crtc
++ * We go from crtc to connector and it is not relible  since it
++ * should be an opposite direction .If crtc is enable then
++ * find the dig_fe which selects this crtc and insure that it enable.
++ * if such dig_fe is found then find dig_be which selects found dig_be and
++ * insure that it enable and in DP_SST mode.
++ * if UNIPHY_PLL_CONTROL1.enable then we should disconnect timing
++ * from dp symbols clocks .
++ */
++static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev,
++					       unsigned crtc_id, unsigned *ret_dig_fe)
++{
++	unsigned i;
++	unsigned dig_fe;
++	unsigned dig_be;
++	unsigned dig_en_be;
++	unsigned uniphy_pll;
++	unsigned digs_fe_selected;
++	unsigned dig_be_mode;
++	unsigned dig_fe_mask;
++	bool is_enabled = false;
++	bool found_crtc = false;
++
++	/* loop through all running dig_fe to find selected crtc */
++	for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
++		dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]);
++		if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON &&
++		    crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) {
++			/* found running pipe */
++			found_crtc = true;
++			dig_fe_mask = 1 << i;
++			dig_fe = i;
++			break;
++		}
++	}
++
++	if (found_crtc) {
++		/* loop through all running dig_be to find selected dig_fe */
++		for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
++			dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]);
++			/* if dig_fe_selected by dig_be? */
++			digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be);
++			dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be);
++			if (dig_fe_mask &  digs_fe_selected &&
++			    /* if dig_be in sst mode? */
++			    dig_be_mode == NI_DIG_BE_DPSST) {
++				dig_en_be = RREG32(NI_DIG_BE_EN_CNTL +
++						   ni_dig_offsets[i]);
++				uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 +
++						    ni_tx_offsets[i]);
++				/* dig_be enable and tx is running */
++				if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE &&
++				    dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON &&
++				    uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) {
++					is_enabled = true;
++					*ret_dig_fe = dig_fe;
++					break;
++				}
++			}
++		}
++	}
++
++	return is_enabled;
++}
++
++/*
++ * Blank dig when in dp sst mode
++ * Dig ignores crtc timing
++ */
++static void evergreen_blank_dp_output(struct radeon_device *rdev,
++				      unsigned dig_fe)
++{
++	unsigned stream_ctrl;
++	unsigned fifo_ctrl;
++	unsigned counter = 0;
++
++	if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
++		DRM_ERROR("invalid dig_fe %d\n", dig_fe);
++		return;
++	}
++
++	stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
++			     evergreen_dp_offsets[dig_fe]);
++	if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
++		DRM_ERROR("dig %d , should be enable\n", dig_fe);
++		return;
++	}
++
++	stream_ctrl &=~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
++	WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
++	       evergreen_dp_offsets[dig_fe], stream_ctrl);
++
++	stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
++			     evergreen_dp_offsets[dig_fe]);
++	while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) {
++		msleep(1);
++		counter++;
++		stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
++				     evergreen_dp_offsets[dig_fe]);
++	}
++	if (counter >= 32 )
++		DRM_ERROR("counter exceeds %d\n", counter);
++
++	fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
++	fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
++	WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl);
++
++}
++
+ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
+ {
+ 	u32 crtc_enabled, tmp, frame_count, blackout;
+ 	int i, j;
++	unsigned dig_fe;
+ 
+ 	if (!ASIC_IS_NODCE(rdev)) {
+ 		save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
+@@ -2651,7 +2793,17 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
+ 					break;
+ 				udelay(1);
+ 			}
+-
++			/*we should disable dig if it drives dp sst*/
++			/*but we are in radeon_device_init and the topology is unknown*/
++			/*and it is available after radeon_modeset_init*/
++			/*the following method radeon_atom_encoder_dpms_dig*/
++			/*does the job if we initialize it properly*/
++			/*for now we do it this manually*/
++			/**/
++			if (ASIC_IS_DCE5(rdev) &&
++			    evergreen_is_dp_sst_stream_enabled(rdev, i ,&dig_fe))
++				evergreen_blank_dp_output(rdev, dig_fe);
++			/*we could remove 6 lines below*/
+ 			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+ 			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ 			tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
+index aa939dfed3a3..b436badf9efa 100644
+--- a/drivers/gpu/drm/radeon/evergreen_reg.h
++++ b/drivers/gpu/drm/radeon/evergreen_reg.h
+@@ -250,8 +250,43 @@
+ 
+ /* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
+ #define EVERGREEN_HDMI_BASE				0x7030
++/*DIG block*/
++#define NI_DIG0_REGISTER_OFFSET                 (0x7000  - 0x7000)
++#define NI_DIG1_REGISTER_OFFSET                 (0x7C00  - 0x7000)
++#define NI_DIG2_REGISTER_OFFSET                 (0x10800 - 0x7000)
++#define NI_DIG3_REGISTER_OFFSET                 (0x11400 - 0x7000)
++#define NI_DIG4_REGISTER_OFFSET                 (0x12000 - 0x7000)
++#define NI_DIG5_REGISTER_OFFSET                 (0x12C00 - 0x7000)
++
++
++#define NI_DIG_FE_CNTL                               0x7000
++#       define NI_DIG_FE_CNTL_SOURCE_SELECT(x)        ((x) & 0x3)
++#       define NI_DIG_FE_CNTL_SYMCLK_FE_ON            (1<<24)
++
++
++#define NI_DIG_BE_CNTL                    0x7140
++#       define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x)     (((x) >> 8 ) & 0x3F)
++#       define NI_DIG_FE_CNTL_MODE(x)                 (((x) >> 16) & 0x7 )
++
++#define NI_DIG_BE_EN_CNTL                              0x7144
++#       define NI_DIG_BE_EN_CNTL_ENABLE               (1 << 0)
++#       define NI_DIG_BE_EN_CNTL_SYMBCLK_ON           (1 << 8)
++#       define NI_DIG_BE_DPSST 0
+ 
+ /* Display Port block */
++#define EVERGREEN_DP0_REGISTER_OFFSET                 (0x730C  - 0x730C)
++#define EVERGREEN_DP1_REGISTER_OFFSET                 (0x7F0C  - 0x730C)
++#define EVERGREEN_DP2_REGISTER_OFFSET                 (0x10B0C - 0x730C)
++#define EVERGREEN_DP3_REGISTER_OFFSET                 (0x1170C - 0x730C)
++#define EVERGREEN_DP4_REGISTER_OFFSET                 (0x1230C - 0x730C)
++#define EVERGREEN_DP5_REGISTER_OFFSET                 (0x12F0C - 0x730C)
++
++
++#define EVERGREEN_DP_VID_STREAM_CNTL                    0x730C
++#       define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE     (1 << 0)
++#       define EVERGREEN_DP_VID_STREAM_STATUS          (1 <<16)
++#define EVERGREEN_DP_STEER_FIFO                         0x7310
++#       define EVERGREEN_DP_STEER_FIFO_RESET           (1 << 0)
+ #define EVERGREEN_DP_SEC_CNTL                           0x7280
+ #       define EVERGREEN_DP_SEC_STREAM_ENABLE           (1 << 0)
+ #       define EVERGREEN_DP_SEC_ASP_ENABLE              (1 << 4)
+@@ -266,4 +301,15 @@
+ #       define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x)      (((x) & 0xf) << 24)
+ #       define EVERGREEN_DP_SEC_SS_EN                   (1 << 28)
+ 
++/*DCIO_UNIPHY block*/
++#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1            (0x6600  -0x6600)
++#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1            (0x6640  -0x6600)
++#define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1            (0x6680 - 0x6600)
++#define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1            (0x66C0 - 0x6600)
++#define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1            (0x6700 - 0x6600)
++#define NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1            (0x6740 - 0x6600)
++
++#define NI_DCIO_UNIPHY0_PLL_CONTROL1                   0x6618
++#       define NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE     (1 << 0)
++
+ #endif
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index 340f3f549f29..9cfc1c3e1965 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -1996,10 +1996,12 @@ radeon_add_atom_connector(struct drm_device *dev,
+ 						   rdev->mode_info.dither_property,
+ 						   RADEON_FMT_DITHER_DISABLE);
+ 
+-			if (radeon_audio != 0)
++			if (radeon_audio != 0) {
+ 				drm_object_attach_property(&radeon_connector->base.base,
+ 							   rdev->mode_info.audio_property,
+ 							   RADEON_AUDIO_AUTO);
++				radeon_connector->audio = RADEON_AUDIO_AUTO;
++			}
+ 			if (ASIC_IS_DCE5(rdev))
+ 				drm_object_attach_property(&radeon_connector->base.base,
+ 							   rdev->mode_info.output_csc_property,
+@@ -2124,6 +2126,7 @@ radeon_add_atom_connector(struct drm_device *dev,
+ 				drm_object_attach_property(&radeon_connector->base.base,
+ 							   rdev->mode_info.audio_property,
+ 							   RADEON_AUDIO_AUTO);
++				radeon_connector->audio = RADEON_AUDIO_AUTO;
+ 			}
+ 			if (connector_type == DRM_MODE_CONNECTOR_DVII) {
+ 				radeon_connector->dac_load_detect = true;
+@@ -2179,6 +2182,7 @@ radeon_add_atom_connector(struct drm_device *dev,
+ 				drm_object_attach_property(&radeon_connector->base.base,
+ 							   rdev->mode_info.audio_property,
+ 							   RADEON_AUDIO_AUTO);
++				radeon_connector->audio = RADEON_AUDIO_AUTO;
+ 			}
+ 			if (ASIC_IS_DCE5(rdev))
+ 				drm_object_attach_property(&radeon_connector->base.base,
+@@ -2231,6 +2235,7 @@ radeon_add_atom_connector(struct drm_device *dev,
+ 				drm_object_attach_property(&radeon_connector->base.base,
+ 							   rdev->mode_info.audio_property,
+ 							   RADEON_AUDIO_AUTO);
++				radeon_connector->audio = RADEON_AUDIO_AUTO;
+ 			}
+ 			if (ASIC_IS_DCE5(rdev))
+ 				drm_object_attach_property(&radeon_connector->base.base,
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index 2d9196a447fd..bfcef4db8138 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -1683,10 +1683,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
+ 	/* setup afmt */
+ 	radeon_afmt_init(rdev);
+ 
+-	if (!list_empty(&rdev->ddev->mode_config.connector_list)) {
+-		radeon_fbdev_init(rdev);
+-		drm_kms_helper_poll_init(rdev->ddev);
+-	}
++	radeon_fbdev_init(rdev);
++	drm_kms_helper_poll_init(rdev->ddev);
+ 
+ 	/* do pm late init */
+ 	ret = radeon_pm_late_init(rdev);
+diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+index 3b0c229d7dcd..db64e0062689 100644
+--- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
++++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+@@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
+ 
+ 	tmp &= AUX_HPD_SEL(0x7);
+ 	tmp |= AUX_HPD_SEL(chan->rec.hpd);
+-	tmp |= AUX_EN | AUX_LS_READ_EN;
++	tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1);
+ 
+ 	WREG32(AUX_CONTROL + aux_offset[instance], tmp);
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
+index df7a1719c841..9d210bbcab50 100644
+--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
++++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
+@@ -525,17 +525,9 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
+ 	drm_mode_set_crtcinfo(adjusted_mode, 0);
+ 	{
+ 	  struct radeon_connector_atom_dig *dig_connector;
+-	  int ret;
+-
+ 	  dig_connector = mst_enc->connector->con_priv;
+-	  ret = radeon_dp_get_dp_link_config(&mst_enc->connector->base,
+-					     dig_connector->dpcd, adjusted_mode->clock,
+-					     &dig_connector->dp_lane_count,
+-					     &dig_connector->dp_clock);
+-	  if (ret) {
+-		  dig_connector->dp_lane_count = 0;
+-		  dig_connector->dp_clock = 0;
+-	  }
++	  dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd);
++	  dig_connector->dp_clock = drm_dp_max_link_rate(dig_connector->dpcd);
+ 	  DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
+ 			dig_connector->dp_lane_count, dig_connector->dp_clock);
+ 	}
+diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
+index d2e628eea53d..d179596334a7 100644
+--- a/drivers/gpu/drm/radeon/radeon_fb.c
++++ b/drivers/gpu/drm/radeon/radeon_fb.c
+@@ -292,7 +292,8 @@ out_unref:
+ 
+ void radeon_fb_output_poll_changed(struct radeon_device *rdev)
+ {
+-	drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
++	if (rdev->mode_info.rfbdev)
++		drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
+ }
+ 
+ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
+@@ -325,6 +326,10 @@ int radeon_fbdev_init(struct radeon_device *rdev)
+ 	int bpp_sel = 32;
+ 	int ret;
+ 
++	/* don't enable fbdev if no connectors */
++	if (list_empty(&rdev->ddev->mode_config.connector_list))
++		return 0;
++
+ 	/* select 8 bpp console on RN50 or 16MB cards */
+ 	if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
+ 		bpp_sel = 8;
+@@ -377,11 +382,15 @@ void radeon_fbdev_fini(struct radeon_device *rdev)
+ 
+ void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
+ {
+-	fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
++	if (rdev->mode_info.rfbdev)
++		fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
+ }
+ 
+ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
+ {
++	if (!rdev->mode_info.rfbdev)
++		return false;
++
+ 	if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj))
+ 		return true;
+ 	return false;
+@@ -389,12 +398,14 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
+ 
+ void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector)
+ {
+-	drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
++	if (rdev->mode_info.rfbdev)
++		drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
+ }
+ 
+ void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector)
+ {
+-	drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
++	if (rdev->mode_info.rfbdev)
++		drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
+ }
+ 
+ void radeon_fbdev_restore_mode(struct radeon_device *rdev)
+diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
+index e06ac546a90f..f342aad79cc6 100644
+--- a/drivers/gpu/drm/radeon/radeon_ttm.c
++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
+@@ -235,6 +235,8 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+ {
+ 	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+ 
++	if (radeon_ttm_tt_has_userptr(bo->ttm))
++		return -EPERM;
+ 	return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index a82b891ae1fe..caa73de584a5 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -2926,9 +2926,12 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
+ 	/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
+ 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
+ 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
++	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 },
+ 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
+ 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
+ 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
++	{ PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
++	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
+ 	{ 0, 0, 0, 0 },
+ };
+ 
+@@ -3008,6 +3011,10 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
+ 		}
+ 		++p;
+ 	}
++	/* limit mclk on all R7 370 parts for stability */
++	if (rdev->pdev->device == 0x6811 &&
++	    rdev->pdev->revision == 0x81)
++		max_mclk = 120000;
+ 
+ 	if (rps->vce_active) {
+ 		rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index 4cbf26555093..e3daafa1be13 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -230,22 +230,13 @@ EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
+ 
+ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
+ {
+-	struct ttm_bo_device *bdev = bo->bdev;
+-	struct ttm_mem_type_manager *man;
++	int put_count = 0;
+ 
+ 	lockdep_assert_held(&bo->resv->lock.base);
+ 
+-	if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
+-		list_del_init(&bo->swap);
+-		list_del_init(&bo->lru);
+-
+-	} else {
+-		if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
+-			list_move_tail(&bo->swap, &bo->glob->swap_lru);
+-
+-		man = &bdev->man[bo->mem.mem_type];
+-		list_move_tail(&bo->lru, &man->lru);
+-	}
++	put_count = ttm_bo_del_from_lru(bo);
++	ttm_bo_list_ref_sub(bo, put_count, true);
++	ttm_bo_add_to_lru(bo);
+ }
+ EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
+ 
+diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
+index 200419d4d43c..18a2acbccb7d 100644
+--- a/drivers/gpu/drm/udl/udl_fb.c
++++ b/drivers/gpu/drm/udl/udl_fb.c
+@@ -538,7 +538,7 @@ static int udlfb_create(struct drm_fb_helper *helper,
+ out_destroy_fbi:
+ 	drm_fb_helper_release_fbi(helper);
+ out_gfree:
+-	drm_gem_object_unreference(&ufbdev->ufb.obj->base);
++	drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
+ out:
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
+index 2a0a784ab6ee..d7528e0d8442 100644
+--- a/drivers/gpu/drm/udl/udl_gem.c
++++ b/drivers/gpu/drm/udl/udl_gem.c
+@@ -52,7 +52,7 @@ udl_gem_create(struct drm_file *file,
+ 		return ret;
+ 	}
+ 
+-	drm_gem_object_unreference(&obj->base);
++	drm_gem_object_unreference_unlocked(&obj->base);
+ 	*handle_p = handle;
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
+index 22278bcfc60e..ac8eafea6361 100644
+--- a/drivers/gpu/drm/vc4/vc4_bo.c
++++ b/drivers/gpu/drm/vc4/vc4_bo.c
+@@ -499,11 +499,12 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
+ 	if (IS_ERR(bo))
+ 		return PTR_ERR(bo);
+ 
+-	ret = copy_from_user(bo->base.vaddr,
++	if (copy_from_user(bo->base.vaddr,
+ 			     (void __user *)(uintptr_t)args->data,
+-			     args->size);
+-	if (ret != 0)
++			     args->size)) {
++		ret = -EFAULT;
+ 		goto fail;
++	}
+ 	/* Clear the rest of the memory from allocating from the BO
+ 	 * cache.
+ 	 */
+diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
+index e00db3f510dd..abb98c77bad2 100644
+--- a/drivers/gpu/ipu-v3/ipu-common.c
++++ b/drivers/gpu/ipu-v3/ipu-common.c
+@@ -1068,7 +1068,6 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
+ 			goto err_register;
+ 		}
+ 
+-		pdev->dev.of_node = of_node;
+ 		pdev->dev.parent = dev;
+ 
+ 		ret = platform_device_add_data(pdev, &reg->pdata,
+@@ -1079,6 +1078,12 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
+ 			platform_device_put(pdev);
+ 			goto err_register;
+ 		}
++
++		/*
++		 * Set of_node only after calling platform_device_add. Otherwise
++		 * the platform:imx-ipuv3-crtc modalias won't be used.
++		 */
++		pdev->dev.of_node = of_node;
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 7e89288b1537..99446ffd71fb 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1891,6 +1891,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DUAL_ACTION) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
+@@ -2615,9 +2616,10 @@ int hid_add_device(struct hid_device *hdev)
+ 	/*
+ 	 * Scan generic devices for group information
+ 	 */
+-	if (hid_ignore_special_drivers ||
+-	    (!hdev->group &&
+-	     !hid_match_id(hdev, hid_have_special_driver))) {
++	if (hid_ignore_special_drivers) {
++		hdev->group = HID_GROUP_GENERIC;
++	} else if (!hdev->group &&
++		   !hid_match_id(hdev, hid_have_special_driver)) {
+ 		ret = hid_scan_report(hdev);
+ 		if (ret)
+ 			hid_warn(hdev, "bad device descriptor (%d)\n", ret);
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index b6ff6e78ac54..14c14c82795c 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -255,6 +255,7 @@
+ #define USB_DEVICE_ID_CORSAIR_K90	0x1b02
+ 
+ #define USB_VENDOR_ID_CREATIVELABS	0x041e
++#define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51	0x322c
+ #define USB_DEVICE_ID_PRODIKEYS_PCMIDI	0x2801
+ 
+ #define USB_VENDOR_ID_CVTOUCH		0x1ff7
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 296d4991560e..a20fc604ffd8 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -396,6 +396,11 @@ static void mt_feature_mapping(struct hid_device *hdev,
+ 			td->is_buttonpad = true;
+ 
+ 		break;
++	case 0xff0000c5:
++		/* Retrieve the Win8 blob once to enable some devices */
++		if (usage->usage_index == 0)
++			mt_get_feature(hdev, field->report);
++		break;
+ 	}
+ }
+ 
+diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
+index b9216938a718..bb897497f008 100644
+--- a/drivers/hid/i2c-hid/i2c-hid.c
++++ b/drivers/hid/i2c-hid/i2c-hid.c
+@@ -283,17 +283,21 @@ static int i2c_hid_set_or_send_report(struct i2c_client *client, u8 reportType,
+ 	u16 dataRegister = le16_to_cpu(ihid->hdesc.wDataRegister);
+ 	u16 outputRegister = le16_to_cpu(ihid->hdesc.wOutputRegister);
+ 	u16 maxOutputLength = le16_to_cpu(ihid->hdesc.wMaxOutputLength);
++	u16 size;
++	int args_len;
++	int index = 0;
++
++	i2c_hid_dbg(ihid, "%s\n", __func__);
++
++	if (data_len > ihid->bufsize)
++		return -EINVAL;
+ 
+-	/* hid_hw_* already checked that data_len < HID_MAX_BUFFER_SIZE */
+-	u16 size =	2			/* size */ +
++	size =		2			/* size */ +
+ 			(reportID ? 1 : 0)	/* reportID */ +
+ 			data_len		/* buf */;
+-	int args_len =	(reportID >= 0x0F ? 1 : 0) /* optional third byte */ +
++	args_len =	(reportID >= 0x0F ? 1 : 0) /* optional third byte */ +
+ 			2			/* dataRegister */ +
+ 			size			/* args */;
+-	int index = 0;
+-
+-	i2c_hid_dbg(ihid, "%s\n", __func__);
+ 
+ 	if (!use_data && maxOutputLength == 0)
+ 		return -ENOSYS;
+diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
+index ad71160b9ea4..ae83af649a60 100644
+--- a/drivers/hid/usbhid/hid-core.c
++++ b/drivers/hid/usbhid/hid-core.c
+@@ -951,14 +951,6 @@ static int usbhid_output_report(struct hid_device *hid, __u8 *buf, size_t count)
+ 	return ret;
+ }
+ 
+-static void usbhid_restart_queues(struct usbhid_device *usbhid)
+-{
+-	if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
+-		usbhid_restart_out_queue(usbhid);
+-	if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
+-		usbhid_restart_ctrl_queue(usbhid);
+-}
+-
+ static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid)
+ {
+ 	struct usbhid_device *usbhid = hid->driver_data;
+@@ -1404,6 +1396,37 @@ static void hid_cease_io(struct usbhid_device *usbhid)
+ 	usb_kill_urb(usbhid->urbout);
+ }
+ 
++static void hid_restart_io(struct hid_device *hid)
++{
++	struct usbhid_device *usbhid = hid->driver_data;
++	int clear_halt = test_bit(HID_CLEAR_HALT, &usbhid->iofl);
++	int reset_pending = test_bit(HID_RESET_PENDING, &usbhid->iofl);
++
++	spin_lock_irq(&usbhid->lock);
++	clear_bit(HID_SUSPENDED, &usbhid->iofl);
++	usbhid_mark_busy(usbhid);
++
++	if (clear_halt || reset_pending)
++		schedule_work(&usbhid->reset_work);
++	usbhid->retry_delay = 0;
++	spin_unlock_irq(&usbhid->lock);
++
++	if (reset_pending || !test_bit(HID_STARTED, &usbhid->iofl))
++		return;
++
++	if (!clear_halt) {
++		if (hid_start_in(hid) < 0)
++			hid_io_error(hid);
++	}
++
++	spin_lock_irq(&usbhid->lock);
++	if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
++		usbhid_restart_out_queue(usbhid);
++	if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
++		usbhid_restart_ctrl_queue(usbhid);
++	spin_unlock_irq(&usbhid->lock);
++}
++
+ /* Treat USB reset pretty much the same as suspend/resume */
+ static int hid_pre_reset(struct usb_interface *intf)
+ {
+@@ -1453,14 +1476,14 @@ static int hid_post_reset(struct usb_interface *intf)
+ 		return 1;
+ 	}
+ 
++	/* No need to do another reset or clear a halted endpoint */
+ 	spin_lock_irq(&usbhid->lock);
+ 	clear_bit(HID_RESET_PENDING, &usbhid->iofl);
++	clear_bit(HID_CLEAR_HALT, &usbhid->iofl);
+ 	spin_unlock_irq(&usbhid->lock);
+ 	hid_set_idle(dev, intf->cur_altsetting->desc.bInterfaceNumber, 0, 0);
+-	status = hid_start_in(hid);
+-	if (status < 0)
+-		hid_io_error(hid);
+-	usbhid_restart_queues(usbhid);
++
++	hid_restart_io(hid);
+ 
+ 	return 0;
+ }
+@@ -1483,25 +1506,9 @@ void usbhid_put_power(struct hid_device *hid)
+ #ifdef CONFIG_PM
+ static int hid_resume_common(struct hid_device *hid, bool driver_suspended)
+ {
+-	struct usbhid_device *usbhid = hid->driver_data;
+-	int status;
+-
+-	spin_lock_irq(&usbhid->lock);
+-	clear_bit(HID_SUSPENDED, &usbhid->iofl);
+-	usbhid_mark_busy(usbhid);
+-
+-	if (test_bit(HID_CLEAR_HALT, &usbhid->iofl) ||
+-			test_bit(HID_RESET_PENDING, &usbhid->iofl))
+-		schedule_work(&usbhid->reset_work);
+-	usbhid->retry_delay = 0;
+-
+-	usbhid_restart_queues(usbhid);
+-	spin_unlock_irq(&usbhid->lock);
+-
+-	status = hid_start_in(hid);
+-	if (status < 0)
+-		hid_io_error(hid);
++	int status = 0;
+ 
++	hid_restart_io(hid);
+ 	if (driver_suspended && hid->driver && hid->driver->resume)
+ 		status = hid->driver->resume(hid);
+ 	return status;
+@@ -1570,12 +1577,8 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
+ static int hid_resume(struct usb_interface *intf)
+ {
+ 	struct hid_device *hid = usb_get_intfdata (intf);
+-	struct usbhid_device *usbhid = hid->driver_data;
+ 	int status;
+ 
+-	if (!test_bit(HID_STARTED, &usbhid->iofl))
+-		return 0;
+-
+ 	status = hid_resume_common(hid, true);
+ 	dev_dbg(&intf->dev, "resume status %d\n", status);
+ 	return 0;
+@@ -1584,10 +1587,8 @@ static int hid_resume(struct usb_interface *intf)
+ static int hid_reset_resume(struct usb_interface *intf)
+ {
+ 	struct hid_device *hid = usb_get_intfdata(intf);
+-	struct usbhid_device *usbhid = hid->driver_data;
+ 	int status;
+ 
+-	clear_bit(HID_SUSPENDED, &usbhid->iofl);
+ 	status = hid_post_reset(intf);
+ 	if (status >= 0 && hid->driver && hid->driver->reset_resume) {
+ 		int ret = hid->driver->reset_resume(hid);
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index 7dd0953cd70f..dc8e6adf95a4 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -70,6 +70,7 @@ static const struct hid_blacklist {
+ 	{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
++	{ USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
+ 	{ USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 99ef77fcfb80..f71187aad0d0 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -2409,6 +2409,17 @@ void wacom_setup_device_quirks(struct wacom *wacom)
+ 	}
+ 
+ 	/*
++	 * Hack for the Bamboo One:
++	 * the device presents a PAD/Touch interface as most Bamboos and even
++	 * sends ghosts PAD data on it. However, later, we must disable this
++	 * ghost interface, and we can not detect it unless we set it here
++	 * to WACOM_DEVICETYPE_PAD or WACOM_DEVICETYPE_TOUCH.
++	 */
++	if (features->type == BAMBOO_PEN &&
++	    features->pktlen == WACOM_PKGLEN_BBTOUCH3)
++		features->device_type |= WACOM_DEVICETYPE_PAD;
++
++	/*
+ 	 * Raw Wacom-mode pen and touch events both come from interface
+ 	 * 0, whose HID descriptor has an application usage of 0xFF0D
+ 	 * (i.e., WACOM_VENDORDEFINED_PEN). We route pen packets back
+@@ -3367,6 +3378,10 @@ static const struct wacom_features wacom_features_0x33E =
+ 	{ "Wacom Intuos PT M 2", 21600, 13500, 2047, 63,
+ 	  INTUOSHT2, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16,
+ 	  .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
++static const struct wacom_features wacom_features_0x343 =
++	{ "Wacom DTK1651", 34616, 19559, 1023, 0,
++	  DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
++	  WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
+ 
+ static const struct wacom_features wacom_features_HID_ANY_ID =
+ 	{ "Wacom HID", .type = HID_GENERIC };
+@@ -3532,6 +3547,7 @@ const struct hid_device_id wacom_ids[] = {
+ 	{ USB_DEVICE_WACOM(0x33C) },
+ 	{ USB_DEVICE_WACOM(0x33D) },
+ 	{ USB_DEVICE_WACOM(0x33E) },
++	{ USB_DEVICE_WACOM(0x343) },
+ 	{ USB_DEVICE_WACOM(0x4001) },
+ 	{ USB_DEVICE_WACOM(0x4004) },
+ 	{ USB_DEVICE_WACOM(0x5000) },
+diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
+index b53702ce692f..e35560b955b1 100644
+--- a/drivers/hv/ring_buffer.c
++++ b/drivers/hv/ring_buffer.c
+@@ -103,15 +103,29 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
+  *    there is room for the producer to send the pending packet.
+  */
+ 
+-static bool hv_need_to_signal_on_read(u32 prev_write_sz,
+-				      struct hv_ring_buffer_info *rbi)
++static bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
+ {
+ 	u32 cur_write_sz;
+ 	u32 r_size;
+-	u32 write_loc = rbi->ring_buffer->write_index;
++	u32 write_loc;
+ 	u32 read_loc = rbi->ring_buffer->read_index;
+-	u32 pending_sz = rbi->ring_buffer->pending_send_sz;
++	u32 pending_sz;
+ 
++	/*
++	 * Issue a full memory barrier before making the signaling decision.
++	 * Here is the reason for having this barrier:
++	 * If the reading of the pend_sz (in this function)
++	 * were to be reordered and read before we commit the new read
++	 * index (in the calling function)  we could
++	 * have a problem. If the host were to set the pending_sz after we
++	 * have sampled pending_sz and go to sleep before we commit the
++	 * read index, we could miss sending the interrupt. Issue a full
++	 * memory barrier to address this.
++	 */
++	mb();
++
++	pending_sz = rbi->ring_buffer->pending_send_sz;
++	write_loc = rbi->ring_buffer->write_index;
+ 	/* If the other end is not blocked on write don't bother. */
+ 	if (pending_sz == 0)
+ 		return false;
+@@ -120,7 +134,7 @@ static bool hv_need_to_signal_on_read(u32 prev_write_sz,
+ 	cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
+ 			read_loc - write_loc;
+ 
+-	if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
++	if (cur_write_sz >= pending_sz)
+ 		return true;
+ 
+ 	return false;
+@@ -458,7 +472,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
+ 	/* Update the read index */
+ 	hv_set_next_read_location(inring_info, next_read_location);
+ 
+-	*signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info);
++	*signal = hv_need_to_signal_on_read(inring_info);
+ 
+ out_unlock:
+ 	spin_unlock_irqrestore(&inring_info->ring_lock, flags);
+diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c
+index 36544c4f653c..303d0c9df907 100644
+--- a/drivers/hwmon/max1111.c
++++ b/drivers/hwmon/max1111.c
+@@ -85,6 +85,9 @@ static struct max1111_data *the_max1111;
+ 
+ int max1111_read_channel(int channel)
+ {
++	if (!the_max1111 || !the_max1111->spi)
++		return -ENODEV;
++
+ 	return max1111_read(&the_max1111->spi->dev, channel);
+ }
+ EXPORT_SYMBOL(max1111_read_channel);
+@@ -258,6 +261,9 @@ static int max1111_remove(struct spi_device *spi)
+ {
+ 	struct max1111_data *data = spi_get_drvdata(spi);
+ 
++#ifdef CONFIG_SHARPSL_PM
++	the_max1111 = NULL;
++#endif
+ 	hwmon_device_unregister(data->hwmon_dev);
+ 	sysfs_remove_group(&spi->dev.kobj, &max1110_attr_group);
+ 	sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
+diff --git a/drivers/hwtracing/stm/Kconfig b/drivers/hwtracing/stm/Kconfig
+index 83e9f591a54b..e7a348807f0c 100644
+--- a/drivers/hwtracing/stm/Kconfig
++++ b/drivers/hwtracing/stm/Kconfig
+@@ -1,6 +1,7 @@
+ config STM
+ 	tristate "System Trace Module devices"
+ 	select CONFIGFS_FS
++	select SRCU
+ 	help
+ 	  A System Trace Module (STM) is a device exporting data in System
+ 	  Trace Protocol (STP) format as defined by MIPI STP standards.
+diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
+index 714bdc837769..b167ab25310a 100644
+--- a/drivers/i2c/busses/i2c-cpm.c
++++ b/drivers/i2c/busses/i2c-cpm.c
+@@ -116,8 +116,8 @@ struct cpm_i2c {
+ 	cbd_t __iomem *rbase;
+ 	u_char *txbuf[CPM_MAXBD];
+ 	u_char *rxbuf[CPM_MAXBD];
+-	u32 txdma[CPM_MAXBD];
+-	u32 rxdma[CPM_MAXBD];
++	dma_addr_t txdma[CPM_MAXBD];
++	dma_addr_t rxdma[CPM_MAXBD];
+ };
+ 
+ static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id)
+diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
+index b29c7500461a..f54ece8fce78 100644
+--- a/drivers/i2c/busses/i2c-exynos5.c
++++ b/drivers/i2c/busses/i2c-exynos5.c
+@@ -671,7 +671,9 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
+ 		return -EIO;
+ 	}
+ 
+-	clk_prepare_enable(i2c->clk);
++	ret = clk_enable(i2c->clk);
++	if (ret)
++		return ret;
+ 
+ 	for (i = 0; i < num; i++, msgs++) {
+ 		stop = (i == num - 1);
+@@ -695,7 +697,7 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
+ 	}
+ 
+  out:
+-	clk_disable_unprepare(i2c->clk);
++	clk_disable(i2c->clk);
+ 	return ret;
+ }
+ 
+@@ -747,7 +749,9 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
+ 		return -ENOENT;
+ 	}
+ 
+-	clk_prepare_enable(i2c->clk);
++	ret = clk_prepare_enable(i2c->clk);
++	if (ret)
++		return ret;
+ 
+ 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
+@@ -799,6 +803,10 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
+ 
+ 	platform_set_drvdata(pdev, i2c);
+ 
++	clk_disable(i2c->clk);
++
++	return 0;
++
+  err_clk:
+ 	clk_disable_unprepare(i2c->clk);
+ 	return ret;
+@@ -810,6 +818,8 @@ static int exynos5_i2c_remove(struct platform_device *pdev)
+ 
+ 	i2c_del_adapter(&i2c->adap);
+ 
++	clk_unprepare(i2c->clk);
++
+ 	return 0;
+ }
+ 
+@@ -821,6 +831,8 @@ static int exynos5_i2c_suspend_noirq(struct device *dev)
+ 
+ 	i2c->suspended = 1;
+ 
++	clk_unprepare(i2c->clk);
++
+ 	return 0;
+ }
+ 
+@@ -830,7 +842,9 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
+ 	struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
+ 	int ret = 0;
+ 
+-	clk_prepare_enable(i2c->clk);
++	ret = clk_prepare_enable(i2c->clk);
++	if (ret)
++		return ret;
+ 
+ 	ret = exynos5_hsi2c_clock_setup(i2c);
+ 	if (ret) {
+@@ -839,7 +853,7 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
+ 	}
+ 
+ 	exynos5_i2c_init(i2c);
+-	clk_disable_unprepare(i2c->clk);
++	clk_disable(i2c->clk);
+ 	i2c->suspended = 0;
+ 
+ 	return 0;
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index cd4510a63375..146eed70bdf4 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -65,7 +65,7 @@
+ #include <asm/mwait.h>
+ #include <asm/msr.h>
+ 
+-#define INTEL_IDLE_VERSION "0.4"
++#define INTEL_IDLE_VERSION "0.4.1"
+ #define PREFIX "intel_idle: "
+ 
+ static struct cpuidle_driver intel_idle_driver = {
+@@ -994,36 +994,92 @@ static void intel_idle_cpuidle_devices_uninit(void)
+ }
+ 
+ /*
+- * intel_idle_state_table_update()
+- *
+- * Update the default state_table for this CPU-id
++ * ivt_idle_state_table_update(void)
+  *
+- * Currently used to access tuned IVT multi-socket targets
++ * Tune IVT multi-socket targets
+  * Assumption: num_sockets == (max_package_num + 1)
+  */
+-void intel_idle_state_table_update(void)
++static void ivt_idle_state_table_update(void)
+ {
+ 	/* IVT uses a different table for 1-2, 3-4, and > 4 sockets */
+-	if (boot_cpu_data.x86_model == 0x3e) { /* IVT */
+-		int cpu, package_num, num_sockets = 1;
+-
+-		for_each_online_cpu(cpu) {
+-			package_num = topology_physical_package_id(cpu);
+-			if (package_num + 1 > num_sockets) {
+-				num_sockets = package_num + 1;
+-
+-				if (num_sockets > 4) {
+-					cpuidle_state_table = ivt_cstates_8s;
+-					return;
+-				}
++	int cpu, package_num, num_sockets = 1;
++
++	for_each_online_cpu(cpu) {
++		package_num = topology_physical_package_id(cpu);
++		if (package_num + 1 > num_sockets) {
++			num_sockets = package_num + 1;
++
++			if (num_sockets > 4) {
++				cpuidle_state_table = ivt_cstates_8s;
++				return;
+ 			}
+ 		}
++	}
++
++	if (num_sockets > 2)
++		cpuidle_state_table = ivt_cstates_4s;
++
++	/* else, 1 and 2 socket systems use default ivt_cstates */
++}
++/*
++ * sklh_idle_state_table_update(void)
++ *
++ * On SKL-H (model 0x5e) disable C8 and C9 if:
++ * C10 is enabled and SGX disabled
++ */
++static void sklh_idle_state_table_update(void)
++{
++	unsigned long long msr;
++	unsigned int eax, ebx, ecx, edx;
++
++
++	/* if PC10 disabled via cmdline intel_idle.max_cstate=7 or shallower */
++	if (max_cstate <= 7)
++		return;
++
++	/* if PC10 not present in CPUID.MWAIT.EDX */
++	if ((mwait_substates & (0xF << 28)) == 0)
++		return;
++
++	rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr);
++
++	/* PC10 is not enabled in PKG C-state limit */
++	if ((msr & 0xF) != 8)
++		return;
++
++	ecx = 0;
++	cpuid(7, &eax, &ebx, &ecx, &edx);
++
++	/* if SGX is present */
++	if (ebx & (1 << 2)) {
+ 
+-		if (num_sockets > 2)
+-			cpuidle_state_table = ivt_cstates_4s;
+-		/* else, 1 and 2 socket systems use default ivt_cstates */
++		rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
++
++		/* if SGX is enabled */
++		if (msr & (1 << 18))
++			return;
++	}
++
++	skl_cstates[5].disabled = 1;	/* C8-SKL */
++	skl_cstates[6].disabled = 1;	/* C9-SKL */
++}
++/*
++ * intel_idle_state_table_update()
++ *
++ * Update the default state_table for this CPU-id
++ */
++
++static void intel_idle_state_table_update(void)
++{
++	switch (boot_cpu_data.x86_model) {
++
++	case 0x3e: /* IVT */
++		ivt_idle_state_table_update();
++		break;
++	case 0x5e: /* SKL-H */
++		sklh_idle_state_table_update();
++		break;
+ 	}
+-	return;
+ }
+ 
+ /*
+@@ -1063,6 +1119,14 @@ static int __init intel_idle_cpuidle_driver_init(void)
+ 		if (num_substates == 0)
+ 			continue;
+ 
++		/* if state marked as disabled, skip it */
++		if (cpuidle_state_table[cstate].disabled != 0) {
++			pr_debug(PREFIX "state %s is disabled",
++				cpuidle_state_table[cstate].name);
++			continue;
++		}
++
++
+ 		if (((mwait_cstate + 1) > 2) &&
+ 			!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+ 			mark_tsc_unstable("TSC halts in idle"
+diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
+index c73331f7782b..2072a31e813b 100644
+--- a/drivers/iio/accel/bmc150-accel-core.c
++++ b/drivers/iio/accel/bmc150-accel-core.c
+@@ -547,7 +547,7 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
+ {
+ 	int ret;
+ 	int axis = chan->scan_index;
+-	unsigned int raw_val;
++	__le16 raw_val;
+ 
+ 	mutex_lock(&data->mutex);
+ 	ret = bmc150_accel_set_power_state(data, true);
+@@ -557,14 +557,14 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
+ 	}
+ 
+ 	ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_AXIS_TO_REG(axis),
+-			       &raw_val, 2);
++			       &raw_val, sizeof(raw_val));
+ 	if (ret < 0) {
+ 		dev_err(data->dev, "Error reading axis %d\n", axis);
+ 		bmc150_accel_set_power_state(data, false);
+ 		mutex_unlock(&data->mutex);
+ 		return ret;
+ 	}
+-	*val = sign_extend32(raw_val >> chan->scan_type.shift,
++	*val = sign_extend32(le16_to_cpu(raw_val) >> chan->scan_type.shift,
+ 			     chan->scan_type.realbits - 1);
+ 	ret = bmc150_accel_set_power_state(data, false);
+ 	mutex_unlock(&data->mutex);
+@@ -988,6 +988,7 @@ static const struct iio_event_spec bmc150_accel_event = {
+ 		.realbits = (bits),					\
+ 		.storagebits = 16,					\
+ 		.shift = 16 - (bits),					\
++		.endianness = IIO_LE,					\
+ 	},								\
+ 	.event_spec = &bmc150_accel_event,				\
+ 	.num_event_specs = 1						\
+diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
+index bbce3b09ac45..4dac567e75b4 100644
+--- a/drivers/iio/gyro/bmg160_core.c
++++ b/drivers/iio/gyro/bmg160_core.c
+@@ -452,7 +452,7 @@ static int bmg160_get_temp(struct bmg160_data *data, int *val)
+ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
+ {
+ 	int ret;
+-	unsigned int raw_val;
++	__le16 raw_val;
+ 
+ 	mutex_lock(&data->mutex);
+ 	ret = bmg160_set_power_state(data, true);
+@@ -462,7 +462,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
+ 	}
+ 
+ 	ret = regmap_bulk_read(data->regmap, BMG160_AXIS_TO_REG(axis), &raw_val,
+-			       2);
++			       sizeof(raw_val));
+ 	if (ret < 0) {
+ 		dev_err(data->dev, "Error reading axis %d\n", axis);
+ 		bmg160_set_power_state(data, false);
+@@ -470,7 +470,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
+ 		return ret;
+ 	}
+ 
+-	*val = sign_extend32(raw_val, 15);
++	*val = sign_extend32(le16_to_cpu(raw_val), 15);
+ 	ret = bmg160_set_power_state(data, false);
+ 	mutex_unlock(&data->mutex);
+ 	if (ret < 0)
+@@ -733,6 +733,7 @@ static const struct iio_event_spec bmg160_event = {
+ 		.sign = 's',						\
+ 		.realbits = 16,					\
+ 		.storagebits = 16,					\
++		.endianness = IIO_LE,					\
+ 	},								\
+ 	.event_spec = &bmg160_event,					\
+ 	.num_event_specs = 1						\
+@@ -780,7 +781,7 @@ static irqreturn_t bmg160_trigger_handler(int irq, void *p)
+ 			mutex_unlock(&data->mutex);
+ 			goto err;
+ 		}
+-		data->buffer[i++] = ret;
++		data->buffer[i++] = val;
+ 	}
+ 	mutex_unlock(&data->mutex);
+ 
+diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
+index 139ae916225f..5b6abc541e8c 100644
+--- a/drivers/iio/industrialio-buffer.c
++++ b/drivers/iio/industrialio-buffer.c
+@@ -645,6 +645,7 @@ static int iio_verify_update(struct iio_dev *indio_dev,
+ 	unsigned int modes;
+ 
+ 	memset(config, 0, sizeof(*config));
++	config->watermark = ~0;
+ 
+ 	/*
+ 	 * If there is just one buffer and we are removing it there is nothing
+diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
+index b13936dacc78..f2a7f72f7aa6 100644
+--- a/drivers/iio/magnetometer/ak8975.c
++++ b/drivers/iio/magnetometer/ak8975.c
+@@ -462,6 +462,8 @@ static int ak8975_setup_irq(struct ak8975_data *data)
+ 	int rc;
+ 	int irq;
+ 
++	init_waitqueue_head(&data->data_ready_queue);
++	clear_bit(0, &data->flags);
+ 	if (client->irq)
+ 		irq = client->irq;
+ 	else
+@@ -477,8 +479,6 @@ static int ak8975_setup_irq(struct ak8975_data *data)
+ 		return rc;
+ 	}
+ 
+-	init_waitqueue_head(&data->data_ready_queue);
+-	clear_bit(0, &data->flags);
+ 	data->eoc_irq = irq;
+ 
+ 	return rc;
+@@ -732,7 +732,7 @@ static int ak8975_probe(struct i2c_client *client,
+ 	int eoc_gpio;
+ 	int err;
+ 	const char *name = NULL;
+-	enum asahi_compass_chipset chipset;
++	enum asahi_compass_chipset chipset = AK_MAX_TYPE;
+ 
+ 	/* Grab and set up the supplied GPIO. */
+ 	if (client->dev.platform_data)
+diff --git a/drivers/iio/magnetometer/st_magn.h b/drivers/iio/magnetometer/st_magn.h
+index 06a4d9c35581..9daca4681922 100644
+--- a/drivers/iio/magnetometer/st_magn.h
++++ b/drivers/iio/magnetometer/st_magn.h
+@@ -44,6 +44,7 @@ static inline int st_magn_allocate_ring(struct iio_dev *indio_dev)
+ static inline void st_magn_deallocate_ring(struct iio_dev *indio_dev)
+ {
+ }
++#define ST_MAGN_TRIGGER_SET_STATE NULL
+ #endif /* CONFIG_IIO_BUFFER */
+ 
+ #endif /* ST_MAGN_H */
+diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
+index 53343ffbff7a..1b109b2a235e 100644
+--- a/drivers/infiniband/core/cache.c
++++ b/drivers/infiniband/core/cache.c
+@@ -691,7 +691,8 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
+ 			      NULL);
+ 
+ 		/* Coudn't find default GID location */
+-		WARN_ON(ix < 0);
++		if (WARN_ON(ix < 0))
++			goto release;
+ 
+ 		zattr_type.gid_type = gid_type;
+ 
+diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
+index 6b4e8a008bc0..564adf3116e8 100644
+--- a/drivers/infiniband/core/ucm.c
++++ b/drivers/infiniband/core/ucm.c
+@@ -48,6 +48,7 @@
+ 
+ #include <asm/uaccess.h>
+ 
++#include <rdma/ib.h>
+ #include <rdma/ib_cm.h>
+ #include <rdma/ib_user_cm.h>
+ #include <rdma/ib_marshall.h>
+@@ -1103,6 +1104,9 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
+ 	struct ib_ucm_cmd_hdr hdr;
+ 	ssize_t result;
+ 
++	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
++		return -EACCES;
++
+ 	if (len < sizeof(hdr))
+ 		return -EINVAL;
+ 
+diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
+index 8b5a934e1133..886f61ea6cc7 100644
+--- a/drivers/infiniband/core/ucma.c
++++ b/drivers/infiniband/core/ucma.c
+@@ -1574,6 +1574,9 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
+ 	struct rdma_ucm_cmd_hdr hdr;
+ 	ssize_t ret;
+ 
++	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
++		return -EACCES;
++
+ 	if (len < sizeof(hdr))
+ 		return -EINVAL;
+ 
+diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
+index 39680aed99dd..d3fb8aa46c59 100644
+--- a/drivers/infiniband/core/uverbs_main.c
++++ b/drivers/infiniband/core/uverbs_main.c
+@@ -48,6 +48,8 @@
+ 
+ #include <asm/uaccess.h>
+ 
++#include <rdma/ib.h>
++
+ #include "uverbs.h"
+ 
+ MODULE_AUTHOR("Roland Dreier");
+@@ -693,6 +695,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
+ 	int srcu_key;
+ 	ssize_t ret;
+ 
++	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
++		return -EACCES;
++
+ 	if (count < sizeof hdr)
+ 		return -EINVAL;
+ 
+diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
+index cf21df4a8bf5..4e94cff5ba71 100644
+--- a/drivers/infiniband/hw/cxgb4/cq.c
++++ b/drivers/infiniband/hw/cxgb4/cq.c
+@@ -162,7 +162,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
+ 	cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
+ 				      &cq->bar2_qid,
+ 				      user ? &cq->bar2_pa : NULL);
+-	if (user && !cq->bar2_va) {
++	if (user && !cq->bar2_pa) {
+ 		pr_warn(MOD "%s: cqid %u not in BAR2 range.\n",
+ 			pci_name(rdev->lldi.pdev), cq->cqid);
+ 		ret = -EINVAL;
+diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
+index e99345eb875a..8ff690bf09d9 100644
+--- a/drivers/infiniband/hw/cxgb4/qp.c
++++ b/drivers/infiniband/hw/cxgb4/qp.c
+@@ -185,6 +185,10 @@ void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
+ 
+ 	if (pbar2_pa)
+ 		*pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;
++
++	if (is_t4(rdev->lldi.adapter_type))
++		return NULL;
++
+ 	return rdev->bar2_kva + bar2_qoffset;
+ }
+ 
+@@ -270,7 +274,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
+ 	/*
+ 	 * User mode must have bar2 access.
+ 	 */
+-	if (user && (!wq->sq.bar2_va || !wq->rq.bar2_va)) {
++	if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) {
+ 		pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n",
+ 			pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
+ 		goto free_dma;
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 03c418ccbc98..eaed31d04468 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -517,7 +517,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
+ 		     sizeof(struct mlx5_wqe_ctrl_seg)) /
+ 		     sizeof(struct mlx5_wqe_data_seg);
+ 	props->max_sge = min(max_rq_sg, max_sq_sg);
+-	props->max_sge_rd = props->max_sge;
++	props->max_sge_rd	   = MLX5_MAX_SGE_RD;
+ 	props->max_cq		   = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
+ 	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
+ 	props->max_mr		   = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
+@@ -654,8 +654,8 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
+ 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ 	struct mlx5_core_dev *mdev = dev->mdev;
+ 	struct mlx5_hca_vport_context *rep;
+-	int max_mtu;
+-	int oper_mtu;
++	u16 max_mtu;
++	u16 oper_mtu;
+ 	int err;
+ 	u8 ib_link_width_oper;
+ 	u8 vl_hw_cap;
+diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
+index e449e394963f..24f4a782e0f4 100644
+--- a/drivers/infiniband/hw/qib/qib_file_ops.c
++++ b/drivers/infiniband/hw/qib/qib_file_ops.c
+@@ -45,6 +45,8 @@
+ #include <linux/export.h>
+ #include <linux/uio.h>
+ 
++#include <rdma/ib.h>
++
+ #include "qib.h"
+ #include "qib_common.h"
+ #include "qib_user_sdma.h"
+@@ -2067,6 +2069,9 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
+ 	ssize_t ret = 0;
+ 	void *dest;
+ 
++	if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
++		return -EACCES;
++
+ 	if (count < sizeof(cmd.type)) {
+ 		ret = -EINVAL;
+ 		goto bail;
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index f121e6129339..0e1a802c3618 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -65,6 +65,7 @@ isert_rdma_accept(struct isert_conn *isert_conn);
+ struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
+ 
+ static void isert_release_work(struct work_struct *work);
++static void isert_wait4flush(struct isert_conn *isert_conn);
+ 
+ static inline bool
+ isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
+@@ -820,12 +821,31 @@ isert_put_conn(struct isert_conn *isert_conn)
+ 	kref_put(&isert_conn->kref, isert_release_kref);
+ }
+ 
++static void
++isert_handle_unbound_conn(struct isert_conn *isert_conn)
++{
++	struct isert_np *isert_np = isert_conn->cm_id->context;
++
++	mutex_lock(&isert_np->mutex);
++	if (!list_empty(&isert_conn->node)) {
++		/*
++		 * This means iscsi doesn't know this connection
++		 * so schedule a cleanup ourselves
++		 */
++		list_del_init(&isert_conn->node);
++		isert_put_conn(isert_conn);
++		complete(&isert_conn->wait);
++		queue_work(isert_release_wq, &isert_conn->release_work);
++	}
++	mutex_unlock(&isert_np->mutex);
++}
++
+ /**
+  * isert_conn_terminate() - Initiate connection termination
+  * @isert_conn: isert connection struct
+  *
+  * Notes:
+- * In case the connection state is FULL_FEATURE, move state
++ * In case the connection state is BOUND, move state
+  * to TEMINATING and start teardown sequence (rdma_disconnect).
+  * In case the connection state is UP, complete flush as well.
+  *
+@@ -837,23 +857,19 @@ isert_conn_terminate(struct isert_conn *isert_conn)
+ {
+ 	int err;
+ 
+-	switch (isert_conn->state) {
+-	case ISER_CONN_TERMINATING:
+-		break;
+-	case ISER_CONN_UP:
+-	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
+-		isert_info("Terminating conn %p state %d\n",
+-			   isert_conn, isert_conn->state);
+-		isert_conn->state = ISER_CONN_TERMINATING;
+-		err = rdma_disconnect(isert_conn->cm_id);
+-		if (err)
+-			isert_warn("Failed rdma_disconnect isert_conn %p\n",
+-				   isert_conn);
+-		break;
+-	default:
+-		isert_warn("conn %p teminating in state %d\n",
+-			   isert_conn, isert_conn->state);
+-	}
++	if (isert_conn->state >= ISER_CONN_TERMINATING)
++		return;
++
++	isert_info("Terminating conn %p state %d\n",
++		   isert_conn, isert_conn->state);
++	isert_conn->state = ISER_CONN_TERMINATING;
++	err = rdma_disconnect(isert_conn->cm_id);
++	if (err)
++		isert_warn("Failed rdma_disconnect isert_conn %p\n",
++			   isert_conn);
++
++	isert_info("conn %p completing wait\n", isert_conn);
++	complete(&isert_conn->wait);
+ }
+ 
+ static int
+@@ -887,35 +903,27 @@ static int
+ isert_disconnected_handler(struct rdma_cm_id *cma_id,
+ 			   enum rdma_cm_event_type event)
+ {
+-	struct isert_np *isert_np = cma_id->context;
+-	struct isert_conn *isert_conn;
+-	bool terminating = false;
+-
+-	if (isert_np->cm_id == cma_id)
+-		return isert_np_cma_handler(cma_id->context, event);
+-
+-	isert_conn = cma_id->qp->qp_context;
++	struct isert_conn *isert_conn = cma_id->qp->qp_context;
+ 
+ 	mutex_lock(&isert_conn->mutex);
+-	terminating = (isert_conn->state == ISER_CONN_TERMINATING);
+-	isert_conn_terminate(isert_conn);
+-	mutex_unlock(&isert_conn->mutex);
+-
+-	isert_info("conn %p completing wait\n", isert_conn);
+-	complete(&isert_conn->wait);
+-
+-	if (terminating)
+-		goto out;
+-
+-	mutex_lock(&isert_np->mutex);
+-	if (!list_empty(&isert_conn->node)) {
+-		list_del_init(&isert_conn->node);
+-		isert_put_conn(isert_conn);
+-		queue_work(isert_release_wq, &isert_conn->release_work);
++	switch (isert_conn->state) {
++	case ISER_CONN_TERMINATING:
++		break;
++	case ISER_CONN_UP:
++		isert_conn_terminate(isert_conn);
++		isert_wait4flush(isert_conn);
++		isert_handle_unbound_conn(isert_conn);
++		break;
++	case ISER_CONN_BOUND:
++	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
++		iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
++		break;
++	default:
++		isert_warn("conn %p teminating in state %d\n",
++			   isert_conn, isert_conn->state);
+ 	}
+-	mutex_unlock(&isert_np->mutex);
++	mutex_unlock(&isert_conn->mutex);
+ 
+-out:
+ 	return 0;
+ }
+ 
+@@ -934,12 +942,16 @@ isert_connect_error(struct rdma_cm_id *cma_id)
+ static int
+ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ {
++	struct isert_np *isert_np = cma_id->context;
+ 	int ret = 0;
+ 
+ 	isert_info("%s (%d): status %d id %p np %p\n",
+ 		   rdma_event_msg(event->event), event->event,
+ 		   event->status, cma_id, cma_id->context);
+ 
++	if (isert_np->cm_id == cma_id)
++		return isert_np_cma_handler(cma_id->context, event->event);
++
+ 	switch (event->event) {
+ 	case RDMA_CM_EVENT_CONNECT_REQUEST:
+ 		ret = isert_connect_request(cma_id, event);
+@@ -985,13 +997,10 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
+ 	rx_wr--;
+ 	rx_wr->next = NULL; /* mark end of work requests list */
+ 
+-	isert_conn->post_recv_buf_count += count;
+ 	ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
+ 			   &rx_wr_failed);
+-	if (ret) {
++	if (ret)
+ 		isert_err("ib_post_recv() failed with ret: %d\n", ret);
+-		isert_conn->post_recv_buf_count -= count;
+-	}
+ 
+ 	return ret;
+ }
+@@ -1007,12 +1016,9 @@ isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
+ 	rx_wr.num_sge = 1;
+ 	rx_wr.next = NULL;
+ 
+-	isert_conn->post_recv_buf_count++;
+ 	ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed);
+-	if (ret) {
++	if (ret)
+ 		isert_err("ib_post_recv() failed with ret: %d\n", ret);
+-		isert_conn->post_recv_buf_count--;
+-	}
+ 
+ 	return ret;
+ }
+@@ -1132,12 +1138,9 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn)
+ 	rx_wr.sg_list = &sge;
+ 	rx_wr.num_sge = 1;
+ 
+-	isert_conn->post_recv_buf_count++;
+ 	ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
+-	if (ret) {
++	if (ret)
+ 		isert_err("ib_post_recv() failed: %d\n", ret);
+-		isert_conn->post_recv_buf_count--;
+-	}
+ 
+ 	return ret;
+ }
+@@ -1633,7 +1636,6 @@ isert_rcv_completion(struct iser_rx_desc *desc,
+ 	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
+ 				      DMA_FROM_DEVICE);
+ 
+-	isert_conn->post_recv_buf_count--;
+ }
+ 
+ static int
+@@ -2048,7 +2050,8 @@ is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
+ 	void *start = isert_conn->rx_descs;
+ 	int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->rx_descs);
+ 
+-	if (wr_id >= start && wr_id < start + len)
++	if ((wr_id >= start && wr_id < start + len) ||
++	    (wr_id == isert_conn->login_req_buf))
+ 		return false;
+ 
+ 	return true;
+@@ -2072,10 +2075,6 @@ isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
+ 			isert_unmap_tx_desc(desc, ib_dev);
+ 		else
+ 			isert_completion_put(desc, isert_cmd, ib_dev, true);
+-	} else {
+-		isert_conn->post_recv_buf_count--;
+-		if (!isert_conn->post_recv_buf_count)
+-			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+ 	}
+ }
+ 
+@@ -3214,6 +3213,7 @@ accept_wait:
+ 
+ 	conn->context = isert_conn;
+ 	isert_conn->conn = conn;
++	isert_conn->state = ISER_CONN_BOUND;
+ 
+ 	isert_set_conn_info(np, conn, isert_conn);
+ 
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
+index 8d50453eef66..1aa019ab9d78 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.h
++++ b/drivers/infiniband/ulp/isert/ib_isert.h
+@@ -84,6 +84,7 @@ enum iser_ib_op_code {
+ enum iser_conn_state {
+ 	ISER_CONN_INIT,
+ 	ISER_CONN_UP,
++	ISER_CONN_BOUND,
+ 	ISER_CONN_FULL_FEATURE,
+ 	ISER_CONN_TERMINATING,
+ 	ISER_CONN_DOWN,
+@@ -179,7 +180,6 @@ struct isert_device;
+ 
+ struct isert_conn {
+ 	enum iser_conn_state	state;
+-	int			post_recv_buf_count;
+ 	u32			responder_resources;
+ 	u32			initiator_depth;
+ 	bool			pi_support;
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index 03022f6420d7..a09841abae50 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -1541,7 +1541,7 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
+ 
+ 	if (dev->use_fast_reg) {
+ 		state.sg = idb_sg;
+-		sg_set_buf(idb_sg, req->indirect_desc, idb_len);
++		sg_init_one(idb_sg, req->indirect_desc, idb_len);
+ 		idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
+ #ifdef CONFIG_NEED_SG_DMA_LENGTH
+ 		idb_sg->dma_length = idb_sg->length;	      /* hack^2 */
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index 0c37fee363b1..4328679a67a7 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -1670,47 +1670,6 @@ send_sense:
+ 	return -1;
+ }
+ 
+-/**
+- * srpt_rx_mgmt_fn_tag() - Process a task management function by tag.
+- * @ch: RDMA channel of the task management request.
+- * @fn: Task management function to perform.
+- * @req_tag: Tag of the SRP task management request.
+- * @mgmt_ioctx: I/O context of the task management request.
+- *
+- * Returns zero if the target core will process the task management
+- * request asynchronously.
+- *
+- * Note: It is assumed that the initiator serializes tag-based task management
+- * requests.
+- */
+-static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
+-{
+-	struct srpt_device *sdev;
+-	struct srpt_rdma_ch *ch;
+-	struct srpt_send_ioctx *target;
+-	int ret, i;
+-
+-	ret = -EINVAL;
+-	ch = ioctx->ch;
+-	BUG_ON(!ch);
+-	BUG_ON(!ch->sport);
+-	sdev = ch->sport->sdev;
+-	BUG_ON(!sdev);
+-	spin_lock_irq(&sdev->spinlock);
+-	for (i = 0; i < ch->rq_size; ++i) {
+-		target = ch->ioctx_ring[i];
+-		if (target->cmd.se_lun == ioctx->cmd.se_lun &&
+-		    target->cmd.tag == tag &&
+-		    srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
+-			ret = 0;
+-			/* now let the target core abort &target->cmd; */
+-			break;
+-		}
+-	}
+-	spin_unlock_irq(&sdev->spinlock);
+-	return ret;
+-}
+-
+ static int srp_tmr_to_tcm(int fn)
+ {
+ 	switch (fn) {
+@@ -1745,7 +1704,6 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
+ 	struct se_cmd *cmd;
+ 	struct se_session *sess = ch->sess;
+ 	uint64_t unpacked_lun;
+-	uint32_t tag = 0;
+ 	int tcm_tmr;
+ 	int rc;
+ 
+@@ -1761,25 +1719,10 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
+ 	srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
+ 	send_ioctx->cmd.tag = srp_tsk->tag;
+ 	tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
+-	if (tcm_tmr < 0) {
+-		send_ioctx->cmd.se_tmr_req->response =
+-			TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
+-		goto fail;
+-	}
+ 	unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
+ 				       sizeof(srp_tsk->lun));
+-
+-	if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK) {
+-		rc = srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
+-		if (rc < 0) {
+-			send_ioctx->cmd.se_tmr_req->response =
+-					TMR_TASK_DOES_NOT_EXIST;
+-			goto fail;
+-		}
+-		tag = srp_tsk->task_tag;
+-	}
+ 	rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL, unpacked_lun,
+-				srp_tsk, tcm_tmr, GFP_KERNEL, tag,
++				srp_tsk, tcm_tmr, GFP_KERNEL, srp_tsk->task_tag,
+ 				TARGET_SCF_ACK_KREF);
+ 	if (rc != 0) {
+ 		send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
+diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
+index cfd58e87da26..1c5914cae853 100644
+--- a/drivers/input/misc/ati_remote2.c
++++ b/drivers/input/misc/ati_remote2.c
+@@ -817,26 +817,49 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
+ 
+ 	ar2->udev = udev;
+ 
++	/* Sanity check, first interface must have an endpoint */
++	if (alt->desc.bNumEndpoints < 1 || !alt->endpoint) {
++		dev_err(&interface->dev,
++			"%s(): interface 0 must have an endpoint\n", __func__);
++		r = -ENODEV;
++		goto fail1;
++	}
+ 	ar2->intf[0] = interface;
+ 	ar2->ep[0] = &alt->endpoint[0].desc;
+ 
++	/* Sanity check, the device must have two interfaces */
+ 	ar2->intf[1] = usb_ifnum_to_if(udev, 1);
++	if ((udev->actconfig->desc.bNumInterfaces < 2) || !ar2->intf[1]) {
++		dev_err(&interface->dev, "%s(): need 2 interfaces, found %d\n",
++			__func__, udev->actconfig->desc.bNumInterfaces);
++		r = -ENODEV;
++		goto fail1;
++	}
++
+ 	r = usb_driver_claim_interface(&ati_remote2_driver, ar2->intf[1], ar2);
+ 	if (r)
+ 		goto fail1;
++
++	/* Sanity check, second interface must have an endpoint */
+ 	alt = ar2->intf[1]->cur_altsetting;
++	if (alt->desc.bNumEndpoints < 1 || !alt->endpoint) {
++		dev_err(&interface->dev,
++			"%s(): interface 1 must have an endpoint\n", __func__);
++		r = -ENODEV;
++		goto fail2;
++	}
+ 	ar2->ep[1] = &alt->endpoint[0].desc;
+ 
+ 	r = ati_remote2_urb_init(ar2);
+ 	if (r)
+-		goto fail2;
++		goto fail3;
+ 
+ 	ar2->channel_mask = channel_mask;
+ 	ar2->mode_mask = mode_mask;
+ 
+ 	r = ati_remote2_setup(ar2, ar2->channel_mask);
+ 	if (r)
+-		goto fail2;
++		goto fail3;
+ 
+ 	usb_make_path(udev, ar2->phys, sizeof(ar2->phys));
+ 	strlcat(ar2->phys, "/input0", sizeof(ar2->phys));
+@@ -845,11 +868,11 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
+ 
+ 	r = sysfs_create_group(&udev->dev.kobj, &ati_remote2_attr_group);
+ 	if (r)
+-		goto fail2;
++		goto fail3;
+ 
+ 	r = ati_remote2_input_init(ar2);
+ 	if (r)
+-		goto fail3;
++		goto fail4;
+ 
+ 	usb_set_intfdata(interface, ar2);
+ 
+@@ -857,10 +880,11 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
+ 
+ 	return 0;
+ 
+- fail3:
++ fail4:
+ 	sysfs_remove_group(&udev->dev.kobj, &ati_remote2_attr_group);
+- fail2:
++ fail3:
+ 	ati_remote2_urb_cleanup(ar2);
++ fail2:
+ 	usb_driver_release_interface(&ati_remote2_driver, ar2->intf[1]);
+  fail1:
+ 	kfree(ar2);
+diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
+index ac1fa5f44580..9c0ea36913b4 100644
+--- a/drivers/input/misc/ims-pcu.c
++++ b/drivers/input/misc/ims-pcu.c
+@@ -1663,6 +1663,8 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
+ 
+ 	pcu->ctrl_intf = usb_ifnum_to_if(pcu->udev,
+ 					 union_desc->bMasterInterface0);
++	if (!pcu->ctrl_intf)
++		return -EINVAL;
+ 
+ 	alt = pcu->ctrl_intf->cur_altsetting;
+ 	pcu->ep_ctrl = &alt->endpoint[0].desc;
+@@ -1670,6 +1672,8 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
+ 
+ 	pcu->data_intf = usb_ifnum_to_if(pcu->udev,
+ 					 union_desc->bSlaveInterface0);
++	if (!pcu->data_intf)
++		return -EINVAL;
+ 
+ 	alt = pcu->data_intf->cur_altsetting;
+ 	if (alt->desc.bNumEndpoints != 2) {
+diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
+index a806ba3818f7..8d6326d7e7be 100644
+--- a/drivers/input/misc/max8997_haptic.c
++++ b/drivers/input/misc/max8997_haptic.c
+@@ -255,12 +255,14 @@ static int max8997_haptic_probe(struct platform_device *pdev)
+ 	struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ 	const struct max8997_platform_data *pdata =
+ 					dev_get_platdata(iodev->dev);
+-	const struct max8997_haptic_platform_data *haptic_pdata =
+-					pdata->haptic_pdata;
++	const struct max8997_haptic_platform_data *haptic_pdata = NULL;
+ 	struct max8997_haptic *chip;
+ 	struct input_dev *input_dev;
+ 	int error;
+ 
++	if (pdata)
++		haptic_pdata = pdata->haptic_pdata;
++
+ 	if (!haptic_pdata) {
+ 		dev_err(&pdev->dev, "no haptic platform data\n");
+ 		return -EINVAL;
+diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
+index 3f02e0e03d12..67aab86048ad 100644
+--- a/drivers/input/misc/pmic8xxx-pwrkey.c
++++ b/drivers/input/misc/pmic8xxx-pwrkey.c
+@@ -353,7 +353,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
+ 	if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay))
+ 		kpd_delay = 15625;
+ 
+-	if (kpd_delay > 62500 || kpd_delay == 0) {
++	/* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. */
++	if (kpd_delay > USEC_PER_SEC * 2 || kpd_delay < USEC_PER_SEC / 64) {
+ 		dev_err(&pdev->dev, "invalid power key trigger delay\n");
+ 		return -EINVAL;
+ 	}
+@@ -385,8 +386,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
+ 	pwr->name = "pmic8xxx_pwrkey";
+ 	pwr->phys = "pmic8xxx_pwrkey/input0";
+ 
+-	delay = (kpd_delay << 10) / USEC_PER_SEC;
+-	delay = 1 + ilog2(delay);
++	delay = (kpd_delay << 6) / USEC_PER_SEC;
++	delay = ilog2(delay);
+ 
+ 	err = regmap_read(regmap, PON_CNTL_1, &pon_cntl);
+ 	if (err < 0) {
+diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c
+index 63b539d3daba..84909a12ff36 100644
+--- a/drivers/input/misc/powermate.c
++++ b/drivers/input/misc/powermate.c
+@@ -307,6 +307,9 @@ static int powermate_probe(struct usb_interface *intf, const struct usb_device_i
+ 	int error = -ENOMEM;
+ 
+ 	interface = intf->cur_altsetting;
++	if (interface->desc.bNumEndpoints < 1)
++		return -EINVAL;
++
+ 	endpoint = &interface->endpoint[0].desc;
+ 	if (!usb_endpoint_is_int_in(endpoint))
+ 		return -EIO;
+diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
+index f2261ab54701..18663d4edae5 100644
+--- a/drivers/input/misc/pwm-beeper.c
++++ b/drivers/input/misc/pwm-beeper.c
+@@ -20,21 +20,40 @@
+ #include <linux/platform_device.h>
+ #include <linux/pwm.h>
+ #include <linux/slab.h>
++#include <linux/workqueue.h>
+ 
+ struct pwm_beeper {
+ 	struct input_dev *input;
+ 	struct pwm_device *pwm;
++	struct work_struct work;
+ 	unsigned long period;
+ };
+ 
+ #define HZ_TO_NANOSECONDS(x) (1000000000UL/(x))
+ 
++static void __pwm_beeper_set(struct pwm_beeper *beeper)
++{
++	unsigned long period = beeper->period;
++
++	if (period) {
++		pwm_config(beeper->pwm, period / 2, period);
++		pwm_enable(beeper->pwm);
++	} else
++		pwm_disable(beeper->pwm);
++}
++
++static void pwm_beeper_work(struct work_struct *work)
++{
++	struct pwm_beeper *beeper =
++		container_of(work, struct pwm_beeper, work);
++
++	__pwm_beeper_set(beeper);
++}
++
+ static int pwm_beeper_event(struct input_dev *input,
+ 			    unsigned int type, unsigned int code, int value)
+ {
+-	int ret = 0;
+ 	struct pwm_beeper *beeper = input_get_drvdata(input);
+-	unsigned long period;
+ 
+ 	if (type != EV_SND || value < 0)
+ 		return -EINVAL;
+@@ -49,22 +68,31 @@ static int pwm_beeper_event(struct input_dev *input,
+ 		return -EINVAL;
+ 	}
+ 
+-	if (value == 0) {
+-		pwm_disable(beeper->pwm);
+-	} else {
+-		period = HZ_TO_NANOSECONDS(value);
+-		ret = pwm_config(beeper->pwm, period / 2, period);
+-		if (ret)
+-			return ret;
+-		ret = pwm_enable(beeper->pwm);
+-		if (ret)
+-			return ret;
+-		beeper->period = period;
+-	}
++	if (value == 0)
++		beeper->period = 0;
++	else
++		beeper->period = HZ_TO_NANOSECONDS(value);
++
++	schedule_work(&beeper->work);
+ 
+ 	return 0;
+ }
+ 
++static void pwm_beeper_stop(struct pwm_beeper *beeper)
++{
++	cancel_work_sync(&beeper->work);
++
++	if (beeper->period)
++		pwm_disable(beeper->pwm);
++}
++
++static void pwm_beeper_close(struct input_dev *input)
++{
++	struct pwm_beeper *beeper = input_get_drvdata(input);
++
++	pwm_beeper_stop(beeper);
++}
++
+ static int pwm_beeper_probe(struct platform_device *pdev)
+ {
+ 	unsigned long pwm_id = (unsigned long)dev_get_platdata(&pdev->dev);
+@@ -87,6 +115,8 @@ static int pwm_beeper_probe(struct platform_device *pdev)
+ 		goto err_free;
+ 	}
+ 
++	INIT_WORK(&beeper->work, pwm_beeper_work);
++
+ 	beeper->input = input_allocate_device();
+ 	if (!beeper->input) {
+ 		dev_err(&pdev->dev, "Failed to allocate input device\n");
+@@ -106,6 +136,7 @@ static int pwm_beeper_probe(struct platform_device *pdev)
+ 	beeper->input->sndbit[0] = BIT(SND_TONE) | BIT(SND_BELL);
+ 
+ 	beeper->input->event = pwm_beeper_event;
++	beeper->input->close = pwm_beeper_close;
+ 
+ 	input_set_drvdata(beeper->input, beeper);
+ 
+@@ -135,7 +166,6 @@ static int pwm_beeper_remove(struct platform_device *pdev)
+ 
+ 	input_unregister_device(beeper->input);
+ 
+-	pwm_disable(beeper->pwm);
+ 	pwm_free(beeper->pwm);
+ 
+ 	kfree(beeper);
+@@ -147,8 +177,7 @@ static int __maybe_unused pwm_beeper_suspend(struct device *dev)
+ {
+ 	struct pwm_beeper *beeper = dev_get_drvdata(dev);
+ 
+-	if (beeper->period)
+-		pwm_disable(beeper->pwm);
++	pwm_beeper_stop(beeper);
+ 
+ 	return 0;
+ }
+@@ -157,10 +186,8 @@ static int __maybe_unused pwm_beeper_resume(struct device *dev)
+ {
+ 	struct pwm_beeper *beeper = dev_get_drvdata(dev);
+ 
+-	if (beeper->period) {
+-		pwm_config(beeper->pwm, beeper->period / 2, beeper->period);
+-		pwm_enable(beeper->pwm);
+-	}
++	if (beeper->period)
++		__pwm_beeper_set(beeper);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 6025eb430c0a..a41d8328c064 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -862,8 +862,9 @@ static void synaptics_report_ext_buttons(struct psmouse *psmouse,
+ 	if (!SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap))
+ 		return;
+ 
+-	/* Bug in FW 8.1, buttons are reported only when ExtBit is 1 */
+-	if (SYN_ID_FULL(priv->identity) == 0x801 &&
++	/* Bug in FW 8.1 & 8.2, buttons are reported only when ExtBit is 1 */
++	if ((SYN_ID_FULL(priv->identity) == 0x801 ||
++	     SYN_ID_FULL(priv->identity) == 0x802) &&
+ 	    !((psmouse->packet[0] ^ psmouse->packet[3]) & 0x02))
+ 		return;
+ 
+diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
+index 3a7f3a4a4396..7c18249d6c8e 100644
+--- a/drivers/input/tablet/gtco.c
++++ b/drivers/input/tablet/gtco.c
+@@ -858,6 +858,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
+ 		goto err_free_buf;
+ 	}
+ 
++	/* Sanity check that a device has an endpoint */
++	if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
++		dev_err(&usbinterface->dev,
++			"Invalid number of endpoints\n");
++		error = -EINVAL;
++		goto err_free_urb;
++	}
++
+ 	/*
+ 	 * The endpoint is always altsetting 0, we know this since we know
+ 	 * this device only has one interrupt endpoint
+@@ -879,7 +887,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
+ 	 * HID report descriptor
+ 	 */
+ 	if (usb_get_extra_descriptor(usbinterface->cur_altsetting,
+-				     HID_DEVICE_TYPE, &hid_desc) != 0){
++				     HID_DEVICE_TYPE, &hid_desc) != 0) {
+ 		dev_err(&usbinterface->dev,
+ 			"Can't retrieve exta USB descriptor to get hid report descriptor length\n");
+ 		error = -EIO;
+diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
+index 9bbadaaf6bc3..7b3845aa5983 100644
+--- a/drivers/input/touchscreen/zforce_ts.c
++++ b/drivers/input/touchscreen/zforce_ts.c
+@@ -370,8 +370,8 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
+ 			point.coord_x = point.coord_y = 0;
+ 		}
+ 
+-		point.state = payload[9 * i + 5] & 0x03;
+-		point.id = (payload[9 * i + 5] & 0xfc) >> 2;
++		point.state = payload[9 * i + 5] & 0x0f;
++		point.id = (payload[9 * i + 5] & 0xf0) >> 4;
+ 
+ 		/* determine touch major, minor and orientation */
+ 		point.area_major = max(payload[9 * i + 6],
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 374c129219ef..5efadad4615b 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -92,6 +92,7 @@ struct iommu_dev_data {
+ 	struct list_head dev_data_list;	  /* For global dev_data_list */
+ 	struct protection_domain *domain; /* Domain the device is bound to */
+ 	u16 devid;			  /* PCI Device ID */
++	u16 alias;			  /* Alias Device ID */
+ 	bool iommu_v2;			  /* Device can make use of IOMMUv2 */
+ 	bool passthrough;		  /* Device is identity mapped */
+ 	struct {
+@@ -166,6 +167,13 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
+ 	return container_of(dom, struct protection_domain, domain);
+ }
+ 
++static inline u16 get_device_id(struct device *dev)
++{
++	struct pci_dev *pdev = to_pci_dev(dev);
++
++	return PCI_DEVID(pdev->bus->number, pdev->devfn);
++}
++
+ static struct iommu_dev_data *alloc_dev_data(u16 devid)
+ {
+ 	struct iommu_dev_data *dev_data;
+@@ -203,6 +211,68 @@ out_unlock:
+ 	return dev_data;
+ }
+ 
++static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
++{
++	*(u16 *)data = alias;
++	return 0;
++}
++
++static u16 get_alias(struct device *dev)
++{
++	struct pci_dev *pdev = to_pci_dev(dev);
++	u16 devid, ivrs_alias, pci_alias;
++
++	devid = get_device_id(dev);
++	ivrs_alias = amd_iommu_alias_table[devid];
++	pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
++
++	if (ivrs_alias == pci_alias)
++		return ivrs_alias;
++
++	/*
++	 * DMA alias showdown
++	 *
++	 * The IVRS is fairly reliable in telling us about aliases, but it
++	 * can't know about every screwy device.  If we don't have an IVRS
++	 * reported alias, use the PCI reported alias.  In that case we may
++	 * still need to initialize the rlookup and dev_table entries if the
++	 * alias is to a non-existent device.
++	 */
++	if (ivrs_alias == devid) {
++		if (!amd_iommu_rlookup_table[pci_alias]) {
++			amd_iommu_rlookup_table[pci_alias] =
++				amd_iommu_rlookup_table[devid];
++			memcpy(amd_iommu_dev_table[pci_alias].data,
++			       amd_iommu_dev_table[devid].data,
++			       sizeof(amd_iommu_dev_table[pci_alias].data));
++		}
++
++		return pci_alias;
++	}
++
++	pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
++		"for device %s[%04x:%04x], kernel reported alias "
++		"%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
++		PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
++		PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
++		PCI_FUNC(pci_alias));
++
++	/*
++	 * If we don't have a PCI DMA alias and the IVRS alias is on the same
++	 * bus, then the IVRS table may know about a quirk that we don't.
++	 */
++	if (pci_alias == devid &&
++	    PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
++		pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
++		pdev->dma_alias_devfn = ivrs_alias & 0xff;
++		pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
++			PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
++			dev_name(dev));
++	}
++
++	return ivrs_alias;
++}
++
+ static struct iommu_dev_data *find_dev_data(u16 devid)
+ {
+ 	struct iommu_dev_data *dev_data;
+@@ -215,13 +285,6 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
+ 	return dev_data;
+ }
+ 
+-static inline u16 get_device_id(struct device *dev)
+-{
+-	struct pci_dev *pdev = to_pci_dev(dev);
+-
+-	return PCI_DEVID(pdev->bus->number, pdev->devfn);
+-}
+-
+ static struct iommu_dev_data *get_dev_data(struct device *dev)
+ {
+ 	return dev->archdata.iommu;
+@@ -349,6 +412,8 @@ static int iommu_init_device(struct device *dev)
+ 	if (!dev_data)
+ 		return -ENOMEM;
+ 
++	dev_data->alias = get_alias(dev);
++
+ 	if (pci_iommuv2_capable(pdev)) {
+ 		struct amd_iommu *iommu;
+ 
+@@ -369,7 +434,7 @@ static void iommu_ignore_device(struct device *dev)
+ 	u16 devid, alias;
+ 
+ 	devid = get_device_id(dev);
+-	alias = amd_iommu_alias_table[devid];
++	alias = get_alias(dev);
+ 
+ 	memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
+ 	memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
+@@ -1061,7 +1126,7 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
+ 	int ret;
+ 
+ 	iommu = amd_iommu_rlookup_table[dev_data->devid];
+-	alias = amd_iommu_alias_table[dev_data->devid];
++	alias = dev_data->alias;
+ 
+ 	ret = iommu_flush_dte(iommu, dev_data->devid);
+ 	if (!ret && alias != dev_data->devid)
+@@ -2039,7 +2104,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
+ 	bool ats;
+ 
+ 	iommu = amd_iommu_rlookup_table[dev_data->devid];
+-	alias = amd_iommu_alias_table[dev_data->devid];
++	alias = dev_data->alias;
+ 	ats   = dev_data->ats.enabled;
+ 
+ 	/* Update data structures */
+@@ -2073,7 +2138,7 @@ static void do_detach(struct iommu_dev_data *dev_data)
+ 		return;
+ 
+ 	iommu = amd_iommu_rlookup_table[dev_data->devid];
+-	alias = amd_iommu_alias_table[dev_data->devid];
++	alias = dev_data->alias;
+ 
+ 	/* decrease reference counters */
+ 	dev_data->domain->dev_iommu[iommu->index] -= 1;
+diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
+index 72d6182666cb..58f2fe687a24 100644
+--- a/drivers/iommu/dma-iommu.c
++++ b/drivers/iommu/dma-iommu.c
+@@ -403,7 +403,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
+ 		unsigned int s_length = sg_dma_len(s);
+ 		unsigned int s_dma_len = s->length;
+ 
+-		s->offset = s_offset;
++		s->offset += s_offset;
+ 		s->length = s_length;
+ 		sg_dma_address(s) = dma_addr + s_offset;
+ 		dma_addr += s_dma_len;
+@@ -422,7 +422,7 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
+ 
+ 	for_each_sg(sg, s, nents, i) {
+ 		if (sg_dma_address(s) != DMA_ERROR_CODE)
+-			s->offset = sg_dma_address(s);
++			s->offset += sg_dma_address(s);
+ 		if (sg_dma_len(s))
+ 			s->length = sg_dma_len(s);
+ 		sg_dma_address(s) = DMA_ERROR_CODE;
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index 0e3b0092ec92..515bb8b80952 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -848,7 +848,8 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
+ 	if (!group->default_domain) {
+ 		group->default_domain = __iommu_domain_alloc(dev->bus,
+ 							     IOMMU_DOMAIN_DMA);
+-		group->domain = group->default_domain;
++		if (!group->domain)
++			group->domain = group->default_domain;
+ 	}
+ 
+ 	ret = iommu_group_add_device(group, dev);
+diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
+index d7be6ddc34f6..2fc499a2207e 100644
+--- a/drivers/irqchip/irq-gic-v3.c
++++ b/drivers/irqchip/irq-gic-v3.c
+@@ -361,6 +361,13 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
+ 			if (static_key_true(&supports_deactivate))
+ 				gic_write_dir(irqnr);
+ #ifdef CONFIG_SMP
++			/*
++			 * Unlike GICv2, we don't need an smp_rmb() here.
++			 * The control dependency from gic_read_iar to
++			 * the ISB in gic_write_eoir is enough to ensure
++			 * that any shared data read by handle_IPI will
++			 * be read after the ACK.
++			 */
+ 			handle_IPI(irqnr, regs);
+ #else
+ 			WARN_ONCE(true, "Unexpected SGI received!\n");
+@@ -380,6 +387,15 @@ static void __init gic_dist_init(void)
+ 	writel_relaxed(0, base + GICD_CTLR);
+ 	gic_dist_wait_for_rwp();
+ 
++	/*
++	 * Configure SPIs as non-secure Group-1. This will only matter
++	 * if the GIC only has a single security state. This will not
++	 * do the right thing if the kernel is running in secure mode,
++	 * but that's not the intended use case anyway.
++	 */
++	for (i = 32; i < gic_data.irq_nr; i += 32)
++		writel_relaxed(~0, base + GICD_IGROUPR + i / 8);
++
+ 	gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);
+ 
+ 	/* Enable distributor with ARE, Group1 */
+@@ -494,6 +510,9 @@ static void gic_cpu_init(void)
+ 
+ 	rbase = gic_data_rdist_sgi_base();
+ 
++	/* Configure SGIs/PPIs as non-secure Group-1 */
++	writel_relaxed(~0, rbase + GICR_IGROUPR0);
++
+ 	gic_cpu_config(rbase, gic_redist_wait_for_rwp);
+ 
+ 	/* Give LPIs a spin */
+diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
+index 8f9ebf714e2b..eef950046ac0 100644
+--- a/drivers/irqchip/irq-gic.c
++++ b/drivers/irqchip/irq-gic.c
+@@ -344,6 +344,14 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+ 			if (static_key_true(&supports_deactivate))
+ 				writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE);
+ #ifdef CONFIG_SMP
++			/*
++			 * Ensure any shared data written by the CPU sending
++			 * the IPI is read after we've read the ACK register
++			 * on the GIC.
++			 *
++			 * Pairs with the write barrier in gic_raise_softirq
++			 */
++			smp_rmb();
+ 			handle_IPI(irqnr, regs);
+ #endif
+ 			continue;
+diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
+index efe50845939d..17304705f2cf 100644
+--- a/drivers/irqchip/irq-mxs.c
++++ b/drivers/irqchip/irq-mxs.c
+@@ -183,7 +183,7 @@ static void __iomem * __init icoll_init_iobase(struct device_node *np)
+ 	void __iomem *icoll_base;
+ 
+ 	icoll_base = of_io_request_and_map(np, 0, np->name);
+-	if (!icoll_base)
++	if (IS_ERR(icoll_base))
+ 		panic("%s: unable to map resource", np->full_name);
+ 	return icoll_base;
+ }
+diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
+index 0820f67cc9a7..668730c5cb66 100644
+--- a/drivers/irqchip/irq-sunxi-nmi.c
++++ b/drivers/irqchip/irq-sunxi-nmi.c
+@@ -160,9 +160,9 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
+ 
+ 	gc = irq_get_domain_generic_chip(domain, 0);
+ 	gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node));
+-	if (!gc->reg_base) {
++	if (IS_ERR(gc->reg_base)) {
+ 		pr_err("unable to map resource\n");
+-		ret = -ENOMEM;
++		ret = PTR_ERR(gc->reg_base);
+ 		goto fail_irqd_remove;
+ 	}
+ 
+diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
+index 307db1ea22de..b7ddfb352792 100644
+--- a/drivers/lightnvm/rrpc.c
++++ b/drivers/lightnvm/rrpc.c
+@@ -499,12 +499,21 @@ static void rrpc_gc_queue(struct work_struct *work)
+ 	struct rrpc *rrpc = gcb->rrpc;
+ 	struct rrpc_block *rblk = gcb->rblk;
+ 	struct nvm_lun *lun = rblk->parent->lun;
++	struct nvm_block *blk = rblk->parent;
+ 	struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
+ 
+ 	spin_lock(&rlun->lock);
+ 	list_add_tail(&rblk->prio, &rlun->prio_list);
+ 	spin_unlock(&rlun->lock);
+ 
++	spin_lock(&lun->lock);
++	lun->nr_open_blocks--;
++	lun->nr_closed_blocks++;
++	blk->state &= ~NVM_BLK_ST_OPEN;
++	blk->state |= NVM_BLK_ST_CLOSED;
++	list_move_tail(&rblk->list, &rlun->closed_list);
++	spin_unlock(&lun->lock);
++
+ 	mempool_free(gcb, rrpc->gcb_pool);
+ 	pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
+ 							rblk->parent->id);
+@@ -668,20 +677,8 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
+ 		lun = rblk->parent->lun;
+ 
+ 		cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
+-		if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk)) {
+-			struct nvm_block *blk = rblk->parent;
+-			struct rrpc_lun *rlun = rblk->rlun;
+-
+-			spin_lock(&lun->lock);
+-			lun->nr_open_blocks--;
+-			lun->nr_closed_blocks++;
+-			blk->state &= ~NVM_BLK_ST_OPEN;
+-			blk->state |= NVM_BLK_ST_CLOSED;
+-			list_move_tail(&rblk->list, &rlun->closed_list);
+-			spin_unlock(&lun->lock);
+-
++		if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk))
+ 			rrpc_run_gc(rrpc, rblk);
+-		}
+ 	}
+ }
+ 
+diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
+index 004926955263..b0155b05cddb 100644
+--- a/drivers/mcb/mcb-parse.c
++++ b/drivers/mcb/mcb-parse.c
+@@ -57,7 +57,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus,
+ 	mdev->id = GDD_DEV(reg1);
+ 	mdev->rev = GDD_REV(reg1);
+ 	mdev->var = GDD_VAR(reg1);
+-	mdev->bar = GDD_BAR(reg1);
++	mdev->bar = GDD_BAR(reg2);
+ 	mdev->group = GDD_GRP(reg2);
+ 	mdev->inst = GDD_INS(reg2);
+ 
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 8d0ead98eb6e..a296425a7270 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1015,8 +1015,12 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
+ 	 */
+ 	atomic_set(&dc->count, 1);
+ 
+-	if (bch_cached_dev_writeback_start(dc))
++	/* Block writeback thread, but spawn it */
++	down_write(&dc->writeback_lock);
++	if (bch_cached_dev_writeback_start(dc)) {
++		up_write(&dc->writeback_lock);
+ 		return -ENOMEM;
++	}
+ 
+ 	if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
+ 		bch_sectors_dirty_init(dc);
+@@ -1028,6 +1032,9 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
+ 	bch_cached_dev_run(dc);
+ 	bcache_device_link(&dc->disk, c, "bdev");
+ 
++	/* Allow the writeback thread to proceed */
++	up_write(&dc->writeback_lock);
++
+ 	pr_info("Caching %s as %s on set %pU",
+ 		bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
+ 		dc->disk.c->sb.set_uuid);
+@@ -1366,6 +1373,9 @@ static void cache_set_flush(struct closure *cl)
+ 	struct btree *b;
+ 	unsigned i;
+ 
++	if (!c)
++		closure_return(cl);
++
+ 	bch_cache_accounting_destroy(&c->accounting);
+ 
+ 	kobject_put(&c->internal);
+@@ -1828,11 +1838,12 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
+ 	return 0;
+ }
+ 
+-static void register_cache(struct cache_sb *sb, struct page *sb_page,
++static int register_cache(struct cache_sb *sb, struct page *sb_page,
+ 				struct block_device *bdev, struct cache *ca)
+ {
+ 	char name[BDEVNAME_SIZE];
+-	const char *err = "cannot allocate memory";
++	const char *err = NULL;
++	int ret = 0;
+ 
+ 	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
+ 	ca->bdev = bdev;
+@@ -1847,27 +1858,35 @@ static void register_cache(struct cache_sb *sb, struct page *sb_page,
+ 	if (blk_queue_discard(bdev_get_queue(ca->bdev)))
+ 		ca->discard = CACHE_DISCARD(&ca->sb);
+ 
+-	if (cache_alloc(sb, ca) != 0)
++	ret = cache_alloc(sb, ca);
++	if (ret != 0)
+ 		goto err;
+ 
+-	err = "error creating kobject";
+-	if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache"))
+-		goto err;
++	if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) {
++		err = "error calling kobject_add";
++		ret = -ENOMEM;
++		goto out;
++	}
+ 
+ 	mutex_lock(&bch_register_lock);
+ 	err = register_cache_set(ca);
+ 	mutex_unlock(&bch_register_lock);
+ 
+-	if (err)
+-		goto err;
++	if (err) {
++		ret = -ENODEV;
++		goto out;
++	}
+ 
+ 	pr_info("registered cache device %s", bdevname(bdev, name));
++
+ out:
+ 	kobject_put(&ca->kobj);
+-	return;
++
+ err:
+-	pr_notice("error opening %s: %s", bdevname(bdev, name), err);
+-	goto out;
++	if (err)
++		pr_notice("error opening %s: %s", bdevname(bdev, name), err);
++
++	return ret;
+ }
+ 
+ /* Global interfaces/init */
+@@ -1965,7 +1984,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
+ 		if (!ca)
+ 			goto err_close;
+ 
+-		register_cache(sb, sb_page, bdev, ca);
++		if (register_cache(sb, sb_page, bdev, ca) != 0)
++			goto err_close;
+ 	}
+ out:
+ 	if (sb_page)
+diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
+index f6543f3a970f..3970cda10080 100644
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -867,18 +867,55 @@ static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
+ 	return 0;
+ }
+ 
+-#define WRITE_LOCK(cmd) \
+-	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) \
+-		return -EINVAL; \
+-	down_write(&cmd->root_lock)
++static bool cmd_write_lock(struct dm_cache_metadata *cmd)
++{
++	down_write(&cmd->root_lock);
++	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
++		up_write(&cmd->root_lock);
++		return false;
++	}
++	return true;
++}
++
++#define WRITE_LOCK(cmd)				\
++	do {					\
++		if (!cmd_write_lock((cmd)))	\
++			return -EINVAL;		\
++	} while(0)
+ 
+-#define WRITE_LOCK_VOID(cmd) \
+-	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) \
+-		return; \
+-	down_write(&cmd->root_lock)
++#define WRITE_LOCK_VOID(cmd)			\
++	do {					\
++		if (!cmd_write_lock((cmd)))	\
++			return;			\
++	} while(0)
+ 
+ #define WRITE_UNLOCK(cmd) \
+-	up_write(&cmd->root_lock)
++	up_write(&(cmd)->root_lock)
++
++static bool cmd_read_lock(struct dm_cache_metadata *cmd)
++{
++	down_read(&cmd->root_lock);
++	if (cmd->fail_io) {
++		up_read(&cmd->root_lock);
++		return false;
++	}
++	return true;
++}
++
++#define READ_LOCK(cmd)				\
++	do {					\
++		if (!cmd_read_lock((cmd)))	\
++			return -EINVAL;		\
++	} while(0)
++
++#define READ_LOCK_VOID(cmd)			\
++	do {					\
++		if (!cmd_read_lock((cmd)))	\
++			return;			\
++	} while(0)
++
++#define READ_UNLOCK(cmd) \
++	up_read(&(cmd)->root_lock)
+ 
+ int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
+ {
+@@ -1015,22 +1052,20 @@ int dm_cache_load_discards(struct dm_cache_metadata *cmd,
+ {
+ 	int r;
+ 
+-	down_read(&cmd->root_lock);
++	READ_LOCK(cmd);
+ 	r = __load_discards(cmd, fn, context);
+-	up_read(&cmd->root_lock);
++	READ_UNLOCK(cmd);
+ 
+ 	return r;
+ }
+ 
+-dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd)
++int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result)
+ {
+-	dm_cblock_t r;
+-
+-	down_read(&cmd->root_lock);
+-	r = cmd->cache_blocks;
+-	up_read(&cmd->root_lock);
++	READ_LOCK(cmd);
++	*result = cmd->cache_blocks;
++	READ_UNLOCK(cmd);
+ 
+-	return r;
++	return 0;
+ }
+ 
+ static int __remove(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
+@@ -1188,9 +1223,9 @@ int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
+ {
+ 	int r;
+ 
+-	down_read(&cmd->root_lock);
++	READ_LOCK(cmd);
+ 	r = __load_mappings(cmd, policy, fn, context);
+-	up_read(&cmd->root_lock);
++	READ_UNLOCK(cmd);
+ 
+ 	return r;
+ }
+@@ -1215,18 +1250,18 @@ static int __dump_mappings(struct dm_cache_metadata *cmd)
+ 
+ void dm_cache_dump(struct dm_cache_metadata *cmd)
+ {
+-	down_read(&cmd->root_lock);
++	READ_LOCK_VOID(cmd);
+ 	__dump_mappings(cmd);
+-	up_read(&cmd->root_lock);
++	READ_UNLOCK(cmd);
+ }
+ 
+ int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd)
+ {
+ 	int r;
+ 
+-	down_read(&cmd->root_lock);
++	READ_LOCK(cmd);
+ 	r = cmd->changed;
+-	up_read(&cmd->root_lock);
++	READ_UNLOCK(cmd);
+ 
+ 	return r;
+ }
+@@ -1276,9 +1311,9 @@ int dm_cache_set_dirty(struct dm_cache_metadata *cmd,
+ void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
+ 				 struct dm_cache_statistics *stats)
+ {
+-	down_read(&cmd->root_lock);
++	READ_LOCK_VOID(cmd);
+ 	*stats = cmd->stats;
+-	up_read(&cmd->root_lock);
++	READ_UNLOCK(cmd);
+ }
+ 
+ void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
+@@ -1312,9 +1347,9 @@ int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
+ {
+ 	int r = -EINVAL;
+ 
+-	down_read(&cmd->root_lock);
++	READ_LOCK(cmd);
+ 	r = dm_sm_get_nr_free(cmd->metadata_sm, result);
+-	up_read(&cmd->root_lock);
++	READ_UNLOCK(cmd);
+ 
+ 	return r;
+ }
+@@ -1324,9 +1359,9 @@ int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
+ {
+ 	int r = -EINVAL;
+ 
+-	down_read(&cmd->root_lock);
++	READ_LOCK(cmd);
+ 	r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
+-	up_read(&cmd->root_lock);
++	READ_UNLOCK(cmd);
+ 
+ 	return r;
+ }
+@@ -1417,7 +1452,13 @@ int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *
+ 
+ int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result)
+ {
+-	return blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
++	int r;
++
++	READ_LOCK(cmd);
++	r = blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
++	READ_UNLOCK(cmd);
++
++	return r;
+ }
+ 
+ void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd)
+@@ -1440,10 +1481,7 @@ int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
+ 	struct dm_block *sblock;
+ 	struct cache_disk_superblock *disk_super;
+ 
+-	/*
+-	 * We ignore fail_io for this function.
+-	 */
+-	down_write(&cmd->root_lock);
++	WRITE_LOCK(cmd);
+ 	set_bit(NEEDS_CHECK, &cmd->flags);
+ 
+ 	r = superblock_lock(cmd, &sblock);
+@@ -1458,19 +1496,17 @@ int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
+ 	dm_bm_unlock(sblock);
+ 
+ out:
+-	up_write(&cmd->root_lock);
++	WRITE_UNLOCK(cmd);
+ 	return r;
+ }
+ 
+-bool dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd)
++int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result)
+ {
+-	bool needs_check;
++	READ_LOCK(cmd);
++	*result = !!test_bit(NEEDS_CHECK, &cmd->flags);
++	READ_UNLOCK(cmd);
+ 
+-	down_read(&cmd->root_lock);
+-	needs_check = !!test_bit(NEEDS_CHECK, &cmd->flags);
+-	up_read(&cmd->root_lock);
+-
+-	return needs_check;
++	return 0;
+ }
+ 
+ int dm_cache_metadata_abort(struct dm_cache_metadata *cmd)
+diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h
+index 2ffee21f318d..8528744195e5 100644
+--- a/drivers/md/dm-cache-metadata.h
++++ b/drivers/md/dm-cache-metadata.h
+@@ -66,7 +66,7 @@ void dm_cache_metadata_close(struct dm_cache_metadata *cmd);
+  * origin blocks to map to.
+  */
+ int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size);
+-dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd);
++int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result);
+ 
+ int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
+ 				   sector_t discard_block_size,
+@@ -137,7 +137,7 @@ int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *
+  */
+ int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result);
+ 
+-bool dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd);
++int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result);
+ int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd);
+ void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd);
+ void dm_cache_metadata_set_read_write(struct dm_cache_metadata *cmd);
+diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
+index 5780accffa30..bb9b92ebbf8e 100644
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -984,9 +984,14 @@ static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mod
+ 
+ static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mode)
+ {
+-	bool needs_check = dm_cache_metadata_needs_check(cache->cmd);
++	bool needs_check;
+ 	enum cache_metadata_mode old_mode = get_cache_mode(cache);
+ 
++	if (dm_cache_metadata_needs_check(cache->cmd, &needs_check)) {
++		DMERR("unable to read needs_check flag, setting failure mode");
++		new_mode = CM_FAIL;
++	}
++
+ 	if (new_mode == CM_WRITE && needs_check) {
+ 		DMERR("%s: unable to switch cache to write mode until repaired.",
+ 		      cache_device_name(cache));
+@@ -3510,6 +3515,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
+ 	char buf[BDEVNAME_SIZE];
+ 	struct cache *cache = ti->private;
+ 	dm_cblock_t residency;
++	bool needs_check;
+ 
+ 	switch (type) {
+ 	case STATUSTYPE_INFO:
+@@ -3583,7 +3589,9 @@ static void cache_status(struct dm_target *ti, status_type_t type,
+ 		else
+ 			DMEMIT("rw ");
+ 
+-		if (dm_cache_metadata_needs_check(cache->cmd))
++		r = dm_cache_metadata_needs_check(cache->cmd, &needs_check);
++
++		if (r || needs_check)
+ 			DMEMIT("needs_check ");
+ 		else
+ 			DMEMIT("- ");
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index 3766386080a4..e4d1bafe78c1 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -1105,6 +1105,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ 	int i;
+ 	int r = -EINVAL;
+ 	char *origin_path, *cow_path;
++	dev_t origin_dev, cow_dev;
+ 	unsigned args_used, num_flush_bios = 1;
+ 	fmode_t origin_mode = FMODE_READ;
+ 
+@@ -1135,11 +1136,19 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ 		ti->error = "Cannot get origin device";
+ 		goto bad_origin;
+ 	}
++	origin_dev = s->origin->bdev->bd_dev;
+ 
+ 	cow_path = argv[0];
+ 	argv++;
+ 	argc--;
+ 
++	cow_dev = dm_get_dev_t(cow_path);
++	if (cow_dev && cow_dev == origin_dev) {
++		ti->error = "COW device cannot be the same as origin device";
++		r = -EINVAL;
++		goto bad_cow;
++	}
++
+ 	r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
+ 	if (r) {
+ 		ti->error = "Cannot get COW device";
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 061152a43730..cb5d0daf53bb 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -365,6 +365,26 @@ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
+ }
+ 
+ /*
++ * Convert the path to a device
++ */
++dev_t dm_get_dev_t(const char *path)
++{
++	dev_t uninitialized_var(dev);
++	struct block_device *bdev;
++
++	bdev = lookup_bdev(path);
++	if (IS_ERR(bdev))
++		dev = name_to_dev_t(path);
++	else {
++		dev = bdev->bd_dev;
++		bdput(bdev);
++	}
++
++	return dev;
++}
++EXPORT_SYMBOL_GPL(dm_get_dev_t);
++
++/*
+  * Add a device to the list, or just increment the usage count if
+  * it's already present.
+  */
+@@ -372,23 +392,15 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
+ 		  struct dm_dev **result)
+ {
+ 	int r;
+-	dev_t uninitialized_var(dev);
++	dev_t dev;
+ 	struct dm_dev_internal *dd;
+ 	struct dm_table *t = ti->table;
+-	struct block_device *bdev;
+ 
+ 	BUG_ON(!t);
+ 
+-	/* convert the path to a device */
+-	bdev = lookup_bdev(path);
+-	if (IS_ERR(bdev)) {
+-		dev = name_to_dev_t(path);
+-		if (!dev)
+-			return -ENODEV;
+-	} else {
+-		dev = bdev->bd_dev;
+-		bdput(bdev);
+-	}
++	dev = dm_get_dev_t(path);
++	if (!dev)
++		return -ENODEV;
+ 
+ 	dd = find_device(&t->devices, dev);
+ 	if (!dd) {
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index f962d6453afd..185010d9cccc 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -1981,5 +1981,8 @@ bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd)
+ 
+ void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd)
+ {
+-	dm_tm_issue_prefetches(pmd->tm);
++	down_read(&pmd->root_lock);
++	if (!pmd->fail_io)
++		dm_tm_issue_prefetches(pmd->tm);
++	up_read(&pmd->root_lock);
+ }
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index dd834927bc66..c338aebb4ccd 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1109,12 +1109,8 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
+ 	 * back into ->request_fn() could deadlock attempting to grab the
+ 	 * queue lock again.
+ 	 */
+-	if (run_queue) {
+-		if (md->queue->mq_ops)
+-			blk_mq_run_hw_queues(md->queue, true);
+-		else
+-			blk_run_queue_async(md->queue);
+-	}
++	if (!md->queue->mq_ops && run_queue)
++		blk_run_queue_async(md->queue);
+ 
+ 	/*
+ 	 * dm_put() must be at the end of this function. See the comment above
+@@ -1214,9 +1210,9 @@ static void dm_requeue_original_request(struct mapped_device *md,
+ {
+ 	int rw = rq_data_dir(rq);
+ 
++	rq_end_stats(md, rq);
+ 	dm_unprep_request(rq);
+ 
+-	rq_end_stats(md, rq);
+ 	if (!rq->q->mq_ops)
+ 		old_requeue_request(rq);
+ 	else {
+@@ -1336,7 +1332,10 @@ static void dm_complete_request(struct request *rq, int error)
+ 	struct dm_rq_target_io *tio = tio_from_request(rq);
+ 
+ 	tio->error = error;
+-	blk_complete_request(rq);
++	if (!rq->q->mq_ops)
++		blk_complete_request(rq);
++	else
++		blk_mq_complete_request(rq, error);
+ }
+ 
+ /*
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index e55e6cf9ec17..7551278030d8 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -284,6 +284,8 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
+ 	 * go away inside make_request
+ 	 */
+ 	sectors = bio_sectors(bio);
++	/* bio could be mergeable after passing to underlayer */
++	bio->bi_rw &= ~REQ_NOMERGE;
+ 	mddev->pers->make_request(mddev, bio);
+ 
+ 	cpu = part_stat_lock();
+diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
+index 0a72ab6e6c20..dd483bb2e111 100644
+--- a/drivers/md/multipath.c
++++ b/drivers/md/multipath.c
+@@ -129,7 +129,9 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
+ 	}
+ 	multipath = conf->multipaths + mp_bh->path;
+ 
+-	mp_bh->bio = *bio;
++	bio_init(&mp_bh->bio);
++	__bio_clone_fast(&mp_bh->bio, bio);
++
+ 	mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
+ 	mp_bh->bio.bi_bdev = multipath->rdev->bdev;
+ 	mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 4e3843f7d245..bb5bce059eb4 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -2274,6 +2274,7 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
+ 	if (fail) {
+ 		spin_lock_irq(&conf->device_lock);
+ 		list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
++		conf->nr_queued++;
+ 		spin_unlock_irq(&conf->device_lock);
+ 		md_wakeup_thread(conf->mddev->thread);
+ 	} else {
+@@ -2391,8 +2392,10 @@ static void raid1d(struct md_thread *thread)
+ 		LIST_HEAD(tmp);
+ 		spin_lock_irqsave(&conf->device_lock, flags);
+ 		if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+-			list_add(&tmp, &conf->bio_end_io_list);
+-			list_del_init(&conf->bio_end_io_list);
++			while (!list_empty(&conf->bio_end_io_list)) {
++				list_move(conf->bio_end_io_list.prev, &tmp);
++				conf->nr_queued--;
++			}
+ 		}
+ 		spin_unlock_irqrestore(&conf->device_lock, flags);
+ 		while (!list_empty(&tmp)) {
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 1c1447dd3417..e3fd725d5c4d 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -2664,6 +2664,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
+ 		if (fail) {
+ 			spin_lock_irq(&conf->device_lock);
+ 			list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
++			conf->nr_queued++;
+ 			spin_unlock_irq(&conf->device_lock);
+ 			md_wakeup_thread(conf->mddev->thread);
+ 		} else {
+@@ -2691,8 +2692,10 @@ static void raid10d(struct md_thread *thread)
+ 		LIST_HEAD(tmp);
+ 		spin_lock_irqsave(&conf->device_lock, flags);
+ 		if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+-			list_add(&tmp, &conf->bio_end_io_list);
+-			list_del_init(&conf->bio_end_io_list);
++			while (!list_empty(&conf->bio_end_io_list)) {
++				list_move(conf->bio_end_io_list.prev, &tmp);
++				conf->nr_queued--;
++			}
+ 		}
+ 		spin_unlock_irqrestore(&conf->device_lock, flags);
+ 		while (!list_empty(&tmp)) {
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index b4f02c9959f2..32d52878f182 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -340,8 +340,7 @@ static void release_inactive_stripe_list(struct r5conf *conf,
+ 					 int hash)
+ {
+ 	int size;
+-	unsigned long do_wakeup = 0;
+-	int i = 0;
++	bool do_wakeup = false;
+ 	unsigned long flags;
+ 
+ 	if (hash == NR_STRIPE_HASH_LOCKS) {
+@@ -362,19 +361,15 @@ static void release_inactive_stripe_list(struct r5conf *conf,
+ 			    !list_empty(list))
+ 				atomic_dec(&conf->empty_inactive_list_nr);
+ 			list_splice_tail_init(list, conf->inactive_list + hash);
+-			do_wakeup |= 1 << hash;
++			do_wakeup = true;
+ 			spin_unlock_irqrestore(conf->hash_locks + hash, flags);
+ 		}
+ 		size--;
+ 		hash--;
+ 	}
+ 
+-	for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
+-		if (do_wakeup & (1 << i))
+-			wake_up(&conf->wait_for_stripe[i]);
+-	}
+-
+ 	if (do_wakeup) {
++		wake_up(&conf->wait_for_stripe);
+ 		if (atomic_read(&conf->active_stripes) == 0)
+ 			wake_up(&conf->wait_for_quiescent);
+ 		if (conf->retry_read_aligned)
+@@ -687,15 +682,14 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
+ 			if (!sh) {
+ 				set_bit(R5_INACTIVE_BLOCKED,
+ 					&conf->cache_state);
+-				wait_event_exclusive_cmd(
+-					conf->wait_for_stripe[hash],
++				wait_event_lock_irq(
++					conf->wait_for_stripe,
+ 					!list_empty(conf->inactive_list + hash) &&
+ 					(atomic_read(&conf->active_stripes)
+ 					 < (conf->max_nr_stripes * 3 / 4)
+ 					 || !test_bit(R5_INACTIVE_BLOCKED,
+ 						      &conf->cache_state)),
+-					spin_unlock_irq(conf->hash_locks + hash),
+-					spin_lock_irq(conf->hash_locks + hash));
++					*(conf->hash_locks + hash));
+ 				clear_bit(R5_INACTIVE_BLOCKED,
+ 					  &conf->cache_state);
+ 			} else {
+@@ -720,9 +714,6 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
+ 		}
+ 	} while (sh == NULL);
+ 
+-	if (!list_empty(conf->inactive_list + hash))
+-		wake_up(&conf->wait_for_stripe[hash]);
+-
+ 	spin_unlock_irq(conf->hash_locks + hash);
+ 	return sh;
+ }
+@@ -2089,6 +2080,14 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
+ 	unsigned long cpu;
+ 	int err = 0;
+ 
++	/*
++	 * Never shrink. And mddev_suspend() could deadlock if this is called
++	 * from raid5d. In that case, scribble_disks and scribble_sectors
++	 * should equal to new_disks and new_sectors
++	 */
++	if (conf->scribble_disks >= new_disks &&
++	    conf->scribble_sectors >= new_sectors)
++		return 0;
+ 	mddev_suspend(conf->mddev);
+ 	get_online_cpus();
+ 	for_each_present_cpu(cpu) {
+@@ -2110,6 +2109,10 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
+ 	}
+ 	put_online_cpus();
+ 	mddev_resume(conf->mddev);
++	if (!err) {
++		conf->scribble_disks = new_disks;
++		conf->scribble_sectors = new_sectors;
++	}
+ 	return err;
+ }
+ 
+@@ -2190,7 +2193,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
+ 	cnt = 0;
+ 	list_for_each_entry(nsh, &newstripes, lru) {
+ 		lock_device_hash_lock(conf, hash);
+-		wait_event_exclusive_cmd(conf->wait_for_stripe[hash],
++		wait_event_cmd(conf->wait_for_stripe,
+ 				    !list_empty(conf->inactive_list + hash),
+ 				    unlock_device_hash_lock(conf, hash),
+ 				    lock_device_hash_lock(conf, hash));
+@@ -4236,7 +4239,6 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
+ 		WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) |
+ 					  (1 << STRIPE_SYNCING) |
+ 					  (1 << STRIPE_REPLACED) |
+-					  (1 << STRIPE_PREREAD_ACTIVE) |
+ 					  (1 << STRIPE_DELAYED) |
+ 					  (1 << STRIPE_BIT_DELAY) |
+ 					  (1 << STRIPE_FULL_WRITE) |
+@@ -4251,6 +4253,7 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
+ 					      (1 << STRIPE_REPLACED)));
+ 
+ 		set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
++					    (1 << STRIPE_PREREAD_ACTIVE) |
+ 					    (1 << STRIPE_DEGRADED)),
+ 			      head_sh->state & (1 << STRIPE_INSYNC));
+ 
+@@ -6413,6 +6416,12 @@ static int raid5_alloc_percpu(struct r5conf *conf)
+ 	}
+ 	put_online_cpus();
+ 
++	if (!err) {
++		conf->scribble_disks = max(conf->raid_disks,
++			conf->previous_raid_disks);
++		conf->scribble_sectors = max(conf->chunk_sectors,
++			conf->prev_chunk_sectors);
++	}
+ 	return err;
+ }
+ 
+@@ -6503,9 +6512,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
+ 	seqcount_init(&conf->gen_lock);
+ 	mutex_init(&conf->cache_size_mutex);
+ 	init_waitqueue_head(&conf->wait_for_quiescent);
+-	for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
+-		init_waitqueue_head(&conf->wait_for_stripe[i]);
+-	}
++	init_waitqueue_head(&conf->wait_for_stripe);
+ 	init_waitqueue_head(&conf->wait_for_overlap);
+ 	INIT_LIST_HEAD(&conf->handle_list);
+ 	INIT_LIST_HEAD(&conf->hold_list);
+@@ -7014,8 +7021,8 @@ static int raid5_run(struct mddev *mddev)
+ 		}
+ 
+ 		if (discard_supported &&
+-		   mddev->queue->limits.max_discard_sectors >= stripe &&
+-		   mddev->queue->limits.discard_granularity >= stripe)
++		    mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
++		    mddev->queue->limits.discard_granularity >= stripe)
+ 			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+ 						mddev->queue);
+ 		else
+diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
+index a415e1cd39b8..517d4b68a1be 100644
+--- a/drivers/md/raid5.h
++++ b/drivers/md/raid5.h
+@@ -510,6 +510,8 @@ struct r5conf {
+ 					      * conversions
+ 					      */
+ 	} __percpu *percpu;
++	int scribble_disks;
++	int scribble_sectors;
+ #ifdef CONFIG_HOTPLUG_CPU
+ 	struct notifier_block	cpu_notify;
+ #endif
+@@ -522,7 +524,7 @@ struct r5conf {
+ 	atomic_t		empty_inactive_list_nr;
+ 	struct llist_head	released_stripes;
+ 	wait_queue_head_t	wait_for_quiescent;
+-	wait_queue_head_t	wait_for_stripe[NR_STRIPE_HASH_LOCKS];
++	wait_queue_head_t	wait_for_stripe;
+ 	wait_queue_head_t	wait_for_overlap;
+ 	unsigned long		cache_state;
+ #define R5_INACTIVE_BLOCKED	1	/* release of inactive stripes blocked,
+diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
+index 471fd23b5c5c..08d2c6bf7341 100644
+--- a/drivers/media/i2c/adv7511.c
++++ b/drivers/media/i2c/adv7511.c
+@@ -1161,12 +1161,23 @@ static void adv7511_dbg_dump_edid(int lvl, int debug, struct v4l2_subdev *sd, in
+ 	}
+ }
+ 
++static void adv7511_notify_no_edid(struct v4l2_subdev *sd)
++{
++	struct adv7511_state *state = get_adv7511_state(sd);
++	struct adv7511_edid_detect ed;
++
++	/* We failed to read the EDID, so send an event for this. */
++	ed.present = false;
++	ed.segment = adv7511_rd(sd, 0xc4);
++	v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
++	v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x0);
++}
++
+ static void adv7511_edid_handler(struct work_struct *work)
+ {
+ 	struct delayed_work *dwork = to_delayed_work(work);
+ 	struct adv7511_state *state = container_of(dwork, struct adv7511_state, edid_handler);
+ 	struct v4l2_subdev *sd = &state->sd;
+-	struct adv7511_edid_detect ed;
+ 
+ 	v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+ 
+@@ -1191,9 +1202,7 @@ static void adv7511_edid_handler(struct work_struct *work)
+ 	}
+ 
+ 	/* We failed to read the EDID, so send an event for this. */
+-	ed.present = false;
+-	ed.segment = adv7511_rd(sd, 0xc4);
+-	v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
++	adv7511_notify_no_edid(sd);
+ 	v4l2_dbg(1, debug, sd, "%s: no edid found\n", __func__);
+ }
+ 
+@@ -1264,7 +1273,6 @@ static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
+ 	/* update read only ctrls */
+ 	v4l2_ctrl_s_ctrl(state->hotplug_ctrl, adv7511_have_hotplug(sd) ? 0x1 : 0x0);
+ 	v4l2_ctrl_s_ctrl(state->rx_sense_ctrl, adv7511_have_rx_sense(sd) ? 0x1 : 0x0);
+-	v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0);
+ 
+ 	if ((status & MASK_ADV7511_HPD_DETECT) && ((status & MASK_ADV7511_MSEN_DETECT) || state->edid.segments)) {
+ 		v4l2_dbg(1, debug, sd, "%s: hotplug and (rx-sense or edid)\n", __func__);
+@@ -1294,6 +1302,7 @@ static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
+ 		}
+ 		adv7511_s_power(sd, false);
+ 		memset(&state->edid, 0, sizeof(struct adv7511_state_edid));
++		adv7511_notify_no_edid(sd);
+ 	}
+ }
+ 
+@@ -1370,6 +1379,7 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
+ 		}
+ 		/* one more segment read ok */
+ 		state->edid.segments = segment + 1;
++		v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x1);
+ 		if (((state->edid.data[0x7e] >> 1) + 1) > state->edid.segments) {
+ 			/* Request next EDID segment */
+ 			v4l2_dbg(1, debug, sd, "%s: request segment %d\n", __func__, state->edid.segments);
+@@ -1389,7 +1399,6 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
+ 		ed.present = true;
+ 		ed.segment = 0;
+ 		state->edid_detect_counter++;
+-		v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0);
+ 		v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
+ 		return ed.present;
+ 	}
+diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
+index 9400e996087b..bedbd51fb77c 100644
+--- a/drivers/media/pci/bt8xx/bttv-driver.c
++++ b/drivers/media/pci/bt8xx/bttv-driver.c
+@@ -2334,6 +2334,19 @@ static int bttv_g_fmt_vid_overlay(struct file *file, void *priv,
+ 	return 0;
+ }
+ 
++static void bttv_get_width_mask_vid_cap(const struct bttv_format *fmt,
++					unsigned int *width_mask,
++					unsigned int *width_bias)
++{
++	if (fmt->flags & FORMAT_FLAGS_PLANAR) {
++		*width_mask = ~15; /* width must be a multiple of 16 pixels */
++		*width_bias = 8;   /* nearest */
++	} else {
++		*width_mask = ~3; /* width must be a multiple of 4 pixels */
++		*width_bias = 2;  /* nearest */
++	}
++}
++
+ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
+ 						struct v4l2_format *f)
+ {
+@@ -2343,6 +2356,7 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
+ 	enum v4l2_field field;
+ 	__s32 width, height;
+ 	__s32 height2;
++	unsigned int width_mask, width_bias;
+ 	int rc;
+ 
+ 	fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+@@ -2375,9 +2389,9 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
+ 	width = f->fmt.pix.width;
+ 	height = f->fmt.pix.height;
+ 
++	bttv_get_width_mask_vid_cap(fmt, &width_mask, &width_bias);
+ 	rc = limit_scaled_size_lock(fh, &width, &height, field,
+-			       /* width_mask: 4 pixels */ ~3,
+-			       /* width_bias: nearest */ 2,
++			       width_mask, width_bias,
+ 			       /* adjust_size */ 1,
+ 			       /* adjust_crop */ 0);
+ 	if (0 != rc)
+@@ -2410,6 +2424,7 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
+ 	struct bttv_fh *fh = priv;
+ 	struct bttv *btv = fh->btv;
+ 	__s32 width, height;
++	unsigned int width_mask, width_bias;
+ 	enum v4l2_field field;
+ 
+ 	retval = bttv_switch_type(fh, f->type);
+@@ -2424,9 +2439,10 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
+ 	height = f->fmt.pix.height;
+ 	field = f->fmt.pix.field;
+ 
++	fmt = format_by_fourcc(f->fmt.pix.pixelformat);
++	bttv_get_width_mask_vid_cap(fmt, &width_mask, &width_bias);
+ 	retval = limit_scaled_size_lock(fh, &width, &height, f->fmt.pix.field,
+-			       /* width_mask: 4 pixels */ ~3,
+-			       /* width_bias: nearest */ 2,
++			       width_mask, width_bias,
+ 			       /* adjust_size */ 1,
+ 			       /* adjust_crop */ 1);
+ 	if (0 != retval)
+@@ -2434,8 +2450,6 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
+ 
+ 	f->fmt.pix.field = field;
+ 
+-	fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+-
+ 	/* update our state informations */
+ 	fh->fmt              = fmt;
+ 	fh->cap.field        = f->fmt.pix.field;
+diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
+index a63c1366a64e..1293563b7dce 100644
+--- a/drivers/media/pci/saa7134/saa7134-video.c
++++ b/drivers/media/pci/saa7134/saa7134-video.c
+@@ -1219,10 +1219,13 @@ static int saa7134_g_fmt_vid_cap(struct file *file, void *priv,
+ 	f->fmt.pix.height       = dev->height;
+ 	f->fmt.pix.field        = dev->field;
+ 	f->fmt.pix.pixelformat  = dev->fmt->fourcc;
+-	f->fmt.pix.bytesperline =
+-		(f->fmt.pix.width * dev->fmt->depth) >> 3;
++	if (dev->fmt->planar)
++		f->fmt.pix.bytesperline = f->fmt.pix.width;
++	else
++		f->fmt.pix.bytesperline =
++			(f->fmt.pix.width * dev->fmt->depth) / 8;
+ 	f->fmt.pix.sizeimage =
+-		f->fmt.pix.height * f->fmt.pix.bytesperline;
++		(f->fmt.pix.height * f->fmt.pix.width * dev->fmt->depth) / 8;
+ 	f->fmt.pix.colorspace   = V4L2_COLORSPACE_SMPTE170M;
+ 	return 0;
+ }
+@@ -1298,10 +1301,13 @@ static int saa7134_try_fmt_vid_cap(struct file *file, void *priv,
+ 	if (f->fmt.pix.height > maxh)
+ 		f->fmt.pix.height = maxh;
+ 	f->fmt.pix.width &= ~0x03;
+-	f->fmt.pix.bytesperline =
+-		(f->fmt.pix.width * fmt->depth) >> 3;
++	if (fmt->planar)
++		f->fmt.pix.bytesperline = f->fmt.pix.width;
++	else
++		f->fmt.pix.bytesperline =
++			(f->fmt.pix.width * fmt->depth) / 8;
+ 	f->fmt.pix.sizeimage =
+-		f->fmt.pix.height * f->fmt.pix.bytesperline;
++		(f->fmt.pix.height * f->fmt.pix.width * fmt->depth) / 8;
+ 	f->fmt.pix.colorspace   = V4L2_COLORSPACE_SMPTE170M;
+ 
+ 	return 0;
+diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
+index 7d28899f89ce..6efe9d002961 100644
+--- a/drivers/media/platform/coda/coda-bit.c
++++ b/drivers/media/platform/coda/coda-bit.c
+@@ -1342,7 +1342,7 @@ static void coda_finish_encode(struct coda_ctx *ctx)
+ 
+ 	/* Calculate bytesused field */
+ 	if (dst_buf->sequence == 0) {
+-		vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
++		vb2_set_plane_payload(&dst_buf->vb2_buf, 0, wr_ptr - start_ptr +
+ 					ctx->vpu_header_size[0] +
+ 					ctx->vpu_header_size[1] +
+ 					ctx->vpu_header_size[2]);
+diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
+index 2d782ce94a67..7ae89c684783 100644
+--- a/drivers/media/platform/coda/coda-common.c
++++ b/drivers/media/platform/coda/coda-common.c
+@@ -2118,14 +2118,12 @@ static int coda_probe(struct platform_device *pdev)
+ 
+ 	pdev_id = of_id ? of_id->data : platform_get_device_id(pdev);
+ 
+-	if (of_id) {
++	if (of_id)
+ 		dev->devtype = of_id->data;
+-	} else if (pdev_id) {
++	else if (pdev_id)
+ 		dev->devtype = &coda_devdata[pdev_id->driver_data];
+-	} else {
+-		ret = -EINVAL;
+-		goto err_v4l2_register;
+-	}
++	else
++		return -EINVAL;
+ 
+ 	spin_lock_init(&dev->irqlock);
+ 	INIT_LIST_HEAD(&dev->instances);
+diff --git a/drivers/media/platform/vsp1/vsp1_sru.c b/drivers/media/platform/vsp1/vsp1_sru.c
+index 6310acab60e7..d41ae950d1a1 100644
+--- a/drivers/media/platform/vsp1/vsp1_sru.c
++++ b/drivers/media/platform/vsp1/vsp1_sru.c
+@@ -154,6 +154,7 @@ static int sru_s_stream(struct v4l2_subdev *subdev, int enable)
+ 	mutex_lock(sru->ctrls.lock);
+ 	ctrl0 |= vsp1_sru_read(sru, VI6_SRU_CTRL0)
+ 	       & (VI6_SRU_CTRL0_PARAM0_MASK | VI6_SRU_CTRL0_PARAM1_MASK);
++	vsp1_sru_write(sru, VI6_SRU_CTRL0, ctrl0);
+ 	mutex_unlock(sru->ctrls.lock);
+ 
+ 	vsp1_sru_write(sru, VI6_SRU_CTRL1, VI6_SRU_CTRL1_PARAM5);
+diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
+index 9e29e70a78d7..d8988801dde0 100644
+--- a/drivers/media/usb/au0828/au0828-core.c
++++ b/drivers/media/usb/au0828/au0828-core.c
+@@ -192,7 +192,7 @@ static void au0828_usb_disconnect(struct usb_interface *interface)
+ 	   Set the status so poll routines can check and avoid
+ 	   access after disconnect.
+ 	*/
+-	dev->dev_state = DEV_DISCONNECTED;
++	set_bit(DEV_DISCONNECTED, &dev->dev_state);
+ 
+ 	au0828_rc_unregister(dev);
+ 	/* Digital TV */
+diff --git a/drivers/media/usb/au0828/au0828-input.c b/drivers/media/usb/au0828/au0828-input.c
+index b0f067971979..3d6687f0407d 100644
+--- a/drivers/media/usb/au0828/au0828-input.c
++++ b/drivers/media/usb/au0828/au0828-input.c
+@@ -130,7 +130,7 @@ static int au0828_get_key_au8522(struct au0828_rc *ir)
+ 	bool first = true;
+ 
+ 	/* do nothing if device is disconnected */
+-	if (ir->dev->dev_state == DEV_DISCONNECTED)
++	if (test_bit(DEV_DISCONNECTED, &ir->dev->dev_state))
+ 		return 0;
+ 
+ 	/* Check IR int */
+@@ -260,7 +260,7 @@ static void au0828_rc_stop(struct rc_dev *rc)
+ 	cancel_delayed_work_sync(&ir->work);
+ 
+ 	/* do nothing if device is disconnected */
+-	if (ir->dev->dev_state != DEV_DISCONNECTED) {
++	if (!test_bit(DEV_DISCONNECTED, &ir->dev->dev_state)) {
+ 		/* Disable IR */
+ 		au8522_rc_clear(ir, 0xe0, 1 << 4);
+ 	}
+diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
+index a13625722848..8bc69af874a0 100644
+--- a/drivers/media/usb/au0828/au0828-video.c
++++ b/drivers/media/usb/au0828/au0828-video.c
+@@ -104,14 +104,13 @@ static inline void print_err_status(struct au0828_dev *dev,
+ 
+ static int check_dev(struct au0828_dev *dev)
+ {
+-	if (dev->dev_state & DEV_DISCONNECTED) {
++	if (test_bit(DEV_DISCONNECTED, &dev->dev_state)) {
+ 		pr_info("v4l2 ioctl: device not present\n");
+ 		return -ENODEV;
+ 	}
+ 
+-	if (dev->dev_state & DEV_MISCONFIGURED) {
+-		pr_info("v4l2 ioctl: device is misconfigured; "
+-		       "close and open it again\n");
++	if (test_bit(DEV_MISCONFIGURED, &dev->dev_state)) {
++		pr_info("v4l2 ioctl: device is misconfigured; close and open it again\n");
+ 		return -EIO;
+ 	}
+ 	return 0;
+@@ -519,8 +518,8 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb)
+ 	if (!dev)
+ 		return 0;
+ 
+-	if ((dev->dev_state & DEV_DISCONNECTED) ||
+-	    (dev->dev_state & DEV_MISCONFIGURED))
++	if (test_bit(DEV_DISCONNECTED, &dev->dev_state) ||
++	    test_bit(DEV_MISCONFIGURED, &dev->dev_state))
+ 		return 0;
+ 
+ 	if (urb->status < 0) {
+@@ -822,10 +821,10 @@ static int au0828_stream_interrupt(struct au0828_dev *dev)
+ 	int ret = 0;
+ 
+ 	dev->stream_state = STREAM_INTERRUPT;
+-	if (dev->dev_state == DEV_DISCONNECTED)
++	if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
+ 		return -ENODEV;
+ 	else if (ret) {
+-		dev->dev_state = DEV_MISCONFIGURED;
++		set_bit(DEV_MISCONFIGURED, &dev->dev_state);
+ 		dprintk(1, "%s device is misconfigured!\n", __func__);
+ 		return ret;
+ 	}
+@@ -1014,7 +1013,7 @@ static int au0828_v4l2_open(struct file *filp)
+ 	int ret;
+ 
+ 	dprintk(1,
+-		"%s called std_set %d dev_state %d stream users %d users %d\n",
++		"%s called std_set %d dev_state %ld stream users %d users %d\n",
+ 		__func__, dev->std_set_in_tuner_core, dev->dev_state,
+ 		dev->streaming_users, dev->users);
+ 
+@@ -1033,7 +1032,7 @@ static int au0828_v4l2_open(struct file *filp)
+ 		au0828_analog_stream_enable(dev);
+ 		au0828_analog_stream_reset(dev);
+ 		dev->stream_state = STREAM_OFF;
+-		dev->dev_state |= DEV_INITIALIZED;
++		set_bit(DEV_INITIALIZED, &dev->dev_state);
+ 	}
+ 	dev->users++;
+ 	mutex_unlock(&dev->lock);
+@@ -1047,7 +1046,7 @@ static int au0828_v4l2_close(struct file *filp)
+ 	struct video_device *vdev = video_devdata(filp);
+ 
+ 	dprintk(1,
+-		"%s called std_set %d dev_state %d stream users %d users %d\n",
++		"%s called std_set %d dev_state %ld stream users %d users %d\n",
+ 		__func__, dev->std_set_in_tuner_core, dev->dev_state,
+ 		dev->streaming_users, dev->users);
+ 
+@@ -1063,7 +1062,7 @@ static int au0828_v4l2_close(struct file *filp)
+ 		del_timer_sync(&dev->vbi_timeout);
+ 	}
+ 
+-	if (dev->dev_state == DEV_DISCONNECTED)
++	if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
+ 		goto end;
+ 
+ 	if (dev->users == 1) {
+@@ -1092,7 +1091,7 @@ static void au0828_init_tuner(struct au0828_dev *dev)
+ 		.type = V4L2_TUNER_ANALOG_TV,
+ 	};
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	if (dev->std_set_in_tuner_core)
+@@ -1164,7 +1163,7 @@ static int vidioc_querycap(struct file *file, void  *priv,
+ 	struct video_device *vdev = video_devdata(file);
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	strlcpy(cap->driver, "au0828", sizeof(cap->driver));
+@@ -1207,7 +1206,7 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ {
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	f->fmt.pix.width = dev->width;
+@@ -1226,7 +1225,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ {
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	return au0828_set_format(dev, VIDIOC_TRY_FMT, f);
+@@ -1238,7 +1237,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 	int rc;
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	rc = check_dev(dev);
+@@ -1260,7 +1259,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id norm)
+ {
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	if (norm == dev->std)
+@@ -1292,7 +1291,7 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
+ {
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	*norm = dev->std;
+@@ -1315,7 +1314,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
+ 		[AU0828_VMUX_DEBUG] = "tv debug"
+ 	};
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	tmp = input->index;
+@@ -1345,7 +1344,7 @@ static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+ {
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	*i = dev->ctrl_input;
+@@ -1356,7 +1355,7 @@ static void au0828_s_input(struct au0828_dev *dev, int index)
+ {
+ 	int i;
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	switch (AUVI_INPUT(index).type) {
+@@ -1441,7 +1440,7 @@ static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a)
+ {
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	a->index = dev->ctrl_ainput;
+@@ -1461,7 +1460,7 @@ static int vidioc_s_audio(struct file *file, void *priv, const struct v4l2_audio
+ 	if (a->index != dev->ctrl_ainput)
+ 		return -EINVAL;
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 	return 0;
+ }
+@@ -1473,7 +1472,7 @@ static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
+ 	if (t->index != 0)
+ 		return -EINVAL;
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	strcpy(t->name, "Auvitek tuner");
+@@ -1493,7 +1492,7 @@ static int vidioc_s_tuner(struct file *file, void *priv,
+ 	if (t->index != 0)
+ 		return -EINVAL;
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	au0828_init_tuner(dev);
+@@ -1515,7 +1514,7 @@ static int vidioc_g_frequency(struct file *file, void *priv,
+ 
+ 	if (freq->tuner != 0)
+ 		return -EINVAL;
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 	freq->frequency = dev->ctrl_freq;
+ 	return 0;
+@@ -1530,7 +1529,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
+ 	if (freq->tuner != 0)
+ 		return -EINVAL;
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	au0828_init_tuner(dev);
+@@ -1556,7 +1555,7 @@ static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
+ {
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	format->fmt.vbi.samples_per_line = dev->vbi_width;
+@@ -1582,7 +1581,7 @@ static int vidioc_cropcap(struct file *file, void *priv,
+ 	if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ 		return -EINVAL;
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	cc->bounds.left = 0;
+@@ -1604,7 +1603,7 @@ static int vidioc_g_register(struct file *file, void *priv,
+ {
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	reg->val = au0828_read(dev, reg->reg);
+@@ -1617,7 +1616,7 @@ static int vidioc_s_register(struct file *file, void *priv,
+ {
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	return au0828_writereg(dev, reg->reg, reg->val);
+diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
+index 8276072bc55a..b28a05d76618 100644
+--- a/drivers/media/usb/au0828/au0828.h
++++ b/drivers/media/usb/au0828/au0828.h
+@@ -21,6 +21,7 @@
+ 
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
++#include <linux/bitops.h>
+ #include <linux/usb.h>
+ #include <linux/i2c.h>
+ #include <linux/i2c-algo-bit.h>
+@@ -122,9 +123,9 @@ enum au0828_stream_state {
+ 
+ /* device state */
+ enum au0828_dev_state {
+-	DEV_INITIALIZED = 0x01,
+-	DEV_DISCONNECTED = 0x02,
+-	DEV_MISCONFIGURED = 0x04
++	DEV_INITIALIZED = 0,
++	DEV_DISCONNECTED = 1,
++	DEV_MISCONFIGURED = 2
+ };
+ 
+ struct au0828_dev;
+@@ -248,7 +249,7 @@ struct au0828_dev {
+ 	int input_type;
+ 	int std_set_in_tuner_core;
+ 	unsigned int ctrl_input;
+-	enum au0828_dev_state dev_state;
++	long unsigned int dev_state; /* defined at enum au0828_dev_state */;
+ 	enum au0828_stream_state stream_state;
+ 	wait_queue_head_t open;
+ 
+diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
+index 086cf1c7bd7d..18aed5dd325e 100644
+--- a/drivers/media/usb/pwc/pwc-if.c
++++ b/drivers/media/usb/pwc/pwc-if.c
+@@ -91,6 +91,7 @@ static const struct usb_device_id pwc_device_table [] = {
+ 	{ USB_DEVICE(0x0471, 0x0312) },
+ 	{ USB_DEVICE(0x0471, 0x0313) }, /* the 'new' 720K */
+ 	{ USB_DEVICE(0x0471, 0x0329) }, /* Philips SPC 900NC PC Camera */
++	{ USB_DEVICE(0x0471, 0x032C) }, /* Philips SPC 880NC PC Camera */
+ 	{ USB_DEVICE(0x069A, 0x0001) }, /* Askey */
+ 	{ USB_DEVICE(0x046D, 0x08B0) }, /* Logitech QuickCam Pro 3000 */
+ 	{ USB_DEVICE(0x046D, 0x08B1) }, /* Logitech QuickCam Notebook Pro */
+@@ -810,6 +811,11 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
+ 			name = "Philips SPC 900NC webcam";
+ 			type_id = 740;
+ 			break;
++		case 0x032C:
++			PWC_INFO("Philips SPC 880NC USB webcam detected.\n");
++			name = "Philips SPC 880NC webcam";
++			type_id = 740;
++			break;
+ 		default:
+ 			return -ENODEV;
+ 			break;
+diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
+index de9ff3bb8edd..6996ab8db108 100644
+--- a/drivers/media/usb/usbvision/usbvision-video.c
++++ b/drivers/media/usb/usbvision/usbvision-video.c
+@@ -1461,13 +1461,6 @@ static int usbvision_probe(struct usb_interface *intf,
+ 	printk(KERN_INFO "%s: %s found\n", __func__,
+ 				usbvision_device_data[model].model_string);
+ 
+-	/*
+-	 * this is a security check.
+-	 * an exploit using an incorrect bInterfaceNumber is known
+-	 */
+-	if (ifnum >= USB_MAXINTERFACES || !dev->actconfig->interface[ifnum])
+-		return -ENODEV;
+-
+ 	if (usbvision_device_data[model].interface >= 0)
+ 		interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0];
+ 	else if (ifnum < dev->actconfig->desc.bNumInterfaces)
+diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+index 8fd84a67478a..019644ff627d 100644
+--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
++++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+@@ -415,7 +415,8 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
+ 		get_user(kp->index, &up->index) ||
+ 		get_user(kp->type, &up->type) ||
+ 		get_user(kp->flags, &up->flags) ||
+-		get_user(kp->memory, &up->memory))
++		get_user(kp->memory, &up->memory) ||
++		get_user(kp->length, &up->length))
+ 			return -EFAULT;
+ 
+ 	if (V4L2_TYPE_IS_OUTPUT(kp->type))
+@@ -427,9 +428,6 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
+ 			return -EFAULT;
+ 
+ 	if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
+-		if (get_user(kp->length, &up->length))
+-			return -EFAULT;
+-
+ 		num_planes = kp->length;
+ 		if (num_planes == 0) {
+ 			kp->m.planes = NULL;
+@@ -462,16 +460,14 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
+ 	} else {
+ 		switch (kp->memory) {
+ 		case V4L2_MEMORY_MMAP:
+-			if (get_user(kp->length, &up->length) ||
+-				get_user(kp->m.offset, &up->m.offset))
++			if (get_user(kp->m.offset, &up->m.offset))
+ 				return -EFAULT;
+ 			break;
+ 		case V4L2_MEMORY_USERPTR:
+ 			{
+ 			compat_long_t tmp;
+ 
+-			if (get_user(kp->length, &up->length) ||
+-			    get_user(tmp, &up->m.userptr))
++			if (get_user(tmp, &up->m.userptr))
+ 				return -EFAULT;
+ 
+ 			kp->m.userptr = (unsigned long)compat_ptr(tmp);
+@@ -513,7 +509,8 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
+ 		copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) ||
+ 		put_user(kp->sequence, &up->sequence) ||
+ 		put_user(kp->reserved2, &up->reserved2) ||
+-		put_user(kp->reserved, &up->reserved))
++		put_user(kp->reserved, &up->reserved) ||
++		put_user(kp->length, &up->length))
+ 			return -EFAULT;
+ 
+ 	if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
+@@ -536,13 +533,11 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
+ 	} else {
+ 		switch (kp->memory) {
+ 		case V4L2_MEMORY_MMAP:
+-			if (put_user(kp->length, &up->length) ||
+-				put_user(kp->m.offset, &up->m.offset))
++			if (put_user(kp->m.offset, &up->m.offset))
+ 				return -EFAULT;
+ 			break;
+ 		case V4L2_MEMORY_USERPTR:
+-			if (put_user(kp->length, &up->length) ||
+-				put_user(kp->m.userptr, &up->m.userptr))
++			if (put_user(kp->m.userptr, &up->m.userptr))
+ 				return -EFAULT;
+ 			break;
+ 		case V4L2_MEMORY_OVERLAY:
+diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
+index ff8953ae52d1..d7d7c52a3060 100644
+--- a/drivers/media/v4l2-core/videobuf2-core.c
++++ b/drivers/media/v4l2-core/videobuf2-core.c
+@@ -1643,7 +1643,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
+  * Will sleep if required for nonblocking == false.
+  */
+ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
+-				int nonblocking)
++			     void *pb, int nonblocking)
+ {
+ 	unsigned long flags;
+ 	int ret;
+@@ -1664,10 +1664,10 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
+ 	/*
+ 	 * Only remove the buffer from done_list if v4l2_buffer can handle all
+ 	 * the planes.
+-	 * Verifying planes is NOT necessary since it already has been checked
+-	 * before the buffer is queued/prepared. So it can never fail.
+ 	 */
+-	list_del(&(*vb)->done_entry);
++	ret = call_bufop(q, verify_planes_array, *vb, pb);
++	if (!ret)
++		list_del(&(*vb)->done_entry);
+ 	spin_unlock_irqrestore(&q->done_lock, flags);
+ 
+ 	return ret;
+@@ -1746,7 +1746,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
+ 	struct vb2_buffer *vb = NULL;
+ 	int ret;
+ 
+-	ret = __vb2_get_done_vb(q, &vb, nonblocking);
++	ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -2293,6 +2293,16 @@ unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
+ 		return POLLERR;
+ 
+ 	/*
++	 * If this quirk is set and QBUF hasn't been called yet then
++	 * return POLLERR as well. This only affects capture queues, output
++	 * queues will always initialize waiting_for_buffers to false.
++	 * This quirk is set by V4L2 for backwards compatibility reasons.
++	 */
++	if (q->quirk_poll_must_check_waiting_for_buffers &&
++	    q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
++		return POLLERR;
++
++	/*
+ 	 * For output streams you can call write() as long as there are fewer
+ 	 * buffers queued than there are buffers available.
+ 	 */
+diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
+index dbec5923fcf0..3c3b517f1d1c 100644
+--- a/drivers/media/v4l2-core/videobuf2-memops.c
++++ b/drivers/media/v4l2-core/videobuf2-memops.c
+@@ -49,7 +49,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
+ 	vec = frame_vector_create(nr);
+ 	if (!vec)
+ 		return ERR_PTR(-ENOMEM);
+-	ret = get_vaddr_frames(start, nr, write, 1, vec);
++	ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
+ 	if (ret < 0)
+ 		goto out_destroy;
+ 	/* We accept only complete set of PFNs */
+diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
+index 91f552124050..0b1b8c7b6ce5 100644
+--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
++++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
+@@ -765,6 +765,12 @@ int vb2_queue_init(struct vb2_queue *q)
+ 	q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
+ 	q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK)
+ 			== V4L2_BUF_FLAG_TIMESTAMP_COPY;
++	/*
++	 * For compatibility with vb1: if QBUF hasn't been called yet, then
++	 * return POLLERR as well. This only affects capture queues, output
++	 * queues will always initialize waiting_for_buffers to false.
++	 */
++	q->quirk_poll_must_check_waiting_for_buffers = true;
+ 
+ 	return vb2_core_queue_init(q);
+ }
+@@ -818,14 +824,6 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
+ 			poll_wait(file, &fh->wait, wait);
+ 	}
+ 
+-	/*
+-	 * For compatibility with vb1: if QBUF hasn't been called yet, then
+-	 * return POLLERR as well. This only affects capture queues, output
+-	 * queues will always initialize waiting_for_buffers to false.
+-	 */
+-	if (q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
+-		return POLLERR;
+-
+ 	return res | vb2_core_poll(q, file, wait);
+ }
+ EXPORT_SYMBOL_GPL(vb2_poll);
+diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
+index 1743788f1595..1bbbe877ba7e 100644
+--- a/drivers/mfd/intel-lpss.c
++++ b/drivers/mfd/intel-lpss.c
+@@ -453,6 +453,7 @@ int intel_lpss_probe(struct device *dev,
+ err_remove_ltr:
+ 	intel_lpss_debugfs_remove(lpss);
+ 	intel_lpss_ltr_hide(lpss);
++	intel_lpss_unregister_clock(lpss);
+ 
+ err_clk_register:
+ 	ida_simple_remove(&intel_lpss_devid_ida, lpss->devid);
+diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
+index b7b3e8ee64f2..c30290f33430 100644
+--- a/drivers/mfd/omap-usb-tll.c
++++ b/drivers/mfd/omap-usb-tll.c
+@@ -269,6 +269,8 @@ static int usbtll_omap_probe(struct platform_device *pdev)
+ 
+ 		if (IS_ERR(tll->ch_clk[i]))
+ 			dev_dbg(dev, "can't get clock : %s\n", clkname);
++		else
++			clk_prepare(tll->ch_clk[i]);
+ 	}
+ 
+ 	pm_runtime_put_sync(dev);
+@@ -301,9 +303,12 @@ static int usbtll_omap_remove(struct platform_device *pdev)
+ 	tll_dev = NULL;
+ 	spin_unlock(&tll_lock);
+ 
+-	for (i = 0; i < tll->nch; i++)
+-		if (!IS_ERR(tll->ch_clk[i]))
++	for (i = 0; i < tll->nch; i++) {
++		if (!IS_ERR(tll->ch_clk[i])) {
++			clk_unprepare(tll->ch_clk[i]);
+ 			clk_put(tll->ch_clk[i]);
++		}
++	}
+ 
+ 	pm_runtime_disable(&pdev->dev);
+ 	return 0;
+@@ -420,7 +425,7 @@ int omap_tll_enable(struct usbhs_omap_platform_data *pdata)
+ 			if (IS_ERR(tll->ch_clk[i]))
+ 				continue;
+ 
+-			r = clk_prepare_enable(tll->ch_clk[i]);
++			r = clk_enable(tll->ch_clk[i]);
+ 			if (r) {
+ 				dev_err(tll_dev,
+ 				 "Error enabling ch %d clock: %d\n", i, r);
+@@ -448,7 +453,7 @@ int omap_tll_disable(struct usbhs_omap_platform_data *pdata)
+ 	for (i = 0; i < tll->nch; i++) {
+ 		if (omap_usb_mode_needs_tll(pdata->port_mode[i])) {
+ 			if (!IS_ERR(tll->ch_clk[i]))
+-				clk_disable_unprepare(tll->ch_clk[i]);
++				clk_disable(tll->ch_clk[i]);
+ 		}
+ 	}
+ 
+diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
+index 054fc10cb3b6..b22c03264270 100644
+--- a/drivers/misc/Kconfig
++++ b/drivers/misc/Kconfig
+@@ -440,7 +440,7 @@ config ARM_CHARLCD
+ 	  still useful.
+ 
+ config BMP085
+-	bool
++	tristate
+ 	depends on SYSFS
+ 
+ config BMP085_I2C
+diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
+index 15e88078ba1e..f1a0b99f5a9a 100644
+--- a/drivers/misc/ad525x_dpot.c
++++ b/drivers/misc/ad525x_dpot.c
+@@ -216,7 +216,7 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
+ 			 */
+ 			value = swab16(value);
+ 
+-			if (dpot->uid == DPOT_UID(AD5271_ID))
++			if (dpot->uid == DPOT_UID(AD5274_ID))
+ 				value = value >> 2;
+ 		return value;
+ 	default:
+diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
+index 09a406058c46..efbb6945eb18 100644
+--- a/drivers/misc/cxl/irq.c
++++ b/drivers/misc/cxl/irq.c
+@@ -288,7 +288,6 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
+ void cxl_unmap_irq(unsigned int virq, void *cookie)
+ {
+ 	free_irq(virq, cookie);
+-	irq_dispose_mapping(virq);
+ }
+ 
+ static int cxl_register_one_irq(struct cxl *adapter,
+diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
+index cd0403f09267..e79c0371ee6f 100644
+--- a/drivers/misc/mei/amthif.c
++++ b/drivers/misc/mei/amthif.c
+@@ -417,8 +417,10 @@ int mei_amthif_irq_read_msg(struct mei_cl *cl,
+ 
+ 	dev = cl->dev;
+ 
+-	if (dev->iamthif_state != MEI_IAMTHIF_READING)
++	if (dev->iamthif_state != MEI_IAMTHIF_READING) {
++		mei_irq_discard_msg(dev, mei_hdr);
+ 		return 0;
++	}
+ 
+ 	ret = mei_cl_irq_read_msg(cl, mei_hdr, cmpl_list);
+ 	if (ret)
+diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
+index 0b05aa938799..a77643954523 100644
+--- a/drivers/misc/mei/bus.c
++++ b/drivers/misc/mei/bus.c
+@@ -53,6 +53,11 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
+ 	bus = cl->dev;
+ 
+ 	mutex_lock(&bus->device_lock);
++	if (bus->dev_state != MEI_DEV_ENABLED) {
++		rets = -ENODEV;
++		goto out;
++	}
++
+ 	if (!mei_cl_is_connected(cl)) {
+ 		rets = -ENODEV;
+ 		goto out;
+@@ -109,6 +114,10 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
+ 	bus = cl->dev;
+ 
+ 	mutex_lock(&bus->device_lock);
++	if (bus->dev_state != MEI_DEV_ENABLED) {
++		rets = -ENODEV;
++		goto out;
++	}
+ 
+ 	cb = mei_cl_read_cb(cl, NULL);
+ 	if (cb)
+@@ -213,17 +222,23 @@ EXPORT_SYMBOL_GPL(mei_cldev_recv);
+ static void mei_cl_bus_event_work(struct work_struct *work)
+ {
+ 	struct mei_cl_device *cldev;
++	struct mei_device *bus;
+ 
+ 	cldev = container_of(work, struct mei_cl_device, event_work);
+ 
++	bus = cldev->bus;
++
+ 	if (cldev->event_cb)
+ 		cldev->event_cb(cldev, cldev->events, cldev->event_context);
+ 
+ 	cldev->events = 0;
+ 
+ 	/* Prepare for the next read */
+-	if (cldev->events_mask & BIT(MEI_CL_EVENT_RX))
++	if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) {
++		mutex_lock(&bus->device_lock);
+ 		mei_cl_read_start(cldev->cl, 0, NULL);
++		mutex_unlock(&bus->device_lock);
++	}
+ }
+ 
+ /**
+@@ -287,6 +302,7 @@ int mei_cldev_register_event_cb(struct mei_cl_device *cldev,
+ 				unsigned long events_mask,
+ 				mei_cldev_event_cb_t event_cb, void *context)
+ {
++	struct mei_device *bus = cldev->bus;
+ 	int ret;
+ 
+ 	if (cldev->event_cb)
+@@ -299,15 +315,17 @@ int mei_cldev_register_event_cb(struct mei_cl_device *cldev,
+ 	INIT_WORK(&cldev->event_work, mei_cl_bus_event_work);
+ 
+ 	if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) {
++		mutex_lock(&bus->device_lock);
+ 		ret = mei_cl_read_start(cldev->cl, 0, NULL);
++		mutex_unlock(&bus->device_lock);
+ 		if (ret && ret != -EBUSY)
+ 			return ret;
+ 	}
+ 
+ 	if (cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF)) {
+-		mutex_lock(&cldev->cl->dev->device_lock);
++		mutex_lock(&bus->device_lock);
+ 		ret = mei_cl_notify_request(cldev->cl, NULL, event_cb ? 1 : 0);
+-		mutex_unlock(&cldev->cl->dev->device_lock);
++		mutex_unlock(&bus->device_lock);
+ 		if (ret)
+ 			return ret;
+ 	}
+diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
+index a6c87c713193..958af84884b5 100644
+--- a/drivers/misc/mei/client.c
++++ b/drivers/misc/mei/client.c
+@@ -1735,6 +1735,10 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
+ 			wake_up(&cl->wait);
+ 
+ 		break;
++	case MEI_FOP_DISCONNECT_RSP:
++		mei_io_cb_free(cb);
++		mei_cl_set_disconnected(cl);
++		break;
+ 	default:
+ 		BUG_ON(0);
+ 	}
+diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
+index e7b7aad0999b..fd8a9f057ea6 100644
+--- a/drivers/misc/mei/hbm.c
++++ b/drivers/misc/mei/hbm.c
+@@ -873,8 +873,7 @@ static int mei_hbm_fw_disconnect_req(struct mei_device *dev,
+ 		cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT_RSP, NULL);
+ 		if (!cb)
+ 			return -ENOMEM;
+-		cl_dbg(dev, cl, "add disconnect response as first\n");
+-		list_add(&cb->list, &dev->ctrl_wr_list.list);
++		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+ 	}
+ 	return 0;
+ }
+diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
+index 64b568a0268d..d1df797c7568 100644
+--- a/drivers/misc/mei/interrupt.c
++++ b/drivers/misc/mei/interrupt.c
+@@ -76,7 +76,6 @@ static inline int mei_cl_hbm_equal(struct mei_cl *cl,
+  * @dev: mei device
+  * @hdr: message header
+  */
+-static inline
+ void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr)
+ {
+ 	/*
+@@ -184,10 +183,7 @@ static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
+ 		return -EMSGSIZE;
+ 
+ 	ret = mei_hbm_cl_disconnect_rsp(dev, cl);
+-	mei_cl_set_disconnected(cl);
+-	mei_io_cb_free(cb);
+-	mei_me_cl_put(cl->me_cl);
+-	cl->me_cl = NULL;
++	list_move_tail(&cb->list, &cmpl_list->list);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
+index 4250555d5e72..1b06e2fd6858 100644
+--- a/drivers/misc/mei/mei_dev.h
++++ b/drivers/misc/mei/mei_dev.h
+@@ -782,6 +782,8 @@ bool mei_hbuf_acquire(struct mei_device *dev);
+ 
+ bool mei_write_is_idle(struct mei_device *dev);
+ 
++void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr);
++
+ #if IS_ENABLED(CONFIG_DEBUG_FS)
+ int mei_dbgfs_register(struct mei_device *dev, const char *name);
+ void mei_dbgfs_deregister(struct mei_device *dev);
+diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
+index 8310b4dbff06..6a451bd65bf3 100644
+--- a/drivers/misc/mic/scif/scif_rma.c
++++ b/drivers/misc/mic/scif/scif_rma.c
+@@ -1511,7 +1511,7 @@ off_t scif_register_pinned_pages(scif_epd_t epd,
+ 	if ((map_flags & SCIF_MAP_FIXED) &&
+ 	    ((ALIGN(offset, PAGE_SIZE) != offset) ||
+ 	    (offset < 0) ||
+-	    (offset + (off_t)len < offset)))
++	    (len > LONG_MAX - offset)))
+ 		return -EINVAL;
+ 
+ 	might_sleep();
+@@ -1614,7 +1614,7 @@ off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset,
+ 	if ((map_flags & SCIF_MAP_FIXED) &&
+ 	    ((ALIGN(offset, PAGE_SIZE) != offset) ||
+ 	    (offset < 0) ||
+-	    (offset + (off_t)len < offset)))
++	    (len > LONG_MAX - offset)))
+ 		return -EINVAL;
+ 
+ 	/* Unsupported protection requested */
+@@ -1732,7 +1732,8 @@ scif_unregister(scif_epd_t epd, off_t offset, size_t len)
+ 
+ 	/* Offset is not page aligned or offset+len wraps around */
+ 	if ((ALIGN(offset, PAGE_SIZE) != offset) ||
+-	    (offset + (off_t)len < offset))
++	    (offset < 0) ||
++	    (len > LONG_MAX - offset))
+ 		return -EINVAL;
+ 
+ 	err = scif_verify_epd(ep);
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index fe207e542032..7630b6b7eb75 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -86,7 +86,6 @@ static int max_devices;
+ 
+ /* TODO: Replace these with struct ida */
+ static DECLARE_BITMAP(dev_use, MAX_DEVICES);
+-static DECLARE_BITMAP(name_use, MAX_DEVICES);
+ 
+ /*
+  * There is one mmc_blk_data per slot.
+@@ -105,7 +104,6 @@ struct mmc_blk_data {
+ 	unsigned int	usage;
+ 	unsigned int	read_only;
+ 	unsigned int	part_type;
+-	unsigned int	name_idx;
+ 	unsigned int	reset_done;
+ #define MMC_BLK_READ		BIT(0)
+ #define MMC_BLK_WRITE		BIT(1)
+@@ -589,6 +587,14 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
+ 	struct mmc_card *card;
+ 	int err = 0, ioc_err = 0;
+ 
++	/*
++	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
++	 * whole block device, not on a partition.  This prevents overspray
++	 * between sibling partitions.
++	 */
++	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
++		return -EPERM;
++
+ 	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
+ 	if (IS_ERR(idata))
+ 		return PTR_ERR(idata);
+@@ -631,6 +637,14 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
+ 	int i, err = 0, ioc_err = 0;
+ 	__u64 num_of_cmds;
+ 
++	/*
++	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
++	 * whole block device, not on a partition.  This prevents overspray
++	 * between sibling partitions.
++	 */
++	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
++		return -EPERM;
++
+ 	if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
+ 			   sizeof(num_of_cmds)))
+ 		return -EFAULT;
+@@ -688,14 +702,6 @@ cmd_err:
+ static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
+ 	unsigned int cmd, unsigned long arg)
+ {
+-	/*
+-	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
+-	 * whole block device, not on a partition.  This prevents overspray
+-	 * between sibling partitions.
+-	 */
+-	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
+-		return -EPERM;
+-
+ 	switch (cmd) {
+ 	case MMC_IOC_CMD:
+ 		return mmc_blk_ioctl_cmd(bdev,
+@@ -2194,19 +2200,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
+ 		goto out;
+ 	}
+ 
+-	/*
+-	 * !subname implies we are creating main mmc_blk_data that will be
+-	 * associated with mmc_card with dev_set_drvdata. Due to device
+-	 * partitions, devidx will not coincide with a per-physical card
+-	 * index anymore so we keep track of a name index.
+-	 */
+-	if (!subname) {
+-		md->name_idx = find_first_zero_bit(name_use, max_devices);
+-		__set_bit(md->name_idx, name_use);
+-	} else
+-		md->name_idx = ((struct mmc_blk_data *)
+-				dev_to_disk(parent)->private_data)->name_idx;
+-
+ 	md->area_type = area_type;
+ 
+ 	/*
+@@ -2256,7 +2249,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
+ 	 */
+ 
+ 	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
+-		 "mmcblk%u%s", md->name_idx, subname ? subname : "");
++		 "mmcblk%u%s", card->host->index, subname ? subname : "");
+ 
+ 	if (mmc_card_mmc(card))
+ 		blk_queue_logical_block_size(md->queue.queue,
+@@ -2410,7 +2403,6 @@ static void mmc_blk_remove_parts(struct mmc_card *card,
+ 	struct list_head *pos, *q;
+ 	struct mmc_blk_data *part_md;
+ 
+-	__clear_bit(md->name_idx, name_use);
+ 	list_for_each_safe(pos, q, &md->part) {
+ 		part_md = list_entry(pos, struct mmc_blk_data, part);
+ 		list_del(pos);
+@@ -2502,11 +2494,12 @@ static const struct mmc_fixup blk_fixups[] =
+ 		  MMC_QUIRK_BLK_NO_CMD23),
+ 
+ 	/*
+-	 * Some Micron MMC cards needs longer data read timeout than
+-	 * indicated in CSD.
++	 * Some MMC cards need longer data read timeout than indicated in CSD.
+ 	 */
+ 	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
+ 		  MMC_QUIRK_LONG_READ_TIME),
++	MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
++		  MMC_QUIRK_LONG_READ_TIME),
+ 
+ 	/*
+ 	 * On these Samsung MoviNAND parts, performing secure erase or
+diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
+index f95d41ffc766..228a81bcea49 100644
+--- a/drivers/mmc/core/core.c
++++ b/drivers/mmc/core/core.c
+@@ -868,11 +868,11 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
+ 	/*
+ 	 * Some cards require longer data read timeout than indicated in CSD.
+ 	 * Address this by setting the read timeout to a "reasonably high"
+-	 * value. For the cards tested, 300ms has proven enough. If necessary,
++	 * value. For the cards tested, 600ms has proven enough. If necessary,
+ 	 * this value can be increased if other problematic cards require this.
+ 	 */
+ 	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
+-		data->timeout_ns = 300000000;
++		data->timeout_ns = 600000000;
+ 		data->timeout_clks = 0;
+ 	}
+ 
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index bf49e44571f2..07a419fedd43 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -333,6 +333,9 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
+ 	}
+ }
+ 
++/* Minimum partition switch timeout in milliseconds */
++#define MMC_MIN_PART_SWITCH_TIME	300
++
+ /*
+  * Decode extended CSD.
+  */
+@@ -397,6 +400,10 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
+ 
+ 		/* EXT_CSD value is in units of 10ms, but we store in ms */
+ 		card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
++		/* Some eMMC set the value too low so set a minimum */
++		if (card->ext_csd.part_time &&
++		    card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME)
++			card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME;
+ 
+ 		/* Sleep / awake timeout in 100ns units */
+ 		if (sa_shift > 0 && sa_shift <= 0x17)
+diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
+index 1526b8a10b09..3b944fc70eec 100644
+--- a/drivers/mmc/host/Kconfig
++++ b/drivers/mmc/host/Kconfig
+@@ -97,6 +97,7 @@ config MMC_RICOH_MMC
+ config MMC_SDHCI_ACPI
+ 	tristate "SDHCI support for ACPI enumerated SDHCI controllers"
+ 	depends on MMC_SDHCI && ACPI
++	select IOSF_MBI if X86
+ 	help
+ 	  This selects support for ACPI enumerated SDHCI controllers,
+ 	  identified by ACPI Compatibility ID PNP0D40 or specific
+diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
+index 851ccd9ac868..25c179592125 100644
+--- a/drivers/mmc/host/atmel-mci.c
++++ b/drivers/mmc/host/atmel-mci.c
+@@ -2443,7 +2443,7 @@ static int atmci_configure_dma(struct atmel_mci *host)
+ 		struct mci_platform_data *pdata = host->pdev->dev.platform_data;
+ 		dma_cap_mask_t mask;
+ 
+-		if (!pdata->dma_filter)
++		if (!pdata || !pdata->dma_filter)
+ 			return -ENODEV;
+ 
+ 		dma_cap_zero(mask);
+diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
+index 3446097a43c0..e77d79c8cd9f 100644
+--- a/drivers/mmc/host/mmc_spi.c
++++ b/drivers/mmc/host/mmc_spi.c
+@@ -1442,6 +1442,12 @@ static int mmc_spi_probe(struct spi_device *spi)
+ 					     host->pdata->cd_debounce);
+ 		if (status != 0)
+ 			goto fail_add_host;
++
++		/* The platform has a CD GPIO signal that may support
++		 * interrupts, so let mmc_gpiod_request_cd_irq() decide
++		 * if polling is needed or not.
++		 */
++		mmc->caps &= ~MMC_CAP_NEEDS_POLL;
+ 		mmc_gpiod_request_cd_irq(mmc);
+ 	}
+ 
+diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
+index a5cda926d38e..e517be7f03bf 100644
+--- a/drivers/mmc/host/sdhci-acpi.c
++++ b/drivers/mmc/host/sdhci-acpi.c
+@@ -41,6 +41,11 @@
+ #include <linux/mmc/pm.h>
+ #include <linux/mmc/slot-gpio.h>
+ 
++#ifdef CONFIG_X86
++#include <asm/cpu_device_id.h>
++#include <asm/iosf_mbi.h>
++#endif
++
+ #include "sdhci.h"
+ 
+ enum {
+@@ -146,6 +151,75 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
+ 	.ops = &sdhci_acpi_ops_int,
+ };
+ 
++#ifdef CONFIG_X86
++
++static bool sdhci_acpi_byt(void)
++{
++	static const struct x86_cpu_id byt[] = {
++		{ X86_VENDOR_INTEL, 6, 0x37 },
++		{}
++	};
++
++	return x86_match_cpu(byt);
++}
++
++#define BYT_IOSF_SCCEP			0x63
++#define BYT_IOSF_OCP_NETCTRL0		0x1078
++#define BYT_IOSF_OCP_TIMEOUT_BASE	GENMASK(10, 8)
++
++static void sdhci_acpi_byt_setting(struct device *dev)
++{
++	u32 val = 0;
++
++	if (!sdhci_acpi_byt())
++		return;
++
++	if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
++			  &val)) {
++		dev_err(dev, "%s read error\n", __func__);
++		return;
++	}
++
++	if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
++		return;
++
++	val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;
++
++	if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0,
++			   val)) {
++		dev_err(dev, "%s write error\n", __func__);
++		return;
++	}
++
++	dev_dbg(dev, "%s completed\n", __func__);
++}
++
++static bool sdhci_acpi_byt_defer(struct device *dev)
++{
++	if (!sdhci_acpi_byt())
++		return false;
++
++	if (!iosf_mbi_available())
++		return true;
++
++	sdhci_acpi_byt_setting(dev);
++
++	return false;
++}
++
++#else
++
++static inline void sdhci_acpi_byt_setting(struct device *dev)
++{
++}
++
++static inline bool sdhci_acpi_byt_defer(struct device *dev)
++{
++	return false;
++}
++
++#endif
++
+ static int bxt_get_cd(struct mmc_host *mmc)
+ {
+ 	int gpio_cd = mmc_gpio_get_cd(mmc);
+@@ -233,7 +307,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
+ 	.chip    = &sdhci_acpi_chip_int,
+ 	.caps    = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
+ 		   MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
+-		   MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
++		   MMC_CAP_WAIT_WHILE_BUSY,
+ 	.caps2   = MMC_CAP2_HC_ERASE_SZ,
+ 	.flags   = SDHCI_ACPI_RUNTIME_PM,
+ 	.quirks  = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+@@ -248,7 +322,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
+ 		   SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ 	.quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
+ 	.caps    = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD |
+-		   MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
++		   MMC_CAP_WAIT_WHILE_BUSY,
+ 	.flags   = SDHCI_ACPI_RUNTIME_PM,
+ 	.pm_caps = MMC_PM_KEEP_POWER,
+ 	.probe_slot	= sdhci_acpi_sdio_probe_slot,
+@@ -260,7 +334,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
+ 	.quirks  = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ 	.quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
+ 		   SDHCI_QUIRK2_STOP_WITH_TC,
+-	.caps    = MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
++	.caps    = MMC_CAP_WAIT_WHILE_BUSY,
+ 	.probe_slot	= sdhci_acpi_sd_probe_slot,
+ };
+ 
+@@ -322,7 +396,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+ 	acpi_handle handle = ACPI_HANDLE(dev);
+-	struct acpi_device *device;
++	struct acpi_device *device, *child;
+ 	struct sdhci_acpi_host *c;
+ 	struct sdhci_host *host;
+ 	struct resource *iomem;
+@@ -334,9 +408,17 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
+ 	if (acpi_bus_get_device(handle, &device))
+ 		return -ENODEV;
+ 
++	/* Power on the SDHCI controller and its children */
++	acpi_device_fix_up_power(device);
++	list_for_each_entry(child, &device->children, node)
++		acpi_device_fix_up_power(child);
++
+ 	if (acpi_bus_get_status(device) || !device->status.present)
+ 		return -ENODEV;
+ 
++	if (sdhci_acpi_byt_defer(dev))
++		return -EPROBE_DEFER;
++
+ 	hid = acpi_device_hid(device);
+ 	uid = device->pnp.unique_id;
+ 
+@@ -460,6 +542,8 @@ static int sdhci_acpi_resume(struct device *dev)
+ {
+ 	struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+ 
++	sdhci_acpi_byt_setting(&c->pdev->dev);
++
+ 	return sdhci_resume_host(c->host);
+ }
+ 
+@@ -483,6 +567,8 @@ static int sdhci_acpi_runtime_resume(struct device *dev)
+ {
+ 	struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+ 
++	sdhci_acpi_byt_setting(&c->pdev->dev);
++
+ 	return sdhci_runtime_resume_host(c->host);
+ }
+ 
+diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
+index df3b8eced8c4..807f930a7c14 100644
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -361,7 +361,6 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
+ {
+ 	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
+ 				 MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
+-				 MMC_CAP_BUS_WIDTH_TEST |
+ 				 MMC_CAP_WAIT_WHILE_BUSY;
+ 	slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ;
+ 	slot->hw_reset = sdhci_pci_int_hw_reset;
+@@ -377,19 +376,18 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
+ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
+ {
+ 	slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
+-				 MMC_CAP_BUS_WIDTH_TEST |
+ 				 MMC_CAP_WAIT_WHILE_BUSY;
+ 	return 0;
+ }
+ 
+ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
+ {
+-	slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST |
+-				 MMC_CAP_WAIT_WHILE_BUSY;
++	slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
+ 	slot->cd_con_id = NULL;
+ 	slot->cd_idx = 0;
+ 	slot->cd_override_level = true;
+ 	if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
++	    slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD ||
+ 	    slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD)
+ 		slot->host->mmc_host_ops.get_cd = bxt_get_cd;
+ 
+@@ -1173,6 +1171,30 @@ static const struct pci_device_id pci_ids[] = {
+ 
+ 	{
+ 		.vendor		= PCI_VENDOR_ID_INTEL,
++		.device		= PCI_DEVICE_ID_INTEL_BXTM_EMMC,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_emmc,
++	},
++
++	{
++		.vendor		= PCI_VENDOR_ID_INTEL,
++		.device		= PCI_DEVICE_ID_INTEL_BXTM_SDIO,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_sdio,
++	},
++
++	{
++		.vendor		= PCI_VENDOR_ID_INTEL,
++		.device		= PCI_DEVICE_ID_INTEL_BXTM_SD,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_sd,
++	},
++
++	{
++		.vendor		= PCI_VENDOR_ID_INTEL,
+ 		.device		= PCI_DEVICE_ID_INTEL_APL_EMMC,
+ 		.subvendor	= PCI_ANY_ID,
+ 		.subdevice	= PCI_ANY_ID,
+diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
+index d1a0b4db60db..89e7151684a1 100644
+--- a/drivers/mmc/host/sdhci-pci.h
++++ b/drivers/mmc/host/sdhci-pci.h
+@@ -28,6 +28,9 @@
+ #define PCI_DEVICE_ID_INTEL_BXT_SD	0x0aca
+ #define PCI_DEVICE_ID_INTEL_BXT_EMMC	0x0acc
+ #define PCI_DEVICE_ID_INTEL_BXT_SDIO	0x0ad0
++#define PCI_DEVICE_ID_INTEL_BXTM_SD	0x1aca
++#define PCI_DEVICE_ID_INTEL_BXTM_EMMC	0x1acc
++#define PCI_DEVICE_ID_INTEL_BXTM_SDIO	0x1ad0
+ #define PCI_DEVICE_ID_INTEL_APL_SD	0x5aca
+ #define PCI_DEVICE_ID_INTEL_APL_EMMC	0x5acc
+ #define PCI_DEVICE_ID_INTEL_APL_SDIO	0x5ad0
+diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
+index f5edf9d3a18a..452bf500c82e 100644
+--- a/drivers/mmc/host/sdhci-pxav3.c
++++ b/drivers/mmc/host/sdhci-pxav3.c
+@@ -137,6 +137,10 @@ static int armada_38x_quirks(struct platform_device *pdev,
+ 
+ 	host->quirks &= ~SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
+ 	host->quirks |= SDHCI_QUIRK_MISSING_CAPS;
++
++	host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
++	host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
++
+ 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ 					   "conf-sdio3");
+ 	if (res) {
+@@ -150,7 +154,6 @@ static int armada_38x_quirks(struct platform_device *pdev,
+ 		 * Configuration register, if the adjustment is not done,
+ 		 * remove them from the capabilities.
+ 		 */
+-		host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
+ 		host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50);
+ 
+ 		dev_warn(&pdev->dev, "conf-sdio3 register not found: disabling SDR50 and DDR50 modes.\nConsider updating your dtb\n");
+@@ -161,7 +164,6 @@ static int armada_38x_quirks(struct platform_device *pdev,
+ 	 * controller has different capabilities than the ones shown
+ 	 * in its registers
+ 	 */
+-	host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
+ 	if (of_property_read_bool(np, "no-1-8-v")) {
+ 		host->caps &= ~SDHCI_CAN_VDD_180;
+ 		host->mmc->caps &= ~MMC_CAP_1_8V_DDR;
+@@ -307,8 +309,30 @@ static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
+ 		__func__, uhs, ctrl_2);
+ }
+ 
++static void pxav3_set_power(struct sdhci_host *host, unsigned char mode,
++			    unsigned short vdd)
++{
++	struct mmc_host *mmc = host->mmc;
++	u8 pwr = host->pwr;
++
++	sdhci_set_power(host, mode, vdd);
++
++	if (host->pwr == pwr)
++		return;
++
++	if (host->pwr == 0)
++		vdd = 0;
++
++	if (!IS_ERR(mmc->supply.vmmc)) {
++		spin_unlock_irq(&host->lock);
++		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
++		spin_lock_irq(&host->lock);
++	}
++}
++
+ static const struct sdhci_ops pxav3_sdhci_ops = {
+ 	.set_clock = sdhci_set_clock,
++	.set_power = pxav3_set_power,
+ 	.platform_send_init_74_clocks = pxav3_gen_init_74_clocks,
+ 	.get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ 	.set_bus_width = sdhci_set_bus_width,
+diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
+index 83c4bf7bc16c..0004721cd213 100644
+--- a/drivers/mmc/host/sdhci-tegra.c
++++ b/drivers/mmc/host/sdhci-tegra.c
+@@ -147,10 +147,16 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
+ 	/* Advertise UHS modes as supported by host */
+ 	if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
+ 		misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
++	else
++		misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR50;
+ 	if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
+ 		misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
++	else
++		misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_DDR50;
+ 	if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
+ 		misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
++	else
++		misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR104;
+ 	sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
+ 
+ 	clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
+@@ -188,7 +194,7 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
+ 	unsigned long host_clk;
+ 
+ 	if (!clock)
+-		return;
++		return sdhci_set_clock(host, clock);
+ 
+ 	host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
+ 	clk_set_rate(pltfm_host->clk, host_clk);
+@@ -335,6 +341,10 @@ static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
+ 
+ static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
+ 	.pdata = &sdhci_tegra114_pdata,
++};
++
++static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
++	.pdata = &sdhci_tegra114_pdata,
+ 	.nvquirks = NVQUIRK_ENABLE_SDR50 |
+ 		    NVQUIRK_ENABLE_DDR50 |
+ 		    NVQUIRK_ENABLE_SDR104,
+@@ -357,7 +367,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
+ 
+ static const struct of_device_id sdhci_tegra_dt_match[] = {
+ 	{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
+-	{ .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra114 },
++	{ .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
+ 	{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
+ 	{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
+ 	{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index add9fdfd1d8f..6d485b5fa5ca 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -465,8 +465,6 @@ static void sdhci_adma_mark_end(void *desc)
+ static int sdhci_adma_table_pre(struct sdhci_host *host,
+ 	struct mmc_data *data)
+ {
+-	int direction;
+-
+ 	void *desc;
+ 	void *align;
+ 	dma_addr_t addr;
+@@ -483,20 +481,9 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
+ 	 * We currently guess that it is LE.
+ 	 */
+ 
+-	if (data->flags & MMC_DATA_READ)
+-		direction = DMA_FROM_DEVICE;
+-	else
+-		direction = DMA_TO_DEVICE;
+-
+-	host->align_addr = dma_map_single(mmc_dev(host->mmc),
+-		host->align_buffer, host->align_buffer_sz, direction);
+-	if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
+-		goto fail;
+-	BUG_ON(host->align_addr & SDHCI_ADMA2_MASK);
+-
+ 	host->sg_count = sdhci_pre_dma_transfer(host, data);
+ 	if (host->sg_count < 0)
+-		goto unmap_align;
++		return -EINVAL;
+ 
+ 	desc = host->adma_table;
+ 	align = host->align_buffer;
+@@ -570,22 +557,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
+ 		/* nop, end, valid */
+ 		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
+ 	}
+-
+-	/*
+-	 * Resync align buffer as we might have changed it.
+-	 */
+-	if (data->flags & MMC_DATA_WRITE) {
+-		dma_sync_single_for_device(mmc_dev(host->mmc),
+-			host->align_addr, host->align_buffer_sz, direction);
+-	}
+-
+ 	return 0;
+-
+-unmap_align:
+-	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
+-		host->align_buffer_sz, direction);
+-fail:
+-	return -EINVAL;
+ }
+ 
+ static void sdhci_adma_table_post(struct sdhci_host *host,
+@@ -605,9 +577,6 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
+ 	else
+ 		direction = DMA_TO_DEVICE;
+ 
+-	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
+-		host->align_buffer_sz, direction);
+-
+ 	/* Do a quick scan of the SG list for any unaligned mappings */
+ 	has_unaligned = false;
+ 	for_each_sg(data->sg, sg, host->sg_count, i)
+@@ -666,9 +635,20 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+ 	if (!data)
+ 		target_timeout = cmd->busy_timeout * 1000;
+ 	else {
+-		target_timeout = data->timeout_ns / 1000;
+-		if (host->clock)
+-			target_timeout += data->timeout_clks / host->clock;
++		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
++		if (host->clock && data->timeout_clks) {
++			unsigned long long val;
++
++			/*
++			 * data->timeout_clks is in units of clock cycles.
++			 * host->clock is in Hz.  target_timeout is in us.
++			 * Hence, us = 1000000 * cycles / Hz.  Round up.
++			 */
++			val = 1000000 * data->timeout_clks;
++			if (do_div(val, host->clock))
++				target_timeout++;
++			target_timeout += val;
++		}
+ 	}
+ 
+ 	/*
+@@ -1003,6 +983,9 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
+ 
+ 	WARN_ON(host->cmd);
+ 
++	/* Initially, a command has no error */
++	cmd->error = 0;
++
+ 	/* Wait max 10 ms */
+ 	timeout = 10;
+ 
+@@ -1097,8 +1080,6 @@ static void sdhci_finish_command(struct sdhci_host *host)
+ 		}
+ 	}
+ 
+-	host->cmd->error = 0;
+-
+ 	/* Finished CMD23, now send actual command. */
+ 	if (host->cmd == host->mrq->sbc) {
+ 		host->cmd = NULL;
+@@ -1269,10 +1250,24 @@ clock_set:
+ }
+ EXPORT_SYMBOL_GPL(sdhci_set_clock);
+ 
+-static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
+-			    unsigned short vdd)
++static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
++				unsigned short vdd)
+ {
+ 	struct mmc_host *mmc = host->mmc;
++
++	spin_unlock_irq(&host->lock);
++	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
++	spin_lock_irq(&host->lock);
++
++	if (mode != MMC_POWER_OFF)
++		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
++	else
++		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
++}
++
++void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
++		     unsigned short vdd)
++{
+ 	u8 pwr = 0;
+ 
+ 	if (mode != MMC_POWER_OFF) {
+@@ -1304,7 +1299,6 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
+ 		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+ 		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
+ 			sdhci_runtime_pm_bus_off(host);
+-		vdd = 0;
+ 	} else {
+ 		/*
+ 		 * Spec says that we should clear the power reg before setting
+@@ -1335,12 +1329,20 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
+ 		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
+ 			mdelay(10);
+ 	}
++}
++EXPORT_SYMBOL_GPL(sdhci_set_power);
+ 
+-	if (!IS_ERR(mmc->supply.vmmc)) {
+-		spin_unlock_irq(&host->lock);
+-		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+-		spin_lock_irq(&host->lock);
+-	}
++static void __sdhci_set_power(struct sdhci_host *host, unsigned char mode,
++			      unsigned short vdd)
++{
++	struct mmc_host *mmc = host->mmc;
++
++	if (host->ops->set_power)
++		host->ops->set_power(host, mode, vdd);
++	else if (!IS_ERR(mmc->supply.vmmc))
++		sdhci_set_power_reg(host, mode, vdd);
++	else
++		sdhci_set_power(host, mode, vdd);
+ }
+ 
+ /*****************************************************************************\
+@@ -1490,7 +1492,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
+ 		}
+ 	}
+ 
+-	sdhci_set_power(host, ios->power_mode, ios->vdd);
++	__sdhci_set_power(host, ios->power_mode, ios->vdd);
+ 
+ 	if (host->ops->platform_send_init_74_clocks)
+ 		host->ops->platform_send_init_74_clocks(host, ios->power_mode);
+@@ -2114,14 +2116,13 @@ static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
+ 	struct sdhci_host *host = mmc_priv(mmc);
+ 	struct mmc_data *data = mrq->data;
+ 
+-	if (host->flags & SDHCI_REQ_USE_DMA) {
+-		if (data->host_cookie == COOKIE_GIVEN ||
+-				data->host_cookie == COOKIE_MAPPED)
+-			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+-					 data->flags & MMC_DATA_WRITE ?
+-					 DMA_TO_DEVICE : DMA_FROM_DEVICE);
+-		data->host_cookie = COOKIE_UNMAPPED;
+-	}
++	if (data->host_cookie == COOKIE_GIVEN ||
++	    data->host_cookie == COOKIE_MAPPED)
++		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
++			     data->flags & MMC_DATA_WRITE ?
++			       DMA_TO_DEVICE : DMA_FROM_DEVICE);
++
++	data->host_cookie = COOKIE_UNMAPPED;
+ }
+ 
+ static int sdhci_pre_dma_transfer(struct sdhci_host *host,
+@@ -2238,6 +2239,22 @@ static void sdhci_tasklet_finish(unsigned long param)
+ 	mrq = host->mrq;
+ 
+ 	/*
++	 * Always unmap the data buffers if they were mapped by
++	 * sdhci_prepare_data() whenever we finish with a request.
++	 * This avoids leaking DMA mappings on error.
++	 */
++	if (host->flags & SDHCI_REQ_USE_DMA) {
++		struct mmc_data *data = mrq->data;
++
++		if (data && data->host_cookie == COOKIE_MAPPED) {
++			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
++				     (data->flags & MMC_DATA_READ) ?
++				     DMA_FROM_DEVICE : DMA_TO_DEVICE);
++			data->host_cookie = COOKIE_UNMAPPED;
++		}
++	}
++
++	/*
+ 	 * The controller needs a reset of internal state machines
+ 	 * upon error conditions.
+ 	 */
+@@ -2322,13 +2339,30 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
+ 		return;
+ 	}
+ 
+-	if (intmask & SDHCI_INT_TIMEOUT)
+-		host->cmd->error = -ETIMEDOUT;
+-	else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
+-			SDHCI_INT_INDEX))
+-		host->cmd->error = -EILSEQ;
++	if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
++		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
++		if (intmask & SDHCI_INT_TIMEOUT)
++			host->cmd->error = -ETIMEDOUT;
++		else
++			host->cmd->error = -EILSEQ;
++
++		/*
++		 * If this command initiates a data phase and a response
++		 * CRC error is signalled, the card can start transferring
++		 * data - the card may have received the command without
++		 * error.  We must not terminate the mmc_request early.
++		 *
++		 * If the card did not receive the command or returned an
++		 * error which prevented it sending data, the data phase
++		 * will time out.
++		 */
++		if (host->cmd->data &&
++		    (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
++		     SDHCI_INT_CRC) {
++			host->cmd = NULL;
++			return;
++		}
+ 
+-	if (host->cmd->error) {
+ 		tasklet_schedule(&host->finish_tasklet);
+ 		return;
+ 	}
+@@ -2967,14 +3001,21 @@ int sdhci_add_host(struct sdhci_host *host)
+ 						      &host->adma_addr,
+ 						      GFP_KERNEL);
+ 		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
+-		host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
++		host->align_buffer = dma_alloc_coherent(mmc_dev(mmc),
++							host->align_buffer_sz,
++							&host->align_addr,
++							GFP_KERNEL);
+ 		if (!host->adma_table || !host->align_buffer) {
+ 			if (host->adma_table)
+ 				dma_free_coherent(mmc_dev(mmc),
+ 						  host->adma_table_sz,
+ 						  host->adma_table,
+ 						  host->adma_addr);
+-			kfree(host->align_buffer);
++			if (host->align_buffer)
++				dma_free_coherent(mmc_dev(mmc),
++						  host->align_buffer_sz,
++						  host->align_buffer,
++						  host->align_addr);
+ 			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
+ 				mmc_hostname(mmc));
+ 			host->flags &= ~SDHCI_USE_ADMA;
+@@ -2986,10 +3027,14 @@ int sdhci_add_host(struct sdhci_host *host)
+ 			host->flags &= ~SDHCI_USE_ADMA;
+ 			dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
+ 					  host->adma_table, host->adma_addr);
+-			kfree(host->align_buffer);
++			dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz,
++					  host->align_buffer, host->align_addr);
+ 			host->adma_table = NULL;
+ 			host->align_buffer = NULL;
+ 		}
++
++		/* dma_alloc_coherent returns page aligned and sized buffers */
++		BUG_ON(host->align_addr & SDHCI_ADMA2_MASK);
+ 	}
+ 
+ 	/*
+@@ -3072,14 +3117,14 @@ int sdhci_add_host(struct sdhci_host *host)
+ 		if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
+ 			host->timeout_clk *= 1000;
+ 
++		if (override_timeout_clk)
++			host->timeout_clk = override_timeout_clk;
++
+ 		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
+ 			host->ops->get_max_timeout_count(host) : 1 << 27;
+ 		mmc->max_busy_timeout /= host->timeout_clk;
+ 	}
+ 
+-	if (override_timeout_clk)
+-		host->timeout_clk = override_timeout_clk;
+-
+ 	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
+ 	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
+ 
+@@ -3452,7 +3497,9 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
+ 	if (host->adma_table)
+ 		dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
+ 				  host->adma_table, host->adma_addr);
+-	kfree(host->align_buffer);
++	if (host->align_buffer)
++		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz,
++				  host->align_buffer, host->align_addr);
+ 
+ 	host->adma_table = NULL;
+ 	host->align_buffer = NULL;
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 0115e9907bf8..033d72b5bbd5 100644
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -529,6 +529,8 @@ struct sdhci_ops {
+ #endif
+ 
+ 	void	(*set_clock)(struct sdhci_host *host, unsigned int clock);
++	void	(*set_power)(struct sdhci_host *host, unsigned char mode,
++			     unsigned short vdd);
+ 
+ 	int		(*enable_dma)(struct sdhci_host *host);
+ 	unsigned int	(*get_max_clock)(struct sdhci_host *host);
+@@ -660,6 +662,8 @@ static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host)
+ }
+ 
+ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
++void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
++		     unsigned short vdd);
+ void sdhci_set_bus_width(struct sdhci_host *host, int width);
+ void sdhci_reset(struct sdhci_host *host, u8 mask);
+ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
+diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
+index 844fc07d22cd..f7009c1cb90c 100644
+--- a/drivers/mtd/nand/brcmnand/brcmnand.c
++++ b/drivers/mtd/nand/brcmnand/brcmnand.c
+@@ -311,6 +311,36 @@ static const u16 brcmnand_regs_v60[] = {
+ 	[BRCMNAND_FC_BASE]		= 0x400,
+ };
+ 
++/* BRCMNAND v7.1 */
++static const u16 brcmnand_regs_v71[] = {
++	[BRCMNAND_CMD_START]		=  0x04,
++	[BRCMNAND_CMD_EXT_ADDRESS]	=  0x08,
++	[BRCMNAND_CMD_ADDRESS]		=  0x0c,
++	[BRCMNAND_INTFC_STATUS]		=  0x14,
++	[BRCMNAND_CS_SELECT]		=  0x18,
++	[BRCMNAND_CS_XOR]		=  0x1c,
++	[BRCMNAND_LL_OP]		=  0x20,
++	[BRCMNAND_CS0_BASE]		=  0x50,
++	[BRCMNAND_CS1_BASE]		=     0,
++	[BRCMNAND_CORR_THRESHOLD]	=  0xdc,
++	[BRCMNAND_CORR_THRESHOLD_EXT]	=  0xe0,
++	[BRCMNAND_UNCORR_COUNT]		=  0xfc,
++	[BRCMNAND_CORR_COUNT]		= 0x100,
++	[BRCMNAND_CORR_EXT_ADDR]	= 0x10c,
++	[BRCMNAND_CORR_ADDR]		= 0x110,
++	[BRCMNAND_UNCORR_EXT_ADDR]	= 0x114,
++	[BRCMNAND_UNCORR_ADDR]		= 0x118,
++	[BRCMNAND_SEMAPHORE]		= 0x150,
++	[BRCMNAND_ID]			= 0x194,
++	[BRCMNAND_ID_EXT]		= 0x198,
++	[BRCMNAND_LL_RDATA]		= 0x19c,
++	[BRCMNAND_OOB_READ_BASE]	= 0x200,
++	[BRCMNAND_OOB_READ_10_BASE]	=     0,
++	[BRCMNAND_OOB_WRITE_BASE]	= 0x280,
++	[BRCMNAND_OOB_WRITE_10_BASE]	=     0,
++	[BRCMNAND_FC_BASE]		= 0x400,
++};
++
+ enum brcmnand_cs_reg {
+ 	BRCMNAND_CS_CFG_EXT = 0,
+ 	BRCMNAND_CS_CFG,
+@@ -406,7 +436,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
+ 	}
+ 
+ 	/* Register offsets */
+-	if (ctrl->nand_version >= 0x0600)
++	if (ctrl->nand_version >= 0x0701)
++		ctrl->reg_offsets = brcmnand_regs_v71;
++	else if (ctrl->nand_version >= 0x0600)
+ 		ctrl->reg_offsets = brcmnand_regs_v60;
+ 	else if (ctrl->nand_version >= 0x0500)
+ 		ctrl->reg_offsets = brcmnand_regs_v50;
+diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
+index f2c8ff398d6c..171d146645ba 100644
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -4021,7 +4021,6 @@ static int nand_dt_init(struct nand_chip *chip)
+  * This is the first phase of the normal nand_scan() function. It reads the
+  * flash ID and sets up MTD fields accordingly.
+  *
+- * The mtd->owner field must be set to the module of the caller.
+  */
+ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
+ 		    struct nand_flash_dev *table)
+@@ -4443,19 +4442,12 @@ EXPORT_SYMBOL(nand_scan_tail);
+  *
+  * This fills out all the uninitialized function pointers with the defaults.
+  * The flash ID is read and the mtd/chip structures are filled with the
+- * appropriate values. The mtd->owner field must be set to the module of the
+- * caller.
++ * appropriate values.
+  */
+ int nand_scan(struct mtd_info *mtd, int maxchips)
+ {
+ 	int ret;
+ 
+-	/* Many callers got this wrong, so check for it for a while... */
+-	if (!mtd->owner && caller_is_module()) {
+-		pr_crit("%s called with NULL mtd->owner!\n", __func__);
+-		BUG();
+-	}
+-
+ 	ret = nand_scan_ident(mtd, maxchips, NULL);
+ 	if (!ret)
+ 		ret = nand_scan_tail(mtd);
+diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
+index 86fc245dc71a..fd78644469fa 100644
+--- a/drivers/mtd/nand/pxa3xx_nand.c
++++ b/drivers/mtd/nand/pxa3xx_nand.c
+@@ -1738,7 +1738,7 @@ static int alloc_nand_resource(struct platform_device *pdev)
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	if (use_dma) {
++	if (!np && use_dma) {
+ 		r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ 		if (r == NULL) {
+ 			dev_err(&pdev->dev,
+diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
+index 43b3392ffee7..652d01832873 100644
+--- a/drivers/mtd/onenand/onenand_base.c
++++ b/drivers/mtd/onenand/onenand_base.c
+@@ -2599,6 +2599,7 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
+  */
+ static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
+ {
++	struct onenand_chip *this = mtd->priv;
+ 	int ret;
+ 
+ 	ret = onenand_block_isbad(mtd, ofs);
+@@ -2610,7 +2611,7 @@ static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
+ 	}
+ 
+ 	onenand_get_device(mtd, FL_WRITING);
+-	ret = mtd_block_markbad(mtd, ofs);
++	ret = this->block_markbad(mtd, ofs);
+ 	onenand_release_device(mtd);
+ 	return ret;
+ }
+diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
+index ed0c19c558b5..3028c06547c1 100644
+--- a/drivers/mtd/spi-nor/spi-nor.c
++++ b/drivers/mtd/spi-nor/spi-nor.c
+@@ -1100,45 +1100,6 @@ static int spansion_quad_enable(struct spi_nor *nor)
+ 	return 0;
+ }
+ 
+-static int micron_quad_enable(struct spi_nor *nor)
+-{
+-	int ret;
+-	u8 val;
+-
+-	ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
+-	if (ret < 0) {
+-		dev_err(nor->dev, "error %d reading EVCR\n", ret);
+-		return ret;
+-	}
+-
+-	write_enable(nor);
+-
+-	/* set EVCR, enable quad I/O */
+-	nor->cmd_buf[0] = val & ~EVCR_QUAD_EN_MICRON;
+-	ret = nor->write_reg(nor, SPINOR_OP_WD_EVCR, nor->cmd_buf, 1);
+-	if (ret < 0) {
+-		dev_err(nor->dev, "error while writing EVCR register\n");
+-		return ret;
+-	}
+-
+-	ret = spi_nor_wait_till_ready(nor);
+-	if (ret)
+-		return ret;
+-
+-	/* read EVCR and check it */
+-	ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
+-	if (ret < 0) {
+-		dev_err(nor->dev, "error %d reading EVCR\n", ret);
+-		return ret;
+-	}
+-	if (val & EVCR_QUAD_EN_MICRON) {
+-		dev_err(nor->dev, "Micron EVCR Quad bit not clear\n");
+-		return -EINVAL;
+-	}
+-
+-	return 0;
+-}
+-
+ static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
+ {
+ 	int status;
+@@ -1152,12 +1113,7 @@ static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
+ 		}
+ 		return status;
+ 	case SNOR_MFR_MICRON:
+-		status = micron_quad_enable(nor);
+-		if (status) {
+-			dev_err(nor->dev, "Micron quad-read not enabled\n");
+-			return -EINVAL;
+-		}
+-		return status;
++		return 0;
+ 	default:
+ 		status = spansion_quad_enable(nor);
+ 		if (status) {
+diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
+index 5b9834cf2820..96fddb016bf1 100644
+--- a/drivers/mtd/ubi/eba.c
++++ b/drivers/mtd/ubi/eba.c
+@@ -426,8 +426,25 @@ retry:
+ 						 pnum, vol_id, lnum);
+ 					err = -EBADMSG;
+ 				} else {
+-					err = -EINVAL;
+-					ubi_ro_mode(ubi);
++					/*
++					 * Ending up here in the non-Fastmap case
++					 * is a clear bug as the VID header had to
++					 * be present at scan time to have it referenced.
++					 * With fastmap the story is more complicated.
++					 * Fastmap has the mapping info without the need
++					 * of a full scan. So the LEB could have been
++					 * unmapped, Fastmap cannot know this and keeps
++					 * the LEB referenced.
++					 * This is valid and works as the layer above UBI
++					 * has to do bookkeeping about used/referenced
++					 * LEBs in any case.
++					 */
++					if (ubi->fast_attach) {
++						err = -EBADMSG;
++					} else {
++						err = -EINVAL;
++						ubi_ro_mode(ubi);
++					}
+ 				}
+ 			}
+ 			goto out_free;
+diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
+index 263b439e21a8..990898b9dc72 100644
+--- a/drivers/mtd/ubi/fastmap.c
++++ b/drivers/mtd/ubi/fastmap.c
+@@ -1058,6 +1058,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ 	ubi_msg(ubi, "fastmap WL pool size: %d",
+ 		ubi->fm_wl_pool.max_size);
+ 	ubi->fm_disabled = 0;
++	ubi->fast_attach = 1;
+ 
+ 	ubi_free_vid_hdr(ubi, vh);
+ 	kfree(ech);
+diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
+index 2974b67f6c6c..de1ea2e4c37d 100644
+--- a/drivers/mtd/ubi/ubi.h
++++ b/drivers/mtd/ubi/ubi.h
+@@ -462,6 +462,7 @@ struct ubi_debug_info {
+  * @fm_eba_sem: allows ubi_update_fastmap() to block EBA table changes
+  * @fm_work: fastmap work queue
+  * @fm_work_scheduled: non-zero if fastmap work was scheduled
++ * @fast_attach: non-zero if UBI was attached by fastmap
+  *
+  * @used: RB-tree of used physical eraseblocks
+  * @erroneous: RB-tree of erroneous used physical eraseblocks
+@@ -570,6 +571,7 @@ struct ubi_device {
+ 	size_t fm_size;
+ 	struct work_struct fm_work;
+ 	int fm_work_scheduled;
++	int fast_attach;
+ 
+ 	/* Wear-leveling sub-system's stuff */
+ 	struct rb_root used;
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index b7f1a9919033..5ec8195b02c0 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3308,6 +3308,30 @@ static int bond_close(struct net_device *bond_dev)
+ 	return 0;
+ }
+ 
++/* fold stats, assuming all rtnl_link_stats64 fields are u64, but
++ * that some drivers can provide 32bit values only.
++ */
++static void bond_fold_stats(struct rtnl_link_stats64 *_res,
++			    const struct rtnl_link_stats64 *_new,
++			    const struct rtnl_link_stats64 *_old)
++{
++	const u64 *new = (const u64 *)_new;
++	const u64 *old = (const u64 *)_old;
++	u64 *res = (u64 *)_res;
++	int i;
++
++	for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
++		u64 nv = new[i];
++		u64 ov = old[i];
++
++		/* detects if this particular field is 32bit only */
++		if (((nv | ov) >> 32) == 0)
++			res[i] += (u32)nv - (u32)ov;
++		else
++			res[i] += nv - ov;
++	}
++}
++
+ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
+ 						struct rtnl_link_stats64 *stats)
+ {
+@@ -3316,43 +3340,23 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
+ 	struct list_head *iter;
+ 	struct slave *slave;
+ 
++	spin_lock(&bond->stats_lock);
+ 	memcpy(stats, &bond->bond_stats, sizeof(*stats));
+ 
+-	bond_for_each_slave(bond, slave, iter) {
+-		const struct rtnl_link_stats64 *sstats =
++	rcu_read_lock();
++	bond_for_each_slave_rcu(bond, slave, iter) {
++		const struct rtnl_link_stats64 *new =
+ 			dev_get_stats(slave->dev, &temp);
+-		struct rtnl_link_stats64 *pstats = &slave->slave_stats;
+-
+-		stats->rx_packets +=  sstats->rx_packets - pstats->rx_packets;
+-		stats->rx_bytes += sstats->rx_bytes - pstats->rx_bytes;
+-		stats->rx_errors += sstats->rx_errors - pstats->rx_errors;
+-		stats->rx_dropped += sstats->rx_dropped - pstats->rx_dropped;
+-
+-		stats->tx_packets += sstats->tx_packets - pstats->tx_packets;;
+-		stats->tx_bytes += sstats->tx_bytes - pstats->tx_bytes;
+-		stats->tx_errors += sstats->tx_errors - pstats->tx_errors;
+-		stats->tx_dropped += sstats->tx_dropped - pstats->tx_dropped;
+-
+-		stats->multicast += sstats->multicast - pstats->multicast;
+-		stats->collisions += sstats->collisions - pstats->collisions;
+-
+-		stats->rx_length_errors += sstats->rx_length_errors - pstats->rx_length_errors;
+-		stats->rx_over_errors += sstats->rx_over_errors - pstats->rx_over_errors;
+-		stats->rx_crc_errors += sstats->rx_crc_errors - pstats->rx_crc_errors;
+-		stats->rx_frame_errors += sstats->rx_frame_errors - pstats->rx_frame_errors;
+-		stats->rx_fifo_errors += sstats->rx_fifo_errors - pstats->rx_fifo_errors;
+-		stats->rx_missed_errors += sstats->rx_missed_errors - pstats->rx_missed_errors;
+-
+-		stats->tx_aborted_errors += sstats->tx_aborted_errors - pstats->tx_aborted_errors;
+-		stats->tx_carrier_errors += sstats->tx_carrier_errors - pstats->tx_carrier_errors;
+-		stats->tx_fifo_errors += sstats->tx_fifo_errors - pstats->tx_fifo_errors;
+-		stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors - pstats->tx_heartbeat_errors;
+-		stats->tx_window_errors += sstats->tx_window_errors - pstats->tx_window_errors;
++
++		bond_fold_stats(stats, new, &slave->slave_stats);
+ 
+ 		/* save off the slave stats for the next run */
+-		memcpy(pstats, sstats, sizeof(*sstats));
++		memcpy(&slave->slave_stats, new, sizeof(*new));
+ 	}
++	rcu_read_unlock();
++
+ 	memcpy(&bond->bond_stats, stats, sizeof(*stats));
++	spin_unlock(&bond->stats_lock);
+ 
+ 	return stats;
+ }
+@@ -4166,6 +4170,7 @@ void bond_setup(struct net_device *bond_dev)
+ 	struct bonding *bond = netdev_priv(bond_dev);
+ 
+ 	spin_lock_init(&bond->mode_lock);
++	spin_lock_init(&bond->stats_lock);
+ 	bond->params = bonding_defaults;
+ 
+ 	/* Initialize pointers */
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index 141c2a42d7ed..910c12e2638e 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -696,11 +696,17 @@ int can_change_mtu(struct net_device *dev, int new_mtu)
+ 	/* allow change of MTU according to the CANFD ability of the device */
+ 	switch (new_mtu) {
+ 	case CAN_MTU:
++		/* 'CANFD-only' controllers can not switch to CAN_MTU */
++		if (priv->ctrlmode_static & CAN_CTRLMODE_FD)
++			return -EINVAL;
++
+ 		priv->ctrlmode &= ~CAN_CTRLMODE_FD;
+ 		break;
+ 
+ 	case CANFD_MTU:
+-		if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD))
++		/* check for potential CANFD ability */
++		if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) &&
++		    !(priv->ctrlmode_static & CAN_CTRLMODE_FD))
+ 			return -EINVAL;
+ 
+ 		priv->ctrlmode |= CAN_CTRLMODE_FD;
+@@ -782,6 +788,35 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
+ 				= { .len = sizeof(struct can_bittiming_const) },
+ };
+ 
++static int can_validate(struct nlattr *tb[], struct nlattr *data[])
++{
++	bool is_can_fd = false;
++
++	/* Make sure that valid CAN FD configurations always consist of
++	 * - nominal/arbitration bittiming
++	 * - data bittiming
++	 * - control mode with CAN_CTRLMODE_FD set
++	 */
++
++	if (data[IFLA_CAN_CTRLMODE]) {
++		struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
++
++		is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
++	}
++
++	if (is_can_fd) {
++		if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
++			return -EOPNOTSUPP;
++	}
++
++	if (data[IFLA_CAN_DATA_BITTIMING]) {
++		if (!is_can_fd || !data[IFLA_CAN_BITTIMING])
++			return -EOPNOTSUPP;
++	}
++
++	return 0;
++}
++
+ static int can_changelink(struct net_device *dev,
+ 			  struct nlattr *tb[], struct nlattr *data[])
+ {
+@@ -813,19 +848,31 @@ static int can_changelink(struct net_device *dev,
+ 
+ 	if (data[IFLA_CAN_CTRLMODE]) {
+ 		struct can_ctrlmode *cm;
++		u32 ctrlstatic;
++		u32 maskedflags;
+ 
+ 		/* Do not allow changing controller mode while running */
+ 		if (dev->flags & IFF_UP)
+ 			return -EBUSY;
+ 		cm = nla_data(data[IFLA_CAN_CTRLMODE]);
++		ctrlstatic = priv->ctrlmode_static;
++		maskedflags = cm->flags & cm->mask;
++
++		/* check whether provided bits are allowed to be passed */
++		if (cm->mask & ~(priv->ctrlmode_supported | ctrlstatic))
++			return -EOPNOTSUPP;
++
++		/* do not check for static fd-non-iso if 'fd' is disabled */
++		if (!(maskedflags & CAN_CTRLMODE_FD))
++			ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
+ 
+-		/* check whether changed bits are allowed to be modified */
+-		if (cm->mask & ~priv->ctrlmode_supported)
++		/* make sure static options are provided by configuration */
++		if ((maskedflags & ctrlstatic) != ctrlstatic)
+ 			return -EOPNOTSUPP;
+ 
+ 		/* clear bits to be modified and copy the flag values */
+ 		priv->ctrlmode &= ~cm->mask;
+-		priv->ctrlmode |= (cm->flags & cm->mask);
++		priv->ctrlmode |= maskedflags;
+ 
+ 		/* CAN_CTRLMODE_FD can only be set when driver supports FD */
+ 		if (priv->ctrlmode & CAN_CTRLMODE_FD)
+@@ -966,6 +1013,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
+ 	.maxtype	= IFLA_CAN_MAX,
+ 	.policy		= can_policy,
+ 	.setup		= can_setup,
++	.validate	= can_validate,
+ 	.newlink	= can_newlink,
+ 	.changelink	= can_changelink,
+ 	.get_size	= can_get_size,
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index 39cf911f7a1e..195f15edb32e 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -955,7 +955,7 @@ static struct net_device *alloc_m_can_dev(void)
+ 	priv->can.do_get_berr_counter = m_can_get_berr_counter;
+ 
+ 	/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.1 */
+-	priv->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO;
++	can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
+ 
+ 	/* CAN_CTRLMODE_FD_NON_ISO can not be changed with M_CAN IP v3.0.1 */
+ 	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
+index 8f76f4558a88..2ff465848b65 100644
+--- a/drivers/net/ethernet/atheros/atlx/atl2.c
++++ b/drivers/net/ethernet/atheros/atlx/atl2.c
+@@ -1412,7 +1412,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 
+ 	err = -EIO;
+ 
+-	netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX;
++	netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
+ 	netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
+ 
+ 	/* Init PHY as early as possible due to power saving issue  */
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+index d7e01a74e927..6746fd03cb3a 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -1197,7 +1197,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
+ 			dev->stats.tx_bytes += tx_cb_ptr->skb->len;
+ 			dma_unmap_single(&dev->dev,
+ 					 dma_unmap_addr(tx_cb_ptr, dma_addr),
+-					 tx_cb_ptr->skb->len,
++					 dma_unmap_len(tx_cb_ptr, dma_len),
+ 					 DMA_TO_DEVICE);
+ 			bcmgenet_free_cb(tx_cb_ptr);
+ 		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
+@@ -1308,7 +1308,7 @@ static int bcmgenet_xmit_single(struct net_device *dev,
+ 	}
+ 
+ 	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
+-	dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
++	dma_unmap_len_set(tx_cb_ptr, dma_len, skb_len);
+ 	length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
+ 			(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
+ 			DMA_TX_APPEND_CRC;
+diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+index 767347b1f631..f50bdbfaae7c 100644
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+@@ -519,6 +519,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
+ 		nicvf_config_vlan_stripping(nic, nic->netdev->features);
+ 
+ 	/* Enable Receive queue */
++	memset(&rq_cfg, 0, sizeof(struct rq_cfg));
+ 	rq_cfg.ena = 1;
+ 	rq_cfg.tcp_ena = 0;
+ 	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
+@@ -551,6 +552,7 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
+ 			      qidx, (u64)(cq->dmem.phys_base));
+ 
+ 	/* Enable Completion queue */
++	memset(&cq_cfg, 0, sizeof(struct cq_cfg));
+ 	cq_cfg.ena = 1;
+ 	cq_cfg.reset = 0;
+ 	cq_cfg.caching = 0;
+@@ -599,6 +601,7 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
+ 			      qidx, (u64)(sq->dmem.phys_base));
+ 
+ 	/* Enable send queue  & set queue size */
++	memset(&sq_cfg, 0, sizeof(struct sq_cfg));
+ 	sq_cfg.ena = 1;
+ 	sq_cfg.reset = 0;
+ 	sq_cfg.ldwb = 0;
+@@ -635,6 +638,7 @@ static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
+ 
+ 	/* Enable RBDR  & set queue size */
+ 	/* Buffer size should be in multiples of 128 bytes */
++	memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg));
+ 	rbdr_cfg.ena = 1;
+ 	rbdr_cfg.reset = 0;
+ 	rbdr_cfg.ldwb = 0;
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 41c81f6ec630..0d6e8c177650 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -1566,9 +1566,15 @@ fec_enet_rx(struct net_device *ndev, int budget)
+ 	struct fec_enet_private *fep = netdev_priv(ndev);
+ 
+ 	for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
+-		clear_bit(queue_id, &fep->work_rx);
+-		pkt_received += fec_enet_rx_queue(ndev,
++		int ret;
++
++		ret = fec_enet_rx_queue(ndev,
+ 					budget - pkt_received, queue_id);
++
++		if (ret < budget - pkt_received)
++			clear_bit(queue_id, &fep->work_rx);
++
++		pkt_received += ret;
+ 	}
+ 	return pkt_received;
+ }
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index b0ae69f84493..2ee05cebea75 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -3070,17 +3070,17 @@ static int mvneta_stop(struct net_device *dev)
+ 	struct mvneta_port *pp = netdev_priv(dev);
+ 
+ 	/* Inform that we are stopping so we don't want to setup the
+-	 * driver for new CPUs in the notifiers
++	 * driver for new CPUs in the notifiers. The code of the
++	 * notifier for CPU online is protected by the same spinlock,
++	 * so when we get the lock, the notifer work is done.
+ 	 */
+ 	spin_lock(&pp->lock);
+ 	pp->is_stopped = true;
++	spin_unlock(&pp->lock);
++
+ 	mvneta_stop_dev(pp);
+ 	mvneta_mdio_remove(pp);
+ 	unregister_cpu_notifier(&pp->cpu_notifier);
+-	/* Now that the notifier are unregistered, we can release le
+-	 * lock
+-	 */
+-	spin_unlock(&pp->lock);
+ 	on_each_cpu(mvneta_percpu_disable, pp, true);
+ 	free_percpu_irq(dev->irq, pp->ports);
+ 	mvneta_cleanup_rxqs(pp);
+@@ -3612,6 +3612,7 @@ static int mvneta_probe(struct platform_device *pdev)
+ 	dev->ethtool_ops = &mvneta_eth_tool_ops;
+ 
+ 	pp = netdev_priv(dev);
++	spin_lock_init(&pp->lock);
+ 	pp->phy_node = phy_node;
+ 	pp->phy_interface = phy_mode;
+ 
+@@ -3720,7 +3721,7 @@ static int mvneta_probe(struct platform_device *pdev)
+ 	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+ 	dev->hw_features |= dev->features;
+ 	dev->vlan_features |= dev->features;
+-	dev->priv_flags |= IFF_UNICAST_FLT;
++	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
+ 	dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
+ 
+ 	err = register_netdev(dev);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+index 41440b2b20a3..03ef9aca21e4 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -704,7 +704,7 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
+ 
+ 	if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS)
+ 		return -1;
+-	hw_checksum = csum_add(hw_checksum, (__force __wsum)(ipv6h->nexthdr << 8));
++	hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));
+ 
+ 	csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
+ 				       sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+index e0946ab22010..0debb611da8b 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+@@ -402,7 +402,6 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
+ 	u32 packets = 0;
+ 	u32 bytes = 0;
+ 	int factor = priv->cqe_factor;
+-	u64 timestamp = 0;
+ 	int done = 0;
+ 	int budget = priv->tx_work_limit;
+ 	u32 last_nr_txbb;
+@@ -442,9 +441,12 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
+ 		new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
+ 
+ 		do {
++			u64 timestamp = 0;
++
+ 			txbbs_skipped += last_nr_txbb;
+ 			ring_index = (ring_index + last_nr_txbb) & size_mask;
+-			if (ring->tx_info[ring_index].ts_requested)
++
++			if (unlikely(ring->tx_info[ring_index].ts_requested))
+ 				timestamp = mlx4_en_get_cqe_ts(cqe);
+ 
+ 			/* free next descriptor */
+diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+index 25ce1b030a00..cd9b2b28df88 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
++++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+@@ -3141,7 +3141,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
+ 		case QP_TRANS_RTS2RTS:
+ 		case QP_TRANS_SQD2SQD:
+ 		case QP_TRANS_SQD2RTS:
+-			if (slave != mlx4_master_func_num(dev))
++			if (slave != mlx4_master_func_num(dev)) {
+ 				if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
+ 					port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
+ 					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
+@@ -3160,6 +3160,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
+ 					if (qp_ctx->alt_path.mgid_index >= num_gids)
+ 						return -EINVAL;
+ 				}
++			}
+ 			break;
+ 		default:
+ 			break;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 402994bf7e16..e293a2ec2775 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -1389,24 +1389,50 @@ static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
+ 	return 0;
+ }
+ 
+-static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
++static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
+ {
+-	struct mlx5e_priv *priv = netdev_priv(netdev);
+ 	struct mlx5_core_dev *mdev = priv->mdev;
+-	int hw_mtu;
++	u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
+ 	int err;
+ 
+-	err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
++	err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
+ 	if (err)
+ 		return err;
+ 
+-	mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
++	/* Update vport context MTU */
++	mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
++	return 0;
++}
++
++static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
++{
++	struct mlx5_core_dev *mdev = priv->mdev;
++	u16 hw_mtu = 0;
++	int err;
++
++	err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
++	if (err || !hw_mtu) /* fallback to port oper mtu */
++		mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
++
++	*mtu = MLX5E_HW2SW_MTU(hw_mtu);
++}
++
++static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
++{
++	struct mlx5e_priv *priv = netdev_priv(netdev);
++	u16 mtu;
++	int err;
+ 
+-	if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
+-		netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
+-			    __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);
++	err = mlx5e_set_mtu(priv, netdev->mtu);
++	if (err)
++		return err;
+ 
+-	netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);
++	mlx5e_query_mtu(priv, &mtu);
++	if (mtu != netdev->mtu)
++		netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
++			    __func__, mtu, netdev->mtu);
++
++	netdev->mtu = mtu;
+ 	return 0;
+ }
+ 
+@@ -1906,22 +1932,27 @@ static int mlx5e_set_features(struct net_device *netdev,
+ 	return err;
+ }
+ 
++#define MXL5_HW_MIN_MTU 64
++#define MXL5E_MIN_MTU (MXL5_HW_MIN_MTU + ETH_FCS_LEN)
++
+ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
+ {
+ 	struct mlx5e_priv *priv = netdev_priv(netdev);
+ 	struct mlx5_core_dev *mdev = priv->mdev;
+ 	bool was_opened;
+-	int max_mtu;
++	u16 max_mtu;
++	u16 min_mtu;
+ 	int err = 0;
+ 
+ 	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
+ 
+ 	max_mtu = MLX5E_HW2SW_MTU(max_mtu);
++	min_mtu = MLX5E_HW2SW_MTU(MXL5E_MIN_MTU);
+ 
+-	if (new_mtu > max_mtu) {
++	if (new_mtu > max_mtu || new_mtu < min_mtu) {
+ 		netdev_err(netdev,
+-			   "%s: Bad MTU (%d) > (%d) Max\n",
+-			   __func__, new_mtu, max_mtu);
++			   "%s: Bad MTU (%d), valid range is: [%d..%d]\n",
++			   __func__, new_mtu, min_mtu, max_mtu);
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index 6f68dba8d7ed..cc901852f1a9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -957,33 +957,6 @@ unlock_fg:
+ 	return rule;
+ }
+ 
+-static struct mlx5_flow_rule *add_rule_to_auto_fg(struct mlx5_flow_table *ft,
+-						  u8 match_criteria_enable,
+-						  u32 *match_criteria,
+-						  u32 *match_value,
+-						  u8 action,
+-						  u32 flow_tag,
+-						  struct mlx5_flow_destination *dest)
+-{
+-	struct mlx5_flow_rule *rule;
+-	struct mlx5_flow_group *g;
+-
+-	g = create_autogroup(ft, match_criteria_enable, match_criteria);
+-	if (IS_ERR(g))
+-		return (void *)g;
+-
+-	rule = add_rule_fg(g, match_value,
+-			   action, flow_tag, dest);
+-	if (IS_ERR(rule)) {
+-		/* Remove assumes refcount > 0 and autogroup creates a group
+-		 * with a refcount = 0.
+-		 */
+-		tree_get_node(&g->node);
+-		tree_remove_node(&g->node);
+-	}
+-	return rule;
+-}
+-
+ struct mlx5_flow_rule *
+ mlx5_add_flow_rule(struct mlx5_flow_table *ft,
+ 		   u8 match_criteria_enable,
+@@ -1008,8 +981,23 @@ mlx5_add_flow_rule(struct mlx5_flow_table *ft,
+ 				goto unlock;
+ 		}
+ 
+-	rule = add_rule_to_auto_fg(ft, match_criteria_enable, match_criteria,
+-				   match_value, action, flow_tag, dest);
++	g = create_autogroup(ft, match_criteria_enable, match_criteria);
++	if (IS_ERR(g)) {
++		rule = (void *)g;
++		goto unlock;
++	}
++
++	rule = add_rule_fg(g, match_value,
++			   action, flow_tag, dest);
++	if (IS_ERR(rule)) {
++		/* Remove assumes refcount > 0 and autogroup creates a group
++		 * with a refcount = 0.
++		 */
++		unlock_ref_node(&ft->node);
++		tree_get_node(&g->node);
++		tree_remove_node(&g->node);
++		return rule;
++	}
+ unlock:
+ 	unlock_ref_node(&ft->node);
+ 	return rule;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 1545a944c309..b86fe50d5d93 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -423,6 +423,10 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
+ 					 HCA_CAP_OPMOD_GET_CUR);
+ 		if (err)
+ 			return err;
++		err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
++					 HCA_CAP_OPMOD_GET_MAX);
++		if (err)
++			return err;
+ 	} else {
+ 		return 0;
+ 	}
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
+index a87e773e93f3..53a793bc2e3d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
+@@ -246,8 +246,8 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+ }
+ EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
+ 
+-static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
+-				int *max_mtu, int *oper_mtu, u8 port)
++static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
++				u16 *max_mtu, u16 *oper_mtu, u8 port)
+ {
+ 	u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
+ 	u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
+@@ -267,7 +267,7 @@ static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
+ 		*admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
+ }
+ 
+-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
++int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
+ {
+ 	u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
+ 	u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
+@@ -282,14 +282,14 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
+ }
+ EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
+ 
+-void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
++void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu,
+ 			     u8 port)
+ {
+ 	mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port);
+ }
+ EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
+ 
+-void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
++void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
+ 			      u8 port)
+ {
+ 	mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+index c7398b95aecd..6d5f56e73b5d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+@@ -196,6 +196,46 @@ int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
+ }
+ EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);
+ 
++int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
++{
++	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
++	u32 *out;
++	int err;
++
++	out = mlx5_vzalloc(outlen);
++	if (!out)
++		return -ENOMEM;
++
++	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
++	if (!err)
++		*mtu = MLX5_GET(query_nic_vport_context_out, out,
++				nic_vport_context.mtu);
++
++	kvfree(out);
++	return err;
++}
++EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);
++
++int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
++{
++	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
++	void *in;
++	int err;
++
++	in = mlx5_vzalloc(inlen);
++	if (!in)
++		return -ENOMEM;
++
++	MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
++	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
++
++	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
++
++	kvfree(in);
++	return err;
++}
++EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);
++
+ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
+ 				  u32 vport,
+ 				  enum mlx5_list_type list_type,
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+index 3b89ed2f3c76..65a115fc0c96 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+@@ -118,6 +118,8 @@ struct mlxsw_sp {
+ #define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
+ 		unsigned int interval; /* ms */
+ 	} fdb_notify;
++#define MLXSW_SP_MIN_AGEING_TIME 10
++#define MLXSW_SP_MAX_AGEING_TIME 1000000
+ #define MLXSW_SP_DEFAULT_AGEING_TIME 300
+ 	u32 ageing_time;
+ 	struct mlxsw_sp_upper master_bridge;
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+index 7b56098acc58..e1c74efff51a 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+@@ -311,8 +311,13 @@ static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ 	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
+ 	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
+ 
+-	if (switchdev_trans_ph_prepare(trans))
+-		return 0;
++	if (switchdev_trans_ph_prepare(trans)) {
++		if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
++		    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
++			return -ERANGE;
++		else
++			return 0;
++	}
+ 
+ 	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
+ }
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+index 46bbea8e023c..55007f1e6bbc 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+@@ -566,6 +566,7 @@ struct qlcnic_adapter_stats {
+ 	u64  tx_dma_map_error;
+ 	u64  spurious_intr;
+ 	u64  mac_filter_limit_overrun;
++	u64  mbx_spurious_intr;
+ };
+ 
+ /*
+@@ -1099,7 +1100,7 @@ struct qlcnic_mailbox {
+ 	unsigned long		status;
+ 	spinlock_t		queue_lock;	/* Mailbox queue lock */
+ 	spinlock_t		aen_lock;	/* Mailbox response/AEN lock */
+-	atomic_t		rsp_status;
++	u32			rsp_status;
+ 	u32			num_cmds;
+ };
+ 
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+index 37a731be7d39..f9640d5ce6ba 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+@@ -491,7 +491,7 @@ irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
+ 
+ static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx)
+ {
+-	atomic_set(&mbx->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
++	mbx->rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
+ 	complete(&mbx->completion);
+ }
+ 
+@@ -510,7 +510,7 @@ static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
+ 	if (event &  QLCNIC_MBX_ASYNC_EVENT) {
+ 		__qlcnic_83xx_process_aen(adapter);
+ 	} else {
+-		if (atomic_read(&mbx->rsp_status) != rsp_status)
++		if (mbx->rsp_status != rsp_status)
+ 			qlcnic_83xx_notify_mbx_response(mbx);
+ 	}
+ out:
+@@ -1023,7 +1023,7 @@ static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
+ 		if (event &  QLCNIC_MBX_ASYNC_EVENT) {
+ 			__qlcnic_83xx_process_aen(adapter);
+ 		} else {
+-			if (atomic_read(&mbx->rsp_status) != rsp_status)
++			if (mbx->rsp_status != rsp_status)
+ 				qlcnic_83xx_notify_mbx_response(mbx);
+ 		}
+ 	}
+@@ -2338,9 +2338,9 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
+ 
+ static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
+ {
++	u32 mask, resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
+ 	struct qlcnic_adapter *adapter = data;
+ 	struct qlcnic_mailbox *mbx;
+-	u32 mask, resp, event;
+ 	unsigned long flags;
+ 
+ 	mbx = adapter->ahw->mailbox;
+@@ -2350,10 +2350,14 @@ static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
+ 		goto out;
+ 
+ 	event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
+-	if (event &  QLCNIC_MBX_ASYNC_EVENT)
++	if (event &  QLCNIC_MBX_ASYNC_EVENT) {
+ 		__qlcnic_83xx_process_aen(adapter);
+-	else
+-		qlcnic_83xx_notify_mbx_response(mbx);
++	} else {
++		if (mbx->rsp_status != rsp_status)
++			qlcnic_83xx_notify_mbx_response(mbx);
++		else
++			adapter->stats.mbx_spurious_intr++;
++	}
+ 
+ out:
+ 	mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
+@@ -4050,10 +4054,10 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
+ 	struct qlcnic_adapter *adapter = mbx->adapter;
+ 	const struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
+ 	struct device *dev = &adapter->pdev->dev;
+-	atomic_t *rsp_status = &mbx->rsp_status;
+ 	struct list_head *head = &mbx->cmd_q;
+ 	struct qlcnic_hardware_context *ahw;
+ 	struct qlcnic_cmd_args *cmd = NULL;
++	unsigned long flags;
+ 
+ 	ahw = adapter->ahw;
+ 
+@@ -4063,7 +4067,9 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
+ 			return;
+ 		}
+ 
+-		atomic_set(rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
++		spin_lock_irqsave(&mbx->aen_lock, flags);
++		mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT;
++		spin_unlock_irqrestore(&mbx->aen_lock, flags);
+ 
+ 		spin_lock(&mbx->queue_lock);
+ 
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+index 494e8105adee..0a2318cad34d 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+@@ -59,7 +59,8 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
+ 	 QLC_OFF(stats.mac_filter_limit_overrun)},
+ 	{"spurious intr", QLC_SIZEOF(stats.spurious_intr),
+ 	 QLC_OFF(stats.spurious_intr)},
+-
++	{"mbx spurious intr", QLC_SIZEOF(stats.mbx_spurious_intr),
++	 QLC_OFF(stats.mbx_spurious_intr)},
+ };
+ 
+ static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
+diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+index 997976426799..b28e73ea2c25 100644
+--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
++++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+@@ -1648,7 +1648,18 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
+ 		return;
+ 	}
+ 	skb_reserve(new_skb, NET_IP_ALIGN);
++
++	pci_dma_sync_single_for_cpu(qdev->pdev,
++				    dma_unmap_addr(sbq_desc, mapaddr),
++				    dma_unmap_len(sbq_desc, maplen),
++				    PCI_DMA_FROMDEVICE);
++
+ 	memcpy(skb_put(new_skb, length), skb->data, length);
++
++	pci_dma_sync_single_for_device(qdev->pdev,
++				       dma_unmap_addr(sbq_desc, mapaddr),
++				       dma_unmap_len(sbq_desc, maplen),
++				       PCI_DMA_FROMDEVICE);
+ 	skb = new_skb;
+ 
+ 	/* Frame error, so drop the packet. */
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index 738449992876..01f6d5bbd420 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -1136,11 +1136,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
+ 			break;
+ 		sh_eth_set_receive_align(skb);
+ 
+-		/* RX descriptor */
+-		rxdesc = &mdp->rx_ring[i];
+ 		/* The size of the buffer is a multiple of 32 bytes. */
+ 		buf_len = ALIGN(mdp->rx_buf_sz, 32);
+-		rxdesc->len = cpu_to_le32(buf_len << 16);
+ 		dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len,
+ 					  DMA_FROM_DEVICE);
+ 		if (dma_mapping_error(&ndev->dev, dma_addr)) {
+@@ -1148,6 +1145,10 @@ static void sh_eth_ring_format(struct net_device *ndev)
+ 			break;
+ 		}
+ 		mdp->rx_skbuff[i] = skb;
++
++		/* RX descriptor */
++		rxdesc = &mdp->rx_ring[i];
++		rxdesc->len = cpu_to_le32(buf_len << 16);
+ 		rxdesc->addr = cpu_to_le32(dma_addr);
+ 		rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP);
+ 
+@@ -1163,7 +1164,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
+ 	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
+ 
+ 	/* Mark the last entry as wrapping the ring. */
+-	rxdesc->status |= cpu_to_le32(RD_RDLE);
++	if (rxdesc)
++		rxdesc->status |= cpu_to_le32(RD_RDLE);
+ 
+ 	memset(mdp->tx_ring, 0, tx_ringsize);
+ 
+diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
+index 166a7fc87e2f..f39e7198e818 100644
+--- a/drivers/net/ethernet/rocker/rocker.c
++++ b/drivers/net/ethernet/rocker/rocker.c
+@@ -239,6 +239,7 @@ struct rocker {
+ 	struct {
+ 		u64 id;
+ 	} hw;
++	unsigned long ageing_time;
+ 	spinlock_t cmd_ring_lock;		/* for cmd ring accesses */
+ 	struct rocker_dma_ring_info cmd_ring;
+ 	struct rocker_dma_ring_info event_ring;
+@@ -3704,7 +3705,7 @@ static void rocker_fdb_cleanup(unsigned long data)
+ 	struct rocker_port *rocker_port;
+ 	struct rocker_fdb_tbl_entry *entry;
+ 	struct hlist_node *tmp;
+-	unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME;
++	unsigned long next_timer = jiffies + rocker->ageing_time;
+ 	unsigned long expires;
+ 	unsigned long lock_flags;
+ 	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE |
+@@ -4367,8 +4368,12 @@ static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port,
+ 					  struct switchdev_trans *trans,
+ 					  u32 ageing_time)
+ {
++	struct rocker *rocker = rocker_port->rocker;
++
+ 	if (!switchdev_trans_ph_prepare(trans)) {
+ 		rocker_port->ageing_time = clock_t_to_jiffies(ageing_time);
++		if (rocker_port->ageing_time < rocker->ageing_time)
++			rocker->ageing_time = rocker_port->ageing_time;
+ 		mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies);
+ 	}
+ 
+@@ -5206,10 +5211,13 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 		goto err_init_tbls;
+ 	}
+ 
++	rocker->ageing_time = BR_DEFAULT_AGEING_TIME;
+ 	setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
+ 		    (unsigned long) rocker);
+ 	mod_timer(&rocker->fdb_cleanup_timer, jiffies);
+ 
++	rocker->ageing_time = BR_DEFAULT_AGEING_TIME;
++
+ 	err = rocker_probe_ports(rocker);
+ 	if (err) {
+ 		dev_err(&pdev->dev, "failed to probe ports\n");
+diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
+index 696852eb23c3..7a3f990c1935 100644
+--- a/drivers/net/irda/irtty-sir.c
++++ b/drivers/net/irda/irtty-sir.c
+@@ -430,16 +430,6 @@ static int irtty_open(struct tty_struct *tty)
+ 
+ 	/* Module stuff handled via irda_ldisc.owner - Jean II */
+ 
+-	/* First make sure we're not already connected. */
+-	if (tty->disc_data != NULL) {
+-		priv = tty->disc_data;
+-		if (priv && priv->magic == IRTTY_MAGIC) {
+-			ret = -EEXIST;
+-			goto out;
+-		}
+-		tty->disc_data = NULL;		/* ### */
+-	}
+-
+ 	/* stop the underlying  driver */
+ 	irtty_stop_receiver(tty, TRUE);
+ 	if (tty->ops->stop)
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index d636d051fac8..95394edd1ed5 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -760,6 +760,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ 			macvtap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
+ 		if (copylen > good_linear)
+ 			copylen = good_linear;
++		else if (copylen < ETH_HLEN)
++			copylen = ETH_HLEN;
+ 		linear = copylen;
+ 		i = *from;
+ 		iov_iter_advance(&i, copylen);
+@@ -769,10 +771,11 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ 
+ 	if (!zerocopy) {
+ 		copylen = len;
+-		if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > good_linear)
++		linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
++		if (linear > good_linear)
+ 			linear = good_linear;
+-		else
+-			linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
++		else if (linear < ETH_HLEN)
++			linear = ETH_HLEN;
+ 	}
+ 
+ 	skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index d61da9ece3ba..aafe237b25ac 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -575,7 +575,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
+ 
+ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ {
+-	struct ppp_file *pf = file->private_data;
++	struct ppp_file *pf;
+ 	struct ppp *ppp;
+ 	int err = -EFAULT, val, val2, i;
+ 	struct ppp_idle idle;
+@@ -585,9 +585,14 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 	void __user *argp = (void __user *)arg;
+ 	int __user *p = argp;
+ 
+-	if (!pf)
+-		return ppp_unattached_ioctl(current->nsproxy->net_ns,
+-					pf, file, cmd, arg);
++	mutex_lock(&ppp_mutex);
++
++	pf = file->private_data;
++	if (!pf) {
++		err = ppp_unattached_ioctl(current->nsproxy->net_ns,
++					   pf, file, cmd, arg);
++		goto out;
++	}
+ 
+ 	if (cmd == PPPIOCDETACH) {
+ 		/*
+@@ -602,7 +607,6 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 		 * this fd and reopening /dev/ppp.
+ 		 */
+ 		err = -EINVAL;
+-		mutex_lock(&ppp_mutex);
+ 		if (pf->kind == INTERFACE) {
+ 			ppp = PF_TO_PPP(pf);
+ 			rtnl_lock();
+@@ -616,15 +620,13 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 		} else
+ 			pr_warn("PPPIOCDETACH file->f_count=%ld\n",
+ 				atomic_long_read(&file->f_count));
+-		mutex_unlock(&ppp_mutex);
+-		return err;
++		goto out;
+ 	}
+ 
+ 	if (pf->kind == CHANNEL) {
+ 		struct channel *pch;
+ 		struct ppp_channel *chan;
+ 
+-		mutex_lock(&ppp_mutex);
+ 		pch = PF_TO_CHANNEL(pf);
+ 
+ 		switch (cmd) {
+@@ -646,17 +648,16 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 				err = chan->ops->ioctl(chan, cmd, arg);
+ 			up_read(&pch->chan_sem);
+ 		}
+-		mutex_unlock(&ppp_mutex);
+-		return err;
++		goto out;
+ 	}
+ 
+ 	if (pf->kind != INTERFACE) {
+ 		/* can't happen */
+ 		pr_err("PPP: not interface or channel??\n");
+-		return -EINVAL;
++		err = -EINVAL;
++		goto out;
+ 	}
+ 
+-	mutex_lock(&ppp_mutex);
+ 	ppp = PF_TO_PPP(pf);
+ 	switch (cmd) {
+ 	case PPPIOCSMRU:
+@@ -831,7 +832,10 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 	default:
+ 		err = -ENOTTY;
+ 	}
++
++out:
+ 	mutex_unlock(&ppp_mutex);
++
+ 	return err;
+ }
+ 
+@@ -844,7 +848,6 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
+ 	struct ppp_net *pn;
+ 	int __user *p = (int __user *)arg;
+ 
+-	mutex_lock(&ppp_mutex);
+ 	switch (cmd) {
+ 	case PPPIOCNEWUNIT:
+ 		/* Create a new ppp unit */
+@@ -894,7 +897,7 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
+ 	default:
+ 		err = -ENOTTY;
+ 	}
+-	mutex_unlock(&ppp_mutex);
++
+ 	return err;
+ }
+ 
+@@ -2304,7 +2307,7 @@ int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
+ 
+ 	pch->ppp = NULL;
+ 	pch->chan = chan;
+-	pch->chan_net = net;
++	pch->chan_net = get_net(net);
+ 	chan->ppp = pch;
+ 	init_ppp_file(&pch->file, CHANNEL);
+ 	pch->file.hdrlen = chan->hdrlen;
+@@ -2401,6 +2404,8 @@ ppp_unregister_channel(struct ppp_channel *chan)
+ 	spin_lock_bh(&pn->all_channels_lock);
+ 	list_del(&pch->list);
+ 	spin_unlock_bh(&pn->all_channels_lock);
++	put_net(pch->chan_net);
++	pch->chan_net = NULL;
+ 
+ 	pch->file.dead = 1;
+ 	wake_up_interruptible(&pch->file.rwait);
+diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
+index 01f08a7751f7..e7034c55e796 100644
+--- a/drivers/net/rionet.c
++++ b/drivers/net/rionet.c
+@@ -280,7 +280,7 @@ static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbo
+ 	struct net_device *ndev = dev_id;
+ 	struct rionet_private *rnet = netdev_priv(ndev);
+ 
+-	spin_lock(&rnet->lock);
++	spin_lock(&rnet->tx_lock);
+ 
+ 	if (netif_msg_intr(rnet))
+ 		printk(KERN_INFO
+@@ -299,7 +299,7 @@ static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbo
+ 	if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
+ 		netif_wake_queue(ndev);
+ 
+-	spin_unlock(&rnet->lock);
++	spin_unlock(&rnet->tx_lock);
+ }
+ 
+ static int rionet_open(struct net_device *ndev)
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 88bb8cc3555b..81ecc2ed8137 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -621,7 +621,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
+ 
+ 	/* Re-attach the filter to persist device */
+ 	if (!skip_filter && (tun->filter_attached == true)) {
+-		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
++		err = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
++					 lockdep_rtnl_is_held());
+ 		if (!err)
+ 			goto out;
+ 	}
+@@ -1000,7 +1001,6 @@ static void tun_net_init(struct net_device *dev)
+ 		/* Zero header length */
+ 		dev->type = ARPHRD_NONE;
+ 		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+-		dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
+ 		break;
+ 
+ 	case IFF_TAP:
+@@ -1012,7 +1012,6 @@ static void tun_net_init(struct net_device *dev)
+ 
+ 		eth_hw_addr_random(dev);
+ 
+-		dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
+ 		break;
+ 	}
+ }
+@@ -1466,6 +1465,8 @@ static void tun_setup(struct net_device *dev)
+ 
+ 	dev->ethtool_ops = &tun_ethtool_ops;
+ 	dev->destructor = tun_free_netdev;
++	/* We prefer our own queue length */
++	dev->tx_queue_len = TUN_READQ_SIZE;
+ }
+ 
+ /* Trivial set of netlink ops to allow deleting tun or tap
+@@ -1807,7 +1808,7 @@ static void tun_detach_filter(struct tun_struct *tun, int n)
+ 
+ 	for (i = 0; i < n; i++) {
+ 		tfile = rtnl_dereference(tun->tfiles[i]);
+-		sk_detach_filter(tfile->socket.sk);
++		__sk_detach_filter(tfile->socket.sk, lockdep_rtnl_is_held());
+ 	}
+ 
+ 	tun->filter_attached = false;
+@@ -1820,7 +1821,8 @@ static int tun_attach_filter(struct tun_struct *tun)
+ 
+ 	for (i = 0; i < tun->numqueues; i++) {
+ 		tfile = rtnl_dereference(tun->tfiles[i]);
+-		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
++		ret = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
++					 lockdep_rtnl_is_held());
+ 		if (ret) {
+ 			tun_detach_filter(tun, i);
+ 			return ret;
+diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
+index 0c5c22b84da8..7de5ab589e4e 100644
+--- a/drivers/net/usb/asix_common.c
++++ b/drivers/net/usb/asix_common.c
+@@ -66,7 +66,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
+ 	 * buffer.
+ 	 */
+ 	if (rx->remaining && (rx->remaining + sizeof(u32) <= skb->len)) {
+-		offset = ((rx->remaining + 1) & 0xfffe) + sizeof(u32);
++		offset = ((rx->remaining + 1) & 0xfffe);
+ 		rx->header = get_unaligned_le32(skb->data + offset);
+ 		offset = 0;
+ 
+diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
+index bdd83d95ec0a..96a5028621c8 100644
+--- a/drivers/net/usb/cdc_mbim.c
++++ b/drivers/net/usb/cdc_mbim.c
+@@ -617,8 +617,13 @@ static const struct usb_device_id mbim_devs[] = {
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+ 	  .driver_info = (unsigned long)&cdc_mbim_info,
+ 	},
+-	/* Huawei E3372 fails unless NDP comes after the IP packets */
+-	{ USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
++
++	/* Some Huawei devices, ME906s-158 (12d1:15c1) and E3372
++	 * (12d1:157d), are known to fail unless the NDP is placed
++	 * after the IP packets.  Applying the quirk to all Huawei
++	 * devices is broader than necessary, but harmless.
++	 */
++	{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+ 	  .driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end,
+ 	},
+ 	/* default entry */
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index a3a4ccf7cf52..1232a8c608b4 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -844,6 +844,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x19d2, 0x1426, 2)},	/* ZTE MF91 */
+ 	{QMI_FIXED_INTF(0x19d2, 0x1428, 2)},	/* Telewell TW-LTE 4G v2 */
+ 	{QMI_FIXED_INTF(0x19d2, 0x2002, 4)},	/* ZTE (Vodafone) K3765-Z */
++	{QMI_FIXED_INTF(0x2001, 0x7e19, 4)},	/* D-Link DWM-221 B1 */
+ 	{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
+ 	{QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
+ 	{QMI_FIXED_INTF(0x1199, 0x68a2, 8)},	/* Sierra Wireless MC7710 in QMI mode */
+diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
+index 44541dbc5c28..69b994f3b8c5 100644
+--- a/drivers/net/wan/farsync.c
++++ b/drivers/net/wan/farsync.c
+@@ -2516,7 +2516,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+                 dev->mem_start   = card->phys_mem
+                                  + BUF_OFFSET ( txBuffer[i][0][0]);
+                 dev->mem_end     = card->phys_mem
+-                                 + BUF_OFFSET ( txBuffer[i][NUM_TX_BUFFER][0]);
++                                 + BUF_OFFSET ( txBuffer[i][NUM_TX_BUFFER - 1][LEN_RX_BUFFER - 1]);
+                 dev->base_addr   = card->pci_conf;
+                 dev->irq         = card->irq;
+ 
+diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
+index 91afa3ae414c..a192d451dab2 100644
+--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
++++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
+@@ -2143,11 +2143,7 @@ EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
+ void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
+ 					     struct sk_buff *skb)
+ {
+-	struct ath10k_pktlog_10_4_hdr *hdr =
+-		(struct ath10k_pktlog_10_4_hdr *)skb->data;
+-
+-	trace_ath10k_htt_pktlog(ar, hdr->payload,
+-				sizeof(*hdr) + __le16_to_cpu(hdr->size));
++	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
+ 	dev_kfree_skb_any(skb);
+ }
+ EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
+diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+index 8f8793004b9f..1b271b99c49e 100644
+--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
++++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+@@ -274,6 +274,9 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
+ 	};
+ 	static const int inc[4] = { 0, 100, 0, 0 };
+ 
++	memset(&mask_m, 0, sizeof(int8_t) * 123);
++	memset(&mask_p, 0, sizeof(int8_t) * 123);
++
+ 	cur_bin = -6000;
+ 	upper = bin + 100;
+ 	lower = bin - 100;
+@@ -424,14 +427,9 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
+ 	int tmp, new;
+ 	int i;
+ 
+-	int8_t mask_m[123];
+-	int8_t mask_p[123];
+ 	int cur_bb_spur;
+ 	bool is2GHz = IS_CHAN_2GHZ(chan);
+ 
+-	memset(&mask_m, 0, sizeof(int8_t) * 123);
+-	memset(&mask_p, 0, sizeof(int8_t) * 123);
+-
+ 	for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
+ 		cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
+ 		if (AR_NO_SPUR == cur_bb_spur)
+diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+index db6624527d99..53d7445a5d12 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
++++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+@@ -178,14 +178,9 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
+ 	int i;
+ 	struct chan_centers centers;
+ 
+-	int8_t mask_m[123];
+-	int8_t mask_p[123];
+ 	int cur_bb_spur;
+ 	bool is2GHz = IS_CHAN_2GHZ(chan);
+ 
+-	memset(&mask_m, 0, sizeof(int8_t) * 123);
+-	memset(&mask_p, 0, sizeof(int8_t) * 123);
+-
+ 	ath9k_hw_get_channel_centers(ah, chan, &centers);
+ 	freq = centers.synth_center;
+ 
+diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
+index 73fb4232f9f2..a794157a147d 100644
+--- a/drivers/net/wireless/ath/ath9k/eeprom.c
++++ b/drivers/net/wireless/ath/ath9k/eeprom.c
+@@ -477,10 +477,9 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
+ 
+ 	if (match) {
+ 		if (AR_SREV_9287(ah)) {
+-			/* FIXME: array overrun? */
+ 			for (i = 0; i < numXpdGains; i++) {
+ 				minPwrT4[i] = data_9287[idxL].pwrPdg[i][0];
+-				maxPwrT4[i] = data_9287[idxL].pwrPdg[i][4];
++				maxPwrT4[i] = data_9287[idxL].pwrPdg[i][intercepts - 1];
+ 				ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
+ 						data_9287[idxL].pwrPdg[i],
+ 						data_9287[idxL].vpdPdg[i],
+@@ -490,7 +489,7 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
+ 		} else if (eeprom_4k) {
+ 			for (i = 0; i < numXpdGains; i++) {
+ 				minPwrT4[i] = data_4k[idxL].pwrPdg[i][0];
+-				maxPwrT4[i] = data_4k[idxL].pwrPdg[i][4];
++				maxPwrT4[i] = data_4k[idxL].pwrPdg[i][intercepts - 1];
+ 				ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
+ 						data_4k[idxL].pwrPdg[i],
+ 						data_4k[idxL].vpdPdg[i],
+@@ -500,7 +499,7 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
+ 		} else {
+ 			for (i = 0; i < numXpdGains; i++) {
+ 				minPwrT4[i] = data_def[idxL].pwrPdg[i][0];
+-				maxPwrT4[i] = data_def[idxL].pwrPdg[i][4];
++				maxPwrT4[i] = data_def[idxL].pwrPdg[i][intercepts - 1];
+ 				ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
+ 						data_def[idxL].pwrPdg[i],
+ 						data_def[idxL].vpdPdg[i],
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
+index 2ca783fa50cf..7e269f9aa607 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
+@@ -32,7 +32,7 @@
+ #define BRCMF_FLOWRING_LOW		(BRCMF_FLOWRING_HIGH - 256)
+ #define BRCMF_FLOWRING_INVALID_IFIDX	0xff
+ 
+-#define BRCMF_FLOWRING_HASH_AP(da, fifo, ifidx) (da[5] + fifo + ifidx * 16)
++#define BRCMF_FLOWRING_HASH_AP(da, fifo, ifidx) (da[5] * 2 + fifo + ifidx * 16)
+ #define BRCMF_FLOWRING_HASH_STA(fifo, ifidx) (fifo + ifidx * 16)
+ 
+ static const u8 brcmf_flowring_prio2fifo[] = {
+@@ -68,7 +68,7 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
+ 			  u8 prio, u8 ifidx)
+ {
+ 	struct brcmf_flowring_hash *hash;
+-	u8 hash_idx;
++	u16 hash_idx;
+ 	u32 i;
+ 	bool found;
+ 	bool sta;
+@@ -88,6 +88,7 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
+ 	}
+ 	hash_idx =  sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
+ 			  BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
++	hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
+ 	found = false;
+ 	hash = flow->hash;
+ 	for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
+@@ -98,6 +99,7 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
+ 			break;
+ 		}
+ 		hash_idx++;
++		hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
+ 	}
+ 	if (found)
+ 		return hash[hash_idx].flowid;
+@@ -111,7 +113,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
+ {
+ 	struct brcmf_flowring_ring *ring;
+ 	struct brcmf_flowring_hash *hash;
+-	u8 hash_idx;
++	u16 hash_idx;
+ 	u32 i;
+ 	bool found;
+ 	u8 fifo;
+@@ -131,6 +133,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
+ 	}
+ 	hash_idx =  sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
+ 			  BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
++	hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
+ 	found = false;
+ 	hash = flow->hash;
+ 	for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
+@@ -140,6 +143,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
+ 			break;
+ 		}
+ 		hash_idx++;
++		hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
+ 	}
+ 	if (found) {
+ 		for (i = 0; i < flow->nrofrings; i++) {
+@@ -169,7 +173,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
+ }
+ 
+ 
+-u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid)
++u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u16 flowid)
+ {
+ 	struct brcmf_flowring_ring *ring;
+ 
+@@ -179,7 +183,7 @@ u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid)
+ }
+ 
+ 
+-static void brcmf_flowring_block(struct brcmf_flowring *flow, u8 flowid,
++static void brcmf_flowring_block(struct brcmf_flowring *flow, u16 flowid,
+ 				 bool blocked)
+ {
+ 	struct brcmf_flowring_ring *ring;
+@@ -228,10 +232,10 @@ static void brcmf_flowring_block(struct brcmf_flowring *flow, u8 flowid,
+ }
+ 
+ 
+-void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid)
++void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid)
+ {
+ 	struct brcmf_flowring_ring *ring;
+-	u8 hash_idx;
++	u16 hash_idx;
+ 	struct sk_buff *skb;
+ 
+ 	ring = flow->rings[flowid];
+@@ -253,7 +257,7 @@ void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid)
+ }
+ 
+ 
+-u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
++u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u16 flowid,
+ 			   struct sk_buff *skb)
+ {
+ 	struct brcmf_flowring_ring *ring;
+@@ -279,7 +283,7 @@ u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
+ }
+ 
+ 
+-struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid)
++struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u16 flowid)
+ {
+ 	struct brcmf_flowring_ring *ring;
+ 	struct sk_buff *skb;
+@@ -300,7 +304,7 @@ struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid)
+ }
+ 
+ 
+-void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
++void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u16 flowid,
+ 			     struct sk_buff *skb)
+ {
+ 	struct brcmf_flowring_ring *ring;
+@@ -311,7 +315,7 @@ void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
+ }
+ 
+ 
+-u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid)
++u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u16 flowid)
+ {
+ 	struct brcmf_flowring_ring *ring;
+ 
+@@ -326,7 +330,7 @@ u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid)
+ }
+ 
+ 
+-void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid)
++void brcmf_flowring_open(struct brcmf_flowring *flow, u16 flowid)
+ {
+ 	struct brcmf_flowring_ring *ring;
+ 
+@@ -340,10 +344,10 @@ void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid)
+ }
+ 
+ 
+-u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u8 flowid)
++u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u16 flowid)
+ {
+ 	struct brcmf_flowring_ring *ring;
+-	u8 hash_idx;
++	u16 hash_idx;
+ 
+ 	ring = flow->rings[flowid];
+ 	hash_idx = ring->hash_id;
+@@ -384,7 +388,7 @@ void brcmf_flowring_detach(struct brcmf_flowring *flow)
+ 	struct brcmf_pub *drvr = bus_if->drvr;
+ 	struct brcmf_flowring_tdls_entry *search;
+ 	struct brcmf_flowring_tdls_entry *remove;
+-	u8 flowid;
++	u16 flowid;
+ 
+ 	for (flowid = 0; flowid < flow->nrofrings; flowid++) {
+ 		if (flow->rings[flowid])
+@@ -408,7 +412,7 @@ void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx,
+ 	struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
+ 	struct brcmf_pub *drvr = bus_if->drvr;
+ 	u32 i;
+-	u8 flowid;
++	u16 flowid;
+ 
+ 	if (flow->addr_mode[ifidx] != addr_mode) {
+ 		for (i = 0; i < ARRAY_SIZE(flow->hash); i++) {
+@@ -434,7 +438,7 @@ void brcmf_flowring_delete_peer(struct brcmf_flowring *flow, int ifidx,
+ 	struct brcmf_flowring_tdls_entry *prev;
+ 	struct brcmf_flowring_tdls_entry *search;
+ 	u32 i;
+-	u8 flowid;
++	u16 flowid;
+ 	bool sta;
+ 
+ 	sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h
+index 95fd1c9675d1..068e68d94999 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h
+@@ -16,7 +16,7 @@
+ #define BRCMFMAC_FLOWRING_H
+ 
+ 
+-#define BRCMF_FLOWRING_HASHSIZE		256
++#define BRCMF_FLOWRING_HASHSIZE		512		/* has to be 2^x */
+ #define BRCMF_FLOWRING_INVALID_ID	0xFFFFFFFF
+ 
+ 
+@@ -24,7 +24,7 @@ struct brcmf_flowring_hash {
+ 	u8 mac[ETH_ALEN];
+ 	u8 fifo;
+ 	u8 ifidx;
+-	u8 flowid;
++	u16 flowid;
+ };
+ 
+ enum ring_status {
+@@ -61,16 +61,16 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
+ 			  u8 prio, u8 ifidx);
+ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
+ 			  u8 prio, u8 ifidx);
+-void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid);
+-void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid);
+-u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid);
+-u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
++void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid);
++void brcmf_flowring_open(struct brcmf_flowring *flow, u16 flowid);
++u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u16 flowid);
++u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u16 flowid,
+ 			   struct sk_buff *skb);
+-struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid);
+-void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
++struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u16 flowid);
++void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u16 flowid,
+ 			     struct sk_buff *skb);
+-u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid);
+-u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u8 flowid);
++u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u16 flowid);
++u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u16 flowid);
+ struct brcmf_flowring *brcmf_flowring_attach(struct device *dev, u16 nrofrings);
+ void brcmf_flowring_detach(struct brcmf_flowring *flow);
+ void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx,
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+index c2bdb91746cf..922966734a7f 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+@@ -677,7 +677,7 @@ static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx,
+ }
+ 
+ 
+-static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u8 flowid)
++static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u16 flowid)
+ {
+ 	struct brcmf_flowring *flow = msgbuf->flow;
+ 	struct brcmf_commonring *commonring;
+@@ -1310,7 +1310,7 @@ int brcmf_proto_msgbuf_rx_trigger(struct device *dev)
+ }
+ 
+ 
+-void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid)
++void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid)
+ {
+ 	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
+ 	struct msgbuf_tx_flowring_delete_req *delete;
+@@ -1415,6 +1415,13 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
+ 	u32 count;
+ 
+ 	if_msgbuf = drvr->bus_if->msgbuf;
++
++	if (if_msgbuf->nrof_flowrings >= BRCMF_FLOWRING_HASHSIZE) {
++		brcmf_err("driver not configured for this many flowrings %d\n",
++			  if_msgbuf->nrof_flowrings);
++		if_msgbuf->nrof_flowrings = BRCMF_FLOWRING_HASHSIZE - 1;
++	}
++
+ 	msgbuf = kzalloc(sizeof(*msgbuf), GFP_KERNEL);
+ 	if (!msgbuf)
+ 		goto fail;
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
+index 3d513e407e3d..ee6906a3c3f6 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
+@@ -33,7 +33,7 @@
+ 
+ 
+ int brcmf_proto_msgbuf_rx_trigger(struct device *dev);
+-void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid);
++void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid);
+ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr);
+ void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr);
+ #else
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index d70a1716f3e0..1486f33a743e 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -1143,6 +1143,8 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
+ 	/* the fw is stopped, the aux sta is dead: clean up driver state */
+ 	iwl_mvm_del_aux_sta(mvm);
+ 
++	iwl_free_fw_paging(mvm);
++
+ 	/*
+ 	 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
+ 	 * won't be called in this case).
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+index e80be9a59520..89ea70deeb84 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+@@ -684,8 +684,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
+ 	for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
+ 		kfree(mvm->nvm_sections[i].data);
+ 
+-	iwl_free_fw_paging(mvm);
+-
+ 	iwl_mvm_tof_clean(mvm);
+ 
+ 	ieee80211_free_hw(mvm->hw);
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+index 5a854c609477..1198caac35c8 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -731,8 +731,8 @@ static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
+ 	 */
+ 	val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
+ 	if (val & (BIT(1) | BIT(17))) {
+-		IWL_INFO(trans,
+-			 "can't access the RSA semaphore it is write protected\n");
++		IWL_DEBUG_INFO(trans,
++			       "can't access the RSA semaphore it is write protected\n");
+ 		return 0;
+ 	}
+ 
+diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
+index ff3ee9dfbbd5..23bae87d4d3d 100644
+--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
++++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
+@@ -607,11 +607,13 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
+ 
+ 	case EVENT_PS_AWAKE:
+ 		mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n");
+-		if (!adapter->pps_uapsd_mode && priv->port_open &&
++		if (!adapter->pps_uapsd_mode &&
++		    (priv->port_open ||
++		     (priv->bss_mode == NL80211_IFTYPE_ADHOC)) &&
+ 		    priv->media_connected && adapter->sleep_period.period) {
+-				adapter->pps_uapsd_mode = true;
+-				mwifiex_dbg(adapter, EVENT,
+-					    "event: PPS/UAPSD mode activated\n");
++			adapter->pps_uapsd_mode = true;
++			mwifiex_dbg(adapter, EVENT,
++				    "event: PPS/UAPSD mode activated\n");
+ 		}
+ 		adapter->tx_lock_flag = false;
+ 		if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) {
+diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+index 6a4fc5d183cf..d7db6f23e728 100644
+--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
++++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+@@ -314,6 +314,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
+ 			mwifiex_dbg(adapter, ERROR,
+ 				    "Attempt to reconnect on csa closed chan(%d)\n",
+ 				    bss_desc->channel);
++			ret = -1;
+ 			goto done;
+ 		}
+ 
+diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
+index acccd6734e3b..499e5a741c62 100644
+--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
++++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
+@@ -475,7 +475,8 @@ mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
+ 		priv = adapter->priv[i];
+ 		if (!priv)
+ 			continue;
+-		if (!priv->port_open)
++		if (!priv->port_open &&
++		    (priv->bss_mode != NL80211_IFTYPE_ADHOC))
+ 			continue;
+ 		if (adapter->if_ops.is_port_ready &&
+ 		    !adapter->if_ops.is_port_ready(priv))
+@@ -1099,7 +1100,8 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
+ 
+ 			priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv;
+ 
+-			if (!priv_tmp->port_open ||
++			if (((priv_tmp->bss_mode != NL80211_IFTYPE_ADHOC) &&
++			     !priv_tmp->port_open) ||
+ 			    (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0))
+ 				continue;
+ 
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+index bf9afbf46c1b..4b0bb6b4f6f1 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
++++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+@@ -1026,6 +1026,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ 	{ USB_DEVICE(0x0411, 0x01a2) },
+ 	{ USB_DEVICE(0x0411, 0x01ee) },
+ 	{ USB_DEVICE(0x0411, 0x01a8) },
++	{ USB_DEVICE(0x0411, 0x01fd) },
+ 	/* Corega */
+ 	{ USB_DEVICE(0x07aa, 0x002f) },
+ 	{ USB_DEVICE(0x07aa, 0x003c) },
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c
+index 28f7010e7108..1aca77719521 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rc.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rc.c
+@@ -41,7 +41,7 @@ static u8 _rtl_rc_get_highest_rix(struct rtl_priv *rtlpriv,
+ 	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ 	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ 	struct rtl_sta_info *sta_entry = NULL;
+-	u8 wireless_mode = 0;
++	u16 wireless_mode = 0;
+ 
+ 	/*
+ 	 *this rate is no use for true rate, firmware
+@@ -99,7 +99,7 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
+ {
+ 	struct rtl_mac *mac = rtl_mac(rtlpriv);
+ 	struct rtl_sta_info *sta_entry = NULL;
+-	u8 wireless_mode = 0;
++	u16 wireless_mode = 0;
+ 	u8 sgi_20 = 0, sgi_40 = 0, sgi_80 = 0;
+ 
+ 	if (sta) {
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+index bbb789f8990b..5e5719b26774 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+@@ -3855,7 +3855,7 @@ void rtl8821ae_update_channel_access_setting(struct ieee80211_hw *hw)
+ {
+ 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+ 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+-	u8 wireless_mode = mac->mode;
++	u16 wireless_mode = mac->mode;
+ 	u8 sifs_timer, r2t_sifs;
+ 
+ 	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
+diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
+index 4544752a2ba8..84397b190cc0 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
++++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
+@@ -1323,14 +1323,13 @@ struct rtl_tid_data {
+ 
+ struct rtl_sta_info {
+ 	struct list_head list;
+-	u8 ratr_index;
+-	u8 wireless_mode;
+-	u8 mimo_ps;
+-	u8 mac_addr[ETH_ALEN];
+ 	struct rtl_tid_data tids[MAX_TID_COUNT];
+-
+ 	/* just used for ap adhoc or mesh*/
+ 	struct rssi_sta rssi_stat;
++	u16 wireless_mode;
++	u8 ratr_index;
++	u8 mimo_ps;
++	u8 mac_addr[ETH_ALEN];
+ } __packed;
+ 
+ struct rtl_priv;
+diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
+index c96405498bf4..4b59f67724de 100644
+--- a/drivers/net/wireless/ti/wlcore/event.c
++++ b/drivers/net/wireless/ti/wlcore/event.c
+@@ -38,7 +38,7 @@
+ 
+ int wlcore_event_fw_logger(struct wl1271 *wl)
+ {
+-	u32 ret;
++	int ret;
+ 	struct fw_logger_information fw_log;
+ 	u8  *buffer;
+ 	u32 internal_fw_addrbase = WL18XX_DATA_RAM_BASE_ADDRESS;
+diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
+index 1f44a151d206..d5a099b022e4 100644
+--- a/drivers/nfc/st21nfca/i2c.c
++++ b/drivers/nfc/st21nfca/i2c.c
+@@ -524,8 +524,10 @@ static int st21nfca_hci_i2c_acpi_request_resources(struct i2c_client *client)
+ 	/* Get EN GPIO from ACPI */
+ 	gpiod_ena = devm_gpiod_get_index(dev, ST21NFCA_GPIO_NAME_EN, 1,
+ 					 GPIOD_OUT_LOW);
+-	if (!IS_ERR(gpiod_ena))
+-		phy->gpio_ena = desc_to_gpio(gpiod_ena);
++	if (!IS_ERR(gpiod_ena)) {
++		nfc_err(dev, "Unable to get ENABLE GPIO\n");
++		return -ENODEV;
++	}
+ 
+ 	phy->gpio_ena = desc_to_gpio(gpiod_ena);
+ 
+diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
+index 588803ad6847..6ccba0d862df 100644
+--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
++++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
+@@ -357,20 +357,6 @@ static int amd_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
+ 	return 0;
+ }
+ 
+-static int amd_ntb_peer_db_addr(struct ntb_dev *ntb,
+-				phys_addr_t *db_addr,
+-				resource_size_t *db_size)
+-{
+-	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
+-
+-	if (db_addr)
+-		*db_addr = (phys_addr_t)(ndev->peer_mmio + AMD_DBREQ_OFFSET);
+-	if (db_size)
+-		*db_size = sizeof(u32);
+-
+-	return 0;
+-}
+-
+ static int amd_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
+ {
+ 	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
+@@ -415,20 +401,6 @@ static int amd_ntb_spad_write(struct ntb_dev *ntb,
+ 	return 0;
+ }
+ 
+-static int amd_ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
+-				  phys_addr_t *spad_addr)
+-{
+-	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
+-
+-	if (idx < 0 || idx >= ndev->spad_count)
+-		return -EINVAL;
+-
+-	if (spad_addr)
+-		*spad_addr = (phys_addr_t)(ndev->self_mmio + AMD_SPAD_OFFSET +
+-					   ndev->peer_spad + (idx << 2));
+-	return 0;
+-}
+-
+ static u32 amd_ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
+ {
+ 	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
+@@ -472,12 +444,10 @@ static const struct ntb_dev_ops amd_ntb_ops = {
+ 	.db_clear		= amd_ntb_db_clear,
+ 	.db_set_mask		= amd_ntb_db_set_mask,
+ 	.db_clear_mask		= amd_ntb_db_clear_mask,
+-	.peer_db_addr		= amd_ntb_peer_db_addr,
+ 	.peer_db_set		= amd_ntb_peer_db_set,
+ 	.spad_count		= amd_ntb_spad_count,
+ 	.spad_read		= amd_ntb_spad_read,
+ 	.spad_write		= amd_ntb_spad_write,
+-	.peer_spad_addr		= amd_ntb_peer_spad_addr,
+ 	.peer_spad_read		= amd_ntb_peer_spad_read,
+ 	.peer_spad_write	= amd_ntb_peer_spad_write,
+ };
+diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
+index c8a37ba4b4f9..6bdc1e7b7503 100644
+--- a/drivers/ntb/test/ntb_perf.c
++++ b/drivers/ntb/test/ntb_perf.c
+@@ -178,7 +178,7 @@ static void perf_copy_callback(void *data)
+ 	atomic_dec(&pctx->dma_sync);
+ }
+ 
+-static ssize_t perf_copy(struct pthr_ctx *pctx, char *dst,
++static ssize_t perf_copy(struct pthr_ctx *pctx, char __iomem *dst,
+ 			 char *src, size_t size)
+ {
+ 	struct perf_ctx *perf = pctx->perf;
+@@ -189,7 +189,8 @@ static ssize_t perf_copy(struct pthr_ctx *pctx, char *dst,
+ 	dma_cookie_t cookie;
+ 	size_t src_off, dst_off;
+ 	struct perf_mw *mw = &perf->mw;
+-	u64 vbase, dst_vaddr;
++	void __iomem *vbase;
++	void __iomem *dst_vaddr;
+ 	dma_addr_t dst_phys;
+ 	int retries = 0;
+ 
+@@ -204,14 +205,14 @@ static ssize_t perf_copy(struct pthr_ctx *pctx, char *dst,
+ 	}
+ 
+ 	device = chan->device;
+-	src_off = (size_t)src & ~PAGE_MASK;
+-	dst_off = (size_t)dst & ~PAGE_MASK;
++	src_off = (uintptr_t)src & ~PAGE_MASK;
++	dst_off = (uintptr_t __force)dst & ~PAGE_MASK;
+ 
+ 	if (!is_dma_copy_aligned(device, src_off, dst_off, size))
+ 		return -ENODEV;
+ 
+-	vbase = (u64)(u64 *)mw->vbase;
+-	dst_vaddr = (u64)(u64 *)dst;
++	vbase = mw->vbase;
++	dst_vaddr = dst;
+ 	dst_phys = mw->phys_addr + (dst_vaddr - vbase);
+ 
+ 	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
+@@ -261,13 +262,13 @@ err_get_unmap:
+ 	return 0;
+ }
+ 
+-static int perf_move_data(struct pthr_ctx *pctx, char *dst, char *src,
++static int perf_move_data(struct pthr_ctx *pctx, char __iomem *dst, char *src,
+ 			  u64 buf_size, u64 win_size, u64 total)
+ {
+ 	int chunks, total_chunks, i;
+ 	int copied_chunks = 0;
+ 	u64 copied = 0, result;
+-	char *tmp = dst;
++	char __iomem *tmp = dst;
+ 	u64 perf, diff_us;
+ 	ktime_t kstart, kstop, kdiff;
+ 
+@@ -324,7 +325,7 @@ static int ntb_perf_thread(void *data)
+ 	struct perf_ctx *perf = pctx->perf;
+ 	struct pci_dev *pdev = perf->ntb->pdev;
+ 	struct perf_mw *mw = &perf->mw;
+-	char *dst;
++	char __iomem *dst;
+ 	u64 win_size, buf_size, total;
+ 	void *src;
+ 	int rc, node, i;
+@@ -364,7 +365,7 @@ static int ntb_perf_thread(void *data)
+ 	if (buf_size > MAX_TEST_SIZE)
+ 		buf_size = MAX_TEST_SIZE;
+ 
+-	dst = (char *)mw->vbase;
++	dst = (char __iomem *)mw->vbase;
+ 
+ 	atomic_inc(&perf->tsync);
+ 	while (atomic_read(&perf->tsync) != perf->perf_threads)
+diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
+index 5d28e9405f32..cdbab06bf74f 100644
+--- a/drivers/nvdimm/bus.c
++++ b/drivers/nvdimm/bus.c
+@@ -335,7 +335,7 @@ static const struct nd_cmd_desc __nd_cmd_dimm_descs[] = {
+ 	[ND_CMD_IMPLEMENTED] = { },
+ 	[ND_CMD_SMART] = {
+ 		.out_num = 2,
+-		.out_sizes = { 4, 8, },
++		.out_sizes = { 4, 128, },
+ 	},
+ 	[ND_CMD_SMART_THRESHOLD] = {
+ 		.out_num = 2,
+@@ -513,10 +513,10 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
+ 
+ 	/* fail write commands (when read-only) */
+ 	if (read_only)
+-		switch (ioctl_cmd) {
+-		case ND_IOCTL_VENDOR:
+-		case ND_IOCTL_SET_CONFIG_DATA:
+-		case ND_IOCTL_ARS_START:
++		switch (cmd) {
++		case ND_CMD_VENDOR:
++		case ND_CMD_SET_CONFIG_DATA:
++		case ND_CMD_ARS_START:
+ 			dev_dbg(&nvdimm_bus->dev, "'%s' command while read-only.\n",
+ 					nvdimm ? nvdimm_cmd_name(cmd)
+ 					: nvdimm_bus_cmd_name(cmd));
+diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
+index ae81a2f1da50..f0b56b3aac4d 100644
+--- a/drivers/nvdimm/pfn_devs.c
++++ b/drivers/nvdimm/pfn_devs.c
+@@ -315,7 +315,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
+ 	} else {
+ 		/* from init we validate */
+ 		if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
+-			return -EINVAL;
++			return -ENODEV;
+ 	}
+ 
+ 	if (nd_pfn->align > nvdimm_namespace_capacity(ndns)) {
+diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
+index 8d0b54670184..02c574f8ccb3 100644
+--- a/drivers/nvdimm/pmem.c
++++ b/drivers/nvdimm/pmem.c
+@@ -66,22 +66,25 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
+ 			unsigned int len, unsigned int off, int rw,
+ 			sector_t sector)
+ {
++	int rc = 0;
+ 	void *mem = kmap_atomic(page);
+ 	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
+ 	void __pmem *pmem_addr = pmem->virt_addr + pmem_off;
+ 
+ 	if (rw == READ) {
+ 		if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
+-			return -EIO;
+-		memcpy_from_pmem(mem + off, pmem_addr, len);
+-		flush_dcache_page(page);
++			rc = -EIO;
++		else {
++			memcpy_from_pmem(mem + off, pmem_addr, len);
++			flush_dcache_page(page);
++		}
+ 	} else {
+ 		flush_dcache_page(page);
+ 		memcpy_to_pmem(pmem_addr, mem + off, len);
+ 	}
+ 
+ 	kunmap_atomic(mem);
+-	return 0;
++	return rc;
+ }
+ 
+ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
+@@ -311,9 +314,16 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
+ 	 * implementation will limit the pfns advertised through
+ 	 * ->direct_access() to those that are included in the memmap.
+ 	 */
+-	if (nd_pfn->mode == PFN_MODE_PMEM)
+-		offset = ALIGN(SZ_8K + 64 * npfns, nd_pfn->align);
+-	else if (nd_pfn->mode == PFN_MODE_RAM)
++	if (nd_pfn->mode == PFN_MODE_PMEM) {
++		unsigned long memmap_size;
++
++		/*
++		 * vmemmap_populate_hugepages() allocates the memmap array in
++		 * HPAGE_SIZE chunks.
++		 */
++		memmap_size = ALIGN(64 * npfns, PMD_SIZE);
++		offset = ALIGN(SZ_8K + memmap_size, nd_pfn->align);
++	} else if (nd_pfn->mode == PFN_MODE_RAM)
+ 		offset = ALIGN(SZ_8K, nd_pfn->align);
+ 	else
+ 		goto err;
+diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c
+index 8ba19bba3156..2bb3c5799ac4 100644
+--- a/drivers/nvmem/mxs-ocotp.c
++++ b/drivers/nvmem/mxs-ocotp.c
+@@ -94,7 +94,7 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
+ 	if (ret)
+ 		goto close_banks;
+ 
+-	while (val_size) {
++	while (val_size >= reg_size) {
+ 		if ((offset < OCOTP_DATA_OFFSET) || (offset % 16)) {
+ 			/* fill up non-data register */
+ 			*buf = 0;
+@@ -103,7 +103,7 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
+ 		}
+ 
+ 		buf++;
+-		val_size--;
++		val_size -= reg_size;
+ 		offset += reg_size;
+ 	}
+ 
+diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
+index 1a3556a9e9ea..ed01c0172e4a 100644
+--- a/drivers/of/of_reserved_mem.c
++++ b/drivers/of/of_reserved_mem.c
+@@ -32,11 +32,13 @@ int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
+ 	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
+ 	phys_addr_t *res_base)
+ {
++	phys_addr_t base;
+ 	/*
+ 	 * We use __memblock_alloc_base() because memblock_alloc_base()
+ 	 * panic()s on allocation failure.
+ 	 */
+-	phys_addr_t base = __memblock_alloc_base(size, align, end);
++	end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
++	base = __memblock_alloc_base(size, align, end);
+ 	if (!base)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
+index fe600964fa50..88ccfeaa49c7 100644
+--- a/drivers/pci/host/pci-imx6.c
++++ b/drivers/pci/host/pci-imx6.c
+@@ -32,7 +32,7 @@
+ #define to_imx6_pcie(x)	container_of(x, struct imx6_pcie, pp)
+ 
+ struct imx6_pcie {
+-	struct gpio_desc	*reset_gpio;
++	int			reset_gpio;
+ 	struct clk		*pcie_bus;
+ 	struct clk		*pcie_phy;
+ 	struct clk		*pcie;
+@@ -287,10 +287,10 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
+ 	usleep_range(200, 500);
+ 
+ 	/* Some boards don't have PCIe reset GPIO. */
+-	if (imx6_pcie->reset_gpio) {
+-		gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 0);
++	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
++		gpio_set_value_cansleep(imx6_pcie->reset_gpio, 0);
+ 		msleep(100);
+-		gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 1);
++		gpio_set_value_cansleep(imx6_pcie->reset_gpio, 1);
+ 	}
+ 	return 0;
+ 
+@@ -561,6 +561,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
+ {
+ 	struct imx6_pcie *imx6_pcie;
+ 	struct pcie_port *pp;
++	struct device_node *np = pdev->dev.of_node;
+ 	struct resource *dbi_base;
+ 	int ret;
+ 
+@@ -581,8 +582,15 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
+ 		return PTR_ERR(pp->dbi_base);
+ 
+ 	/* Fetch GPIOs */
+-	imx6_pcie->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
+-							GPIOD_OUT_LOW);
++	imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
++	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
++		ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
++					    GPIOF_OUT_INIT_LOW, "PCIe reset");
++		if (ret) {
++			dev_err(&pdev->dev, "unable to get reset gpio\n");
++			return ret;
++		}
++	}
+ 
+ 	/* Fetch clocks */
+ 	imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 6d7ab9bb0d5a..6b0056e9c33e 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -179,6 +179,9 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ 	u16 orig_cmd;
+ 	struct pci_bus_region region, inverted_region;
+ 
++	if (dev->non_compliant_bars)
++		return 0;
++
+ 	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
+ 
+ 	/* No printks while decoding is disabled! */
+@@ -1171,6 +1174,7 @@ static void pci_msi_setup_pci_dev(struct pci_dev *dev)
+ int pci_setup_device(struct pci_dev *dev)
+ {
+ 	u32 class;
++	u16 cmd;
+ 	u8 hdr_type;
+ 	int pos = 0;
+ 	struct pci_bus_region region;
+@@ -1214,6 +1218,16 @@ int pci_setup_device(struct pci_dev *dev)
+ 	/* device class may be changed after fixup */
+ 	class = dev->class >> 8;
+ 
++	if (dev->non_compliant_bars) {
++		pci_read_config_word(dev, PCI_COMMAND, &cmd);
++		if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
++			dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
++			cmd &= ~PCI_COMMAND_IO;
++			cmd &= ~PCI_COMMAND_MEMORY;
++			pci_write_config_word(dev, PCI_COMMAND, cmd);
++		}
++	}
++
+ 	switch (dev->hdr_type) {		    /* header type */
+ 	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
+ 		if (class == PCI_CLASS_BRIDGE_PCI)
+diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
+index 4c2fa05b4589..944674ee3464 100644
+--- a/drivers/pcmcia/db1xxx_ss.c
++++ b/drivers/pcmcia/db1xxx_ss.c
+@@ -56,6 +56,7 @@ struct db1x_pcmcia_sock {
+ 	int	stschg_irq;	/* card-status-change irq */
+ 	int	card_irq;	/* card irq */
+ 	int	eject_irq;	/* db1200/pb1200 have these */
++	int	insert_gpio;	/* db1000 carddetect gpio */
+ 
+ #define BOARD_TYPE_DEFAULT	0	/* most boards */
+ #define BOARD_TYPE_DB1200	1	/* IRQs aren't gpios */
+@@ -83,7 +84,7 @@ static int db1200_card_inserted(struct db1x_pcmcia_sock *sock)
+ /* carddetect gpio: low-active */
+ static int db1000_card_inserted(struct db1x_pcmcia_sock *sock)
+ {
+-	return !gpio_get_value(irq_to_gpio(sock->insert_irq));
++	return !gpio_get_value(sock->insert_gpio);
+ }
+ 
+ static int db1x_card_inserted(struct db1x_pcmcia_sock *sock)
+@@ -457,9 +458,15 @@ static int db1x_pcmcia_socket_probe(struct platform_device *pdev)
+ 	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "card");
+ 	sock->card_irq = r ? r->start : 0;
+ 
+-	/* insert: irq which triggers on card insertion/ejection */
++	/* insert: irq which triggers on card insertion/ejection
++	 * BIG FAT NOTE: on DB1000/1100/1500/1550 we pass a GPIO here!
++	 */
+ 	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "insert");
+ 	sock->insert_irq = r ? r->start : -1;
++	if (sock->board_type == BOARD_TYPE_DEFAULT) {
++		sock->insert_gpio = r ? r->start : -1;
++		sock->insert_irq = r ? gpio_to_irq(r->start) : -1;
++	}
+ 
+ 	/* stschg: irq which trigger on card status change (optional) */
+ 	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "stschg");
+diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+index 0f5997ceb494..08b1d93da9fe 100644
+--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
++++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+@@ -779,7 +779,7 @@ static int bcm2835_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
+ 		}
+ 		if (num_pulls) {
+ 			err = of_property_read_u32_index(np, "brcm,pull",
+-					(num_funcs > 1) ? i : 0, &pull);
++					(num_pulls > 1) ? i : 0, &pull);
+ 			if (err)
+ 				goto out;
+ 			err = bcm2835_pctl_dt_node_to_map_pull(pc, np, pin,
+diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
+index a5bb93987378..1029aa7889b5 100644
+--- a/drivers/pinctrl/freescale/pinctrl-imx.c
++++ b/drivers/pinctrl/freescale/pinctrl-imx.c
+@@ -726,19 +726,18 @@ int imx_pinctrl_probe(struct platform_device *pdev,
+ 
+ 	if (of_property_read_bool(dev_np, "fsl,input-sel")) {
+ 		np = of_parse_phandle(dev_np, "fsl,input-sel", 0);
+-		if (np) {
+-			ipctl->input_sel_base = of_iomap(np, 0);
+-			if (IS_ERR(ipctl->input_sel_base)) {
+-				of_node_put(np);
+-				dev_err(&pdev->dev,
+-					"iomuxc input select base address not found\n");
+-				return PTR_ERR(ipctl->input_sel_base);
+-			}
+-		} else {
++		if (!np) {
+ 			dev_err(&pdev->dev, "iomuxc fsl,input-sel property not found\n");
+ 			return -EINVAL;
+ 		}
++
++		ipctl->input_sel_base = of_iomap(np, 0);
+ 		of_node_put(np);
++		if (!ipctl->input_sel_base) {
++			dev_err(&pdev->dev,
++				"iomuxc input select base address not found\n");
++			return -ENOMEM;
++		}
+ 	}
+ 
+ 	imx_pinctrl_desc.name = dev_name(&pdev->dev);
+diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+index e96e86d2e745..3878d23ca7a8 100644
+--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
++++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+@@ -949,7 +949,8 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
+ 	struct mtk_pinctrl *pctl = dev_get_drvdata(chip->parent);
+ 	int eint_num, virq, eint_offset;
+ 	unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask, dbnc;
+-	static const unsigned int dbnc_arr[] = {0 , 1, 16, 32, 64, 128, 256};
++	static const unsigned int debounce_time[] = {500, 1000, 16000, 32000, 64000,
++						128000, 256000};
+ 	const struct mtk_desc_pin *pin;
+ 	struct irq_data *d;
+ 
+@@ -967,9 +968,9 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
+ 	if (!mtk_eint_can_en_debounce(pctl, eint_num))
+ 		return -ENOSYS;
+ 
+-	dbnc = ARRAY_SIZE(dbnc_arr);
+-	for (i = 0; i < ARRAY_SIZE(dbnc_arr); i++) {
+-		if (debounce <= dbnc_arr[i]) {
++	dbnc = ARRAY_SIZE(debounce_time);
++	for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
++		if (debounce <= debounce_time[i]) {
+ 			dbnc = i;
+ 			break;
+ 		}
+diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+index 352406108fa0..c8969dd49449 100644
+--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
++++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+@@ -990,7 +990,7 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
+ 		int val;
+ 
+ 		if (pull)
+-			pullidx = data_out ? 1 : 2;
++			pullidx = data_out ? 2 : 1;
+ 
+ 		seq_printf(s, " gpio-%-3d (%-20.20s) in  %s %s",
+ 			   gpio,
+diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
+index ee69db6ae1c7..e1c0d4e1bb33 100644
+--- a/drivers/pinctrl/pinctrl-at91-pio4.c
++++ b/drivers/pinctrl/pinctrl-at91-pio4.c
+@@ -722,9 +722,11 @@ static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
+ 			break;
+ 		case PIN_CONFIG_BIAS_PULL_UP:
+ 			conf |= ATMEL_PIO_PUEN_MASK;
++			conf &= (~ATMEL_PIO_PDEN_MASK);
+ 			break;
+ 		case PIN_CONFIG_BIAS_PULL_DOWN:
+ 			conf |= ATMEL_PIO_PDEN_MASK;
++			conf &= (~ATMEL_PIO_PUEN_MASK);
+ 			break;
+ 		case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ 			if (arg == 0)
+diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
+index 856f736cb1a6..2673cd9d106e 100644
+--- a/drivers/pinctrl/pinctrl-pistachio.c
++++ b/drivers/pinctrl/pinctrl-pistachio.c
+@@ -469,27 +469,27 @@ static const char * const pistachio_mips_pll_lock_groups[] = {
+ 	"mfio83",
+ };
+ 
+-static const char * const pistachio_sys_pll_lock_groups[] = {
++static const char * const pistachio_audio_pll_lock_groups[] = {
+ 	"mfio84",
+ };
+ 
+-static const char * const pistachio_wifi_pll_lock_groups[] = {
++static const char * const pistachio_rpu_v_pll_lock_groups[] = {
+ 	"mfio85",
+ };
+ 
+-static const char * const pistachio_bt_pll_lock_groups[] = {
++static const char * const pistachio_rpu_l_pll_lock_groups[] = {
+ 	"mfio86",
+ };
+ 
+-static const char * const pistachio_rpu_v_pll_lock_groups[] = {
++static const char * const pistachio_sys_pll_lock_groups[] = {
+ 	"mfio87",
+ };
+ 
+-static const char * const pistachio_rpu_l_pll_lock_groups[] = {
++static const char * const pistachio_wifi_pll_lock_groups[] = {
+ 	"mfio88",
+ };
+ 
+-static const char * const pistachio_audio_pll_lock_groups[] = {
++static const char * const pistachio_bt_pll_lock_groups[] = {
+ 	"mfio89",
+ };
+ 
+@@ -559,12 +559,12 @@ enum pistachio_mux_option {
+ 	PISTACHIO_FUNCTION_DREQ4,
+ 	PISTACHIO_FUNCTION_DREQ5,
+ 	PISTACHIO_FUNCTION_MIPS_PLL_LOCK,
++	PISTACHIO_FUNCTION_AUDIO_PLL_LOCK,
++	PISTACHIO_FUNCTION_RPU_V_PLL_LOCK,
++	PISTACHIO_FUNCTION_RPU_L_PLL_LOCK,
+ 	PISTACHIO_FUNCTION_SYS_PLL_LOCK,
+ 	PISTACHIO_FUNCTION_WIFI_PLL_LOCK,
+ 	PISTACHIO_FUNCTION_BT_PLL_LOCK,
+-	PISTACHIO_FUNCTION_RPU_V_PLL_LOCK,
+-	PISTACHIO_FUNCTION_RPU_L_PLL_LOCK,
+-	PISTACHIO_FUNCTION_AUDIO_PLL_LOCK,
+ 	PISTACHIO_FUNCTION_DEBUG_RAW_CCA_IND,
+ 	PISTACHIO_FUNCTION_DEBUG_ED_SEC20_CCA_IND,
+ 	PISTACHIO_FUNCTION_DEBUG_ED_SEC40_CCA_IND,
+@@ -620,12 +620,12 @@ static const struct pistachio_function pistachio_functions[] = {
+ 	FUNCTION(dreq4),
+ 	FUNCTION(dreq5),
+ 	FUNCTION(mips_pll_lock),
++	FUNCTION(audio_pll_lock),
++	FUNCTION(rpu_v_pll_lock),
++	FUNCTION(rpu_l_pll_lock),
+ 	FUNCTION(sys_pll_lock),
+ 	FUNCTION(wifi_pll_lock),
+ 	FUNCTION(bt_pll_lock),
+-	FUNCTION(rpu_v_pll_lock),
+-	FUNCTION(rpu_l_pll_lock),
+-	FUNCTION(audio_pll_lock),
+ 	FUNCTION(debug_raw_cca_ind),
+ 	FUNCTION(debug_ed_sec20_cca_ind),
+ 	FUNCTION(debug_ed_sec40_cca_ind),
+diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
+index d24e5f1d1525..bd2e657163b8 100644
+--- a/drivers/pinctrl/pinctrl-single.c
++++ b/drivers/pinctrl/pinctrl-single.c
+@@ -1273,9 +1273,9 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
+ 
+ 		/* Parse pins in each row from LSB */
+ 		while (mask) {
+-			bit_pos = ffs(mask);
++			bit_pos = __ffs(mask);
+ 			pin_num_from_lsb = bit_pos / pcs->bits_per_pin;
+-			mask_pos = ((pcs->fmask) << (bit_pos - 1));
++			mask_pos = ((pcs->fmask) << bit_pos);
+ 			val_pos = val & mask_pos;
+ 			submask = mask & mask_pos;
+ 
+@@ -1844,7 +1844,7 @@ static int pcs_probe(struct platform_device *pdev)
+ 	ret = of_property_read_u32(np, "pinctrl-single,function-mask",
+ 				   &pcs->fmask);
+ 	if (!ret) {
+-		pcs->fshift = ffs(pcs->fmask) - 1;
++		pcs->fshift = __ffs(pcs->fmask);
+ 		pcs->fmax = pcs->fmask >> pcs->fshift;
+ 	} else {
+ 		/* If mask property doesn't exist, function mux is invalid. */
+diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
+index 181ea98a63b7..2b0d70217bbd 100644
+--- a/drivers/pinctrl/sh-pfc/core.c
++++ b/drivers/pinctrl/sh-pfc/core.c
+@@ -545,7 +545,9 @@ static int sh_pfc_probe(struct platform_device *pdev)
+ 			return ret;
+ 	}
+ 
+-	pinctrl_provide_dummies();
++	/* Enable dummy states for those platforms without pinctrl support */
++	if (!of_have_populated_dt())
++		pinctrl_provide_dummies();
+ 
+ 	ret = sh_pfc_init_ranges(pfc);
+ 	if (ret < 0)
+diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
+index 00265f0435a7..8b381d69df86 100644
+--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
++++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
+@@ -485,6 +485,7 @@ static const struct sunxi_pinctrl_desc sun8i_a33_pinctrl_data = {
+ 	.pins = sun8i_a33_pins,
+ 	.npins = ARRAY_SIZE(sun8i_a33_pins),
+ 	.irq_banks = 2,
++	.irq_bank_base = 1,
+ };
+ 
+ static int sun8i_a33_pinctrl_probe(struct platform_device *pdev)
+diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+index 7a2465f5e71e..884c2b314567 100644
+--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
++++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+@@ -578,7 +578,7 @@ static void sunxi_pinctrl_irq_release_resources(struct irq_data *d)
+ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
+ {
+ 	struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
+-	u32 reg = sunxi_irq_cfg_reg(d->hwirq);
++	u32 reg = sunxi_irq_cfg_reg(d->hwirq, pctl->desc->irq_bank_base);
+ 	u8 index = sunxi_irq_cfg_offset(d->hwirq);
+ 	unsigned long flags;
+ 	u32 regval;
+@@ -625,7 +625,8 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
+ static void sunxi_pinctrl_irq_ack(struct irq_data *d)
+ {
+ 	struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
+-	u32 status_reg = sunxi_irq_status_reg(d->hwirq);
++	u32 status_reg = sunxi_irq_status_reg(d->hwirq,
++					      pctl->desc->irq_bank_base);
+ 	u8 status_idx = sunxi_irq_status_offset(d->hwirq);
+ 
+ 	/* Clear the IRQ */
+@@ -635,7 +636,7 @@ static void sunxi_pinctrl_irq_ack(struct irq_data *d)
+ static void sunxi_pinctrl_irq_mask(struct irq_data *d)
+ {
+ 	struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
+-	u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
++	u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base);
+ 	u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
+ 	unsigned long flags;
+ 	u32 val;
+@@ -652,7 +653,7 @@ static void sunxi_pinctrl_irq_mask(struct irq_data *d)
+ static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
+ {
+ 	struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
+-	u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
++	u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base);
+ 	u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
+ 	unsigned long flags;
+ 	u32 val;
+@@ -744,7 +745,7 @@ static void sunxi_pinctrl_irq_handler(struct irq_desc *desc)
+ 	if (bank == pctl->desc->irq_banks)
+ 		return;
+ 
+-	reg = sunxi_irq_status_reg_from_bank(bank);
++	reg = sunxi_irq_status_reg_from_bank(bank, pctl->desc->irq_bank_base);
+ 	val = readl(pctl->membase + reg);
+ 
+ 	if (val) {
+@@ -1023,9 +1024,11 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
+ 
+ 	for (i = 0; i < pctl->desc->irq_banks; i++) {
+ 		/* Mask and clear all IRQs before registering a handler */
+-		writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i));
++		writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i,
++						pctl->desc->irq_bank_base));
+ 		writel(0xffffffff,
+-			pctl->membase + sunxi_irq_status_reg_from_bank(i));
++		       pctl->membase + sunxi_irq_status_reg_from_bank(i,
++						pctl->desc->irq_bank_base));
+ 
+ 		irq_set_chained_handler_and_data(pctl->irq[i],
+ 						 sunxi_pinctrl_irq_handler,
+diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+index e248e81a0f9e..0afce1ab12d0 100644
+--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
++++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+@@ -97,6 +97,7 @@ struct sunxi_pinctrl_desc {
+ 	int				npins;
+ 	unsigned			pin_base;
+ 	unsigned			irq_banks;
++	unsigned			irq_bank_base;
+ 	bool				irq_read_needs_mux;
+ };
+ 
+@@ -233,12 +234,12 @@ static inline u32 sunxi_pull_offset(u16 pin)
+ 	return pin_num * PULL_PINS_BITS;
+ }
+ 
+-static inline u32 sunxi_irq_cfg_reg(u16 irq)
++static inline u32 sunxi_irq_cfg_reg(u16 irq, unsigned bank_base)
+ {
+ 	u8 bank = irq / IRQ_PER_BANK;
+ 	u8 reg = (irq % IRQ_PER_BANK) / IRQ_CFG_IRQ_PER_REG * 0x04;
+ 
+-	return IRQ_CFG_REG + bank * IRQ_MEM_SIZE + reg;
++	return IRQ_CFG_REG + (bank_base + bank) * IRQ_MEM_SIZE + reg;
+ }
+ 
+ static inline u32 sunxi_irq_cfg_offset(u16 irq)
+@@ -247,16 +248,16 @@ static inline u32 sunxi_irq_cfg_offset(u16 irq)
+ 	return irq_num * IRQ_CFG_IRQ_BITS;
+ }
+ 
+-static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank)
++static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank, unsigned bank_base)
+ {
+-	return IRQ_CTRL_REG + bank * IRQ_MEM_SIZE;
++	return IRQ_CTRL_REG + (bank_base + bank) * IRQ_MEM_SIZE;
+ }
+ 
+-static inline u32 sunxi_irq_ctrl_reg(u16 irq)
++static inline u32 sunxi_irq_ctrl_reg(u16 irq, unsigned bank_base)
+ {
+ 	u8 bank = irq / IRQ_PER_BANK;
+ 
+-	return sunxi_irq_ctrl_reg_from_bank(bank);
++	return sunxi_irq_ctrl_reg_from_bank(bank, bank_base);
+ }
+ 
+ static inline u32 sunxi_irq_ctrl_offset(u16 irq)
+@@ -265,16 +266,16 @@ static inline u32 sunxi_irq_ctrl_offset(u16 irq)
+ 	return irq_num * IRQ_CTRL_IRQ_BITS;
+ }
+ 
+-static inline u32 sunxi_irq_status_reg_from_bank(u8 bank)
++static inline u32 sunxi_irq_status_reg_from_bank(u8 bank, unsigned bank_base)
+ {
+-	return IRQ_STATUS_REG + bank * IRQ_MEM_SIZE;
++	return IRQ_STATUS_REG + (bank_base + bank) * IRQ_MEM_SIZE;
+ }
+ 
+-static inline u32 sunxi_irq_status_reg(u16 irq)
++static inline u32 sunxi_irq_status_reg(u16 irq, unsigned bank_base)
+ {
+ 	u8 bank = irq / IRQ_PER_BANK;
+ 
+-	return sunxi_irq_status_reg_from_bank(bank);
++	return sunxi_irq_status_reg_from_bank(bank, bank_base);
+ }
+ 
+ static inline u32 sunxi_irq_status_offset(u16 irq)
+diff --git a/drivers/platform/x86/dell-rbtn.c b/drivers/platform/x86/dell-rbtn.c
+index cd410e392550..d33e9ad3218f 100644
+--- a/drivers/platform/x86/dell-rbtn.c
++++ b/drivers/platform/x86/dell-rbtn.c
+@@ -28,6 +28,7 @@ struct rbtn_data {
+ 	enum rbtn_type type;
+ 	struct rfkill *rfkill;
+ 	struct input_dev *input_dev;
++	bool suspended;
+ };
+ 
+ 
+@@ -220,9 +221,55 @@ static const struct acpi_device_id rbtn_ids[] = {
+ 	{ "", 0 },
+ };
+ 
++#ifdef CONFIG_PM_SLEEP
++static void ACPI_SYSTEM_XFACE rbtn_clear_suspended_flag(void *context)
++{
++	struct rbtn_data *rbtn_data = context;
++
++	rbtn_data->suspended = false;
++}
++
++static int rbtn_suspend(struct device *dev)
++{
++	struct acpi_device *device = to_acpi_device(dev);
++	struct rbtn_data *rbtn_data = acpi_driver_data(device);
++
++	rbtn_data->suspended = true;
++
++	return 0;
++}
++
++static int rbtn_resume(struct device *dev)
++{
++	struct acpi_device *device = to_acpi_device(dev);
++	struct rbtn_data *rbtn_data = acpi_driver_data(device);
++	acpi_status status;
++
++	/*
++	 * Upon resume, some BIOSes send an ACPI notification thet triggers
++	 * an unwanted input event. In order to ignore it, we use a flag
++	 * that we set at suspend and clear once we have received the extra
++	 * ACPI notification. Since ACPI notifications are delivered
++	 * asynchronously to drivers, we clear the flag from the workqueue
++	 * used to deliver the notifications. This should be enough
++	 * to have the flag cleared only after we received the extra
++	 * notification, if any.
++	 */
++	status = acpi_os_execute(OSL_NOTIFY_HANDLER,
++			 rbtn_clear_suspended_flag, rbtn_data);
++	if (ACPI_FAILURE(status))
++		rbtn_clear_suspended_flag(rbtn_data);
++
++	return 0;
++}
++#endif
++
++static SIMPLE_DEV_PM_OPS(rbtn_pm_ops, rbtn_suspend, rbtn_resume);
++
+ static struct acpi_driver rbtn_driver = {
+ 	.name = "dell-rbtn",
+ 	.ids = rbtn_ids,
++	.drv.pm = &rbtn_pm_ops,
+ 	.ops = {
+ 		.add = rbtn_add,
+ 		.remove = rbtn_remove,
+@@ -384,6 +431,15 @@ static void rbtn_notify(struct acpi_device *device, u32 event)
+ {
+ 	struct rbtn_data *rbtn_data = device->driver_data;
+ 
++	/*
++	 * Some BIOSes send a notification at resume.
++	 * Ignore it to prevent unwanted input events.
++	 */
++	if (rbtn_data->suspended) {
++		dev_dbg(&device->dev, "ACPI notification ignored\n");
++		return;
++	}
++
+ 	if (event != 0x80) {
+ 		dev_info(&device->dev, "Received unknown event (0x%x)\n",
+ 			 event);
+diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
+index d78ee151c9e4..be3bc2f4edd4 100644
+--- a/drivers/platform/x86/ideapad-laptop.c
++++ b/drivers/platform/x86/ideapad-laptop.c
+@@ -865,6 +865,20 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
+ 		},
+ 	},
+ 	{
++		.ident = "Lenovo ideapad Y700-15ISK",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-15ISK"),
++		},
++	},
++	{
++		.ident = "Lenovo ideapad Y700 Touch-15ISK",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700 Touch-15ISK"),
++		},
++	},
++	{
+ 		.ident = "Lenovo ideapad Y700-17ISK",
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
+index 73833079bac8..d6baea6a7544 100644
+--- a/drivers/platform/x86/toshiba_acpi.c
++++ b/drivers/platform/x86/toshiba_acpi.c
+@@ -133,7 +133,7 @@ MODULE_LICENSE("GPL");
+ /* Field definitions */
+ #define HCI_ACCEL_MASK			0x7fff
+ #define HCI_HOTKEY_DISABLE		0x0b
+-#define HCI_HOTKEY_ENABLE		0x01
++#define HCI_HOTKEY_ENABLE		0x09
+ #define HCI_HOTKEY_SPECIAL_FUNCTIONS	0x10
+ #define HCI_LCD_BRIGHTNESS_BITS		3
+ #define HCI_LCD_BRIGHTNESS_SHIFT	(16-HCI_LCD_BRIGHTNESS_BITS)
+diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c
+index 423ce087cd9c..5d5adee16886 100644
+--- a/drivers/pwm/pwm-brcmstb.c
++++ b/drivers/pwm/pwm-brcmstb.c
+@@ -274,8 +274,8 @@ static int brcmstb_pwm_probe(struct platform_device *pdev)
+ 
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	p->base = devm_ioremap_resource(&pdev->dev, res);
+-	if (!p->base) {
+-		ret = -ENOMEM;
++	if (IS_ERR(p->base)) {
++		ret = PTR_ERR(p->base);
+ 		goto out_clk;
+ 	}
+ 
+diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
+index 826634ec0d5c..e0679eb399f6 100644
+--- a/drivers/pwm/pwm-omap-dmtimer.c
++++ b/drivers/pwm/pwm-omap-dmtimer.c
+@@ -31,6 +31,7 @@
+ #include <linux/time.h>
+ 
+ #define DM_TIMER_LOAD_MIN 0xfffffffe
++#define DM_TIMER_MAX      0xffffffff
+ 
+ struct pwm_omap_dmtimer_chip {
+ 	struct pwm_chip chip;
+@@ -46,13 +47,9 @@ to_pwm_omap_dmtimer_chip(struct pwm_chip *chip)
+ 	return container_of(chip, struct pwm_omap_dmtimer_chip, chip);
+ }
+ 
+-static int pwm_omap_dmtimer_calc_value(unsigned long clk_rate, int ns)
++static u32 pwm_omap_dmtimer_get_clock_cycles(unsigned long clk_rate, int ns)
+ {
+-	u64 c = (u64)clk_rate * ns;
+-
+-	do_div(c, NSEC_PER_SEC);
+-
+-	return DM_TIMER_LOAD_MIN - c;
++	return DIV_ROUND_CLOSEST_ULL((u64)clk_rate * ns, NSEC_PER_SEC);
+ }
+ 
+ static void pwm_omap_dmtimer_start(struct pwm_omap_dmtimer_chip *omap)
+@@ -99,7 +96,8 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
+ 				   int duty_ns, int period_ns)
+ {
+ 	struct pwm_omap_dmtimer_chip *omap = to_pwm_omap_dmtimer_chip(chip);
+-	int load_value, match_value;
++	u32 period_cycles, duty_cycles;
++	u32 load_value, match_value;
+ 	struct clk *fclk;
+ 	unsigned long clk_rate;
+ 	bool timer_active;
+@@ -117,15 +115,13 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
+ 	fclk = omap->pdata->get_fclk(omap->dm_timer);
+ 	if (!fclk) {
+ 		dev_err(chip->dev, "invalid pmtimer fclk\n");
+-		mutex_unlock(&omap->mutex);
+-		return -EINVAL;
++		goto err_einval;
+ 	}
+ 
+ 	clk_rate = clk_get_rate(fclk);
+ 	if (!clk_rate) {
+ 		dev_err(chip->dev, "invalid pmtimer fclk rate\n");
+-		mutex_unlock(&omap->mutex);
+-		return -EINVAL;
++		goto err_einval;
+ 	}
+ 
+ 	dev_dbg(chip->dev, "clk rate: %luHz\n", clk_rate);
+@@ -133,11 +129,45 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
+ 	/*
+ 	 * Calculate the appropriate load and match values based on the
+ 	 * specified period and duty cycle. The load value determines the
+-	 * cycle time and the match value determines the duty cycle.
++	 * period time and the match value determines the duty time.
++	 *
++	 * The period lasts for (DM_TIMER_MAX-load_value+1) clock cycles.
++	 * Similarly, the active time lasts (match_value-load_value+1) cycles.
++	 * The non-active time is the remainder: (DM_TIMER_MAX-match_value)
++	 * clock cycles.
++	 *
++	 * NOTE: It is required that: load_value <= match_value < DM_TIMER_MAX
++	 *
++	 * References:
++	 *   OMAP4430/60/70 TRM sections 22.2.4.10 and 22.2.4.11
++	 *   AM335x Sitara TRM sections 20.1.3.5 and 20.1.3.6
+ 	 */
+-	load_value = pwm_omap_dmtimer_calc_value(clk_rate, period_ns);
+-	match_value = pwm_omap_dmtimer_calc_value(clk_rate,
+-						  period_ns - duty_ns);
++	period_cycles = pwm_omap_dmtimer_get_clock_cycles(clk_rate, period_ns);
++	duty_cycles = pwm_omap_dmtimer_get_clock_cycles(clk_rate, duty_ns);
++
++	if (period_cycles < 2) {
++		dev_info(chip->dev,
++			 "period %d ns too short for clock rate %lu Hz\n",
++			 period_ns, clk_rate);
++		goto err_einval;
++	}
++
++	if (duty_cycles < 1) {
++		dev_dbg(chip->dev,
++			"duty cycle %d ns is too short for clock rate %lu Hz\n",
++			duty_ns, clk_rate);
++		dev_dbg(chip->dev, "using minimum of 1 clock cycle\n");
++		duty_cycles = 1;
++	} else if (duty_cycles >= period_cycles) {
++		dev_dbg(chip->dev,
++			"duty cycle %d ns is too long for period %d ns at clock rate %lu Hz\n",
++			duty_ns, period_ns, clk_rate);
++		dev_dbg(chip->dev, "using maximum of 1 clock cycle less than period\n");
++		duty_cycles = period_cycles - 1;
++	}
++
++	load_value = (DM_TIMER_MAX - period_cycles) + 1;
++	match_value = load_value + duty_cycles - 1;
+ 
+ 	/*
+ 	 * We MUST stop the associated dual-mode timer before attempting to
+@@ -166,6 +196,11 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
+ 	mutex_unlock(&omap->mutex);
+ 
+ 	return 0;
++
++err_einval:
++	mutex_unlock(&omap->mutex);
++
++	return -EINVAL;
+ }
+ 
+ static int pwm_omap_dmtimer_set_polarity(struct pwm_chip *chip,
+diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
+index f2e1a39ce0f3..5cf4a97e0304 100644
+--- a/drivers/regulator/axp20x-regulator.c
++++ b/drivers/regulator/axp20x-regulator.c
+@@ -221,10 +221,10 @@ static const struct regulator_desc axp22x_regulators[] = {
+ 		 AXP22X_ELDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(1)),
+ 	AXP_DESC(AXP22X, ELDO3, "eldo3", "eldoin", 700, 3300, 100,
+ 		 AXP22X_ELDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(2)),
+-	AXP_DESC_IO(AXP22X, LDO_IO0, "ldo_io0", "ips", 1800, 3300, 100,
++	AXP_DESC_IO(AXP22X, LDO_IO0, "ldo_io0", "ips", 700, 3300, 100,
+ 		    AXP22X_LDO_IO0_V_OUT, 0x1f, AXP20X_GPIO0_CTRL, 0x07,
+ 		    AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
+-	AXP_DESC_IO(AXP22X, LDO_IO1, "ldo_io1", "ips", 1800, 3300, 100,
++	AXP_DESC_IO(AXP22X, LDO_IO1, "ldo_io1", "ips", 700, 3300, 100,
+ 		    AXP22X_LDO_IO1_V_OUT, 0x1f, AXP20X_GPIO1_CTRL, 0x07,
+ 		    AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
+ 	AXP_DESC_FIXED(AXP22X, RTC_LDO, "rtc_ldo", "ips", 3000),
+diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
+index 3242ffc0cb25..09dce49609c1 100644
+--- a/drivers/regulator/s2mps11.c
++++ b/drivers/regulator/s2mps11.c
+@@ -306,7 +306,7 @@ static struct regulator_ops s2mps11_buck_ops = {
+ 	.enable_mask	= S2MPS11_ENABLE_MASK			\
+ }
+ 
+-#define regulator_desc_s2mps11_buck6_10(num, min, step) {	\
++#define regulator_desc_s2mps11_buck67810(num, min, step) {	\
+ 	.name		= "BUCK"#num,				\
+ 	.id		= S2MPS11_BUCK##num,			\
+ 	.ops		= &s2mps11_buck_ops,			\
+@@ -322,6 +322,22 @@ static struct regulator_ops s2mps11_buck_ops = {
+ 	.enable_mask	= S2MPS11_ENABLE_MASK			\
+ }
+ 
++#define regulator_desc_s2mps11_buck9 {				\
++	.name		= "BUCK9",				\
++	.id		= S2MPS11_BUCK9,			\
++	.ops		= &s2mps11_buck_ops,			\
++	.type		= REGULATOR_VOLTAGE,			\
++	.owner		= THIS_MODULE,				\
++	.min_uV		= MIN_3000_MV,				\
++	.uV_step	= STEP_25_MV,				\
++	.n_voltages	= S2MPS11_BUCK9_N_VOLTAGES,		\
++	.ramp_delay	= S2MPS11_RAMP_DELAY,			\
++	.vsel_reg	= S2MPS11_REG_B9CTRL2,			\
++	.vsel_mask	= S2MPS11_BUCK9_VSEL_MASK,		\
++	.enable_reg	= S2MPS11_REG_B9CTRL1,			\
++	.enable_mask	= S2MPS11_ENABLE_MASK			\
++}
++
+ static const struct regulator_desc s2mps11_regulators[] = {
+ 	regulator_desc_s2mps11_ldo(1, STEP_25_MV),
+ 	regulator_desc_s2mps11_ldo(2, STEP_50_MV),
+@@ -366,11 +382,11 @@ static const struct regulator_desc s2mps11_regulators[] = {
+ 	regulator_desc_s2mps11_buck1_4(3),
+ 	regulator_desc_s2mps11_buck1_4(4),
+ 	regulator_desc_s2mps11_buck5,
+-	regulator_desc_s2mps11_buck6_10(6, MIN_600_MV, STEP_6_25_MV),
+-	regulator_desc_s2mps11_buck6_10(7, MIN_600_MV, STEP_6_25_MV),
+-	regulator_desc_s2mps11_buck6_10(8, MIN_600_MV, STEP_6_25_MV),
+-	regulator_desc_s2mps11_buck6_10(9, MIN_3000_MV, STEP_25_MV),
+-	regulator_desc_s2mps11_buck6_10(10, MIN_750_MV, STEP_12_5_MV),
++	regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV),
++	regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_6_25_MV),
++	regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_6_25_MV),
++	regulator_desc_s2mps11_buck9,
++	regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV),
+ };
+ 
+ static struct regulator_ops s2mps14_reg_ops;
+diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
+index 58f5d3b8e981..27343e1c43ef 100644
+--- a/drivers/regulator/s5m8767.c
++++ b/drivers/regulator/s5m8767.c
+@@ -202,9 +202,10 @@ static int s5m8767_get_register(struct s5m8767_info *s5m8767, int reg_id,
+ 		}
+ 	}
+ 
+-	if (i < s5m8767->num_regulators)
+-		*enable_ctrl =
+-		s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
++	if (i >= s5m8767->num_regulators)
++		return -EINVAL;
++
++	*enable_ctrl = s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
+ 
+ 	return 0;
+ }
+@@ -937,8 +938,12 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
+ 			else
+ 				regulators[id].vsel_mask = 0xff;
+ 
+-			s5m8767_get_register(s5m8767, id, &enable_reg,
++			ret = s5m8767_get_register(s5m8767, id, &enable_reg,
+ 					     &enable_val);
++			if (ret) {
++				dev_err(s5m8767->dev, "error reading registers\n");
++				return ret;
++			}
+ 			regulators[id].enable_reg = enable_reg;
+ 			regulators[id].enable_mask = S5M8767_ENCTRL_MASK;
+ 			regulators[id].enable_val = enable_val;
+diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
+index 535050fc5e9f..08e0ff8c786a 100644
+--- a/drivers/rtc/rtc-ds1685.c
++++ b/drivers/rtc/rtc-ds1685.c
+@@ -187,9 +187,9 @@ ds1685_rtc_end_data_access(struct ds1685_priv *rtc)
+  * Only use this where you are certain another lock will not be held.
+  */
+ static inline void
+-ds1685_rtc_begin_ctrl_access(struct ds1685_priv *rtc, unsigned long flags)
++ds1685_rtc_begin_ctrl_access(struct ds1685_priv *rtc, unsigned long *flags)
+ {
+-	spin_lock_irqsave(&rtc->lock, flags);
++	spin_lock_irqsave(&rtc->lock, *flags);
+ 	ds1685_rtc_switch_to_bank1(rtc);
+ }
+ 
+@@ -1300,7 +1300,7 @@ ds1685_rtc_sysfs_ctrl_regs_store(struct device *dev,
+ {
+ 	struct ds1685_priv *rtc = dev_get_drvdata(dev);
+ 	u8 reg = 0, bit = 0, tmp;
+-	unsigned long flags = 0;
++	unsigned long flags;
+ 	long int val = 0;
+ 	const struct ds1685_rtc_ctrl_regs *reg_info =
+ 		ds1685_rtc_sysfs_ctrl_regs_lookup(attr->attr.name);
+@@ -1321,7 +1321,7 @@ ds1685_rtc_sysfs_ctrl_regs_store(struct device *dev,
+ 	bit = reg_info->bit;
+ 
+ 	/* Safe to spinlock during a write. */
+-	ds1685_rtc_begin_ctrl_access(rtc, flags);
++	ds1685_rtc_begin_ctrl_access(rtc, &flags);
+ 	tmp = rtc->read(rtc, reg);
+ 	rtc->write(rtc, reg, (val ? (tmp | bit) : (tmp & ~(bit))));
+ 	ds1685_rtc_end_ctrl_access(rtc, flags);
+diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
+index 097325d96db5..b1b4746a0eab 100644
+--- a/drivers/rtc/rtc-hym8563.c
++++ b/drivers/rtc/rtc-hym8563.c
+@@ -144,7 +144,7 @@ static int hym8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
+ 	 * it does not seem to carry it over a subsequent write/read.
+ 	 * So we'll limit ourself to 100 years, starting at 2000 for now.
+ 	 */
+-	buf[6] = tm->tm_year - 100;
++	buf[6] = bin2bcd(tm->tm_year - 100);
+ 
+ 	/*
+ 	 * CTL1 only contains TEST-mode bits apart from stop,
+diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
+index 7184a0eda793..725dccae24e7 100644
+--- a/drivers/rtc/rtc-max77686.c
++++ b/drivers/rtc/rtc-max77686.c
+@@ -465,7 +465,7 @@ static int max77686_rtc_probe(struct platform_device *pdev)
+ 
+ 	info->virq = regmap_irq_get_virq(max77686->rtc_irq_data,
+ 					 MAX77686_RTCIRQ_RTCA1);
+-	if (!info->virq) {
++	if (info->virq <= 0) {
+ 		ret = -ENXIO;
+ 		goto err_rtc;
+ 	}
+diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
+index bd911bafb809..17341feadad1 100644
+--- a/drivers/rtc/rtc-rx8025.c
++++ b/drivers/rtc/rtc-rx8025.c
+@@ -65,7 +65,6 @@
+ 
+ static const struct i2c_device_id rx8025_id[] = {
+ 	{ "rx8025", 0 },
+-	{ "rv8803", 1 },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(i2c, rx8025_id);
+diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
+index f64c282275b3..e1b86bb01062 100644
+--- a/drivers/rtc/rtc-vr41xx.c
++++ b/drivers/rtc/rtc-vr41xx.c
+@@ -272,12 +272,13 @@ static irqreturn_t rtclong1_interrupt(int irq, void *dev_id)
+ }
+ 
+ static const struct rtc_class_ops vr41xx_rtc_ops = {
+-	.release	= vr41xx_rtc_release,
+-	.ioctl		= vr41xx_rtc_ioctl,
+-	.read_time	= vr41xx_rtc_read_time,
+-	.set_time	= vr41xx_rtc_set_time,
+-	.read_alarm	= vr41xx_rtc_read_alarm,
+-	.set_alarm	= vr41xx_rtc_set_alarm,
++	.release		= vr41xx_rtc_release,
++	.ioctl			= vr41xx_rtc_ioctl,
++	.read_time		= vr41xx_rtc_read_time,
++	.set_time		= vr41xx_rtc_set_time,
++	.read_alarm		= vr41xx_rtc_read_alarm,
++	.set_alarm		= vr41xx_rtc_set_alarm,
++	.alarm_irq_enable	= vr41xx_rtc_alarm_irq_enable,
+ };
+ 
+ static int rtc_probe(struct platform_device *pdev)
+diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
+index d72867257346..3eff2a69fe08 100644
+--- a/drivers/scsi/NCR5380.c
++++ b/drivers/scsi/NCR5380.c
+@@ -760,7 +760,7 @@ static struct scsi_cmnd *dequeue_next_cmd(struct Scsi_Host *instance)
+ 	struct NCR5380_cmd *ncmd;
+ 	struct scsi_cmnd *cmd;
+ 
+-	if (list_empty(&hostdata->autosense)) {
++	if (hostdata->sensing || list_empty(&hostdata->autosense)) {
+ 		list_for_each_entry(ncmd, &hostdata->unissued, list) {
+ 			cmd = NCR5380_to_scmd(ncmd);
+ 			dsprintk(NDEBUG_QUEUES, instance, "dequeue: cmd=%p target=%d busy=0x%02x lun=%llu\n",
+@@ -793,7 +793,7 @@ static void requeue_cmd(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
+ 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ 	struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
+ 
+-	if (hostdata->sensing) {
++	if (hostdata->sensing == cmd) {
+ 		scsi_eh_restore_cmnd(cmd, &hostdata->ses);
+ 		list_add(&ncmd->list, &hostdata->autosense);
+ 		hostdata->sensing = NULL;
+@@ -815,15 +815,17 @@ static void NCR5380_main(struct work_struct *work)
+ 	struct NCR5380_hostdata *hostdata =
+ 		container_of(work, struct NCR5380_hostdata, main_task);
+ 	struct Scsi_Host *instance = hostdata->host;
+-	struct scsi_cmnd *cmd;
+ 	int done;
+ 
+ 	do {
+ 		done = 1;
+ 
+ 		spin_lock_irq(&hostdata->lock);
+-		while (!hostdata->connected &&
+-		       (cmd = dequeue_next_cmd(instance))) {
++		while (!hostdata->connected && !hostdata->selecting) {
++			struct scsi_cmnd *cmd = dequeue_next_cmd(instance);
++
++			if (!cmd)
++				break;
+ 
+ 			dsprintk(NDEBUG_MAIN, instance, "main: dequeued %p\n", cmd);
+ 
+@@ -840,8 +842,7 @@ static void NCR5380_main(struct work_struct *work)
+ 			 * entire unit.
+ 			 */
+ 
+-			cmd = NCR5380_select(instance, cmd);
+-			if (!cmd) {
++			if (!NCR5380_select(instance, cmd)) {
+ 				dsprintk(NDEBUG_MAIN, instance, "main: select complete\n");
+ 			} else {
+ 				dsprintk(NDEBUG_MAIN | NDEBUG_QUEUES, instance,
+@@ -1056,6 +1057,11 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
+ 		/* Reselection interrupt */
+ 		goto out;
+ 	}
++	if (!hostdata->selecting) {
++		/* Command was aborted */
++		NCR5380_write(MODE_REG, MR_BASE);
++		goto out;
++	}
+ 	if (err < 0) {
+ 		NCR5380_write(MODE_REG, MR_BASE);
+ 		shost_printk(KERN_ERR, instance,
+@@ -1759,9 +1765,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
+ 	unsigned char msgout = NOP;
+ 	int sink = 0;
+ 	int len;
+-#if defined(PSEUDO_DMA) || defined(REAL_DMA_POLL)
+ 	int transfersize;
+-#endif
+ 	unsigned char *data;
+ 	unsigned char phase, tmp, extended_msg[10], old_phase = 0xff;
+ 	struct scsi_cmnd *cmd;
+@@ -1798,6 +1802,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
+ 				do_abort(instance);
+ 				cmd->result = DID_ERROR << 16;
+ 				complete_cmd(instance, cmd);
++				hostdata->connected = NULL;
+ 				return;
+ #endif
+ 			case PHASE_DATAIN:
+@@ -1847,20 +1852,23 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
+ 						sink = 1;
+ 						do_abort(instance);
+ 						cmd->result = DID_ERROR << 16;
+-						complete_cmd(instance, cmd);
+ 						/* XXX - need to source or sink data here, as appropriate */
+ 					} else
+ 						cmd->SCp.this_residual -= transfersize - len;
+ 				} else
+ #endif				/* defined(PSEUDO_DMA) || defined(REAL_DMA_POLL) */
+ 				{
+-					spin_unlock_irq(&hostdata->lock);
+-					NCR5380_transfer_pio(instance, &phase,
+-					                     (int *)&cmd->SCp.this_residual,
++					/* Break up transfer into 3 ms chunks,
++					 * presuming 6 accesses per handshake.
++					 */
++					transfersize = min((unsigned long)cmd->SCp.this_residual,
++					                   hostdata->accesses_per_ms / 2);
++					len = transfersize;
++					NCR5380_transfer_pio(instance, &phase, &len,
+ 					                     (unsigned char **)&cmd->SCp.ptr);
+-					spin_lock_irq(&hostdata->lock);
++					cmd->SCp.this_residual -= transfersize - len;
+ 				}
+-				break;
++				return;
+ 			case PHASE_MSGIN:
+ 				len = 1;
+ 				data = &tmp;
+@@ -2292,14 +2300,17 @@ static bool list_del_cmd(struct list_head *haystack,
+  * [disconnected -> connected ->]...
+  * [autosense -> connected ->] done
+  *
+- * If cmd is unissued then just remove it.
+- * If cmd is disconnected, try to select the target.
+- * If cmd is connected, try to send an abort message.
+- * If cmd is waiting for autosense, give it a chance to complete but check
+- * that it isn't left connected.
+  * If cmd was not found at all then presumably it has already been completed,
+  * in which case return SUCCESS to try to avoid further EH measures.
++ *
+  * If the command has not completed yet, we must not fail to find it.
++ * We have no option but to forget the aborted command (even if it still
++ * lacks sense data). The mid-layer may re-issue a command that is in error
++ * recovery (see scsi_send_eh_cmnd), but the logic and data structures in
++ * this driver are such that a command can appear on one queue only.
++ *
++ * The lock protects driver data structures, but EH handlers also use it
++ * to serialize their own execution and prevent their own re-entry.
+  */
+ 
+ static int NCR5380_abort(struct scsi_cmnd *cmd)
+@@ -2322,6 +2333,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
+ 		         "abort: removed %p from issue queue\n", cmd);
+ 		cmd->result = DID_ABORT << 16;
+ 		cmd->scsi_done(cmd); /* No tag or busy flag to worry about */
++		goto out;
+ 	}
+ 
+ 	if (hostdata->selecting == cmd) {
+@@ -2336,58 +2348,21 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
+ 	if (list_del_cmd(&hostdata->disconnected, cmd)) {
+ 		dsprintk(NDEBUG_ABORT, instance,
+ 		         "abort: removed %p from disconnected list\n", cmd);
+-		cmd->result = DID_ERROR << 16;
+-		if (!hostdata->connected)
+-			NCR5380_select(instance, cmd);
+-		if (hostdata->connected != cmd) {
+-			complete_cmd(instance, cmd);
+-			result = FAILED;
+-			goto out;
+-		}
++		/* Can't call NCR5380_select() and send ABORT because that
++		 * means releasing the lock. Need a bus reset.
++		 */
++		set_host_byte(cmd, DID_ERROR);
++		complete_cmd(instance, cmd);
++		result = FAILED;
++		goto out;
+ 	}
+ 
+ 	if (hostdata->connected == cmd) {
+ 		dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
+ 		hostdata->connected = NULL;
+-		if (do_abort(instance)) {
+-			set_host_byte(cmd, DID_ERROR);
+-			complete_cmd(instance, cmd);
+-			result = FAILED;
+-			goto out;
+-		}
+-		set_host_byte(cmd, DID_ABORT);
+ #ifdef REAL_DMA
+ 		hostdata->dma_len = 0;
+ #endif
+-		if (cmd->cmnd[0] == REQUEST_SENSE)
+-			complete_cmd(instance, cmd);
+-		else {
+-			struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
+-
+-			/* Perform autosense for this command */
+-			list_add(&ncmd->list, &hostdata->autosense);
+-		}
+-	}
+-
+-	if (list_find_cmd(&hostdata->autosense, cmd)) {
+-		dsprintk(NDEBUG_ABORT, instance,
+-		         "abort: found %p on sense queue\n", cmd);
+-		spin_unlock_irqrestore(&hostdata->lock, flags);
+-		queue_work(hostdata->work_q, &hostdata->main_task);
+-		msleep(1000);
+-		spin_lock_irqsave(&hostdata->lock, flags);
+-		if (list_del_cmd(&hostdata->autosense, cmd)) {
+-			dsprintk(NDEBUG_ABORT, instance,
+-			         "abort: removed %p from sense queue\n", cmd);
+-			set_host_byte(cmd, DID_ABORT);
+-			complete_cmd(instance, cmd);
+-			goto out;
+-		}
+-	}
+-
+-	if (hostdata->connected == cmd) {
+-		dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
+-		hostdata->connected = NULL;
+ 		if (do_abort(instance)) {
+ 			set_host_byte(cmd, DID_ERROR);
+ 			complete_cmd(instance, cmd);
+@@ -2395,9 +2370,14 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
+ 			goto out;
+ 		}
+ 		set_host_byte(cmd, DID_ABORT);
+-#ifdef REAL_DMA
+-		hostdata->dma_len = 0;
+-#endif
++		complete_cmd(instance, cmd);
++		goto out;
++	}
++
++	if (list_del_cmd(&hostdata->autosense, cmd)) {
++		dsprintk(NDEBUG_ABORT, instance,
++		         "abort: removed %p from sense queue\n", cmd);
++		set_host_byte(cmd, DID_ERROR);
+ 		complete_cmd(instance, cmd);
+ 	}
+ 
+@@ -2450,7 +2430,16 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
+ 	 * commands!
+ 	 */
+ 
+-	hostdata->selecting = NULL;
++	if (list_del_cmd(&hostdata->unissued, cmd)) {
++		cmd->result = DID_RESET << 16;
++		cmd->scsi_done(cmd);
++	}
++
++	if (hostdata->selecting) {
++		hostdata->selecting->result = DID_RESET << 16;
++		complete_cmd(instance, hostdata->selecting);
++		hostdata->selecting = NULL;
++	}
+ 
+ 	list_for_each_entry(ncmd, &hostdata->disconnected, list) {
+ 		struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
+@@ -2458,6 +2447,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
+ 		set_host_byte(cmd, DID_RESET);
+ 		cmd->scsi_done(cmd);
+ 	}
++	INIT_LIST_HEAD(&hostdata->disconnected);
+ 
+ 	list_for_each_entry(ncmd, &hostdata->autosense, list) {
+ 		struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
+@@ -2465,6 +2455,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
+ 		set_host_byte(cmd, DID_RESET);
+ 		cmd->scsi_done(cmd);
+ 	}
++	INIT_LIST_HEAD(&hostdata->autosense);
+ 
+ 	if (hostdata->connected) {
+ 		set_host_byte(hostdata->connected, DID_RESET);
+@@ -2472,12 +2463,6 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
+ 		hostdata->connected = NULL;
+ 	}
+ 
+-	if (hostdata->sensing) {
+-		set_host_byte(hostdata->connected, DID_RESET);
+-		complete_cmd(instance, hostdata->sensing);
+-		hostdata->sensing = NULL;
+-	}
+-
+ 	for (i = 0; i < 8; ++i)
+ 		hostdata->busy[i] = 0;
+ #ifdef REAL_DMA
+diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
+index 074878b55a0b..d044f3f273be 100644
+--- a/drivers/scsi/aacraid/aacraid.h
++++ b/drivers/scsi/aacraid/aacraid.h
+@@ -944,6 +944,7 @@ struct fib {
+ 	 */
+ 	struct list_head	fiblink;
+ 	void			*data;
++	u32			vector_no;
+ 	struct hw_fib		*hw_fib_va;		/* Actual shared object */
+ 	dma_addr_t		hw_fib_pa;		/* physical address of hw_fib*/
+ };
+@@ -2113,6 +2114,7 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
+ int aac_acquire_irq(struct aac_dev *dev);
+ void aac_free_irq(struct aac_dev *dev);
+ const char *aac_driverinfo(struct Scsi_Host *);
++void aac_fib_vector_assign(struct aac_dev *dev);
+ struct fib *aac_fib_alloc(struct aac_dev *dev);
+ int aac_fib_setup(struct aac_dev *dev);
+ void aac_fib_map_free(struct aac_dev *dev);
+diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
+index a1f90fe849c9..4cbf54928640 100644
+--- a/drivers/scsi/aacraid/commsup.c
++++ b/drivers/scsi/aacraid/commsup.c
+@@ -83,13 +83,38 @@ static int fib_map_alloc(struct aac_dev *dev)
+ 
+ void aac_fib_map_free(struct aac_dev *dev)
+ {
+-	pci_free_consistent(dev->pdev,
+-	  dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
+-	  dev->hw_fib_va, dev->hw_fib_pa);
++	if (dev->hw_fib_va && dev->max_fib_size) {
++		pci_free_consistent(dev->pdev,
++		(dev->max_fib_size *
++		(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)),
++		dev->hw_fib_va, dev->hw_fib_pa);
++	}
+ 	dev->hw_fib_va = NULL;
+ 	dev->hw_fib_pa = 0;
+ }
+ 
++void aac_fib_vector_assign(struct aac_dev *dev)
++{
++	u32 i = 0;
++	u32 vector = 1;
++	struct fib *fibptr = NULL;
++
++	for (i = 0, fibptr = &dev->fibs[i];
++		i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
++		i++, fibptr++) {
++		if ((dev->max_msix == 1) ||
++		  (i > ((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1)
++			- dev->vector_cap))) {
++			fibptr->vector_no = 0;
++		} else {
++			fibptr->vector_no = vector;
++			vector++;
++			if (vector == dev->max_msix)
++				vector = 1;
++		}
++	}
++}
++
+ /**
+  *	aac_fib_setup	-	setup the fibs
+  *	@dev: Adapter to set up
+@@ -151,6 +176,12 @@ int aac_fib_setup(struct aac_dev * dev)
+ 		hw_fib_pa = hw_fib_pa +
+ 			dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
+ 	}
++
++	/*
++	 *Assign vector numbers to fibs
++	 */
++	aac_fib_vector_assign(dev);
++
+ 	/*
+ 	 *	Add the fib chain to the free list
+ 	 */
+diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
+index 76eaa38ffd6e..8a8e84548d64 100644
+--- a/drivers/scsi/aacraid/linit.c
++++ b/drivers/scsi/aacraid/linit.c
+@@ -1404,8 +1404,18 @@ static int aac_acquire_resources(struct aac_dev *dev)
+ 
+ 	aac_adapter_enable_int(dev);
+ 
+-	if (!dev->sync_mode)
++	/*max msix may change  after EEH
++	 * Re-assign vectors to fibs
++	 */
++	aac_fib_vector_assign(dev);
++
++	if (!dev->sync_mode) {
++		/* After EEH recovery or suspend resume, max_msix count
++		 * may change, therfore updating in init as well.
++		 */
+ 		aac_adapter_start(dev);
++		dev->init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix);
++	}
+ 	return 0;
+ 
+ error_iounmap:
+diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
+index 2aa34ea8ceb1..bc0203f3d243 100644
+--- a/drivers/scsi/aacraid/src.c
++++ b/drivers/scsi/aacraid/src.c
+@@ -156,8 +156,8 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
+ 				break;
+ 			if (dev->msi_enabled && dev->max_msix > 1)
+ 				atomic_dec(&dev->rrq_outstanding[vector_no]);
+-			aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
+ 			dev->host_rrq[index++] = 0;
++			aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
+ 			if (index == (vector_no + 1) * dev->vector_cap)
+ 				index = vector_no * dev->vector_cap;
+ 			dev->host_rrq_idx[vector_no] = index;
+@@ -452,36 +452,20 @@ static int aac_src_deliver_message(struct fib *fib)
+ #endif
+ 
+ 	u16 hdr_size = le16_to_cpu(fib->hw_fib_va->header.Size);
++	u16 vector_no;
+ 
+ 	atomic_inc(&q->numpending);
+ 
+ 	if (dev->msi_enabled && fib->hw_fib_va->header.Command != AifRequest &&
+ 	    dev->max_msix > 1) {
+-		u_int16_t vector_no, first_choice = 0xffff;
+-
+-		vector_no = dev->fibs_pushed_no % dev->max_msix;
+-		do {
+-			vector_no += 1;
+-			if (vector_no == dev->max_msix)
+-				vector_no = 1;
+-			if (atomic_read(&dev->rrq_outstanding[vector_no]) <
+-			    dev->vector_cap)
+-				break;
+-			if (0xffff == first_choice)
+-				first_choice = vector_no;
+-			else if (vector_no == first_choice)
+-				break;
+-		} while (1);
+-		if (vector_no == first_choice)
+-			vector_no = 0;
+-		atomic_inc(&dev->rrq_outstanding[vector_no]);
+-		if (dev->fibs_pushed_no == 0xffffffff)
+-			dev->fibs_pushed_no = 0;
+-		else
+-			dev->fibs_pushed_no++;
++		vector_no = fib->vector_no;
+ 		fib->hw_fib_va->header.Handle += (vector_no << 16);
++	} else {
++		vector_no = 0;
+ 	}
+ 
++	atomic_inc(&dev->rrq_outstanding[vector_no]);
++
+ 	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
+ 		/* Calculate the amount to the fibsize bits */
+ 		fibsize = (hdr_size + 127) / 128 - 1;
+diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
+index b846a4683562..fc6a83188c1e 100644
+--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
++++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
+@@ -1336,6 +1336,7 @@ ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev,
+ 	case AHC_DEV_Q_TAGGED:
+ 		scsi_change_queue_depth(sdev,
+ 				dev->openings + dev->active);
++		break;
+ 	default:
+ 		/*
+ 		 * We allow the OS to queue 2 untagged transactions to
+diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
+index e65478651ca9..389825ba5d96 100644
+--- a/drivers/scsi/atari_NCR5380.c
++++ b/drivers/scsi/atari_NCR5380.c
+@@ -862,7 +862,7 @@ static struct scsi_cmnd *dequeue_next_cmd(struct Scsi_Host *instance)
+ 	struct NCR5380_cmd *ncmd;
+ 	struct scsi_cmnd *cmd;
+ 
+-	if (list_empty(&hostdata->autosense)) {
++	if (hostdata->sensing || list_empty(&hostdata->autosense)) {
+ 		list_for_each_entry(ncmd, &hostdata->unissued, list) {
+ 			cmd = NCR5380_to_scmd(ncmd);
+ 			dsprintk(NDEBUG_QUEUES, instance, "dequeue: cmd=%p target=%d busy=0x%02x lun=%llu\n",
+@@ -901,7 +901,7 @@ static void requeue_cmd(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
+ 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ 	struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
+ 
+-	if (hostdata->sensing) {
++	if (hostdata->sensing == cmd) {
+ 		scsi_eh_restore_cmnd(cmd, &hostdata->ses);
+ 		list_add(&ncmd->list, &hostdata->autosense);
+ 		hostdata->sensing = NULL;
+@@ -923,7 +923,6 @@ static void NCR5380_main(struct work_struct *work)
+ 	struct NCR5380_hostdata *hostdata =
+ 		container_of(work, struct NCR5380_hostdata, main_task);
+ 	struct Scsi_Host *instance = hostdata->host;
+-	struct scsi_cmnd *cmd;
+ 	int done;
+ 
+ 	/*
+@@ -936,8 +935,11 @@ static void NCR5380_main(struct work_struct *work)
+ 		done = 1;
+ 
+ 		spin_lock_irq(&hostdata->lock);
+-		while (!hostdata->connected &&
+-		       (cmd = dequeue_next_cmd(instance))) {
++		while (!hostdata->connected && !hostdata->selecting) {
++			struct scsi_cmnd *cmd = dequeue_next_cmd(instance);
++
++			if (!cmd)
++				break;
+ 
+ 			dsprintk(NDEBUG_MAIN, instance, "main: dequeued %p\n", cmd);
+ 
+@@ -960,8 +962,7 @@ static void NCR5380_main(struct work_struct *work)
+ #ifdef SUPPORT_TAGS
+ 			cmd_get_tag(cmd, cmd->cmnd[0] != REQUEST_SENSE);
+ #endif
+-			cmd = NCR5380_select(instance, cmd);
+-			if (!cmd) {
++			if (!NCR5380_select(instance, cmd)) {
+ 				dsprintk(NDEBUG_MAIN, instance, "main: select complete\n");
+ 				maybe_release_dma_irq(instance);
+ 			} else {
+@@ -1257,6 +1258,11 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
+ 		/* Reselection interrupt */
+ 		goto out;
+ 	}
++	if (!hostdata->selecting) {
++		/* Command was aborted */
++		NCR5380_write(MODE_REG, MR_BASE);
++		goto out;
++	}
+ 	if (err < 0) {
+ 		NCR5380_write(MODE_REG, MR_BASE);
+ 		shost_printk(KERN_ERR, instance,
+@@ -1838,9 +1844,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
+ 	unsigned char msgout = NOP;
+ 	int sink = 0;
+ 	int len;
+-#if defined(REAL_DMA)
+ 	int transfersize;
+-#endif
+ 	unsigned char *data;
+ 	unsigned char phase, tmp, extended_msg[10], old_phase = 0xff;
+ 	struct scsi_cmnd *cmd;
+@@ -1909,6 +1913,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
+ 				do_abort(instance);
+ 				cmd->result = DID_ERROR << 16;
+ 				complete_cmd(instance, cmd);
++				hostdata->connected = NULL;
+ 				return;
+ #endif
+ 			case PHASE_DATAIN:
+@@ -1966,7 +1971,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
+ 						sink = 1;
+ 						do_abort(instance);
+ 						cmd->result = DID_ERROR << 16;
+-						complete_cmd(instance, cmd);
+ 						/* XXX - need to source or sink data here, as appropriate */
+ 					} else {
+ #ifdef REAL_DMA
+@@ -1983,18 +1987,22 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
+ 				} else
+ #endif /* defined(REAL_DMA) */
+ 				{
+-					spin_unlock_irq(&hostdata->lock);
+-					NCR5380_transfer_pio(instance, &phase,
+-					                     (int *)&cmd->SCp.this_residual,
++					/* Break up transfer into 3 ms chunks,
++					 * presuming 6 accesses per handshake.
++					 */
++					transfersize = min((unsigned long)cmd->SCp.this_residual,
++					                   hostdata->accesses_per_ms / 2);
++					len = transfersize;
++					NCR5380_transfer_pio(instance, &phase, &len,
+ 					                     (unsigned char **)&cmd->SCp.ptr);
+-					spin_lock_irq(&hostdata->lock);
++					cmd->SCp.this_residual -= transfersize - len;
+ 				}
+ #if defined(CONFIG_SUN3) && defined(REAL_DMA)
+ 				/* if we had intended to dma that command clear it */
+ 				if (sun3_dma_setup_done == cmd)
+ 					sun3_dma_setup_done = NULL;
+ #endif
+-				break;
++				return;
+ 			case PHASE_MSGIN:
+ 				len = 1;
+ 				data = &tmp;
+@@ -2487,14 +2495,17 @@ static bool list_del_cmd(struct list_head *haystack,
+  * [disconnected -> connected ->]...
+  * [autosense -> connected ->] done
+  *
+- * If cmd is unissued then just remove it.
+- * If cmd is disconnected, try to select the target.
+- * If cmd is connected, try to send an abort message.
+- * If cmd is waiting for autosense, give it a chance to complete but check
+- * that it isn't left connected.
+  * If cmd was not found at all then presumably it has already been completed,
+  * in which case return SUCCESS to try to avoid further EH measures.
++ *
+  * If the command has not completed yet, we must not fail to find it.
++ * We have no option but to forget the aborted command (even if it still
++ * lacks sense data). The mid-layer may re-issue a command that is in error
++ * recovery (see scsi_send_eh_cmnd), but the logic and data structures in
++ * this driver are such that a command can appear on one queue only.
++ *
++ * The lock protects driver data structures, but EH handlers also use it
++ * to serialize their own execution and prevent their own re-entry.
+  */
+ 
+ static int NCR5380_abort(struct scsi_cmnd *cmd)
+@@ -2517,6 +2528,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
+ 		         "abort: removed %p from issue queue\n", cmd);
+ 		cmd->result = DID_ABORT << 16;
+ 		cmd->scsi_done(cmd); /* No tag or busy flag to worry about */
++		goto out;
+ 	}
+ 
+ 	if (hostdata->selecting == cmd) {
+@@ -2531,58 +2543,21 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
+ 	if (list_del_cmd(&hostdata->disconnected, cmd)) {
+ 		dsprintk(NDEBUG_ABORT, instance,
+ 		         "abort: removed %p from disconnected list\n", cmd);
+-		cmd->result = DID_ERROR << 16;
+-		if (!hostdata->connected)
+-			NCR5380_select(instance, cmd);
+-		if (hostdata->connected != cmd) {
+-			complete_cmd(instance, cmd);
+-			result = FAILED;
+-			goto out;
+-		}
++		/* Can't call NCR5380_select() and send ABORT because that
++		 * means releasing the lock. Need a bus reset.
++		 */
++		set_host_byte(cmd, DID_ERROR);
++		complete_cmd(instance, cmd);
++		result = FAILED;
++		goto out;
+ 	}
+ 
+ 	if (hostdata->connected == cmd) {
+ 		dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
+ 		hostdata->connected = NULL;
+-		if (do_abort(instance)) {
+-			set_host_byte(cmd, DID_ERROR);
+-			complete_cmd(instance, cmd);
+-			result = FAILED;
+-			goto out;
+-		}
+-		set_host_byte(cmd, DID_ABORT);
+ #ifdef REAL_DMA
+ 		hostdata->dma_len = 0;
+ #endif
+-		if (cmd->cmnd[0] == REQUEST_SENSE)
+-			complete_cmd(instance, cmd);
+-		else {
+-			struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
+-
+-			/* Perform autosense for this command */
+-			list_add(&ncmd->list, &hostdata->autosense);
+-		}
+-	}
+-
+-	if (list_find_cmd(&hostdata->autosense, cmd)) {
+-		dsprintk(NDEBUG_ABORT, instance,
+-		         "abort: found %p on sense queue\n", cmd);
+-		spin_unlock_irqrestore(&hostdata->lock, flags);
+-		queue_work(hostdata->work_q, &hostdata->main_task);
+-		msleep(1000);
+-		spin_lock_irqsave(&hostdata->lock, flags);
+-		if (list_del_cmd(&hostdata->autosense, cmd)) {
+-			dsprintk(NDEBUG_ABORT, instance,
+-			         "abort: removed %p from sense queue\n", cmd);
+-			set_host_byte(cmd, DID_ABORT);
+-			complete_cmd(instance, cmd);
+-			goto out;
+-		}
+-	}
+-
+-	if (hostdata->connected == cmd) {
+-		dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
+-		hostdata->connected = NULL;
+ 		if (do_abort(instance)) {
+ 			set_host_byte(cmd, DID_ERROR);
+ 			complete_cmd(instance, cmd);
+@@ -2590,9 +2565,14 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
+ 			goto out;
+ 		}
+ 		set_host_byte(cmd, DID_ABORT);
+-#ifdef REAL_DMA
+-		hostdata->dma_len = 0;
+-#endif
++		complete_cmd(instance, cmd);
++		goto out;
++	}
++
++	if (list_del_cmd(&hostdata->autosense, cmd)) {
++		dsprintk(NDEBUG_ABORT, instance,
++		         "abort: removed %p from sense queue\n", cmd);
++		set_host_byte(cmd, DID_ERROR);
+ 		complete_cmd(instance, cmd);
+ 	}
+ 
+@@ -2646,7 +2626,16 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
+ 	 * commands!
+ 	 */
+ 
+-	hostdata->selecting = NULL;
++	if (list_del_cmd(&hostdata->unissued, cmd)) {
++		cmd->result = DID_RESET << 16;
++		cmd->scsi_done(cmd);
++	}
++
++	if (hostdata->selecting) {
++		hostdata->selecting->result = DID_RESET << 16;
++		complete_cmd(instance, hostdata->selecting);
++		hostdata->selecting = NULL;
++	}
+ 
+ 	list_for_each_entry(ncmd, &hostdata->disconnected, list) {
+ 		struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
+@@ -2654,6 +2643,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
+ 		set_host_byte(cmd, DID_RESET);
+ 		cmd->scsi_done(cmd);
+ 	}
++	INIT_LIST_HEAD(&hostdata->disconnected);
+ 
+ 	list_for_each_entry(ncmd, &hostdata->autosense, list) {
+ 		struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
+@@ -2661,6 +2651,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
+ 		set_host_byte(cmd, DID_RESET);
+ 		cmd->scsi_done(cmd);
+ 	}
++	INIT_LIST_HEAD(&hostdata->autosense);
+ 
+ 	if (hostdata->connected) {
+ 		set_host_byte(hostdata->connected, DID_RESET);
+@@ -2668,12 +2659,6 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
+ 		hostdata->connected = NULL;
+ 	}
+ 
+-	if (hostdata->sensing) {
+-		set_host_byte(hostdata->connected, DID_RESET);
+-		complete_cmd(instance, hostdata->sensing);
+-		hostdata->sensing = NULL;
+-	}
+-
+ #ifdef SUPPORT_TAGS
+ 	free_all_tags(hostdata);
+ #endif
+diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
+index cb9072a841be..069e5c50abd0 100644
+--- a/drivers/scsi/be2iscsi/be_main.c
++++ b/drivers/scsi/be2iscsi/be_main.c
+@@ -4468,6 +4468,7 @@ put_shost:
+ 	scsi_host_put(phba->shost);
+ free_kset:
+ 	iscsi_boot_destroy_kset(phba->boot_kset);
++	phba->boot_kset = NULL;
+ 	return -ENOMEM;
+ }
+ 
+diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig
+index e5647d59224f..0b331c9c0a8f 100644
+--- a/drivers/scsi/device_handler/Kconfig
++++ b/drivers/scsi/device_handler/Kconfig
+@@ -13,13 +13,13 @@ menuconfig SCSI_DH
+ 
+ config SCSI_DH_RDAC
+ 	tristate "LSI RDAC Device Handler"
+-	depends on SCSI_DH
++	depends on SCSI_DH && SCSI
+ 	help
+ 	If you have a LSI RDAC select y. Otherwise, say N.
+ 
+ config SCSI_DH_HP_SW
+ 	tristate "HP/COMPAQ MSA Device Handler"
+-	depends on SCSI_DH
++	depends on SCSI_DH && SCSI
+ 	help
+ 	If you have a HP/COMPAQ MSA device that requires START_STOP to
+ 	be sent to start it and cannot upgrade the firmware then select y.
+@@ -27,13 +27,13 @@ config SCSI_DH_HP_SW
+ 
+ config SCSI_DH_EMC
+ 	tristate "EMC CLARiiON Device Handler"
+-	depends on SCSI_DH
++	depends on SCSI_DH && SCSI
+ 	help
+ 	If you have a EMC CLARiiON select y. Otherwise, say N.
+ 
+ config SCSI_DH_ALUA
+ 	tristate "SPC-3 ALUA Device Handler"
+-	depends on SCSI_DH
++	depends on SCSI_DH && SCSI
+ 	help
+ 	  SCSI Device handler for generic SPC-3 Asymmetric Logical Unit
+ 	  Access (ALUA).
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index a544366a367e..f57d02c3b6cf 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -2860,7 +2860,7 @@ lpfc_online(struct lpfc_hba *phba)
+ 	}
+ 
+ 	vports = lpfc_create_vport_work_array(phba);
+-	if (vports != NULL)
++	if (vports != NULL) {
+ 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ 			struct Scsi_Host *shost;
+ 			shost = lpfc_shost_from_vport(vports[i]);
+@@ -2877,7 +2877,8 @@ lpfc_online(struct lpfc_hba *phba)
+ 			}
+ 			spin_unlock_irq(shost->host_lock);
+ 		}
+-		lpfc_destroy_vport_work_array(phba, vports);
++	}
++	lpfc_destroy_vport_work_array(phba, vports);
+ 
+ 	lpfc_unblock_mgmt_io(phba);
+ 	return 0;
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 97a1c1c33b05..00ce3e269a43 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -6282,12 +6282,13 @@ out:
+ 	}
+ 
+ 	for (i = 0; i < ioc->sge_count; i++) {
+-		if (kbuff_arr[i])
++		if (kbuff_arr[i]) {
+ 			dma_free_coherent(&instance->pdev->dev,
+ 					  le32_to_cpu(kern_sge32[i].length),
+ 					  kbuff_arr[i],
+ 					  le32_to_cpu(kern_sge32[i].phys_addr));
+ 			kbuff_arr[i] = NULL;
++		}
+ 	}
+ 
+ 	megasas_return_cmd(instance, cmd);
+diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
+index 5d0ec42a9317..634254a52301 100644
+--- a/drivers/scsi/qla1280.c
++++ b/drivers/scsi/qla1280.c
+@@ -4214,7 +4214,7 @@ static struct scsi_host_template qla1280_driver_template = {
+ 	.eh_bus_reset_handler	= qla1280_eh_bus_reset,
+ 	.eh_host_reset_handler	= qla1280_eh_adapter_reset,
+ 	.bios_param		= qla1280_biosparam,
+-	.can_queue		= 0xfffff,
++	.can_queue		= MAX_OUTSTANDING_COMMANDS,
+ 	.this_id		= -1,
+ 	.sg_tablesize		= SG_ALL,
+ 	.use_clustering		= ENABLE_CLUSTERING,
+diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
+index b1bf42b93fcc..1deb6adc411f 100644
+--- a/drivers/scsi/scsi.c
++++ b/drivers/scsi/scsi.c
+@@ -784,8 +784,9 @@ void scsi_attach_vpd(struct scsi_device *sdev)
+ 	int pg83_supported = 0;
+ 	unsigned char __rcu *vpd_buf, *orig_vpd_buf = NULL;
+ 
+-	if (sdev->skip_vpd_pages)
++	if (!scsi_device_supports_vpd(sdev))
+ 		return;
++
+ retry_pg0:
+ 	vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
+ 	if (!vpd_buf)
+diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c
+index c126966130ab..ce79de822e46 100644
+--- a/drivers/scsi/scsi_common.c
++++ b/drivers/scsi/scsi_common.c
+@@ -278,8 +278,16 @@ int scsi_set_sense_information(u8 *buf, int buf_len, u64 info)
+ 		ucp[3] = 0;
+ 		put_unaligned_be64(info, &ucp[4]);
+ 	} else if ((buf[0] & 0x7f) == 0x70) {
+-		buf[0] |= 0x80;
+-		put_unaligned_be64(info, &buf[3]);
++		/*
++		 * Only set the 'VALID' bit if we can represent the value
++		 * correctly; otherwise just fill out the lower bytes and
++		 * clear the 'VALID' flag.
++		 */
++		if (info <= 0xffffffffUL)
++			buf[0] |= 0x80;
++		else
++			buf[0] &= 0x7f;
++		put_unaligned_be32((u32)info, &buf[3]);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index 6a820668d442..b7cc6027cb7b 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -315,6 +315,7 @@ static void scsi_target_destroy(struct scsi_target *starget)
+ 	struct Scsi_Host *shost = dev_to_shost(dev->parent);
+ 	unsigned long flags;
+ 
++	BUG_ON(starget->state == STARGET_DEL);
+ 	starget->state = STARGET_DEL;
+ 	transport_destroy_device(dev);
+ 	spin_lock_irqsave(shost->host_lock, flags);
+diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
+index 00bc7218a7f8..9e5f893aa3ad 100644
+--- a/drivers/scsi/scsi_sysfs.c
++++ b/drivers/scsi/scsi_sysfs.c
+@@ -1272,18 +1272,18 @@ static void __scsi_remove_target(struct scsi_target *starget)
+ void scsi_remove_target(struct device *dev)
+ {
+ 	struct Scsi_Host *shost = dev_to_shost(dev->parent);
+-	struct scsi_target *starget, *last_target = NULL;
++	struct scsi_target *starget;
+ 	unsigned long flags;
+ 
+ restart:
+ 	spin_lock_irqsave(shost->host_lock, flags);
+ 	list_for_each_entry(starget, &shost->__targets, siblings) {
+ 		if (starget->state == STARGET_DEL ||
+-		    starget == last_target)
++		    starget->state == STARGET_REMOVE)
+ 			continue;
+ 		if (starget->dev.parent == dev || &starget->dev == dev) {
+ 			kref_get(&starget->reap_ref);
+-			last_target = starget;
++			starget->state = STARGET_REMOVE;
+ 			spin_unlock_irqrestore(shost->host_lock, flags);
+ 			__scsi_remove_target(starget);
+ 			scsi_target_reap(starget);
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index d749da765df1..974ca5b45f8d 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -648,7 +648,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
+ 	 */
+ 	if (sdkp->lbprz) {
+ 		q->limits.discard_alignment = 0;
+-		q->limits.discard_granularity = 1;
++		q->limits.discard_granularity = logical_block_size;
+ 	} else {
+ 		q->limits.discard_alignment = sdkp->unmap_alignment *
+ 			logical_block_size;
+@@ -1275,18 +1275,19 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+ 	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
+ 	struct scsi_device *sdp = sdkp->device;
+ 	struct Scsi_Host *host = sdp->host;
++	sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
+ 	int diskinfo[4];
+ 
+ 	/* default to most commonly used values */
+-        diskinfo[0] = 0x40;	/* 1 << 6 */
+-       	diskinfo[1] = 0x20;	/* 1 << 5 */
+-       	diskinfo[2] = sdkp->capacity >> 11;
+-	
++	diskinfo[0] = 0x40;	/* 1 << 6 */
++	diskinfo[1] = 0x20;	/* 1 << 5 */
++	diskinfo[2] = capacity >> 11;
++
+ 	/* override with calculated, extended default, or driver values */
+ 	if (host->hostt->bios_param)
+-		host->hostt->bios_param(sdp, bdev, sdkp->capacity, diskinfo);
++		host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
+ 	else
+-		scsicam_bios_param(bdev, sdkp->capacity, diskinfo);
++		scsicam_bios_param(bdev, capacity, diskinfo);
+ 
+ 	geo->heads = diskinfo[0];
+ 	geo->sectors = diskinfo[1];
+@@ -2337,14 +2338,6 @@ got_data:
+ 	if (sdkp->capacity > 0xffffffff)
+ 		sdp->use_16_for_rw = 1;
+ 
+-	/* Rescale capacity to 512-byte units */
+-	if (sector_size == 4096)
+-		sdkp->capacity <<= 3;
+-	else if (sector_size == 2048)
+-		sdkp->capacity <<= 2;
+-	else if (sector_size == 1024)
+-		sdkp->capacity <<= 1;
+-
+ 	blk_queue_physical_block_size(sdp->request_queue,
+ 				      sdkp->physical_block_size);
+ 	sdkp->device->sector_size = sector_size;
+@@ -2795,28 +2788,6 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
+ 		sdkp->ws10 = 1;
+ }
+ 
+-static int sd_try_extended_inquiry(struct scsi_device *sdp)
+-{
+-	/* Attempt VPD inquiry if the device blacklist explicitly calls
+-	 * for it.
+-	 */
+-	if (sdp->try_vpd_pages)
+-		return 1;
+-	/*
+-	 * Although VPD inquiries can go to SCSI-2 type devices,
+-	 * some USB ones crash on receiving them, and the pages
+-	 * we currently ask for are for SPC-3 and beyond
+-	 */
+-	if (sdp->scsi_level > SCSI_SPC_2 && !sdp->skip_vpd_pages)
+-		return 1;
+-	return 0;
+-}
+-
+-static inline u32 logical_to_sectors(struct scsi_device *sdev, u32 blocks)
+-{
+-	return blocks << (ilog2(sdev->sector_size) - 9);
+-}
+-
+ /**
+  *	sd_revalidate_disk - called the first time a new disk is seen,
+  *	performs disk spin up, read_capacity, etc.
+@@ -2856,7 +2827,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
+ 	if (sdkp->media_present) {
+ 		sd_read_capacity(sdkp, buffer);
+ 
+-		if (sd_try_extended_inquiry(sdp)) {
++		if (scsi_device_supports_vpd(sdp)) {
+ 			sd_read_block_provisioning(sdkp);
+ 			sd_read_block_limits(sdkp);
+ 			sd_read_block_characteristics(sdkp);
+@@ -2900,7 +2871,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
+ 	/* Combine with controller limits */
+ 	q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
+ 
+-	set_capacity(disk, sdkp->capacity);
++	set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
+ 	sd_config_write_same(sdkp);
+ 	kfree(buffer);
+ 
+diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
+index 5f2a84aff29f..654630bb7d0e 100644
+--- a/drivers/scsi/sd.h
++++ b/drivers/scsi/sd.h
+@@ -65,7 +65,7 @@ struct scsi_disk {
+ 	struct device	dev;
+ 	struct gendisk	*disk;
+ 	atomic_t	openers;
+-	sector_t	capacity;	/* size in 512-byte sectors */
++	sector_t	capacity;	/* size in logical blocks */
+ 	u32		max_xfer_blocks;
+ 	u32		opt_xfer_blocks;
+ 	u32		max_ws_blocks;
+@@ -146,6 +146,11 @@ static inline int scsi_medium_access_command(struct scsi_cmnd *scmd)
+ 	return 0;
+ }
+ 
++static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blocks)
++{
++	return blocks << (ilog2(sdev->sector_size) - 9);
++}
++
+ /*
+  * A DIF-capable target device can be formatted with different
+  * protection schemes.  Currently 0 through 3 are defined:
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index 5e820674432c..ae7d9bdf409c 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -652,7 +652,8 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
+ 	else
+ 		hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
+ 	hp->dxfer_len = mxsize;
+-	if (hp->dxfer_direction == SG_DXFER_TO_DEV)
++	if ((hp->dxfer_direction == SG_DXFER_TO_DEV) ||
++	    (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV))
+ 		hp->dxferp = (char __user *)buf + cmd_size;
+ 	else
+ 		hp->dxferp = NULL;
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 292c04eec9ad..3ddcabb790a8 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -914,8 +914,9 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
+ 		do_work = true;
+ 		process_err_fn = storvsc_remove_lun;
+ 		break;
+-	case (SRB_STATUS_ABORTED | SRB_STATUS_AUTOSENSE_VALID):
+-		if ((asc == 0x2a) && (ascq == 0x9)) {
++	case SRB_STATUS_ABORTED:
++		if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID &&
++		    (asc == 0x2a) && (ascq == 0x9)) {
+ 			do_work = true;
+ 			process_err_fn = storvsc_device_scan;
+ 			/*
+diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
+index 534c58937a56..4a65c5bda146 100644
+--- a/drivers/soc/rockchip/pm_domains.c
++++ b/drivers/soc/rockchip/pm_domains.c
+@@ -419,6 +419,7 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
+ 		if (error) {
+ 			dev_err(dev, "failed to handle node %s: %d\n",
+ 				node->name, error);
++			of_node_put(node);
+ 			goto err_out;
+ 		}
+ 	}
+diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
+index ab9914ad8365..64232ecbb821 100644
+--- a/drivers/spi/spi-pxa2xx.c
++++ b/drivers/spi/spi-pxa2xx.c
+@@ -111,7 +111,7 @@ static const struct lpss_config lpss_platforms[] = {
+ 		.reg_general = -1,
+ 		.reg_ssp = 0x20,
+ 		.reg_cs_ctrl = 0x24,
+-		.reg_capabilities = 0xfc,
++		.reg_capabilities = -1,
+ 		.rx_threshold = 1,
+ 		.tx_threshold_lo = 32,
+ 		.tx_threshold_hi = 56,
+diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
+index 7cb1b2d710c1..475fb44c1883 100644
+--- a/drivers/spi/spi-rockchip.c
++++ b/drivers/spi/spi-rockchip.c
+@@ -265,7 +265,10 @@ static inline u32 rx_max(struct rockchip_spi *rs)
+ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
+ {
+ 	u32 ser;
+-	struct rockchip_spi *rs = spi_master_get_devdata(spi->master);
++	struct spi_master *master = spi->master;
++	struct rockchip_spi *rs = spi_master_get_devdata(master);
++
++	pm_runtime_get_sync(rs->dev);
+ 
+ 	ser = readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) & SER_MASK;
+ 
+@@ -290,6 +293,8 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
+ 		ser &= ~(1 << spi->chip_select);
+ 
+ 	writel_relaxed(ser, rs->regs + ROCKCHIP_SPI_SER);
++
++	pm_runtime_put_sync(rs->dev);
+ }
+ 
+ static int rockchip_spi_prepare_message(struct spi_master *master,
+diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
+index 64318fcfacf2..5044c6198332 100644
+--- a/drivers/spi/spi-ti-qspi.c
++++ b/drivers/spi/spi-ti-qspi.c
+@@ -94,6 +94,7 @@ struct ti_qspi {
+ #define QSPI_FLEN(n)			((n - 1) << 0)
+ #define QSPI_WLEN_MAX_BITS		128
+ #define QSPI_WLEN_MAX_BYTES		16
++#define QSPI_WLEN_MASK			QSPI_WLEN(QSPI_WLEN_MAX_BITS)
+ 
+ /* STATUS REGISTER */
+ #define BUSY				0x01
+@@ -224,16 +225,16 @@ static inline int ti_qspi_poll_wc(struct ti_qspi *qspi)
+ 	return  -ETIMEDOUT;
+ }
+ 
+-static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
++static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t,
++			  int count)
+ {
+-	int wlen, count, xfer_len;
++	int wlen, xfer_len;
+ 	unsigned int cmd;
+ 	const u8 *txbuf;
+ 	u32 data;
+ 
+ 	txbuf = t->tx_buf;
+ 	cmd = qspi->cmd | QSPI_WR_SNGL;
+-	count = t->len;
+ 	wlen = t->bits_per_word >> 3;	/* in bytes */
+ 	xfer_len = wlen;
+ 
+@@ -293,9 +294,10 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
+ 	return 0;
+ }
+ 
+-static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
++static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
++			 int count)
+ {
+-	int wlen, count;
++	int wlen;
+ 	unsigned int cmd;
+ 	u8 *rxbuf;
+ 
+@@ -312,7 +314,6 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
+ 		cmd |= QSPI_RD_SNGL;
+ 		break;
+ 	}
+-	count = t->len;
+ 	wlen = t->bits_per_word >> 3;	/* in bytes */
+ 
+ 	while (count) {
+@@ -343,12 +344,13 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
+ 	return 0;
+ }
+ 
+-static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t)
++static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t,
++			     int count)
+ {
+ 	int ret;
+ 
+ 	if (t->tx_buf) {
+-		ret = qspi_write_msg(qspi, t);
++		ret = qspi_write_msg(qspi, t, count);
+ 		if (ret) {
+ 			dev_dbg(qspi->dev, "Error while writing\n");
+ 			return ret;
+@@ -356,7 +358,7 @@ static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t)
+ 	}
+ 
+ 	if (t->rx_buf) {
+-		ret = qspi_read_msg(qspi, t);
++		ret = qspi_read_msg(qspi, t, count);
+ 		if (ret) {
+ 			dev_dbg(qspi->dev, "Error while reading\n");
+ 			return ret;
+@@ -373,7 +375,8 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
+ 	struct spi_device *spi = m->spi;
+ 	struct spi_transfer *t;
+ 	int status = 0, ret;
+-	int frame_length;
++	unsigned int frame_len_words, transfer_len_words;
++	int wlen;
+ 
+ 	/* setup device control reg */
+ 	qspi->dc = 0;
+@@ -385,30 +388,38 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
+ 	if (spi->mode & SPI_CS_HIGH)
+ 		qspi->dc |= QSPI_CSPOL(spi->chip_select);
+ 
+-	frame_length = (m->frame_length << 3) / spi->bits_per_word;
+-
+-	frame_length = clamp(frame_length, 0, QSPI_FRAME);
++	frame_len_words = 0;
++	list_for_each_entry(t, &m->transfers, transfer_list)
++		frame_len_words += t->len / (t->bits_per_word >> 3);
++	frame_len_words = min_t(unsigned int, frame_len_words, QSPI_FRAME);
+ 
+ 	/* setup command reg */
+ 	qspi->cmd = 0;
+ 	qspi->cmd |= QSPI_EN_CS(spi->chip_select);
+-	qspi->cmd |= QSPI_FLEN(frame_length);
++	qspi->cmd |= QSPI_FLEN(frame_len_words);
+ 
+ 	ti_qspi_write(qspi, qspi->dc, QSPI_SPI_DC_REG);
+ 
+ 	mutex_lock(&qspi->list_lock);
+ 
+ 	list_for_each_entry(t, &m->transfers, transfer_list) {
+-		qspi->cmd |= QSPI_WLEN(t->bits_per_word);
++		qspi->cmd = ((qspi->cmd & ~QSPI_WLEN_MASK) |
++			     QSPI_WLEN(t->bits_per_word));
++
++		wlen = t->bits_per_word >> 3;
++		transfer_len_words = min(t->len / wlen, frame_len_words);
+ 
+-		ret = qspi_transfer_msg(qspi, t);
++		ret = qspi_transfer_msg(qspi, t, transfer_len_words * wlen);
+ 		if (ret) {
+ 			dev_dbg(qspi->dev, "transfer message failed\n");
+ 			mutex_unlock(&qspi->list_lock);
+ 			return -EINVAL;
+ 		}
+ 
+-		m->actual_length += t->len;
++		m->actual_length += transfer_len_words * wlen;
++		frame_len_words -= transfer_len_words;
++		if (frame_len_words == 0)
++			break;
+ 	}
+ 
+ 	mutex_unlock(&qspi->list_lock);
+diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
+index e237e9f3312d..df560216d702 100644
+--- a/drivers/staging/android/ion/ion.c
++++ b/drivers/staging/android/ion/ion.c
+@@ -251,8 +251,10 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
+ 	 * memory coming from the heaps is ready for dma, ie if it has a
+ 	 * cached mapping that mapping has been invalidated
+ 	 */
+-	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
++	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
+ 		sg_dma_address(sg) = sg_phys(sg);
++		sg_dma_len(sg) = sg->length;
++	}
+ 	mutex_lock(&dev->buffer_lock);
+ 	ion_buffer_add(dev, buffer);
+ 	mutex_unlock(&dev->buffer_lock);
+diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c
+index b8dcf5a26cc4..58d46893e5ff 100644
+--- a/drivers/staging/android/ion/ion_test.c
++++ b/drivers/staging/android/ion/ion_test.c
+@@ -285,8 +285,8 @@ static int __init ion_test_init(void)
+ {
+ 	ion_test_pdev = platform_device_register_simple("ion-test",
+ 							-1, NULL, 0);
+-	if (!ion_test_pdev)
+-		return -ENODEV;
++	if (IS_ERR(ion_test_pdev))
++		return PTR_ERR(ion_test_pdev);
+ 
+ 	return platform_driver_probe(&ion_test_platform_driver, ion_test_probe);
+ }
+diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
+index 940781183fac..3be10963f98b 100644
+--- a/drivers/staging/comedi/drivers/das1800.c
++++ b/drivers/staging/comedi/drivers/das1800.c
+@@ -567,14 +567,17 @@ static int das1800_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
+ 	struct comedi_isadma_desc *desc;
+ 	int i;
+ 
+-	outb(0x0, dev->iobase + DAS1800_STATUS);	/* disable conversions */
+-	outb(0x0, dev->iobase + DAS1800_CONTROL_B);	/* disable interrupts and dma */
+-	outb(0x0, dev->iobase + DAS1800_CONTROL_A);	/* disable and clear fifo and stop triggering */
+-
+-	for (i = 0; i < 2; i++) {
+-		desc = &dma->desc[i];
+-		if (desc->chan)
+-			comedi_isadma_disable(desc->chan);
++	/* disable and stop conversions */
++	outb(0x0, dev->iobase + DAS1800_STATUS);
++	outb(0x0, dev->iobase + DAS1800_CONTROL_B);
++	outb(0x0, dev->iobase + DAS1800_CONTROL_A);
++
++	if (dma) {
++		for (i = 0; i < 2; i++) {
++			desc = &dma->desc[i];
++			if (desc->chan)
++				comedi_isadma_disable(desc->chan);
++		}
+ 	}
+ 
+ 	return 0;
+@@ -934,13 +937,14 @@ static void das1800_ai_setup_dma(struct comedi_device *dev,
+ {
+ 	struct das1800_private *devpriv = dev->private;
+ 	struct comedi_isadma *dma = devpriv->dma;
+-	struct comedi_isadma_desc *desc = &dma->desc[0];
++	struct comedi_isadma_desc *desc;
+ 	unsigned int bytes;
+ 
+ 	if ((devpriv->irq_dma_bits & DMA_ENABLED) == 0)
+ 		return;
+ 
+ 	dma->cur_dma = 0;
++	desc = &dma->desc[0];
+ 
+ 	/* determine a dma transfer size to fill buffer in 0.3 sec */
+ 	bytes = das1800_ai_transfer_size(dev, s, desc->maxsize, 300000000);
+diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
+index 5e8130a7d670..0e9f77924e26 100644
+--- a/drivers/staging/comedi/drivers/ni_mio_common.c
++++ b/drivers/staging/comedi/drivers/ni_mio_common.c
+@@ -246,24 +246,24 @@ static void ni_writel(struct comedi_device *dev, uint32_t data, int reg)
+ {
+ 	if (dev->mmio)
+ 		writel(data, dev->mmio + reg);
+-
+-	outl(data, dev->iobase + reg);
++	else
++		outl(data, dev->iobase + reg);
+ }
+ 
+ static void ni_writew(struct comedi_device *dev, uint16_t data, int reg)
+ {
+ 	if (dev->mmio)
+ 		writew(data, dev->mmio + reg);
+-
+-	outw(data, dev->iobase + reg);
++	else
++		outw(data, dev->iobase + reg);
+ }
+ 
+ static void ni_writeb(struct comedi_device *dev, uint8_t data, int reg)
+ {
+ 	if (dev->mmio)
+ 		writeb(data, dev->mmio + reg);
+-
+-	outb(data, dev->iobase + reg);
++	else
++		outb(data, dev->iobase + reg);
+ }
+ 
+ static uint32_t ni_readl(struct comedi_device *dev, int reg)
+diff --git a/drivers/staging/comedi/drivers/ni_tiocmd.c b/drivers/staging/comedi/drivers/ni_tiocmd.c
+index 437f723bb34d..823e47910004 100644
+--- a/drivers/staging/comedi/drivers/ni_tiocmd.c
++++ b/drivers/staging/comedi/drivers/ni_tiocmd.c
+@@ -92,7 +92,7 @@ static int ni_tio_input_inttrig(struct comedi_device *dev,
+ 	unsigned long flags;
+ 	int ret = 0;
+ 
+-	if (trig_num != cmd->start_src)
++	if (trig_num != cmd->start_arg)
+ 		return -EINVAL;
+ 
+ 	spin_lock_irqsave(&counter->lock, flags);
+diff --git a/drivers/staging/rdma/hfi1/TODO b/drivers/staging/rdma/hfi1/TODO
+index 05de0dad8762..4c6f1d7d2eaf 100644
+--- a/drivers/staging/rdma/hfi1/TODO
++++ b/drivers/staging/rdma/hfi1/TODO
+@@ -3,4 +3,4 @@ July, 2015
+ - Remove unneeded file entries in sysfs
+ - Remove software processing of IB protocol and place in library for use
+   by qib, ipath (if still present), hfi1, and eventually soft-roce
+-
++- Replace incorrect uAPI
+diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c
+index d57d549052c8..29ae777556d2 100644
+--- a/drivers/staging/rdma/hfi1/file_ops.c
++++ b/drivers/staging/rdma/hfi1/file_ops.c
+@@ -52,6 +52,8 @@
+ #include <linux/vmalloc.h>
+ #include <linux/io.h>
+ 
++#include <rdma/ib.h>
++
+ #include "hfi.h"
+ #include "pio.h"
+ #include "device.h"
+@@ -194,6 +196,10 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
+ 	int uctxt_required = 1;
+ 	int must_be_root = 0;
+ 
++	/* FIXME: This interface cannot continue out of staging */
++	if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
++		return -EACCES;
++
+ 	if (count < sizeof(cmd)) {
+ 		ret = -EINVAL;
+ 		goto bail;
+diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/wilc_spi.c
+index 86de50c9f7f5..b3d6541b3896 100644
+--- a/drivers/staging/wilc1000/wilc_spi.c
++++ b/drivers/staging/wilc1000/wilc_spi.c
+@@ -120,8 +120,6 @@ static u8 crc7(u8 crc, const u8 *buffer, u32 len)
+ 
+ #define USE_SPI_DMA     0
+ 
+-static const struct wilc1000_ops wilc1000_spi_ops;
+-
+ static int wilc_bus_probe(struct spi_device *spi)
+ {
+ 	int ret, gpio;
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 867bc6d0a68a..43d8b42c0f22 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -2596,8 +2596,6 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
+ 
+ 	list_for_each_entry_safe(se_cmd, tmp_cmd,
+ 				&se_sess->sess_wait_list, se_cmd_list) {
+-		list_del_init(&se_cmd->se_cmd_list);
+-
+ 		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
+ 			" %d\n", se_cmd, se_cmd->t_state,
+ 			se_cmd->se_tfo->get_cmd_state(se_cmd));
+diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
+index b58e3fb9b311..433085a97626 100644
+--- a/drivers/thermal/rockchip_thermal.c
++++ b/drivers/thermal/rockchip_thermal.c
+@@ -693,15 +693,14 @@ static int rockchip_configure_from_dt(struct device *dev,
+ 			 thermal->chip->tshut_temp);
+ 		thermal->tshut_temp = thermal->chip->tshut_temp;
+ 	} else {
++		if (shut_temp > INT_MAX) {
++			dev_err(dev, "Invalid tshut temperature specified: %d\n",
++				shut_temp);
++			return -ERANGE;
++		}
+ 		thermal->tshut_temp = shut_temp;
+ 	}
+ 
+-	if (thermal->tshut_temp > INT_MAX) {
+-		dev_err(dev, "Invalid tshut temperature specified: %d\n",
+-			thermal->tshut_temp);
+-		return -ERANGE;
+-	}
+-
+ 	if (of_property_read_u32(np, "rockchip,hw-tshut-mode", &tshut_mode)) {
+ 		dev_warn(dev,
+ 			 "Missing tshut mode property, using default (%s)\n",
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index a0a8fd1235e2..d4b54653ecf8 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -454,6 +454,10 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip)
+ {
+ 	enum thermal_trip_type type;
+ 
++	/* Ignore disabled trip points */
++	if (test_bit(trip, &tz->trips_disabled))
++		return;
++
+ 	tz->ops->get_trip_type(tz, trip, &type);
+ 
+ 	if (type == THERMAL_TRIP_CRITICAL || type == THERMAL_TRIP_HOT)
+@@ -1800,6 +1804,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
+ {
+ 	struct thermal_zone_device *tz;
+ 	enum thermal_trip_type trip_type;
++	int trip_temp;
+ 	int result;
+ 	int count;
+ 	int passive = 0;
+@@ -1871,9 +1876,15 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
+ 		goto unregister;
+ 
+ 	for (count = 0; count < trips; count++) {
+-		tz->ops->get_trip_type(tz, count, &trip_type);
++		if (tz->ops->get_trip_type(tz, count, &trip_type))
++			set_bit(count, &tz->trips_disabled);
+ 		if (trip_type == THERMAL_TRIP_PASSIVE)
+ 			passive = 1;
++		if (tz->ops->get_trip_temp(tz, count, &trip_temp))
++			set_bit(count, &tz->trips_disabled);
++		/* Check for bogus trip points */
++		if (trip_temp == 0)
++			set_bit(count, &tz->trips_disabled);
+ 	}
+ 
+ 	if (!passive) {
+diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
+index 0dde34e3a7c5..545c60c826a1 100644
+--- a/drivers/thunderbolt/eeprom.c
++++ b/drivers/thunderbolt/eeprom.c
+@@ -444,6 +444,7 @@ int tb_drom_read(struct tb_switch *sw)
+ 	return tb_drom_parse_entries(sw);
+ err:
+ 	kfree(sw->drom);
++	sw->drom = NULL;
+ 	return -EIO;
+ 
+ }
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index c3fe026d3168..9aff37186246 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -2045,7 +2045,9 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm)
+ 		}
+ 	}
+ 	spin_unlock(&gsm_mux_lock);
+-	WARN_ON(i == MAX_MUX);
++	/* open failed before registering => nothing to do */
++	if (i == MAX_MUX)
++		return;
+ 
+ 	/* In theory disconnecting DLCI 0 is sufficient but for some
+ 	   modems this is apparently not the case. */
+diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
+index bbc4ce66c2c1..644ddb841d9f 100644
+--- a/drivers/tty/n_hdlc.c
++++ b/drivers/tty/n_hdlc.c
+@@ -600,7 +600,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
+ 	add_wait_queue(&tty->read_wait, &wait);
+ 
+ 	for (;;) {
+-		if (test_bit(TTY_OTHER_DONE, &tty->flags)) {
++		if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
+ 			ret = -EIO;
+ 			break;
+ 		}
+@@ -828,7 +828,7 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
+ 		/* set bits for operations that won't block */
+ 		if (n_hdlc->rx_buf_list.head)
+ 			mask |= POLLIN | POLLRDNORM;	/* readable */
+-		if (test_bit(TTY_OTHER_DONE, &tty->flags))
++		if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
+ 			mask |= POLLHUP;
+ 		if (tty_hung_up_p(filp))
+ 			mask |= POLLHUP;
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index b280abaad91b..c12def71ed37 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -1963,18 +1963,6 @@ static inline int input_available_p(struct tty_struct *tty, int poll)
+ 		return ldata->commit_head - ldata->read_tail >= amt;
+ }
+ 
+-static inline int check_other_done(struct tty_struct *tty)
+-{
+-	int done = test_bit(TTY_OTHER_DONE, &tty->flags);
+-	if (done) {
+-		/* paired with cmpxchg() in check_other_closed(); ensures
+-		 * read buffer head index is not stale
+-		 */
+-		smp_mb__after_atomic();
+-	}
+-	return done;
+-}
+-
+ /**
+  *	copy_from_read_buf	-	copy read data directly
+  *	@tty: terminal device
+@@ -2170,7 +2158,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
+ 	struct n_tty_data *ldata = tty->disc_data;
+ 	unsigned char __user *b = buf;
+ 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
+-	int c, done;
++	int c;
+ 	int minimum, time;
+ 	ssize_t retval = 0;
+ 	long timeout;
+@@ -2238,32 +2226,35 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
+ 		    ((minimum - (b - buf)) >= 1))
+ 			ldata->minimum_to_wake = (minimum - (b - buf));
+ 
+-		done = check_other_done(tty);
+-
+ 		if (!input_available_p(tty, 0)) {
+-			if (done) {
+-				retval = -EIO;
+-				break;
+-			}
+-			if (tty_hung_up_p(file))
+-				break;
+-			if (!timeout)
+-				break;
+-			if (file->f_flags & O_NONBLOCK) {
+-				retval = -EAGAIN;
+-				break;
+-			}
+-			if (signal_pending(current)) {
+-				retval = -ERESTARTSYS;
+-				break;
+-			}
+ 			up_read(&tty->termios_rwsem);
++			tty_buffer_flush_work(tty->port);
++			down_read(&tty->termios_rwsem);
++			if (!input_available_p(tty, 0)) {
++				if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
++					retval = -EIO;
++					break;
++				}
++				if (tty_hung_up_p(file))
++					break;
++				if (!timeout)
++					break;
++				if (file->f_flags & O_NONBLOCK) {
++					retval = -EAGAIN;
++					break;
++				}
++				if (signal_pending(current)) {
++					retval = -ERESTARTSYS;
++					break;
++				}
++				up_read(&tty->termios_rwsem);
+ 
+-			timeout = wait_woken(&wait, TASK_INTERRUPTIBLE,
+-					     timeout);
++				timeout = wait_woken(&wait, TASK_INTERRUPTIBLE,
++						timeout);
+ 
+-			down_read(&tty->termios_rwsem);
+-			continue;
++				down_read(&tty->termios_rwsem);
++				continue;
++			}
+ 		}
+ 
+ 		if (ldata->icanon && !L_EXTPROC(tty)) {
+@@ -2445,12 +2436,17 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
+ 
+ 	poll_wait(file, &tty->read_wait, wait);
+ 	poll_wait(file, &tty->write_wait, wait);
+-	if (check_other_done(tty))
+-		mask |= POLLHUP;
+ 	if (input_available_p(tty, 1))
+ 		mask |= POLLIN | POLLRDNORM;
++	else {
++		tty_buffer_flush_work(tty->port);
++		if (input_available_p(tty, 1))
++			mask |= POLLIN | POLLRDNORM;
++	}
+ 	if (tty->packet && tty->link->ctrl_status)
+ 		mask |= POLLPRI | POLLIN | POLLRDNORM;
++	if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
++		mask |= POLLHUP;
+ 	if (tty_hung_up_p(file))
+ 		mask |= POLLHUP;
+ 	if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) {
+diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
+index 2348fa613707..6427a39bd360 100644
+--- a/drivers/tty/pty.c
++++ b/drivers/tty/pty.c
+@@ -59,7 +59,7 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
+ 	if (!tty->link)
+ 		return;
+ 	set_bit(TTY_OTHER_CLOSED, &tty->link->flags);
+-	tty_flip_buffer_push(tty->link->port);
++	wake_up_interruptible(&tty->link->read_wait);
+ 	wake_up_interruptible(&tty->link->write_wait);
+ 	if (tty->driver->subtype == PTY_TYPE_MASTER) {
+ 		set_bit(TTY_OTHER_CLOSED, &tty->flags);
+@@ -247,9 +247,7 @@ static int pty_open(struct tty_struct *tty, struct file *filp)
+ 		goto out;
+ 
+ 	clear_bit(TTY_IO_ERROR, &tty->flags);
+-	/* TTY_OTHER_CLOSED must be cleared before TTY_OTHER_DONE */
+ 	clear_bit(TTY_OTHER_CLOSED, &tty->link->flags);
+-	clear_bit(TTY_OTHER_DONE, &tty->link->flags);
+ 	set_bit(TTY_THROTTLED, &tty->flags);
+ 	return 0;
+ 
+diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c
+index 88531a36b69c..ed489880e62b 100644
+--- a/drivers/tty/serial/8250/8250_mid.c
++++ b/drivers/tty/serial/8250/8250_mid.c
+@@ -14,6 +14,7 @@
+ #include <linux/pci.h>
+ 
+ #include <linux/dma/hsu.h>
++#include <linux/8250_pci.h>
+ 
+ #include "8250.h"
+ 
+@@ -24,6 +25,7 @@
+ #define PCI_DEVICE_ID_INTEL_DNV_UART	0x19d8
+ 
+ /* Intel MID Specific registers */
++#define INTEL_MID_UART_DNV_FISR		0x08
+ #define INTEL_MID_UART_PS		0x30
+ #define INTEL_MID_UART_MUL		0x34
+ #define INTEL_MID_UART_DIV		0x38
+@@ -31,6 +33,7 @@
+ struct mid8250;
+ 
+ struct mid8250_board {
++	unsigned int flags;
+ 	unsigned long freq;
+ 	unsigned int base_baud;
+ 	int (*setup)(struct mid8250 *, struct uart_port *p);
+@@ -88,16 +91,16 @@ static int tng_setup(struct mid8250 *mid, struct uart_port *p)
+ static int dnv_handle_irq(struct uart_port *p)
+ {
+ 	struct mid8250 *mid = p->private_data;
+-	int ret;
+-
+-	ret = hsu_dma_irq(&mid->dma_chip, 0);
+-	ret |= hsu_dma_irq(&mid->dma_chip, 1);
+-
+-	/* For now, letting the HW generate separate interrupt for the UART */
+-	if (ret)
+-		return ret;
+-
+-	return serial8250_handle_irq(p, serial_port_in(p, UART_IIR));
++	unsigned int fisr = serial_port_in(p, INTEL_MID_UART_DNV_FISR);
++	int ret = IRQ_NONE;
++
++	if (fisr & BIT(2))
++		ret |= hsu_dma_irq(&mid->dma_chip, 1);
++	if (fisr & BIT(1))
++		ret |= hsu_dma_irq(&mid->dma_chip, 0);
++	if (fisr & BIT(0))
++		ret |= serial8250_handle_irq(p, serial_port_in(p, UART_IIR));
++	return ret;
+ }
+ 
+ #define DNV_DMA_CHAN_OFFSET 0x80
+@@ -106,12 +109,13 @@ static int dnv_setup(struct mid8250 *mid, struct uart_port *p)
+ {
+ 	struct hsu_dma_chip *chip = &mid->dma_chip;
+ 	struct pci_dev *pdev = to_pci_dev(p->dev);
++	unsigned int bar = FL_GET_BASE(mid->board->flags);
+ 	int ret;
+ 
+ 	chip->dev = &pdev->dev;
+ 	chip->irq = pdev->irq;
+ 	chip->regs = p->membase;
+-	chip->length = pci_resource_len(pdev, 0);
++	chip->length = pci_resource_len(pdev, bar);
+ 	chip->offset = DNV_DMA_CHAN_OFFSET;
+ 
+ 	/* Falling back to PIO mode if DMA probing fails */
+@@ -217,6 +221,7 @@ static int mid8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ {
+ 	struct uart_8250_port uart;
+ 	struct mid8250 *mid;
++	unsigned int bar;
+ 	int ret;
+ 
+ 	ret = pcim_enable_device(pdev);
+@@ -230,6 +235,7 @@ static int mid8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 		return -ENOMEM;
+ 
+ 	mid->board = (struct mid8250_board *)id->driver_data;
++	bar = FL_GET_BASE(mid->board->flags);
+ 
+ 	memset(&uart, 0, sizeof(struct uart_8250_port));
+ 
+@@ -242,8 +248,8 @@ static int mid8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	uart.port.flags = UPF_SHARE_IRQ | UPF_FIXED_PORT | UPF_FIXED_TYPE;
+ 	uart.port.set_termios = mid8250_set_termios;
+ 
+-	uart.port.mapbase = pci_resource_start(pdev, 0);
+-	uart.port.membase = pcim_iomap(pdev, 0, 0);
++	uart.port.mapbase = pci_resource_start(pdev, bar);
++	uart.port.membase = pcim_iomap(pdev, bar, 0);
+ 	if (!uart.port.membase)
+ 		return -ENOMEM;
+ 
+@@ -282,18 +288,21 @@ static void mid8250_remove(struct pci_dev *pdev)
+ }
+ 
+ static const struct mid8250_board pnw_board = {
++	.flags = FL_BASE0,
+ 	.freq = 50000000,
+ 	.base_baud = 115200,
+ 	.setup = pnw_setup,
+ };
+ 
+ static const struct mid8250_board tng_board = {
++	.flags = FL_BASE0,
+ 	.freq = 38400000,
+ 	.base_baud = 1843200,
+ 	.setup = tng_setup,
+ };
+ 
+ static const struct mid8250_board dnv_board = {
++	.flags = FL_BASE1,
+ 	.freq = 133333333,
+ 	.base_baud = 115200,
+ 	.setup = dnv_setup,
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index 7cd6f9a90542..c1d4a8fa9be8 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -1401,6 +1401,9 @@ byt_set_termios(struct uart_port *p, struct ktermios *termios,
+ 	unsigned long m, n;
+ 	u32 reg;
+ 
++	/* Gracefully handle the B0 case: fall back to B9600 */
++	fuart = fuart ? fuart : 9600 * 16;
++
+ 	/* Get Fuart closer to Fref */
+ 	fuart *= rounddown_pow_of_two(fref / fuart);
+ 
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index 8d262bce97e4..720b9465b12e 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -731,22 +731,16 @@ static int size_fifo(struct uart_8250_port *up)
+  */
+ static unsigned int autoconfig_read_divisor_id(struct uart_8250_port *p)
+ {
+-	unsigned char old_dll, old_dlm, old_lcr;
+-	unsigned int id;
++	unsigned char old_lcr;
++	unsigned int id, old_dl;
+ 
+ 	old_lcr = serial_in(p, UART_LCR);
+ 	serial_out(p, UART_LCR, UART_LCR_CONF_MODE_A);
++	old_dl = serial_dl_read(p);
++	serial_dl_write(p, 0);
++	id = serial_dl_read(p);
++	serial_dl_write(p, old_dl);
+ 
+-	old_dll = serial_in(p, UART_DLL);
+-	old_dlm = serial_in(p, UART_DLM);
+-
+-	serial_out(p, UART_DLL, 0);
+-	serial_out(p, UART_DLM, 0);
+-
+-	id = serial_in(p, UART_DLL) | serial_in(p, UART_DLM) << 8;
+-
+-	serial_out(p, UART_DLL, old_dll);
+-	serial_out(p, UART_DLM, old_dlm);
+ 	serial_out(p, UART_LCR, old_lcr);
+ 
+ 	return id;
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index 1c0884d8ef32..172a8ccb0b63 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -273,6 +273,13 @@ static bool atmel_use_dma_rx(struct uart_port *port)
+ 	return atmel_port->use_dma_rx;
+ }
+ 
++static bool atmel_use_fifo(struct uart_port *port)
++{
++	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
++
++	return atmel_port->fifo_size;
++}
++
+ static unsigned int atmel_get_lines_status(struct uart_port *port)
+ {
+ 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+@@ -2082,7 +2089,12 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
+ 		mode |= ATMEL_US_USMODE_RS485;
+ 	} else if (termios->c_cflag & CRTSCTS) {
+ 		/* RS232 with hardware handshake (RTS/CTS) */
+-		mode |= ATMEL_US_USMODE_HWHS;
++		if (atmel_use_dma_rx(port) && !atmel_use_fifo(port)) {
++			dev_info(port->dev, "not enabling hardware flow control because DMA is used");
++			termios->c_cflag &= ~CRTSCTS;
++		} else {
++			mode |= ATMEL_US_USMODE_HWHS;
++		}
+ 	} else {
+ 		/* RS232 without hadware handshake */
+ 		mode |= ATMEL_US_USMODE_NORMAL;
+diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
+index d72cd736bdc6..8320173af846 100644
+--- a/drivers/tty/serial/samsung.c
++++ b/drivers/tty/serial/samsung.c
+@@ -1263,6 +1263,8 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
+ 	/* check to see if we need  to change clock source */
+ 
+ 	if (ourport->baudclk != clk) {
++		clk_prepare_enable(clk);
++
+ 		s3c24xx_serial_setsource(port, clk_sel);
+ 
+ 		if (!IS_ERR(ourport->baudclk)) {
+@@ -1270,8 +1272,6 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
+ 			ourport->baudclk = ERR_PTR(-EINVAL);
+ 		}
+ 
+-		clk_prepare_enable(clk);
+-
+ 		ourport->baudclk = clk;
+ 		ourport->baudclk_rate = clk ? clk_get_rate(clk) : 0;
+ 	}
+diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
+index 3cd31e0d4bd9..fb31eecb708d 100644
+--- a/drivers/tty/tty_buffer.c
++++ b/drivers/tty/tty_buffer.c
+@@ -37,29 +37,6 @@
+ 
+ #define TTY_BUFFER_PAGE	(((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)
+ 
+-/*
+- * If all tty flip buffers have been processed by flush_to_ldisc() or
+- * dropped by tty_buffer_flush(), check if the linked pty has been closed.
+- * If so, wake the reader/poll to process
+- */
+-static inline void check_other_closed(struct tty_struct *tty)
+-{
+-	unsigned long flags, old;
+-
+-	/* transition from TTY_OTHER_CLOSED => TTY_OTHER_DONE must be atomic */
+-	for (flags = ACCESS_ONCE(tty->flags);
+-	     test_bit(TTY_OTHER_CLOSED, &flags);
+-	     ) {
+-		old = flags;
+-		__set_bit(TTY_OTHER_DONE, &flags);
+-		flags = cmpxchg(&tty->flags, old, flags);
+-		if (old == flags) {
+-			wake_up_interruptible(&tty->read_wait);
+-			break;
+-		}
+-	}
+-}
+-
+ /**
+  *	tty_buffer_lock_exclusive	-	gain exclusive access to buffer
+  *	tty_buffer_unlock_exclusive	-	release exclusive access
+@@ -254,8 +231,6 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
+ 	if (ld && ld->ops->flush_buffer)
+ 		ld->ops->flush_buffer(tty);
+ 
+-	check_other_closed(tty);
+-
+ 	atomic_dec(&buf->priority);
+ 	mutex_unlock(&buf->lock);
+ }
+@@ -505,10 +480,8 @@ static void flush_to_ldisc(struct work_struct *work)
+ 		 */
+ 		count = smp_load_acquire(&head->commit) - head->read;
+ 		if (!count) {
+-			if (next == NULL) {
+-				check_other_closed(tty);
++			if (next == NULL)
+ 				break;
+-			}
+ 			buf->head = next;
+ 			tty_buffer_free(port, head);
+ 			continue;
+@@ -597,3 +570,8 @@ bool tty_buffer_cancel_work(struct tty_port *port)
+ {
+ 	return cancel_work_sync(&port->buf.work);
+ }
++
++void tty_buffer_flush_work(struct tty_port *port)
++{
++	flush_work(&port->buf.work);
++}
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index bd51bdd0a7bf..2b5382ea4842 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -3583,9 +3583,10 @@ static int do_register_con_driver(const struct consw *csw, int first, int last)
+ 		goto err;
+ 
+ 	desc = csw->con_startup();
+-
+-	if (!desc)
++	if (!desc) {
++		retval = -ENODEV;
+ 		goto err;
++	}
+ 
+ 	retval = -EINVAL;
+ 
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index fa4e23930614..d37fdcc3143c 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1114,6 +1114,9 @@ static int acm_probe(struct usb_interface *intf,
+ 	if (quirks == NO_UNION_NORMAL) {
+ 		data_interface = usb_ifnum_to_if(usb_dev, 1);
+ 		control_interface = usb_ifnum_to_if(usb_dev, 0);
++		/* we would crash */
++		if (!data_interface || !control_interface)
++			return -ENODEV;
+ 		goto skip_normal_probe;
+ 	}
+ 
+diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
+index 56593a9a8726..dadd1e8dfe09 100644
+--- a/drivers/usb/core/driver.c
++++ b/drivers/usb/core/driver.c
+@@ -284,7 +284,7 @@ static int usb_probe_interface(struct device *dev)
+ 	struct usb_device *udev = interface_to_usbdev(intf);
+ 	const struct usb_device_id *id;
+ 	int error = -ENODEV;
+-	int lpm_disable_error;
++	int lpm_disable_error = -ENODEV;
+ 
+ 	dev_dbg(dev, "%s\n", __func__);
+ 
+@@ -336,12 +336,14 @@ static int usb_probe_interface(struct device *dev)
+ 	 * setting during probe, that should also be fine.  usb_set_interface()
+ 	 * will attempt to disable LPM, and fail if it can't disable it.
+ 	 */
+-	lpm_disable_error = usb_unlocked_disable_lpm(udev);
+-	if (lpm_disable_error && driver->disable_hub_initiated_lpm) {
+-		dev_err(&intf->dev, "%s Failed to disable LPM for driver %s\n.",
+-				__func__, driver->name);
+-		error = lpm_disable_error;
+-		goto err;
++	if (driver->disable_hub_initiated_lpm) {
++		lpm_disable_error = usb_unlocked_disable_lpm(udev);
++		if (lpm_disable_error) {
++			dev_err(&intf->dev, "%s Failed to disable LPM for driver %s\n.",
++					__func__, driver->name);
++			error = lpm_disable_error;
++			goto err;
++		}
+ 	}
+ 
+ 	/* Carry out a deferred switch to altsetting 0 */
+@@ -391,7 +393,8 @@ static int usb_unbind_interface(struct device *dev)
+ 	struct usb_interface *intf = to_usb_interface(dev);
+ 	struct usb_host_endpoint *ep, **eps = NULL;
+ 	struct usb_device *udev;
+-	int i, j, error, r, lpm_disable_error;
++	int i, j, error, r;
++	int lpm_disable_error = -ENODEV;
+ 
+ 	intf->condition = USB_INTERFACE_UNBINDING;
+ 
+@@ -399,12 +402,13 @@ static int usb_unbind_interface(struct device *dev)
+ 	udev = interface_to_usbdev(intf);
+ 	error = usb_autoresume_device(udev);
+ 
+-	/* Hub-initiated LPM policy may change, so attempt to disable LPM until
++	/* If hub-initiated LPM policy may change, attempt to disable LPM until
+ 	 * the driver is unbound.  If LPM isn't disabled, that's fine because it
+ 	 * wouldn't be enabled unless all the bound interfaces supported
+ 	 * hub-initiated LPM.
+ 	 */
+-	lpm_disable_error = usb_unlocked_disable_lpm(udev);
++	if (driver->disable_hub_initiated_lpm)
++		lpm_disable_error = usb_unlocked_disable_lpm(udev);
+ 
+ 	/*
+ 	 * Terminate all URBs for this interface unless the driver
+@@ -502,11 +506,15 @@ static int usb_unbind_interface(struct device *dev)
+ int usb_driver_claim_interface(struct usb_driver *driver,
+ 				struct usb_interface *iface, void *priv)
+ {
+-	struct device *dev = &iface->dev;
++	struct device *dev;
+ 	struct usb_device *udev;
+ 	int retval = 0;
+-	int lpm_disable_error;
++	int lpm_disable_error = -ENODEV;
+ 
++	if (!iface)
++		return -ENODEV;
++
++	dev = &iface->dev;
+ 	if (dev->driver)
+ 		return -EBUSY;
+ 
+@@ -522,12 +530,14 @@ int usb_driver_claim_interface(struct usb_driver *driver,
+ 
+ 	iface->condition = USB_INTERFACE_BOUND;
+ 
+-	/* Disable LPM until this driver is bound. */
+-	lpm_disable_error = usb_unlocked_disable_lpm(udev);
+-	if (lpm_disable_error && driver->disable_hub_initiated_lpm) {
+-		dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n.",
+-				__func__, driver->name);
+-		return -ENOMEM;
++	/* See the comment about disabling LPM in usb_probe_interface(). */
++	if (driver->disable_hub_initiated_lpm) {
++		lpm_disable_error = usb_unlocked_disable_lpm(udev);
++		if (lpm_disable_error) {
++			dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n.",
++					__func__, driver->name);
++			return -ENOMEM;
++		}
+ 	}
+ 
+ 	/* Claimed interfaces are initially inactive (suspended) and
+diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
+index 9eb1cff28bd4..b8b580e5ae6e 100644
+--- a/drivers/usb/core/hcd-pci.c
++++ b/drivers/usb/core/hcd-pci.c
+@@ -74,6 +74,15 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd,
+ 		if (companion->bus != pdev->bus ||
+ 				PCI_SLOT(companion->devfn) != slot)
+ 			continue;
++
++		/*
++		 * Companion device should be either UHCI,OHCI or EHCI host
++		 * controller, otherwise skip.
++		 */
++		if (companion->class != CL_UHCI && companion->class != CL_OHCI &&
++				companion->class != CL_EHCI)
++			continue;
++
+ 		companion_hcd = pci_get_drvdata(companion);
+ 		if (!companion_hcd || !companion_hcd->self.root_hub)
+ 			continue;
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 51b436918f78..84f65743f29a 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -4292,7 +4292,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+ {
+ 	struct usb_device	*hdev = hub->hdev;
+ 	struct usb_hcd		*hcd = bus_to_hcd(hdev->bus);
+-	int			i, j, retval;
++	int			retries, operations, retval, i;
+ 	unsigned		delay = HUB_SHORT_RESET_TIME;
+ 	enum usb_device_speed	oldspeed = udev->speed;
+ 	const char		*speed;
+@@ -4394,7 +4394,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+ 	 * first 8 bytes of the device descriptor to get the ep0 maxpacket
+ 	 * value.
+ 	 */
+-	for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) {
++	for (retries = 0; retries < GET_DESCRIPTOR_TRIES; (++retries, msleep(100))) {
+ 		bool did_new_scheme = false;
+ 
+ 		if (use_new_scheme(udev, retry_counter)) {
+@@ -4421,7 +4421,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+ 			 * 255 is for WUSB devices, we actually need to use
+ 			 * 512 (WUSB1.0[4.8.1]).
+ 			 */
+-			for (j = 0; j < 3; ++j) {
++			for (operations = 0; operations < 3; ++operations) {
+ 				buf->bMaxPacketSize0 = 0;
+ 				r = usb_control_msg(udev, usb_rcvaddr0pipe(),
+ 					USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
+@@ -4441,7 +4441,13 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+ 						r = -EPROTO;
+ 					break;
+ 				}
+-				if (r == 0)
++				/*
++				 * Some devices time out if they are powered on
++				 * when already connected. They need a second
++				 * reset. But only on the first attempt,
++				 * lest we get into a time out/reset loop
++				 */
++				if (r == 0  || (r == -ETIMEDOUT && retries == 0))
+ 					break;
+ 			}
+ 			udev->descriptor.bMaxPacketSize0 =
+@@ -4473,7 +4479,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+ 		 * authorization will assign the final address.
+ 		 */
+ 		if (udev->wusb == 0) {
+-			for (j = 0; j < SET_ADDRESS_TRIES; ++j) {
++			for (operations = 0; operations < SET_ADDRESS_TRIES; ++operations) {
+ 				retval = hub_set_address(udev, devnum);
+ 				if (retval >= 0)
+ 					break;
+diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
+index 14718a9ffcfb..460c855be0d0 100644
+--- a/drivers/usb/core/port.c
++++ b/drivers/usb/core/port.c
+@@ -249,18 +249,12 @@ static int usb_port_runtime_suspend(struct device *dev)
+ 
+ 	return retval;
+ }
+-
+-static int usb_port_prepare(struct device *dev)
+-{
+-	return 1;
+-}
+ #endif
+ 
+ static const struct dev_pm_ops usb_port_pm_ops = {
+ #ifdef CONFIG_PM
+ 	.runtime_suspend =	usb_port_runtime_suspend,
+ 	.runtime_resume =	usb_port_runtime_resume,
+-	.prepare =		usb_port_prepare,
+ #endif
+ };
+ 
+diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
+index ebb29caa3fe4..77e4c9bc0ab1 100644
+--- a/drivers/usb/core/usb.c
++++ b/drivers/usb/core/usb.c
+@@ -311,13 +311,7 @@ static int usb_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
+ 
+ static int usb_dev_prepare(struct device *dev)
+ {
+-	struct usb_device *udev = to_usb_device(dev);
+-
+-	/* Return 0 if the current wakeup setting is wrong, otherwise 1 */
+-	if (udev->do_remote_wakeup != device_may_wakeup(dev))
+-		return 0;
+-
+-	return 1;
++	return 0;		/* Implement eventually? */
+ }
+ 
+ static void usb_dev_complete(struct device *dev)
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index cf43e9e18368..97ef75af9632 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -646,24 +646,23 @@ static void ffs_user_copy_worker(struct work_struct *work)
+ 						   work);
+ 	int ret = io_data->req->status ? io_data->req->status :
+ 					 io_data->req->actual;
++	bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
+ 
+ 	if (io_data->read && ret > 0) {
+ 		use_mm(io_data->mm);
+ 		ret = copy_to_iter(io_data->buf, ret, &io_data->data);
+-		if (iov_iter_count(&io_data->data))
++		if (ret != io_data->req->actual && iov_iter_count(&io_data->data))
+ 			ret = -EFAULT;
+ 		unuse_mm(io_data->mm);
+ 	}
+ 
+ 	io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
+ 
+-	if (io_data->ffs->ffs_eventfd &&
+-	    !(io_data->kiocb->ki_flags & IOCB_EVENTFD))
++	if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
+ 		eventfd_signal(io_data->ffs->ffs_eventfd, 1);
+ 
+ 	usb_ep_free_request(io_data->ep, io_data->req);
+ 
+-	io_data->kiocb->private = NULL;
+ 	if (io_data->read)
+ 		kfree(io_data->to_free);
+ 	kfree(io_data->buf);
+diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
+index 223ccf89d226..a4f664062e0c 100644
+--- a/drivers/usb/gadget/function/f_mass_storage.c
++++ b/drivers/usb/gadget/function/f_mass_storage.c
+@@ -2977,25 +2977,6 @@ void fsg_common_set_inquiry_string(struct fsg_common *common, const char *vn,
+ }
+ EXPORT_SYMBOL_GPL(fsg_common_set_inquiry_string);
+ 
+-int fsg_common_run_thread(struct fsg_common *common)
+-{
+-	common->state = FSG_STATE_IDLE;
+-	/* Tell the thread to start working */
+-	common->thread_task =
+-		kthread_create(fsg_main_thread, common, "file-storage");
+-	if (IS_ERR(common->thread_task)) {
+-		common->state = FSG_STATE_TERMINATED;
+-		return PTR_ERR(common->thread_task);
+-	}
+-
+-	DBG(common, "I/O thread pid: %d\n", task_pid_nr(common->thread_task));
+-
+-	wake_up_process(common->thread_task);
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(fsg_common_run_thread);
+-
+ static void fsg_common_release(struct kref *ref)
+ {
+ 	struct fsg_common *common = container_of(ref, struct fsg_common, ref);
+@@ -3005,6 +2986,7 @@ static void fsg_common_release(struct kref *ref)
+ 	if (common->state != FSG_STATE_TERMINATED) {
+ 		raise_exception(common, FSG_STATE_EXIT);
+ 		wait_for_completion(&common->thread_notifier);
++		common->thread_task = NULL;
+ 	}
+ 
+ 	for (i = 0; i < ARRAY_SIZE(common->luns); ++i) {
+@@ -3050,9 +3032,21 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
+ 		if (ret)
+ 			return ret;
+ 		fsg_common_set_inquiry_string(fsg->common, NULL, NULL);
+-		ret = fsg_common_run_thread(fsg->common);
+-		if (ret)
++	}
++
++	if (!common->thread_task) {
++		common->state = FSG_STATE_IDLE;
++		common->thread_task =
++			kthread_create(fsg_main_thread, common, "file-storage");
++		if (IS_ERR(common->thread_task)) {
++			int ret = PTR_ERR(common->thread_task);
++			common->thread_task = NULL;
++			common->state = FSG_STATE_TERMINATED;
+ 			return ret;
++		}
++		DBG(common, "I/O thread pid: %d\n",
++		    task_pid_nr(common->thread_task));
++		wake_up_process(common->thread_task);
+ 	}
+ 
+ 	fsg->gadget = gadget;
+diff --git a/drivers/usb/gadget/function/f_mass_storage.h b/drivers/usb/gadget/function/f_mass_storage.h
+index 445df6775609..b6a9918eaefb 100644
+--- a/drivers/usb/gadget/function/f_mass_storage.h
++++ b/drivers/usb/gadget/function/f_mass_storage.h
+@@ -153,8 +153,6 @@ int fsg_common_create_luns(struct fsg_common *common, struct fsg_config *cfg);
+ void fsg_common_set_inquiry_string(struct fsg_common *common, const char *vn,
+ 				   const char *pn);
+ 
+-int fsg_common_run_thread(struct fsg_common *common);
+-
+ void fsg_config_from_params(struct fsg_config *cfg,
+ 			    const struct fsg_module_parameters *params,
+ 			    unsigned int fsg_num_buffers);
+diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c
+index c16089efc322..c39de65a448b 100644
+--- a/drivers/usb/gadget/legacy/acm_ms.c
++++ b/drivers/usb/gadget/legacy/acm_ms.c
+@@ -133,10 +133,6 @@ static int acm_ms_do_config(struct usb_configuration *c)
+ 	if (status < 0)
+ 		goto put_msg;
+ 
+-	status = fsg_common_run_thread(opts->common);
+-	if (status)
+-		goto remove_acm;
+-
+ 	status = usb_add_function(c, f_msg);
+ 	if (status)
+ 		goto remove_acm;
+diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c
+index e61af53c7d2b..125974f32f50 100644
+--- a/drivers/usb/gadget/legacy/mass_storage.c
++++ b/drivers/usb/gadget/legacy/mass_storage.c
+@@ -132,10 +132,6 @@ static int msg_do_config(struct usb_configuration *c)
+ 	if (IS_ERR(f_msg))
+ 		return PTR_ERR(f_msg);
+ 
+-	ret = fsg_common_run_thread(opts->common);
+-	if (ret)
+-		goto put_func;
+-
+ 	ret = usb_add_function(c, f_msg);
+ 	if (ret)
+ 		goto put_func;
+diff --git a/drivers/usb/gadget/legacy/multi.c b/drivers/usb/gadget/legacy/multi.c
+index 229d704a620b..a70a406580ea 100644
+--- a/drivers/usb/gadget/legacy/multi.c
++++ b/drivers/usb/gadget/legacy/multi.c
+@@ -137,7 +137,6 @@ static struct usb_function *f_msg_rndis;
+ 
+ static int rndis_do_config(struct usb_configuration *c)
+ {
+-	struct fsg_opts *fsg_opts;
+ 	int ret;
+ 
+ 	if (gadget_is_otg(c->cdev->gadget)) {
+@@ -169,11 +168,6 @@ static int rndis_do_config(struct usb_configuration *c)
+ 		goto err_fsg;
+ 	}
+ 
+-	fsg_opts = fsg_opts_from_func_inst(fi_msg);
+-	ret = fsg_common_run_thread(fsg_opts->common);
+-	if (ret)
+-		goto err_run;
+-
+ 	ret = usb_add_function(c, f_msg_rndis);
+ 	if (ret)
+ 		goto err_run;
+@@ -225,7 +219,6 @@ static struct usb_function *f_msg_multi;
+ 
+ static int cdc_do_config(struct usb_configuration *c)
+ {
+-	struct fsg_opts *fsg_opts;
+ 	int ret;
+ 
+ 	if (gadget_is_otg(c->cdev->gadget)) {
+@@ -258,11 +251,6 @@ static int cdc_do_config(struct usb_configuration *c)
+ 		goto err_fsg;
+ 	}
+ 
+-	fsg_opts = fsg_opts_from_func_inst(fi_msg);
+-	ret = fsg_common_run_thread(fsg_opts->common);
+-	if (ret)
+-		goto err_run;
+-
+ 	ret = usb_add_function(c, f_msg_multi);
+ 	if (ret)
+ 		goto err_run;
+diff --git a/drivers/usb/gadget/legacy/nokia.c b/drivers/usb/gadget/legacy/nokia.c
+index 09975046c694..b1e535f4022e 100644
+--- a/drivers/usb/gadget/legacy/nokia.c
++++ b/drivers/usb/gadget/legacy/nokia.c
+@@ -152,7 +152,6 @@ static int nokia_bind_config(struct usb_configuration *c)
+ 	struct usb_function *f_ecm;
+ 	struct usb_function *f_obex2 = NULL;
+ 	struct usb_function *f_msg;
+-	struct fsg_opts *fsg_opts;
+ 	int status = 0;
+ 	int obex1_stat = -1;
+ 	int obex2_stat = -1;
+@@ -222,12 +221,6 @@ static int nokia_bind_config(struct usb_configuration *c)
+ 		goto err_ecm;
+ 	}
+ 
+-	fsg_opts = fsg_opts_from_func_inst(fi_msg);
+-
+-	status = fsg_common_run_thread(fsg_opts->common);
+-	if (status)
+-		goto err_msg;
+-
+ 	status = usb_add_function(c, f_msg);
+ 	if (status)
+ 		goto err_msg;
+diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
+index b86a6f03592e..e272b3ba1d14 100644
+--- a/drivers/usb/gadget/udc/udc-core.c
++++ b/drivers/usb/gadget/udc/udc-core.c
+@@ -75,7 +75,7 @@ int usb_gadget_map_request(struct usb_gadget *gadget,
+ 		mapped = dma_map_sg(dev, req->sg, req->num_sgs,
+ 				is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ 		if (mapped == 0) {
+-			dev_err(&gadget->dev, "failed to map SGs\n");
++			dev_err(dev, "failed to map SGs\n");
+ 			return -EFAULT;
+ 		}
+ 
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 5cd080e0a685..743d9a20e248 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1873,6 +1873,12 @@ no_bw:
+ 	kfree(xhci->rh_bw);
+ 	kfree(xhci->ext_caps);
+ 
++	xhci->usb2_ports = NULL;
++	xhci->usb3_ports = NULL;
++	xhci->port_array = NULL;
++	xhci->rh_bw = NULL;
++	xhci->ext_caps = NULL;
++
+ 	xhci->page_size = 0;
+ 	xhci->page_shift = 0;
+ 	xhci->bus_state[0].bus_suspended = 0;
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index f0640b7a1c42..48672fac7ff3 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -48,6 +48,7 @@
+ #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI		0xa12f
+ #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI	0x9d2f
+ #define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI		0x0aa8
++#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI		0x1aa8
+ 
+ static const char hcd_name[] = "xhci_hcd";
+ 
+@@ -155,7 +156,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 		(pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
+ 		 pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
+ 		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+-		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) {
++		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
++		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI)) {
+ 		xhci->quirks |= XHCI_PME_STUCK_QUIRK;
+ 	}
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+@@ -302,6 +304,7 @@ static void xhci_pci_remove(struct pci_dev *dev)
+ 	struct xhci_hcd *xhci;
+ 
+ 	xhci = hcd_to_xhci(pci_get_drvdata(dev));
++	xhci->xhc_state |= XHCI_STATE_REMOVING;
+ 	if (xhci->shared_hcd) {
+ 		usb_remove_hcd(xhci->shared_hcd);
+ 		usb_put_hcd(xhci->shared_hcd);
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index d39d6bf1d090..d4962208be30 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -39,12 +39,25 @@ static const struct xhci_driver_overrides xhci_plat_overrides __initconst = {
+ 
+ static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
+ {
++	struct usb_hcd *hcd = xhci_to_hcd(xhci);
++
+ 	/*
+ 	 * As of now platform drivers don't provide MSI support so we ensure
+ 	 * here that the generic code does not try to make a pci_dev from our
+ 	 * dev struct in order to setup MSI
+ 	 */
+ 	xhci->quirks |= XHCI_PLAT;
++
++	/*
++	 * On R-Car Gen2 and Gen3, the AC64 bit (bit 0) of HCCPARAMS1 is set
++	 * to 1. However, these SoCs don't support 64-bit address memory
++	 * pointers. So, this driver clears the AC64 bit of xhci->hcc_params
++	 * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
++	 * xhci_gen_setup().
++	 */
++	if (xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2) ||
++	    xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3))
++		xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
+ }
+ 
+ /* called during probe() after chip reset completes */
+diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
+index 5a2e2e3936c4..529c3c40f901 100644
+--- a/drivers/usb/host/xhci-plat.h
++++ b/drivers/usb/host/xhci-plat.h
+@@ -14,7 +14,7 @@
+ #include "xhci.h"	/* for hcd_to_xhci() */
+ 
+ enum xhci_plat_type {
+-	XHCI_PLAT_TYPE_MARVELL_ARMADA,
++	XHCI_PLAT_TYPE_MARVELL_ARMADA = 1,
+ 	XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2,
+ 	XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3,
+ };
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 3915657e6078..a85a1c993d61 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -4014,7 +4014,8 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ 	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
+ 	int ret;
+ 
+-	if (xhci->xhc_state) {
++	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
++		(xhci->xhc_state & XHCI_STATE_HALTED)) {
+ 		xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
+ 		return -ESHUTDOWN;
+ 	}
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 0c8087d3c313..8e713cca58ed 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -147,7 +147,8 @@ static int xhci_start(struct xhci_hcd *xhci)
+ 				"waited %u microseconds.\n",
+ 				XHCI_MAX_HALT_USEC);
+ 	if (!ret)
+-		xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
++		/* clear state flags. Including dying, halted or removing */
++		xhci->xhc_state = 0;
+ 
+ 	return ret;
+ }
+@@ -1108,8 +1109,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ 		/* Resume root hubs only when have pending events. */
+ 		status = readl(&xhci->op_regs->status);
+ 		if (status & STS_EINT) {
+-			usb_hcd_resume_root_hub(hcd);
+ 			usb_hcd_resume_root_hub(xhci->shared_hcd);
++			usb_hcd_resume_root_hub(hcd);
+ 		}
+ 	}
+ 
+@@ -1124,10 +1125,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ 
+ 	/* Re-enable port polling. */
+ 	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
+-	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+-	usb_hcd_poll_rh_status(hcd);
+ 	set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
+ 	usb_hcd_poll_rh_status(xhci->shared_hcd);
++	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
++	usb_hcd_poll_rh_status(hcd);
+ 
+ 	return retval;
+ }
+@@ -2770,7 +2771,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+ 	if (ret <= 0)
+ 		return ret;
+ 	xhci = hcd_to_xhci(hcd);
+-	if (xhci->xhc_state & XHCI_STATE_DYING)
++	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
++		(xhci->xhc_state & XHCI_STATE_REMOVING))
+ 		return -ENODEV;
+ 
+ 	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+@@ -3817,7 +3819,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
+ 
+ 	mutex_lock(&xhci->mutex);
+ 
+-	if (xhci->xhc_state)	/* dying or halted */
++	if (xhci->xhc_state)	/* dying, removing or halted */
+ 		goto out;
+ 
+ 	if (!udev->slot_id) {
+@@ -4944,6 +4946,16 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
+ 		return retval;
+ 	xhci_dbg(xhci, "Reset complete\n");
+ 
++	/*
++	 * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
++	 * of HCCPARAMS1 is set to 1. However, the xHCs don't support 64-bit
++	 * address memory pointers actually. So, this driver clears the AC64
++	 * bit of xhci->hcc_params to call dma_set_coherent_mask(dev,
++	 * DMA_BIT_MASK(32)) in this xhci_gen_setup().
++	 */
++	if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
++		xhci->hcc_params &= ~BIT(0);
++
+ 	/* Set dma_mask and coherent_dma_mask to 64-bits,
+ 	 * if xHC supports 64-bit addressing */
+ 	if (HCC_64BIT_ADDR(xhci->hcc_params) &&
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index cc651383ce5a..1cdea4a8c895 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1596,6 +1596,7 @@ struct xhci_hcd {
+  */
+ #define XHCI_STATE_DYING	(1 << 0)
+ #define XHCI_STATE_HALTED	(1 << 1)
++#define XHCI_STATE_REMOVING	(1 << 2)
+ 	/* Statistics */
+ 	int			error_bitmask;
+ 	unsigned int		quirks;
+@@ -1632,6 +1633,7 @@ struct xhci_hcd {
+ #define XHCI_PME_STUCK_QUIRK	(1 << 20)
+ #define XHCI_MTK_HOST		(1 << 21)
+ #define XHCI_SSIC_PORT_UNUSED	(1 << 22)
++#define XHCI_NO_64BIT_SUPPORT	(1 << 23)
+ 	unsigned int		num_active_eps;
+ 	unsigned int		limit_active_eps;
+ 	/* There are two roothubs to keep track of bus suspend info for */
+diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
+index c6bfd13f6c92..1950e87b4219 100644
+--- a/drivers/usb/misc/iowarrior.c
++++ b/drivers/usb/misc/iowarrior.c
+@@ -787,6 +787,12 @@ static int iowarrior_probe(struct usb_interface *interface,
+ 	iface_desc = interface->cur_altsetting;
+ 	dev->product_id = le16_to_cpu(udev->descriptor.idProduct);
+ 
++	if (iface_desc->desc.bNumEndpoints < 1) {
++		dev_err(&interface->dev, "Invalid number of endpoints\n");
++		retval = -EINVAL;
++		goto error;
++	}
++
+ 	/* set up the endpoint information */
+ 	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+ 		endpoint = &iface_desc->endpoint[i].desc;
+diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
+index 92fdb6e9faff..c78ff95a43be 100644
+--- a/drivers/usb/misc/usbtest.c
++++ b/drivers/usb/misc/usbtest.c
+@@ -529,6 +529,7 @@ static struct scatterlist *
+ alloc_sglist(int nents, int max, int vary, struct usbtest_dev *dev, int pipe)
+ {
+ 	struct scatterlist	*sg;
++	unsigned int		n_size = 0;
+ 	unsigned		i;
+ 	unsigned		size = max;
+ 	unsigned		maxpacket =
+@@ -561,7 +562,8 @@ alloc_sglist(int nents, int max, int vary, struct usbtest_dev *dev, int pipe)
+ 			break;
+ 		case 1:
+ 			for (j = 0; j < size; j++)
+-				*buf++ = (u8) ((j % maxpacket) % 63);
++				*buf++ = (u8) (((j + n_size) % maxpacket) % 63);
++			n_size += size;
+ 			break;
+ 		}
+ 
+diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
+index c0f5c652d272..f1893e08e51a 100644
+--- a/drivers/usb/renesas_usbhs/fifo.c
++++ b/drivers/usb/renesas_usbhs/fifo.c
+@@ -190,7 +190,8 @@ static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type)
+ 		goto __usbhs_pkt_handler_end;
+ 	}
+ 
+-	ret = func(pkt, &is_done);
++	if (likely(func))
++		ret = func(pkt, &is_done);
+ 
+ 	if (is_done)
+ 		__usbhsf_pkt_del(pkt);
+@@ -889,6 +890,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
+ 
+ 	pkt->trans = len;
+ 
++	usbhsf_tx_irq_ctrl(pipe, 0);
+ 	INIT_WORK(&pkt->work, xfer_work);
+ 	schedule_work(&pkt->work);
+ 
+diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
+index 657f9672ceba..251053551866 100644
+--- a/drivers/usb/renesas_usbhs/mod_gadget.c
++++ b/drivers/usb/renesas_usbhs/mod_gadget.c
+@@ -158,10 +158,14 @@ static void usbhsg_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
+ 	struct usbhs_pipe *pipe = pkt->pipe;
+ 	struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe);
+ 	struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt);
++	unsigned long flags;
+ 
+ 	ureq->req.actual = pkt->actual;
+ 
+-	usbhsg_queue_pop(uep, ureq, 0);
++	usbhs_lock(priv, flags);
++	if (uep)
++		__usbhsg_queue_pop(uep, ureq, 0);
++	usbhs_unlock(priv, flags);
+ }
+ 
+ static void usbhsg_queue_push(struct usbhsg_uep *uep,
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 73a366de5102..a543cdc0f88f 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -109,6 +109,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
+ 	{ USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
+ 	{ USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
++	{ USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
+ 	{ USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
+ 	{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
+ 	{ USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
+@@ -118,6 +119,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
+ 	{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
+ 	{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
++	{ USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
+ 	{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
+ 	{ USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
+ 	{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
+@@ -141,6 +143,8 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */
+ 	{ USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */
+ 	{ USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
++	{ USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */
++	{ USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */
+ 	{ USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
+ 	{ USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
+ 	{ USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
+@@ -165,6 +169,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
+ 	{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
+ 	{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
++	{ USB_DEVICE(0x1901, 0x0194) },	/* GE Healthcare Remote Alarm Box */
+ 	{ USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
+ 	{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+ 	{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
+diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
+index 01bf53392819..244acb1299a9 100644
+--- a/drivers/usb/serial/cypress_m8.c
++++ b/drivers/usb/serial/cypress_m8.c
+@@ -447,6 +447,11 @@ static int cypress_generic_port_probe(struct usb_serial_port *port)
+ 	struct usb_serial *serial = port->serial;
+ 	struct cypress_private *priv;
+ 
++	if (!port->interrupt_out_urb || !port->interrupt_in_urb) {
++		dev_err(&port->dev, "required endpoint is missing\n");
++		return -ENODEV;
++	}
++
+ 	priv = kzalloc(sizeof(struct cypress_private), GFP_KERNEL);
+ 	if (!priv)
+ 		return -ENOMEM;
+@@ -606,12 +611,6 @@ static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port)
+ 		cypress_set_termios(tty, port, &priv->tmp_termios);
+ 
+ 	/* setup the port and start reading from the device */
+-	if (!port->interrupt_in_urb) {
+-		dev_err(&port->dev, "%s - interrupt_in_urb is empty!\n",
+-			__func__);
+-		return -1;
+-	}
+-
+ 	usb_fill_int_urb(port->interrupt_in_urb, serial->dev,
+ 		usb_rcvintpipe(serial->dev, port->interrupt_in_endpointAddress),
+ 		port->interrupt_in_urb->transfer_buffer,
+diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
+index 12b0e67473ba..3df7b7ec178e 100644
+--- a/drivers/usb/serial/digi_acceleport.c
++++ b/drivers/usb/serial/digi_acceleport.c
+@@ -1251,8 +1251,27 @@ static int digi_port_init(struct usb_serial_port *port, unsigned port_num)
+ 
+ static int digi_startup(struct usb_serial *serial)
+ {
++	struct device *dev = &serial->interface->dev;
+ 	struct digi_serial *serial_priv;
+ 	int ret;
++	int i;
++
++	/* check whether the device has the expected number of endpoints */
++	if (serial->num_port_pointers < serial->type->num_ports + 1) {
++		dev_err(dev, "OOB endpoints missing\n");
++		return -ENODEV;
++	}
++
++	for (i = 0; i < serial->type->num_ports + 1 ; i++) {
++		if (!serial->port[i]->read_urb) {
++			dev_err(dev, "bulk-in endpoint missing\n");
++			return -ENODEV;
++		}
++		if (!serial->port[i]->write_urb) {
++			dev_err(dev, "bulk-out endpoint missing\n");
++			return -ENODEV;
++		}
++	}
+ 
+ 	serial_priv = kzalloc(sizeof(*serial_priv), GFP_KERNEL);
+ 	if (!serial_priv)
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 8c660ae401d8..b61f12160d37 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1004,6 +1004,10 @@ static const struct usb_device_id id_table_combined[] = {
+ 	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) },
+ 	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) },
+ 	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) },
++	/* ICP DAS I-756xU devices */
++	{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) },
++	{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
++	{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
+ 	{ }					/* Terminating entry */
+ };
+ 
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index a84df2513994..c5d6c1e73e8e 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -872,6 +872,14 @@
+ #define NOVITUS_BONO_E_PID		0x6010
+ 
+ /*
++ * ICPDAS I-756*U devices
++ */
++#define ICPDAS_VID			0x1b5c
++#define ICPDAS_I7560U_PID		0x0103
++#define ICPDAS_I7561U_PID		0x0104
++#define ICPDAS_I7563U_PID		0x0105
++
++/*
+  * RT Systems programming cables for various ham radios
+  */
+ #define RTSYSTEMS_VID		0x2100	/* Vendor ID */
+diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
+index f49327d20ee8..0a935b1e5475 100644
+--- a/drivers/usb/serial/io_edgeport.c
++++ b/drivers/usb/serial/io_edgeport.c
+@@ -2849,14 +2849,16 @@ static int edge_startup(struct usb_serial *serial)
+ 				/* not set up yet, so do it now */
+ 				edge_serial->interrupt_read_urb =
+ 						usb_alloc_urb(0, GFP_KERNEL);
+-				if (!edge_serial->interrupt_read_urb)
+-					return -ENOMEM;
++				if (!edge_serial->interrupt_read_urb) {
++					response = -ENOMEM;
++					break;
++				}
+ 
+ 				edge_serial->interrupt_in_buffer =
+ 					kmalloc(buffer_size, GFP_KERNEL);
+ 				if (!edge_serial->interrupt_in_buffer) {
+-					usb_free_urb(edge_serial->interrupt_read_urb);
+-					return -ENOMEM;
++					response = -ENOMEM;
++					break;
+ 				}
+ 				edge_serial->interrupt_in_endpoint =
+ 						endpoint->bEndpointAddress;
+@@ -2884,14 +2886,16 @@ static int edge_startup(struct usb_serial *serial)
+ 				/* not set up yet, so do it now */
+ 				edge_serial->read_urb =
+ 						usb_alloc_urb(0, GFP_KERNEL);
+-				if (!edge_serial->read_urb)
+-					return -ENOMEM;
++				if (!edge_serial->read_urb) {
++					response = -ENOMEM;
++					break;
++				}
+ 
+ 				edge_serial->bulk_in_buffer =
+ 					kmalloc(buffer_size, GFP_KERNEL);
+ 				if (!edge_serial->bulk_in_buffer) {
+-					usb_free_urb(edge_serial->read_urb);
+-					return -ENOMEM;
++					response = -ENOMEM;
++					break;
+ 				}
+ 				edge_serial->bulk_in_endpoint =
+ 						endpoint->bEndpointAddress;
+@@ -2917,9 +2921,22 @@ static int edge_startup(struct usb_serial *serial)
+ 			}
+ 		}
+ 
+-		if (!interrupt_in_found || !bulk_in_found || !bulk_out_found) {
+-			dev_err(ddev, "Error - the proper endpoints were not found!\n");
+-			return -ENODEV;
++		if (response || !interrupt_in_found || !bulk_in_found ||
++							!bulk_out_found) {
++			if (!response) {
++				dev_err(ddev, "expected endpoints not found\n");
++				response = -ENODEV;
++			}
++
++			usb_free_urb(edge_serial->interrupt_read_urb);
++			kfree(edge_serial->interrupt_in_buffer);
++
++			usb_free_urb(edge_serial->read_urb);
++			kfree(edge_serial->bulk_in_buffer);
++
++			kfree(edge_serial);
++
++			return response;
+ 		}
+ 
+ 		/* start interrupt read for this edgeport this interrupt will
+@@ -2942,16 +2959,9 @@ static void edge_disconnect(struct usb_serial *serial)
+ {
+ 	struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
+ 
+-	/* stop reads and writes on all ports */
+-	/* free up our endpoint stuff */
+ 	if (edge_serial->is_epic) {
+ 		usb_kill_urb(edge_serial->interrupt_read_urb);
+-		usb_free_urb(edge_serial->interrupt_read_urb);
+-		kfree(edge_serial->interrupt_in_buffer);
+-
+ 		usb_kill_urb(edge_serial->read_urb);
+-		usb_free_urb(edge_serial->read_urb);
+-		kfree(edge_serial->bulk_in_buffer);
+ 	}
+ }
+ 
+@@ -2964,6 +2974,16 @@ static void edge_release(struct usb_serial *serial)
+ {
+ 	struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
+ 
++	if (edge_serial->is_epic) {
++		usb_kill_urb(edge_serial->interrupt_read_urb);
++		usb_free_urb(edge_serial->interrupt_read_urb);
++		kfree(edge_serial->interrupt_in_buffer);
++
++		usb_kill_urb(edge_serial->read_urb);
++		usb_free_urb(edge_serial->read_urb);
++		kfree(edge_serial->bulk_in_buffer);
++	}
++
+ 	kfree(edge_serial);
+ }
+ 
+diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
+index e07b15ed5814..7faa901ee47f 100644
+--- a/drivers/usb/serial/keyspan.c
++++ b/drivers/usb/serial/keyspan.c
+@@ -2376,6 +2376,10 @@ static void keyspan_release(struct usb_serial *serial)
+ 
+ 	s_priv = usb_get_serial_data(serial);
+ 
++	/* Make sure to unlink the URBs submitted in attach. */
++	usb_kill_urb(s_priv->instat_urb);
++	usb_kill_urb(s_priv->indat_urb);
++
+ 	usb_free_urb(s_priv->instat_urb);
+ 	usb_free_urb(s_priv->indat_urb);
+ 	usb_free_urb(s_priv->glocont_urb);
+diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
+index fd707d6a10e2..89726f702202 100644
+--- a/drivers/usb/serial/mct_u232.c
++++ b/drivers/usb/serial/mct_u232.c
+@@ -376,14 +376,21 @@ static void mct_u232_msr_to_state(struct usb_serial_port *port,
+ 
+ static int mct_u232_port_probe(struct usb_serial_port *port)
+ {
++	struct usb_serial *serial = port->serial;
+ 	struct mct_u232_private *priv;
+ 
++	/* check first to simplify error handling */
++	if (!serial->port[1] || !serial->port[1]->interrupt_in_urb) {
++		dev_err(&port->dev, "expected endpoint missing\n");
++		return -ENODEV;
++	}
++
+ 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ 	if (!priv)
+ 		return -ENOMEM;
+ 
+ 	/* Use second interrupt-in endpoint for reading. */
+-	priv->read_urb = port->serial->port[1]->interrupt_in_urb;
++	priv->read_urb = serial->port[1]->interrupt_in_urb;
+ 	priv->read_urb->context = port;
+ 
+ 	spin_lock_init(&priv->lock);
+diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c
+index 31a8b47f1ac6..c6596cbcc4b6 100644
+--- a/drivers/usb/serial/mxuport.c
++++ b/drivers/usb/serial/mxuport.c
+@@ -1259,6 +1259,15 @@ static int mxuport_attach(struct usb_serial *serial)
+ 	return 0;
+ }
+ 
++static void mxuport_release(struct usb_serial *serial)
++{
++	struct usb_serial_port *port0 = serial->port[0];
++	struct usb_serial_port *port1 = serial->port[1];
++
++	usb_serial_generic_close(port1);
++	usb_serial_generic_close(port0);
++}
++
+ static int mxuport_open(struct tty_struct *tty, struct usb_serial_port *port)
+ {
+ 	struct mxuport_port *mxport = usb_get_serial_port_data(port);
+@@ -1361,6 +1370,7 @@ static struct usb_serial_driver mxuport_device = {
+ 	.probe			= mxuport_probe,
+ 	.port_probe		= mxuport_port_probe,
+ 	.attach			= mxuport_attach,
++	.release		= mxuport_release,
+ 	.calc_num_ports		= mxuport_calc_num_ports,
+ 	.open			= mxuport_open,
+ 	.close			= mxuport_close,
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 348e19834b83..d96d423d00e6 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -375,18 +375,22 @@ static void option_instat_callback(struct urb *urb);
+ #define HAIER_PRODUCT_CE81B			0x10f8
+ #define HAIER_PRODUCT_CE100			0x2009
+ 
+-/* Cinterion (formerly Siemens) products */
+-#define SIEMENS_VENDOR_ID				0x0681
+-#define CINTERION_VENDOR_ID				0x1e2d
++/* Gemalto's Cinterion products (formerly Siemens) */
++#define SIEMENS_VENDOR_ID			0x0681
++#define CINTERION_VENDOR_ID			0x1e2d
++#define CINTERION_PRODUCT_HC25_MDMNET		0x0040
+ #define CINTERION_PRODUCT_HC25_MDM		0x0047
+-#define CINTERION_PRODUCT_HC25_MDMNET	0x0040
++#define CINTERION_PRODUCT_HC28_MDMNET		0x004A /* same for HC28J */
+ #define CINTERION_PRODUCT_HC28_MDM		0x004C
+-#define CINTERION_PRODUCT_HC28_MDMNET	0x004A /* same for HC28J */
+ #define CINTERION_PRODUCT_EU3_E			0x0051
+ #define CINTERION_PRODUCT_EU3_P			0x0052
+ #define CINTERION_PRODUCT_PH8			0x0053
+ #define CINTERION_PRODUCT_AHXX			0x0055
+ #define CINTERION_PRODUCT_PLXX			0x0060
++#define CINTERION_PRODUCT_PH8_2RMNET		0x0082
++#define CINTERION_PRODUCT_PH8_AUDIO		0x0083
++#define CINTERION_PRODUCT_AHXX_2RMNET		0x0084
++#define CINTERION_PRODUCT_AHXX_AUDIO		0x0085
+ 
+ /* Olivetti products */
+ #define OLIVETTI_VENDOR_ID			0x0b3c
+@@ -633,6 +637,10 @@ static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = {
+ 	.reserved = BIT(1) | BIT(2) | BIT(3),
+ };
+ 
++static const struct option_blacklist_info cinterion_rmnet2_blacklist = {
++	.reserved = BIT(4) | BIT(5),
++};
++
+ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
+ 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
+@@ -1602,7 +1610,79 @@ static const struct usb_device_id option_ids[] = {
+ 		.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
+ 		.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffe9, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff42, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff43, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff44, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff45, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff46, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff47, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff48, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff49, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4a, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4b, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4c, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4d, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4e, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4f, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff50, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff51, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff52, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff53, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff54, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff55, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff56, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff57, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff58, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff59, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5a, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5b, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5c, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5d, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5e, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5f, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff60, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff61, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff62, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff63, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff64, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff65, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff66, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff67, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff68, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff69, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6a, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6b, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6c, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6d, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6e, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6f, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff70, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff71, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff72, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff73, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff74, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff75, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff76, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff77, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff78, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff79, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7a, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7b, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7c, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7d, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7e, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7f, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff80, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff81, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff82, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff83, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff84, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff85, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff86, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff87, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff88, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff89, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8a, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8b, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8c, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8d, 0xff, 0xff, 0xff) },
+@@ -1613,6 +1693,61 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff92, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff93, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff94, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff9f, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa0, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa1, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa2, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa3, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa4, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa5, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa6, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa7, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa8, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa9, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffaa, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffab, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffac, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffae, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffaf, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb0, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb1, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb2, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb3, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb4, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb5, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb6, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb7, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb8, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb9, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffba, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbb, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbc, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbd, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbe, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbf, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc0, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc1, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc2, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc3, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc4, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc5, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc6, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc7, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc8, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc9, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffca, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcb, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcc, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcd, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffce, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcf, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd0, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd1, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd2, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd3, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd4, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd5, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffe9, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffec, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffee, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xfff6, 0xff, 0xff, 0xff) },
+@@ -1712,7 +1847,13 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) },
+ 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
+ 		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+-	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, 
++	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_2RMNET, 0xff),
++		.driver_info = (kernel_ulong_t)&cinterion_rmnet2_blacklist },
++	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_AUDIO, 0xff),
++		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) },
++	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) },
++	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
+ 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+ 	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
+ 	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
+@@ -1818,6 +1959,8 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
++	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff),			/* D-Link DWM-221 B1 */
++	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },                /* OLICARD300 - MT6225 */
+diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
+index 504f5bff79c0..b18974cbd995 100644
+--- a/drivers/usb/serial/quatech2.c
++++ b/drivers/usb/serial/quatech2.c
+@@ -141,6 +141,7 @@ static void qt2_release(struct usb_serial *serial)
+ 
+ 	serial_priv = usb_get_serial_data(serial);
+ 
++	usb_kill_urb(serial_priv->read_urb);
+ 	usb_free_urb(serial_priv->read_urb);
+ 	kfree(serial_priv->read_buffer);
+ 	kfree(serial_priv);
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index 9ff9404f99d7..e4ade8d89eb5 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -2,7 +2,7 @@
+  * USB Attached SCSI
+  * Note that this is not the same as the USB Mass Storage driver
+  *
+- * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2014
++ * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2016
+  * Copyright Matthew Wilcox for Intel Corp, 2010
+  * Copyright Sarah Sharp for Intel Corp, 2010
+  *
+@@ -757,6 +757,17 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
+ 	return SUCCESS;
+ }
+ 
++static int uas_target_alloc(struct scsi_target *starget)
++{
++	struct uas_dev_info *devinfo = (struct uas_dev_info *)
++			dev_to_shost(starget->dev.parent)->hostdata;
++
++	if (devinfo->flags & US_FL_NO_REPORT_LUNS)
++		starget->no_report_luns = 1;
++
++	return 0;
++}
++
+ static int uas_slave_alloc(struct scsi_device *sdev)
+ {
+ 	struct uas_dev_info *devinfo =
+@@ -800,7 +811,6 @@ static int uas_slave_configure(struct scsi_device *sdev)
+ 	if (devinfo->flags & US_FL_BROKEN_FUA)
+ 		sdev->broken_fua = 1;
+ 
+-	scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
+ 	return 0;
+ }
+ 
+@@ -808,11 +818,12 @@ static struct scsi_host_template uas_host_template = {
+ 	.module = THIS_MODULE,
+ 	.name = "uas",
+ 	.queuecommand = uas_queuecommand,
++	.target_alloc = uas_target_alloc,
+ 	.slave_alloc = uas_slave_alloc,
+ 	.slave_configure = uas_slave_configure,
+ 	.eh_abort_handler = uas_eh_abort_handler,
+ 	.eh_bus_reset_handler = uas_eh_bus_reset_handler,
+-	.can_queue = 65536,	/* Is there a limit on the _host_ ? */
++	.can_queue = MAX_CMNDS,
+ 	.this_id = -1,
+ 	.sg_tablesize = SG_NONE,
+ 	.skip_settle_delay = 1,
+@@ -932,6 +943,12 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ 	if (result)
+ 		goto set_alt0;
+ 
++	/*
++	 * 1 tag is reserved for untagged commands +
++	 * 1 tag to avoid off by one errors in some bridge firmwares
++	 */
++	shost->can_queue = devinfo->qdepth - 2;
++
+ 	usb_set_intfdata(intf, shost);
+ 	result = scsi_add_host(shost, &intf->dev);
+ 	if (result)
+diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
+index ccc113e83d88..53341a77d89f 100644
+--- a/drivers/usb/storage/unusual_uas.h
++++ b/drivers/usb/storage/unusual_uas.h
+@@ -64,6 +64,13 @@ UNUSUAL_DEV(0x0bc2, 0x3312, 0x0000, 0x9999,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_NO_ATA_1X),
+ 
++/* Reported-by: David Webb <djw@noc.ac.uk> */
++UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
++		"Seagate",
++		"Expansion Desk",
++		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++		US_FL_NO_REPORT_LUNS),
++
+ /* Reported-by: Hans de Goede <hdegoede@redhat.com> */
+ UNUSUAL_DEV(0x0bc2, 0x3320, 0x0000, 0x9999,
+ 		"Seagate",
+diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
+index 43576ed31ccd..9de988a0f856 100644
+--- a/drivers/usb/storage/usb.c
++++ b/drivers/usb/storage/usb.c
+@@ -482,7 +482,7 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
+ 			US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
+ 			US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE |
+ 			US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES |
+-			US_FL_MAX_SECTORS_240);
++			US_FL_MAX_SECTORS_240 | US_FL_NO_REPORT_LUNS);
+ 
+ 	p = quirks;
+ 	while (*p) {
+@@ -532,6 +532,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
+ 		case 'i':
+ 			f |= US_FL_IGNORE_DEVICE;
+ 			break;
++		case 'j':
++			f |= US_FL_NO_REPORT_LUNS;
++			break;
+ 		case 'l':
+ 			f |= US_FL_NOT_LOCKABLE;
+ 			break;
+diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
+index facaaf003f19..e40da7759a0e 100644
+--- a/drivers/usb/usbip/usbip_common.c
++++ b/drivers/usb/usbip/usbip_common.c
+@@ -741,6 +741,17 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
+ 	if (!(size > 0))
+ 		return 0;
+ 
++	if (size > urb->transfer_buffer_length) {
++		/* should not happen, probably malicious packet */
++		if (ud->side == USBIP_STUB) {
++			usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
++			return 0;
++		} else {
++			usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
++			return -EPIPE;
++		}
++	}
++
+ 	ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
+ 	if (ret != size) {
+ 		dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
+diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
+index 8ea45a5cd806..d889ef2048df 100644
+--- a/drivers/video/fbdev/Kconfig
++++ b/drivers/video/fbdev/Kconfig
+@@ -2246,7 +2246,6 @@ config XEN_FBDEV_FRONTEND
+ 	select FB_SYS_IMAGEBLIT
+ 	select FB_SYS_FOPS
+ 	select FB_DEFERRED_IO
+-	select INPUT_XEN_KBDDEV_FRONTEND if INPUT_MISC
+ 	select XEN_XENBUS_FRONTEND
+ 	default y
+ 	help
+diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
+index 9362424c2340..f9ef06d0cd48 100644
+--- a/drivers/video/fbdev/amba-clcd.c
++++ b/drivers/video/fbdev/amba-clcd.c
+@@ -440,13 +440,14 @@ static int clcdfb_register(struct clcd_fb *fb)
+ 		fb->off_ienb = CLCD_PL111_IENB;
+ 		fb->off_cntl = CLCD_PL111_CNTL;
+ 	} else {
+-#ifdef CONFIG_ARCH_VERSATILE
+-		fb->off_ienb = CLCD_PL111_IENB;
+-		fb->off_cntl = CLCD_PL111_CNTL;
+-#else
+-		fb->off_ienb = CLCD_PL110_IENB;
+-		fb->off_cntl = CLCD_PL110_CNTL;
+-#endif
++		if (of_machine_is_compatible("arm,versatile-ab") ||
++		    of_machine_is_compatible("arm,versatile-pb")) {
++			fb->off_ienb = CLCD_PL111_IENB;
++			fb->off_cntl = CLCD_PL111_CNTL;
++		} else {
++			fb->off_ienb = CLCD_PL110_IENB;
++			fb->off_cntl = CLCD_PL110_CNTL;
++		}
+ 	}
+ 
+ 	fb->clk = clk_get(&fb->dev->dev, NULL);
+diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
+index 6b2a06d09f2b..d8d583d32a37 100644
+--- a/drivers/video/fbdev/da8xx-fb.c
++++ b/drivers/video/fbdev/da8xx-fb.c
+@@ -209,8 +209,7 @@ static struct fb_videomode known_lcd_panels[] = {
+ 		.lower_margin   = 2,
+ 		.hsync_len      = 0,
+ 		.vsync_len      = 0,
+-		.sync           = FB_SYNC_CLK_INVERT |
+-			FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++		.sync           = FB_SYNC_CLK_INVERT,
+ 	},
+ 	/* Sharp LK043T1DG01 */
+ 	[1] = {
+@@ -224,7 +223,7 @@ static struct fb_videomode known_lcd_panels[] = {
+ 		.lower_margin   = 2,
+ 		.hsync_len      = 41,
+ 		.vsync_len      = 10,
+-		.sync           = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++		.sync           = 0,
+ 		.flag           = 0,
+ 	},
+ 	[2] = {
+@@ -239,7 +238,7 @@ static struct fb_videomode known_lcd_panels[] = {
+ 		.lower_margin   = 10,
+ 		.hsync_len      = 10,
+ 		.vsync_len      = 10,
+-		.sync           = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++		.sync           = 0,
+ 		.flag           = 0,
+ 	},
+ 	[3] = {
+diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
+index 7760fc1a2218..1f413a2f70b6 100644
+--- a/drivers/virtio/virtio_pci_modern.c
++++ b/drivers/virtio/virtio_pci_modern.c
+@@ -17,6 +17,7 @@
+  *
+  */
+ 
++#include <linux/delay.h>
+ #define VIRTIO_PCI_NO_LEGACY
+ #include "virtio_pci_common.h"
+ 
+@@ -271,9 +272,13 @@ static void vp_reset(struct virtio_device *vdev)
+ 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ 	/* 0 status means a reset. */
+ 	vp_iowrite8(0, &vp_dev->common->device_status);
+-	/* Flush out the status write, and flush in device writes,
+-	 * including MSI-X interrupts, if any. */
+-	vp_ioread8(&vp_dev->common->device_status);
++	/* After writing 0 to device_status, the driver MUST wait for a read of
++	 * device_status to return 0 before reinitializing the device.
++	 * This will flush out the status write, and flush in device writes,
++	 * including MSI-X interrupts, if any.
++	 */
++	while (vp_ioread8(&vp_dev->common->device_status))
++		msleep(1);
+ 	/* Flush pending VQ/configuration callbacks. */
+ 	vp_synchronize_vectors(vdev);
+ }
+diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
+index 71e78ef4b736..3a75f3b53452 100644
+--- a/drivers/watchdog/rc32434_wdt.c
++++ b/drivers/watchdog/rc32434_wdt.c
+@@ -237,7 +237,7 @@ static long rc32434_wdt_ioctl(struct file *file, unsigned int cmd,
+ 			return -EINVAL;
+ 		/* Fall through */
+ 	case WDIOC_GETTIMEOUT:
+-		return copy_to_user(argp, &timeout, sizeof(int));
++		return copy_to_user(argp, &timeout, sizeof(int)) ? -EFAULT : 0;
+ 	default:
+ 		return -ENOTTY;
+ 	}
+diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
+index 6467b91f2245..028618c5eeba 100644
+--- a/drivers/watchdog/sp5100_tco.c
++++ b/drivers/watchdog/sp5100_tco.c
+@@ -73,6 +73,13 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started."
+ /*
+  * Some TCO specific functions
+  */
++
++static bool tco_has_sp5100_reg_layout(struct pci_dev *dev)
++{
++	return dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
++	       dev->revision < 0x40;
++}
++
+ static void tco_timer_start(void)
+ {
+ 	u32 val;
+@@ -129,7 +136,7 @@ static void tco_timer_enable(void)
+ {
+ 	int val;
+ 
+-	if (sp5100_tco_pci->revision >= 0x40) {
++	if (!tco_has_sp5100_reg_layout(sp5100_tco_pci)) {
+ 		/* For SB800 or later */
+ 		/* Set the Watchdog timer resolution to 1 sec */
+ 		outb(SB800_PM_WATCHDOG_CONFIG, SB800_IO_PM_INDEX_REG);
+@@ -342,8 +349,7 @@ static unsigned char sp5100_tco_setupdevice(void)
+ 	/*
+ 	 * Determine type of southbridge chipset.
+ 	 */
+-	if (sp5100_tco_pci->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
+-	    sp5100_tco_pci->revision < 0x40) {
++	if (tco_has_sp5100_reg_layout(sp5100_tco_pci)) {
+ 		dev_name = SP5100_DEVNAME;
+ 		index_reg = SP5100_IO_PM_INDEX_REG;
+ 		data_reg = SP5100_IO_PM_DATA_REG;
+@@ -388,8 +394,7 @@ static unsigned char sp5100_tco_setupdevice(void)
+ 	 * Secondly, Find the watchdog timer MMIO address
+ 	 * from SBResource_MMIO register.
+ 	 */
+-	if (sp5100_tco_pci->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
+-	    sp5100_tco_pci->revision < 0x40) {
++	if (tco_has_sp5100_reg_layout(sp5100_tco_pci)) {
+ 		/* Read SBResource_MMIO from PCI config(PCI_Reg: 9Ch) */
+ 		pci_read_config_dword(sp5100_tco_pci,
+ 				      SP5100_SB_RESOURCE_MMIO_BASE, &val);
+diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
+index 12eab503efd1..364bc44610c1 100644
+--- a/drivers/xen/balloon.c
++++ b/drivers/xen/balloon.c
+@@ -152,6 +152,8 @@ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
+ static void balloon_process(struct work_struct *work);
+ static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
+ 
++static void release_memory_resource(struct resource *resource);
++
+ /* When ballooning out (allocating memory to return to Xen) we don't really
+    want the kernel to try too hard since that can trigger the oom killer. */
+ #define GFP_BALLOON \
+@@ -268,6 +270,20 @@ static struct resource *additional_memory_resource(phys_addr_t size)
+ 		return NULL;
+ 	}
+ 
++#ifdef CONFIG_SPARSEMEM
++	{
++		unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
++		unsigned long pfn = res->start >> PAGE_SHIFT;
++
++		if (pfn > limit) {
++			pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
++			       pfn, limit);
++			release_memory_resource(res);
++			return NULL;
++		}
++	}
++#endif
++
+ 	return res;
+ }
+ 
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
+index 524c22146429..44367783f07a 100644
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -484,9 +484,19 @@ static void eoi_pirq(struct irq_data *data)
+ 	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
+ 	int rc = 0;
+ 
+-	irq_move_irq(data);
++	if (!VALID_EVTCHN(evtchn))
++		return;
+ 
+-	if (VALID_EVTCHN(evtchn))
++	if (unlikely(irqd_is_setaffinity_pending(data))) {
++		int masked = test_and_set_mask(evtchn);
++
++		clear_evtchn(evtchn);
++
++		irq_move_masked_irq(data);
++
++		if (!masked)
++			unmask_evtchn(evtchn);
++	} else
+ 		clear_evtchn(evtchn);
+ 
+ 	if (pirq_needs_eoi(data->irq)) {
+@@ -1357,9 +1367,19 @@ static void ack_dynirq(struct irq_data *data)
+ {
+ 	int evtchn = evtchn_from_irq(data->irq);
+ 
+-	irq_move_irq(data);
++	if (!VALID_EVTCHN(evtchn))
++		return;
+ 
+-	if (VALID_EVTCHN(evtchn))
++	if (unlikely(irqd_is_setaffinity_pending(data))) {
++		int masked = test_and_set_mask(evtchn);
++
++		clear_evtchn(evtchn);
++
++		irq_move_masked_irq(data);
++
++		if (!masked)
++			unmask_evtchn(evtchn);
++	} else
+ 		clear_evtchn(evtchn);
+ }
+ 
+diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
+index 38272ad24551..f4edd6df3df2 100644
+--- a/drivers/xen/evtchn.c
++++ b/drivers/xen/evtchn.c
+@@ -316,7 +316,6 @@ static int evtchn_resize_ring(struct per_user_data *u)
+ {
+ 	unsigned int new_size;
+ 	evtchn_port_t *new_ring, *old_ring;
+-	unsigned int p, c;
+ 
+ 	/*
+ 	 * Ensure the ring is large enough to capture all possible
+@@ -346,20 +345,17 @@ static int evtchn_resize_ring(struct per_user_data *u)
+ 	/*
+ 	 * Copy the old ring contents to the new ring.
+ 	 *
+-	 * If the ring contents crosses the end of the current ring,
+-	 * it needs to be copied in two chunks.
++	 * To take care of wrapping, a full ring, and the new index
++	 * pointing into the second half, simply copy the old contents
++	 * twice.
+ 	 *
+ 	 * +---------+    +------------------+
+-	 * |34567  12| -> |       1234567    |
+-	 * +-----p-c-+    +------------------+
++	 * |34567  12| -> |34567  1234567  12|
++	 * +-----p-c-+    +-------c------p---+
+ 	 */
+-	p = evtchn_ring_offset(u, u->ring_prod);
+-	c = evtchn_ring_offset(u, u->ring_cons);
+-	if (p < c) {
+-		memcpy(new_ring + c, u->ring + c, (u->ring_size - c) * sizeof(*u->ring));
+-		memcpy(new_ring + u->ring_size, u->ring, p * sizeof(*u->ring));
+-	} else
+-		memcpy(new_ring + c, u->ring + c, (p - c) * sizeof(*u->ring));
++	memcpy(new_ring, old_ring, u->ring_size * sizeof(*u->ring));
++	memcpy(new_ring + u->ring_size, old_ring,
++	       u->ring_size * sizeof(*u->ring));
+ 
+ 	u->ring = new_ring;
+ 	u->ring_size = new_size;
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index 769e0ff1b4ce..dea6486a7508 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -19,6 +19,7 @@
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <linux/rbtree.h>
++#include <linux/vmalloc.h>
+ #include "ctree.h"
+ #include "disk-io.h"
+ #include "transaction.h"
+@@ -5361,10 +5362,13 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
+ 		goto out;
+ 	}
+ 
+-	tmp_buf = kmalloc(left_root->nodesize, GFP_NOFS);
++	tmp_buf = kmalloc(left_root->nodesize, GFP_KERNEL | __GFP_NOWARN);
+ 	if (!tmp_buf) {
+-		ret = -ENOMEM;
+-		goto out;
++		tmp_buf = vmalloc(left_root->nodesize);
++		if (!tmp_buf) {
++			ret = -ENOMEM;
++			goto out;
++		}
+ 	}
+ 
+ 	left_path->search_commit_root = 1;
+@@ -5565,7 +5569,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
+ out:
+ 	btrfs_free_path(left_path);
+ 	btrfs_free_path(right_path);
+-	kfree(tmp_buf);
++	kvfree(tmp_buf);
+ 	return ret;
+ }
+ 
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index bfe4a337fb4d..6661ad8b4088 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -2252,7 +2252,7 @@ struct btrfs_ioctl_defrag_range_args {
+ #define BTRFS_MOUNT_FREE_SPACE_TREE	(1 << 26)
+ 
+ #define BTRFS_DEFAULT_COMMIT_INTERVAL	(30)
+-#define BTRFS_DEFAULT_MAX_INLINE	(8192)
++#define BTRFS_DEFAULT_MAX_INLINE	(2048)
+ 
+ #define btrfs_clear_opt(o, opt)		((o) &= ~BTRFS_MOUNT_##opt)
+ #define btrfs_set_opt(o, opt)		((o) |= BTRFS_MOUNT_##opt)
+diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
+index cbb7dbfb3fff..218f51a5dbab 100644
+--- a/fs/btrfs/dev-replace.c
++++ b/fs/btrfs/dev-replace.c
+@@ -394,6 +394,8 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
+ 	dev_replace->cursor_right = 0;
+ 	dev_replace->is_valid = 1;
+ 	dev_replace->item_needs_writeback = 1;
++	atomic64_set(&dev_replace->num_write_errors, 0);
++	atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0);
+ 	args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
+ 	btrfs_dev_replace_unlock(dev_replace);
+ 
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 4545e2e2ad45..ae6e3e36fdf0 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -303,7 +303,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
+ 		err = map_private_extent_buffer(buf, offset, 32,
+ 					&kaddr, &map_start, &map_len);
+ 		if (err)
+-			return 1;
++			return err;
+ 		cur_len = min(len, map_len - (offset - map_start));
+ 		crc = btrfs_csum_data(kaddr + offset - map_start,
+ 				      crc, cur_len);
+@@ -313,7 +313,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
+ 	if (csum_size > sizeof(inline_result)) {
+ 		result = kzalloc(csum_size, GFP_NOFS);
+ 		if (!result)
+-			return 1;
++			return -ENOMEM;
+ 	} else {
+ 		result = (char *)&inline_result;
+ 	}
+@@ -334,7 +334,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
+ 				val, found, btrfs_header_level(buf));
+ 			if (result != (char *)&inline_result)
+ 				kfree(result);
+-			return 1;
++			return -EUCLEAN;
+ 		}
+ 	} else {
+ 		write_extent_buffer(buf, result, 0, csum_size);
+@@ -513,11 +513,21 @@ static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
+ 	eb = (struct extent_buffer *)page->private;
+ 	if (page != eb->pages[0])
+ 		return 0;
++
+ 	found_start = btrfs_header_bytenr(eb);
+-	if (WARN_ON(found_start != start || !PageUptodate(page)))
+-		return 0;
+-	csum_tree_block(fs_info, eb, 0);
+-	return 0;
++	/*
++	 * Please do not consolidate these warnings into a single if.
++	 * It is useful to know what went wrong.
++	 */
++	if (WARN_ON(found_start != start))
++		return -EUCLEAN;
++	if (WARN_ON(!PageUptodate(page)))
++		return -EUCLEAN;
++
++	ASSERT(memcmp_extent_buffer(eb, fs_info->fsid,
++			btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);
++
++	return csum_tree_block(fs_info, eb, 0);
+ }
+ 
+ static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
+@@ -660,10 +670,8 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
+ 				       eb, found_level);
+ 
+ 	ret = csum_tree_block(root->fs_info, eb, 1);
+-	if (ret) {
+-		ret = -EIO;
++	if (ret)
+ 		goto err;
+-	}
+ 
+ 	/*
+ 	 * If this is a leaf block and it is corrupt, set the corrupt bit so
+@@ -1830,7 +1838,7 @@ static int cleaner_kthread(void *arg)
+ 		 */
+ 		btrfs_delete_unused_bgs(root->fs_info);
+ sleep:
+-		if (!try_to_freeze() && !again) {
++		if (!again) {
+ 			set_current_state(TASK_INTERRUPTIBLE);
+ 			if (!kthread_should_stop())
+ 				schedule();
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 098bb8f690c9..5d956b869e03 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -1883,7 +1883,7 @@ static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
+  */
+ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ {
+-	struct dentry *dentry = file->f_path.dentry;
++	struct dentry *dentry = file_dentry(file);
+ 	struct inode *inode = d_inode(dentry);
+ 	struct btrfs_root *root = BTRFS_I(inode)->root;
+ 	struct btrfs_trans_handle *trans;
+@@ -1996,10 +1996,11 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ 	 */
+ 	smp_mb();
+ 	if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
+-	    (BTRFS_I(inode)->last_trans <=
+-	     root->fs_info->last_trans_committed &&
+-	     (full_sync ||
+-	      !btrfs_have_ordered_extents_in_range(inode, start, len)))) {
++	    (full_sync && BTRFS_I(inode)->last_trans <=
++	     root->fs_info->last_trans_committed) ||
++	    (!btrfs_have_ordered_extents_in_range(inode, start, len) &&
++	     BTRFS_I(inode)->last_trans
++	     <= root->fs_info->last_trans_committed)) {
+ 		/*
+ 		 * We'v had everything committed since the last time we were
+ 		 * modified so clear this flag in case it was set for whatever
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index d96f5cf38a2d..f407e487c687 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4211,11 +4211,20 @@ static int truncate_space_check(struct btrfs_trans_handle *trans,
+ {
+ 	int ret;
+ 
++	/*
++	 * This is only used to apply pressure to the enospc system, we don't
++	 * intend to use this reservation at all.
++	 */
+ 	bytes_deleted = btrfs_csum_bytes_to_leaves(root, bytes_deleted);
++	bytes_deleted *= root->nodesize;
+ 	ret = btrfs_block_rsv_add(root, &root->fs_info->trans_block_rsv,
+ 				  bytes_deleted, BTRFS_RESERVE_NO_FLUSH);
+-	if (!ret)
++	if (!ret) {
++		trace_btrfs_space_reservation(root->fs_info, "transaction",
++					      trans->transid,
++					      bytes_deleted, 1);
+ 		trans->bytes_reserved += bytes_deleted;
++	}
+ 	return ret;
+ 
+ }
+@@ -7414,7 +7423,26 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
+ 				     cached_state, GFP_NOFS);
+ 
+ 		if (ordered) {
+-			btrfs_start_ordered_extent(inode, ordered, 1);
++			/*
++			 * If we are doing a DIO read and the ordered extent we
++			 * found is for a buffered write, we can not wait for it
++			 * to complete and retry, because if we do so we can
++			 * deadlock with concurrent buffered writes on page
++			 * locks. This happens only if our DIO read covers more
++			 * than one extent map, if at this point has already
++			 * created an ordered extent for a previous extent map
++			 * and locked its range in the inode's io tree, and a
++			 * concurrent write against that previous extent map's
++			 * range and this range started (we unlock the ranges
++			 * in the io tree only when the bios complete and
++			 * buffered writes always lock pages before attempting
++			 * to lock range in the io tree).
++			 */
++			if (writing ||
++			    test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
++				btrfs_start_ordered_extent(inode, ordered, 1);
++			else
++				ret = -ENOTBLK;
+ 			btrfs_put_ordered_extent(ordered);
+ 		} else {
+ 			/*
+@@ -7431,9 +7459,11 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
+ 			 * that page.
+ 			 */
+ 			ret = -ENOTBLK;
+-			break;
+ 		}
+ 
++		if (ret)
++			break;
++
+ 		cond_resched();
+ 	}
+ 
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 48aee9846329..e3791f268489 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -59,6 +59,7 @@
+ #include "props.h"
+ #include "sysfs.h"
+ #include "qgroup.h"
++#include "tree-log.h"
+ 
+ #ifdef CONFIG_64BIT
+ /* If we have a 32-bit userspace and 64-bit kernel, then the UAPI
+@@ -1656,7 +1657,7 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
+ 
+ 		src_inode = file_inode(src.file);
+ 		if (src_inode->i_sb != file_inode(file)->i_sb) {
+-			btrfs_info(BTRFS_I(src_inode)->root->fs_info,
++			btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
+ 				   "Snapshot src from another FS");
+ 			ret = -EXDEV;
+ 		} else if (!inode_owner_or_capable(src_inode)) {
+@@ -2097,8 +2098,6 @@ static noinline int search_ioctl(struct inode *inode,
+ 		key.offset = (u64)-1;
+ 		root = btrfs_read_fs_root_no_name(info, &key);
+ 		if (IS_ERR(root)) {
+-			btrfs_err(info, "could not find root %llu",
+-			       sk->tree_id);
+ 			btrfs_free_path(path);
+ 			return -ENOENT;
+ 		}
+@@ -2476,6 +2475,8 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
+ 	trans->block_rsv = &block_rsv;
+ 	trans->bytes_reserved = block_rsv.size;
+ 
++	btrfs_record_snapshot_destroy(trans, dir);
++
+ 	ret = btrfs_unlink_subvol(trans, root, dir,
+ 				dest->root_key.objectid,
+ 				dentry->d_name.name,
+@@ -3068,6 +3069,9 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
+ 		ret = extent_same_check_offsets(src, loff, &len, olen);
+ 		if (ret)
+ 			goto out_unlock;
++		ret = extent_same_check_offsets(src, dst_loff, &len, olen);
++		if (ret)
++			goto out_unlock;
+ 
+ 		/*
+ 		 * Single inode case wants the same checks, except we
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 5279fdae7142..7173360eea7a 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -1842,8 +1842,10 @@ out:
+ }
+ 
+ /*
+- * copy the acounting information between qgroups. This is necessary when a
+- * snapshot or a subvolume is created
++ * Copy the acounting information between qgroups. This is necessary
++ * when a snapshot or a subvolume is created. Throwing an error will
++ * cause a transaction abort so we take extra care here to only error
++ * when a readonly fs is a reasonable outcome.
+  */
+ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
+ 			 struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
+@@ -1873,15 +1875,15 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
+ 		       2 * inherit->num_excl_copies;
+ 		for (i = 0; i < nums; ++i) {
+ 			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
+-			if (!srcgroup) {
+-				ret = -EINVAL;
+-				goto out;
+-			}
+ 
+-			if ((srcgroup->qgroupid >> 48) <= (objectid >> 48)) {
+-				ret = -EINVAL;
+-				goto out;
+-			}
++			/*
++			 * Zero out invalid groups so we can ignore
++			 * them later.
++			 */
++			if (!srcgroup ||
++			    ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
++				*i_qgroups = 0ULL;
++
+ 			++i_qgroups;
+ 		}
+ 	}
+@@ -1916,17 +1918,19 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
+ 	 */
+ 	if (inherit) {
+ 		i_qgroups = (u64 *)(inherit + 1);
+-		for (i = 0; i < inherit->num_qgroups; ++i) {
++		for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
++			if (*i_qgroups == 0)
++				continue;
+ 			ret = add_qgroup_relation_item(trans, quota_root,
+ 						       objectid, *i_qgroups);
+-			if (ret)
++			if (ret && ret != -EEXIST)
+ 				goto out;
+ 			ret = add_qgroup_relation_item(trans, quota_root,
+ 						       *i_qgroups, objectid);
+-			if (ret)
++			if (ret && ret != -EEXIST)
+ 				goto out;
+-			++i_qgroups;
+ 		}
++		ret = 0;
+ 	}
+ 
+ 
+@@ -1987,17 +1991,22 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
+ 
+ 	i_qgroups = (u64 *)(inherit + 1);
+ 	for (i = 0; i < inherit->num_qgroups; ++i) {
+-		ret = add_relation_rb(quota_root->fs_info, objectid,
+-				      *i_qgroups);
+-		if (ret)
+-			goto unlock;
++		if (*i_qgroups) {
++			ret = add_relation_rb(quota_root->fs_info, objectid,
++					      *i_qgroups);
++			if (ret)
++				goto unlock;
++		}
+ 		++i_qgroups;
+ 	}
+ 
+-	for (i = 0; i <  inherit->num_ref_copies; ++i) {
++	for (i = 0; i <  inherit->num_ref_copies; ++i, i_qgroups += 2) {
+ 		struct btrfs_qgroup *src;
+ 		struct btrfs_qgroup *dst;
+ 
++		if (!i_qgroups[0] || !i_qgroups[1])
++			continue;
++
+ 		src = find_qgroup_rb(fs_info, i_qgroups[0]);
+ 		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
+ 
+@@ -2008,12 +2017,14 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
+ 
+ 		dst->rfer = src->rfer - level_size;
+ 		dst->rfer_cmpr = src->rfer_cmpr - level_size;
+-		i_qgroups += 2;
+ 	}
+-	for (i = 0; i <  inherit->num_excl_copies; ++i) {
++	for (i = 0; i <  inherit->num_excl_copies; ++i, i_qgroups += 2) {
+ 		struct btrfs_qgroup *src;
+ 		struct btrfs_qgroup *dst;
+ 
++		if (!i_qgroups[0] || !i_qgroups[1])
++			continue;
++
+ 		src = find_qgroup_rb(fs_info, i_qgroups[0]);
+ 		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
+ 
+@@ -2024,7 +2035,6 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
+ 
+ 		dst->excl = src->excl + level_size;
+ 		dst->excl_cmpr = src->excl_cmpr + level_size;
+-		i_qgroups += 2;
+ 	}
+ 
+ unlock:
+diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
+index 619f92963e27..49b3fb73ffbf 100644
+--- a/fs/btrfs/reada.c
++++ b/fs/btrfs/reada.c
+@@ -265,7 +265,7 @@ static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
+ 	spin_unlock(&fs_info->reada_lock);
+ 
+ 	if (ret == 1) {
+-		if (logical >= zone->start && logical < zone->end)
++		if (logical >= zone->start && logical <= zone->end)
+ 			return zone;
+ 		spin_lock(&fs_info->reada_lock);
+ 		kref_put(&zone->refcnt, reada_zone_release);
+@@ -679,7 +679,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
+ 	 */
+ 	ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
+ 				     dev->reada_next >> PAGE_CACHE_SHIFT, 1);
+-	if (ret == 0 || re->logical >= dev->reada_curr_zone->end) {
++	if (ret == 0 || re->logical > dev->reada_curr_zone->end) {
+ 		ret = reada_pick_zone(dev);
+ 		if (!ret) {
+ 			spin_unlock(&fs_info->reada_lock);
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index 2bd0011450df..5c806f0d443d 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -1850,6 +1850,7 @@ again:
+ 			eb = read_tree_block(dest, old_bytenr, old_ptr_gen);
+ 			if (IS_ERR(eb)) {
+ 				ret = PTR_ERR(eb);
++				break;
+ 			} else if (!extent_buffer_uptodate(eb)) {
+ 				ret = -EIO;
+ 				free_extent_buffer(eb);
+diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
+index 0e1e61a7ec23..d39f714dabeb 100644
+--- a/fs/btrfs/tests/btrfs-tests.c
++++ b/fs/btrfs/tests/btrfs-tests.c
+@@ -189,12 +189,6 @@ btrfs_alloc_dummy_block_group(unsigned long length)
+ 		kfree(cache);
+ 		return NULL;
+ 	}
+-	cache->fs_info = btrfs_alloc_dummy_fs_info();
+-	if (!cache->fs_info) {
+-		kfree(cache->free_space_ctl);
+-		kfree(cache);
+-		return NULL;
+-	}
+ 
+ 	cache->key.objectid = 0;
+ 	cache->key.offset = length;
+diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c
+index d05fe1ab4808..7cea4462acd5 100644
+--- a/fs/btrfs/tests/free-space-tree-tests.c
++++ b/fs/btrfs/tests/free-space-tree-tests.c
+@@ -485,6 +485,7 @@ static int run_test(test_func_t test_func, int bitmaps)
+ 	cache->bitmap_low_thresh = 0;
+ 	cache->bitmap_high_thresh = (u32)-1;
+ 	cache->needs_free_space = 1;
++	cache->fs_info = root->fs_info;
+ 
+ 	btrfs_init_dummy_trans(&trans);
+ 
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 978c3a810893..58ae0a2ce65c 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -4414,6 +4414,127 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
+ 	return ret;
+ }
+ 
++/*
++ * When we are logging a new inode X, check if it doesn't have a reference that
++ * matches the reference from some other inode Y created in a past transaction
++ * and that was renamed in the current transaction. If we don't do this, then at
++ * log replay time we can lose inode Y (and all its files if it's a directory):
++ *
++ * mkdir /mnt/x
++ * echo "hello world" > /mnt/x/foobar
++ * sync
++ * mv /mnt/x /mnt/y
++ * mkdir /mnt/x                 # or touch /mnt/x
++ * xfs_io -c fsync /mnt/x
++ * <power fail>
++ * mount fs, trigger log replay
++ *
++ * After the log replay procedure, we would lose the first directory and all its
++ * files (file foobar).
++ * For the case where inode Y is not a directory we simply end up losing it:
++ *
++ * echo "123" > /mnt/foo
++ * sync
++ * mv /mnt/foo /mnt/bar
++ * echo "abc" > /mnt/foo
++ * xfs_io -c fsync /mnt/foo
++ * <power fail>
++ *
++ * We also need this for cases where a snapshot entry is replaced by some other
++ * entry (file or directory) otherwise we end up with an unreplayable log due to
++ * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
++ * if it were a regular entry:
++ *
++ * mkdir /mnt/x
++ * btrfs subvolume snapshot /mnt /mnt/x/snap
++ * btrfs subvolume delete /mnt/x/snap
++ * rmdir /mnt/x
++ * mkdir /mnt/x
++ * fsync /mnt/x or fsync some new file inside it
++ * <power fail>
++ *
++ * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
++ * the same transaction.
++ */
++static int btrfs_check_ref_name_override(struct extent_buffer *eb,
++					 const int slot,
++					 const struct btrfs_key *key,
++					 struct inode *inode)
++{
++	int ret;
++	struct btrfs_path *search_path;
++	char *name = NULL;
++	u32 name_len = 0;
++	u32 item_size = btrfs_item_size_nr(eb, slot);
++	u32 cur_offset = 0;
++	unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
++
++	search_path = btrfs_alloc_path();
++	if (!search_path)
++		return -ENOMEM;
++	search_path->search_commit_root = 1;
++	search_path->skip_locking = 1;
++
++	while (cur_offset < item_size) {
++		u64 parent;
++		u32 this_name_len;
++		u32 this_len;
++		unsigned long name_ptr;
++		struct btrfs_dir_item *di;
++
++		if (key->type == BTRFS_INODE_REF_KEY) {
++			struct btrfs_inode_ref *iref;
++
++			iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
++			parent = key->offset;
++			this_name_len = btrfs_inode_ref_name_len(eb, iref);
++			name_ptr = (unsigned long)(iref + 1);
++			this_len = sizeof(*iref) + this_name_len;
++		} else {
++			struct btrfs_inode_extref *extref;
++
++			extref = (struct btrfs_inode_extref *)(ptr +
++							       cur_offset);
++			parent = btrfs_inode_extref_parent(eb, extref);
++			this_name_len = btrfs_inode_extref_name_len(eb, extref);
++			name_ptr = (unsigned long)&extref->name;
++			this_len = sizeof(*extref) + this_name_len;
++		}
++
++		if (this_name_len > name_len) {
++			char *new_name;
++
++			new_name = krealloc(name, this_name_len, GFP_NOFS);
++			if (!new_name) {
++				ret = -ENOMEM;
++				goto out;
++			}
++			name_len = this_name_len;
++			name = new_name;
++		}
++
++		read_extent_buffer(eb, name, name_ptr, this_name_len);
++		di = btrfs_lookup_dir_item(NULL, BTRFS_I(inode)->root,
++					   search_path, parent,
++					   name, this_name_len, 0);
++		if (di && !IS_ERR(di)) {
++			ret = 1;
++			goto out;
++		} else if (IS_ERR(di)) {
++			ret = PTR_ERR(di);
++			goto out;
++		}
++		btrfs_release_path(search_path);
++
++		cur_offset += this_len;
++	}
++	ret = 0;
++out:
++	btrfs_free_path(search_path);
++	kfree(name);
++	return ret;
++}
++
+ /* log a single inode in the tree log.
+  * At least one parent directory for this inode must exist in the tree
+  * or be logged already.
+@@ -4500,7 +4621,22 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
+ 
+ 	mutex_lock(&BTRFS_I(inode)->log_mutex);
+ 
+-	btrfs_get_logged_extents(inode, &logged_list, start, end);
++	/*
++	 * Collect ordered extents only if we are logging data. This is to
++	 * ensure a subsequent request to log this inode in LOG_INODE_ALL mode
++	 * will process the ordered extents if they still exists at the time,
++	 * because when we collect them we test and set for the flag
++	 * BTRFS_ORDERED_LOGGED to prevent multiple log requests to process the
++	 * same ordered extents. The consequence for the LOG_INODE_ALL log mode
++	 * not processing the ordered extents is that we end up logging the
++	 * corresponding file extent items, based on the extent maps in the
++	 * inode's extent_map_tree's modified_list, without logging the
++	 * respective checksums (since the may still be only attached to the
++	 * ordered extents and have not been inserted in the csum tree by
++	 * btrfs_finish_ordered_io() yet).
++	 */
++	if (inode_only == LOG_INODE_ALL)
++		btrfs_get_logged_extents(inode, &logged_list, start, end);
+ 
+ 	/*
+ 	 * a brute force approach to making sure we get the most uptodate
+@@ -4586,6 +4722,22 @@ again:
+ 		if (min_key.type == BTRFS_INODE_ITEM_KEY)
+ 			need_log_inode_item = false;
+ 
++		if ((min_key.type == BTRFS_INODE_REF_KEY ||
++		     min_key.type == BTRFS_INODE_EXTREF_KEY) &&
++		    BTRFS_I(inode)->generation == trans->transid) {
++			ret = btrfs_check_ref_name_override(path->nodes[0],
++							    path->slots[0],
++							    &min_key, inode);
++			if (ret < 0) {
++				err = ret;
++				goto out_unlock;
++			} else if (ret > 0) {
++				err = 1;
++				btrfs_set_log_full_commit(root->fs_info, trans);
++				goto out_unlock;
++			}
++		}
++
+ 		/* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
+ 		if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
+ 			if (ins_nr == 0)
+@@ -4772,6 +4924,42 @@ out_unlock:
+ }
+ 
+ /*
++ * Check if we must fallback to a transaction commit when logging an inode.
++ * This must be called after logging the inode and is used only in the context
++ * when fsyncing an inode requires the need to log some other inode - in which
++ * case we can't lock the i_mutex of each other inode we need to log as that
++ * can lead to deadlocks with concurrent fsync against other inodes (as we can
++ * log inodes up or down in the hierarchy) or rename operations for example. So
++ * we take the log_mutex of the inode after we have logged it and then check for
++ * its last_unlink_trans value - this is safe because any task setting
++ * last_unlink_trans must take the log_mutex and it must do this before it does
++ * the actual unlink operation, so if we do this check before a concurrent task
++ * sets last_unlink_trans it means we've logged a consistent version/state of
++ * all the inode items, otherwise we are not sure and must do a transaction
++ * commit (the concurrent task migth have only updated last_unlink_trans before
++ * we logged the inode or it might have also done the unlink).
++ */
++static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
++					  struct inode *inode)
++{
++	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
++	bool ret = false;
++
++	mutex_lock(&BTRFS_I(inode)->log_mutex);
++	if (BTRFS_I(inode)->last_unlink_trans > fs_info->last_trans_committed) {
++		/*
++		 * Make sure any commits to the log are forced to be full
++		 * commits.
++		 */
++		btrfs_set_log_full_commit(fs_info, trans);
++		ret = true;
++	}
++	mutex_unlock(&BTRFS_I(inode)->log_mutex);
++
++	return ret;
++}
++
++/*
+  * follow the dentry parent pointers up the chain and see if any
+  * of the directories in it require a full commit before they can
+  * be logged.  Returns zero if nothing special needs to be done or 1 if
+@@ -4784,7 +4972,6 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
+ 					       u64 last_committed)
+ {
+ 	int ret = 0;
+-	struct btrfs_root *root;
+ 	struct dentry *old_parent = NULL;
+ 	struct inode *orig_inode = inode;
+ 
+@@ -4816,14 +5003,7 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
+ 			BTRFS_I(inode)->logged_trans = trans->transid;
+ 		smp_mb();
+ 
+-		if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
+-			root = BTRFS_I(inode)->root;
+-
+-			/*
+-			 * make sure any commits to the log are forced
+-			 * to be full commits
+-			 */
+-			btrfs_set_log_full_commit(root->fs_info, trans);
++		if (btrfs_must_commit_transaction(trans, inode)) {
+ 			ret = 1;
+ 			break;
+ 		}
+@@ -4982,6 +5162,9 @@ process_leaf:
+ 			btrfs_release_path(path);
+ 			ret = btrfs_log_inode(trans, root, di_inode,
+ 					      log_mode, 0, LLONG_MAX, ctx);
++			if (!ret &&
++			    btrfs_must_commit_transaction(trans, di_inode))
++				ret = 1;
+ 			iput(di_inode);
+ 			if (ret)
+ 				goto next_dir_inode;
+@@ -5096,6 +5279,9 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
+ 
+ 			ret = btrfs_log_inode(trans, root, dir_inode,
+ 					      LOG_INODE_ALL, 0, LLONG_MAX, ctx);
++			if (!ret &&
++			    btrfs_must_commit_transaction(trans, dir_inode))
++				ret = 1;
+ 			iput(dir_inode);
+ 			if (ret)
+ 				goto out;
+@@ -5447,6 +5633,9 @@ error:
+  * They revolve around files there were unlinked from the directory, and
+  * this function updates the parent directory so that a full commit is
+  * properly done if it is fsync'd later after the unlinks are done.
++ *
++ * Must be called before the unlink operations (updates to the subvolume tree,
++ * inodes, etc) are done.
+  */
+ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
+ 			     struct inode *dir, struct inode *inode,
+@@ -5462,8 +5651,11 @@ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
+ 	 * into the file.  When the file is logged we check it and
+ 	 * don't log the parents if the file is fully on disk.
+ 	 */
+-	if (S_ISREG(inode->i_mode))
++	if (S_ISREG(inode->i_mode)) {
++		mutex_lock(&BTRFS_I(inode)->log_mutex);
+ 		BTRFS_I(inode)->last_unlink_trans = trans->transid;
++		mutex_unlock(&BTRFS_I(inode)->log_mutex);
++	}
+ 
+ 	/*
+ 	 * if this directory was already logged any new
+@@ -5494,7 +5686,29 @@ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
+ 	return;
+ 
+ record:
++	mutex_lock(&BTRFS_I(dir)->log_mutex);
+ 	BTRFS_I(dir)->last_unlink_trans = trans->transid;
++	mutex_unlock(&BTRFS_I(dir)->log_mutex);
++}
++
++/*
++ * Make sure that if someone attempts to fsync the parent directory of a deleted
++ * snapshot, it ends up triggering a transaction commit. This is to guarantee
++ * that after replaying the log tree of the parent directory's root we will not
++ * see the snapshot anymore and at log replay time we will not see any log tree
++ * corresponding to the deleted snapshot's root, which could lead to replaying
++ * it after replaying the log tree of the parent directory (which would replay
++ * the snapshot delete operation).
++ *
++ * Must be called before the actual snapshot destroy operation (updates to the
++ * parent root and tree of tree roots trees, etc) are done.
++ */
++void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
++				   struct inode *dir)
++{
++	mutex_lock(&BTRFS_I(dir)->log_mutex);
++	BTRFS_I(dir)->last_unlink_trans = trans->transid;
++	mutex_unlock(&BTRFS_I(dir)->log_mutex);
+ }
+ 
+ /*
+diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
+index 6916a781ea02..a9f1b75d080d 100644
+--- a/fs/btrfs/tree-log.h
++++ b/fs/btrfs/tree-log.h
+@@ -79,6 +79,8 @@ int btrfs_pin_log_trans(struct btrfs_root *root);
+ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
+ 			     struct inode *dir, struct inode *inode,
+ 			     int for_rename);
++void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
++				   struct inode *dir);
+ int btrfs_log_new_name(struct btrfs_trans_handle *trans,
+ 			struct inode *inode, struct inode *old_dir,
+ 			struct dentry *parent);
+diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
+index 59727e32ed0f..af0ec2d5ad0e 100644
+--- a/fs/cifs/sess.c
++++ b/fs/cifs/sess.c
+@@ -400,19 +400,27 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
+ 	sec_blob->LmChallengeResponse.MaximumLength = 0;
+ 
+ 	sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
+-	rc = setup_ntlmv2_rsp(ses, nls_cp);
+-	if (rc) {
+-		cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
+-		goto setup_ntlmv2_ret;
++	if (ses->user_name != NULL) {
++		rc = setup_ntlmv2_rsp(ses, nls_cp);
++		if (rc) {
++			cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
++			goto setup_ntlmv2_ret;
++		}
++		memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
++				ses->auth_key.len - CIFS_SESS_KEY_SIZE);
++		tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
++
++		sec_blob->NtChallengeResponse.Length =
++				cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
++		sec_blob->NtChallengeResponse.MaximumLength =
++				cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
++	} else {
++		/*
++		 * don't send an NT Response for anonymous access
++		 */
++		sec_blob->NtChallengeResponse.Length = 0;
++		sec_blob->NtChallengeResponse.MaximumLength = 0;
+ 	}
+-	memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
+-			ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+-	tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
+-
+-	sec_blob->NtChallengeResponse.Length =
+-			cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+-	sec_blob->NtChallengeResponse.MaximumLength =
+-			cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+ 
+ 	if (ses->domainName == NULL) {
+ 		sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
+@@ -670,20 +678,24 @@ sess_auth_lanman(struct sess_data *sess_data)
+ 
+ 	pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE;
+ 
+-	/* no capabilities flags in old lanman negotiation */
+-	pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
+-
+-	/* Calculate hash with password and copy into bcc_ptr.
+-	 * Encryption Key (stored as in cryptkey) gets used if the
+-	 * security mode bit in Negottiate Protocol response states
+-	 * to use challenge/response method (i.e. Password bit is 1).
+-	 */
+-	rc = calc_lanman_hash(ses->password, ses->server->cryptkey,
+-			      ses->server->sec_mode & SECMODE_PW_ENCRYPT ?
+-			      true : false, lnm_session_key);
+-
+-	memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE);
+-	bcc_ptr += CIFS_AUTH_RESP_SIZE;
++	if (ses->user_name != NULL) {
++		/* no capabilities flags in old lanman negotiation */
++		pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
++
++		/* Calculate hash with password and copy into bcc_ptr.
++		 * Encryption Key (stored as in cryptkey) gets used if the
++		 * security mode bit in Negottiate Protocol response states
++		 * to use challenge/response method (i.e. Password bit is 1).
++		 */
++		rc = calc_lanman_hash(ses->password, ses->server->cryptkey,
++				      ses->server->sec_mode & SECMODE_PW_ENCRYPT ?
++				      true : false, lnm_session_key);
++
++		memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE);
++		bcc_ptr += CIFS_AUTH_RESP_SIZE;
++	} else {
++		pSMB->old_req.PasswordLength = 0;
++	}
+ 
+ 	/*
+ 	 * can not sign if LANMAN negotiated so no need
+@@ -769,26 +781,31 @@ sess_auth_ntlm(struct sess_data *sess_data)
+ 	capabilities = cifs_ssetup_hdr(ses, pSMB);
+ 
+ 	pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
+-	pSMB->req_no_secext.CaseInsensitivePasswordLength =
+-			cpu_to_le16(CIFS_AUTH_RESP_SIZE);
+-	pSMB->req_no_secext.CaseSensitivePasswordLength =
+-			cpu_to_le16(CIFS_AUTH_RESP_SIZE);
+-
+-	/* calculate ntlm response and session key */
+-	rc = setup_ntlm_response(ses, sess_data->nls_cp);
+-	if (rc) {
+-		cifs_dbg(VFS, "Error %d during NTLM authentication\n",
+-				 rc);
+-		goto out;
+-	}
++	if (ses->user_name != NULL) {
++		pSMB->req_no_secext.CaseInsensitivePasswordLength =
++				cpu_to_le16(CIFS_AUTH_RESP_SIZE);
++		pSMB->req_no_secext.CaseSensitivePasswordLength =
++				cpu_to_le16(CIFS_AUTH_RESP_SIZE);
++
++		/* calculate ntlm response and session key */
++		rc = setup_ntlm_response(ses, sess_data->nls_cp);
++		if (rc) {
++			cifs_dbg(VFS, "Error %d during NTLM authentication\n",
++					 rc);
++			goto out;
++		}
+ 
+-	/* copy ntlm response */
+-	memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
+-			CIFS_AUTH_RESP_SIZE);
+-	bcc_ptr += CIFS_AUTH_RESP_SIZE;
+-	memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
+-			CIFS_AUTH_RESP_SIZE);
+-	bcc_ptr += CIFS_AUTH_RESP_SIZE;
++		/* copy ntlm response */
++		memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
++				CIFS_AUTH_RESP_SIZE);
++		bcc_ptr += CIFS_AUTH_RESP_SIZE;
++		memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
++				CIFS_AUTH_RESP_SIZE);
++		bcc_ptr += CIFS_AUTH_RESP_SIZE;
++	} else {
++		pSMB->req_no_secext.CaseInsensitivePasswordLength = 0;
++		pSMB->req_no_secext.CaseSensitivePasswordLength = 0;
++	}
+ 
+ 	if (ses->capabilities & CAP_UNICODE) {
+ 		/* unicode strings must be word aligned */
+@@ -878,22 +895,26 @@ sess_auth_ntlmv2(struct sess_data *sess_data)
+ 	/* LM2 password would be here if we supported it */
+ 	pSMB->req_no_secext.CaseInsensitivePasswordLength = 0;
+ 
+-	/* calculate nlmv2 response and session key */
+-	rc = setup_ntlmv2_rsp(ses, sess_data->nls_cp);
+-	if (rc) {
+-		cifs_dbg(VFS, "Error %d during NTLMv2 authentication\n", rc);
+-		goto out;
+-	}
++	if (ses->user_name != NULL) {
++		/* calculate nlmv2 response and session key */
++		rc = setup_ntlmv2_rsp(ses, sess_data->nls_cp);
++		if (rc) {
++			cifs_dbg(VFS, "Error %d during NTLMv2 authentication\n", rc);
++			goto out;
++		}
+ 
+-	memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
+-			ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+-	bcc_ptr += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
++		memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
++				ses->auth_key.len - CIFS_SESS_KEY_SIZE);
++		bcc_ptr += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
+ 
+-	/* set case sensitive password length after tilen may get
+-	 * assigned, tilen is 0 otherwise.
+-	 */
+-	pSMB->req_no_secext.CaseSensitivePasswordLength =
+-		cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
++		/* set case sensitive password length after tilen may get
++		 * assigned, tilen is 0 otherwise.
++		 */
++		pSMB->req_no_secext.CaseSensitivePasswordLength =
++			cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
++	} else {
++		pSMB->req_no_secext.CaseSensitivePasswordLength = 0;
++	}
+ 
+ 	if (ses->capabilities & CAP_UNICODE) {
+ 		if (sess_data->iov[0].iov_len % 2) {
+diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
+index bc0bb9c34f72..0ffa18094335 100644
+--- a/fs/cifs/smb2glob.h
++++ b/fs/cifs/smb2glob.h
+@@ -44,6 +44,7 @@
+ #define SMB2_OP_DELETE 7
+ #define SMB2_OP_HARDLINK 8
+ #define SMB2_OP_SET_EOF 9
++#define SMB2_OP_RMDIR 10
+ 
+ /* Used when constructing chained read requests. */
+ #define CHAINED_REQUEST 1
+diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
+index 899bbc86f73e..4f0231e685a9 100644
+--- a/fs/cifs/smb2inode.c
++++ b/fs/cifs/smb2inode.c
+@@ -80,6 +80,10 @@ smb2_open_op_close(const unsigned int xid, struct cifs_tcon *tcon,
+ 		 * SMB2_open() call.
+ 		 */
+ 		break;
++	case SMB2_OP_RMDIR:
++		tmprc = SMB2_rmdir(xid, tcon, fid.persistent_fid,
++				   fid.volatile_fid);
++		break;
+ 	case SMB2_OP_RENAME:
+ 		tmprc = SMB2_rename(xid, tcon, fid.persistent_fid,
+ 				    fid.volatile_fid, (__le16 *)data);
+@@ -191,8 +195,8 @@ smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
+ 	   struct cifs_sb_info *cifs_sb)
+ {
+ 	return smb2_open_op_close(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
+-				  CREATE_NOT_FILE | CREATE_DELETE_ON_CLOSE,
+-				  NULL, SMB2_OP_DELETE);
++				  CREATE_NOT_FILE,
++				  NULL, SMB2_OP_RMDIR);
+ }
+ 
+ int
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 42e1f440eb1e..8f38e33d365b 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -2575,6 +2575,22 @@ SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
+ }
+ 
+ int
++SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon,
++		  u64 persistent_fid, u64 volatile_fid)
++{
++	__u8 delete_pending = 1;
++	void *data;
++	unsigned int size;
++
++	data = &delete_pending;
++	size = 1; /* sizeof __u8 */
++
++	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
++			current->tgid, FILE_DISPOSITION_INFORMATION, 1, &data,
++			&size);
++}
++
++int
+ SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
+ 		  u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
+ {
+diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
+index 4f07dc93608d..eb2cde2f64ba 100644
+--- a/fs/cifs/smb2proto.h
++++ b/fs/cifs/smb2proto.h
+@@ -141,6 +141,8 @@ extern int SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
+ extern int SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
+ 		       u64 persistent_fid, u64 volatile_fid,
+ 		       __le16 *target_file);
++extern int SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon,
++		      u64 persistent_fid, u64 volatile_fid);
+ extern int SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
+ 			     u64 persistent_fid, u64 volatile_fid,
+ 			     __le16 *target_file);
+diff --git a/fs/coredump.c b/fs/coredump.c
+index 9ea87e9fdccf..47c32c3bfa1d 100644
+--- a/fs/coredump.c
++++ b/fs/coredump.c
+@@ -32,6 +32,9 @@
+ #include <linux/pipe_fs_i.h>
+ #include <linux/oom.h>
+ #include <linux/compat.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/path.h>
+ #include <linux/timekeeping.h>
+ 
+ #include <asm/uaccess.h>
+@@ -649,6 +652,8 @@ void do_coredump(const siginfo_t *siginfo)
+ 		}
+ 	} else {
+ 		struct inode *inode;
++		int open_flags = O_CREAT | O_RDWR | O_NOFOLLOW |
++				 O_LARGEFILE | O_EXCL;
+ 
+ 		if (cprm.limit < binfmt->min_coredump)
+ 			goto fail_unlock;
+@@ -687,10 +692,27 @@ void do_coredump(const siginfo_t *siginfo)
+ 		 * what matters is that at least one of the two processes
+ 		 * writes its coredump successfully, not which one.
+ 		 */
+-		cprm.file = filp_open(cn.corename,
+-				 O_CREAT | 2 | O_NOFOLLOW |
+-				 O_LARGEFILE | O_EXCL,
+-				 0600);
++		if (need_suid_safe) {
++			/*
++			 * Using user namespaces, normal user tasks can change
++			 * their current->fs->root to point to arbitrary
++			 * directories. Since the intention of the "only dump
++			 * with a fully qualified path" rule is to control where
++			 * coredumps may be placed using root privileges,
++			 * current->fs->root must not be used. Instead, use the
++			 * root directory of init_task.
++			 */
++			struct path root;
++
++			task_lock(&init_task);
++			get_fs_root(init_task.fs, &root);
++			task_unlock(&init_task);
++			cprm.file = file_open_root(root.dentry, root.mnt,
++				cn.corename, open_flags, 0600);
++			path_put(&root);
++		} else {
++			cprm.file = filp_open(cn.corename, open_flags, 0600);
++		}
+ 		if (IS_ERR(cprm.file))
+ 			goto fail_unlock;
+ 
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 2398f9f94337..7566b2689609 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -1667,7 +1667,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
+ 				DCACHE_OP_REVALIDATE	|
+ 				DCACHE_OP_WEAK_REVALIDATE	|
+ 				DCACHE_OP_DELETE	|
+-				DCACHE_OP_SELECT_INODE));
++				DCACHE_OP_SELECT_INODE	|
++				DCACHE_OP_REAL));
+ 	dentry->d_op = op;
+ 	if (!op)
+ 		return;
+@@ -1685,6 +1686,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
+ 		dentry->d_flags |= DCACHE_OP_PRUNE;
+ 	if (op->d_select_inode)
+ 		dentry->d_flags |= DCACHE_OP_SELECT_INODE;
++	if (op->d_real)
++		dentry->d_flags |= DCACHE_OP_REAL;
+ 
+ }
+ EXPORT_SYMBOL(d_set_d_op);
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index bece948b363d..8580831ed237 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -457,7 +457,7 @@ struct dentry *debugfs_create_automount(const char *name,
+ 	if (unlikely(!inode))
+ 		return failed_creating(dentry);
+ 
+-	inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
++	make_empty_dir_inode(inode);
+ 	inode->i_flags |= S_AUTOMOUNT;
+ 	inode->i_private = data;
+ 	dentry->d_fsdata = (void *)f;
+diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
+index 38f7562489bb..25634c353191 100644
+--- a/fs/ext4/crypto.c
++++ b/fs/ext4/crypto.c
+@@ -34,6 +34,7 @@
+ #include <linux/random.h>
+ #include <linux/scatterlist.h>
+ #include <linux/spinlock_types.h>
++#include <linux/namei.h>
+ 
+ #include "ext4_extents.h"
+ #include "xattr.h"
+@@ -475,13 +476,19 @@ uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
+  */
+ static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
+ {
+-	struct inode *dir = d_inode(dentry->d_parent);
+-	struct ext4_crypt_info *ci = EXT4_I(dir)->i_crypt_info;
++	struct dentry *dir;
++	struct ext4_crypt_info *ci;
+ 	int dir_has_key, cached_with_key;
+ 
+-	if (!ext4_encrypted_inode(dir))
+-		return 0;
++	if (flags & LOOKUP_RCU)
++		return -ECHILD;
+ 
++	dir = dget_parent(dentry);
++	if (!ext4_encrypted_inode(d_inode(dir))) {
++		dput(dir);
++		return 0;
++	}
++	ci = EXT4_I(d_inode(dir))->i_crypt_info;
+ 	if (ci && ci->ci_keyring_key &&
+ 	    (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
+ 					  (1 << KEY_FLAG_REVOKED) |
+@@ -491,6 +498,7 @@ static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
+ 	/* this should eventually be an flag in d_flags */
+ 	cached_with_key = dentry->d_fsdata != NULL;
+ 	dir_has_key = (ci != NULL);
++	dput(dir);
+ 
+ 	/*
+ 	 * If the dentry was cached without the key, and it is a
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 157b458a69d4..b213449a5d1b 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -900,6 +900,29 @@ do {									       \
+ #include "extents_status.h"
+ 
+ /*
++ * Lock subclasses for i_data_sem in the ext4_inode_info structure.
++ *
++ * These are needed to avoid lockdep false positives when we need to
++ * allocate blocks to the quota inode during ext4_map_blocks(), while
++ * holding i_data_sem for a normal (non-quota) inode.  Since we don't
++ * do quota tracking for the quota inode, this avoids deadlock (as
++ * well as infinite recursion, since it isn't turtles all the way
++ * down...)
++ *
++ *  I_DATA_SEM_NORMAL - Used for most inodes
++ *  I_DATA_SEM_OTHER  - Used by move_inode.c for the second normal inode
++ *			  where the second inode has larger inode number
++ *			  than the first
++ *  I_DATA_SEM_QUOTA  - Used for quota inodes only
++ */
++enum {
++	I_DATA_SEM_NORMAL = 0,
++	I_DATA_SEM_OTHER,
++	I_DATA_SEM_QUOTA,
++};
++
++
++/*
+  * fourth extended file system inode data in memory
+  */
+ struct ext4_inode_info {
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index 4cd318f31cbe..38847f38b34a 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -335,7 +335,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
+ 	struct super_block *sb = inode->i_sb;
+ 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ 	struct vfsmount *mnt = filp->f_path.mnt;
+-	struct inode *dir = filp->f_path.dentry->d_parent->d_inode;
++	struct dentry *dir;
+ 	struct path path;
+ 	char buf[64], *cp;
+ 	int ret;
+@@ -379,14 +379,18 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
+ 		if (ext4_encryption_info(inode) == NULL)
+ 			return -ENOKEY;
+ 	}
+-	if (ext4_encrypted_inode(dir) &&
+-	    !ext4_is_child_context_consistent_with_parent(dir, inode)) {
++
++	dir = dget_parent(file_dentry(filp));
++	if (ext4_encrypted_inode(d_inode(dir)) &&
++	    !ext4_is_child_context_consistent_with_parent(d_inode(dir), inode)) {
+ 		ext4_warning(inode->i_sb,
+ 			     "Inconsistent encryption contexts: %lu/%lu\n",
+-			     (unsigned long) dir->i_ino,
++			     (unsigned long) d_inode(dir)->i_ino,
+ 			     (unsigned long) inode->i_ino);
++		dput(dir);
+ 		return -EPERM;
+ 	}
++	dput(dir);
+ 	/*
+ 	 * Set up the jbd2_inode if we are opening the inode for
+ 	 * writing and the journal is present
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index aee960b1af34..e6218cbc8332 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -5261,6 +5261,8 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
+ 	might_sleep();
+ 	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
+ 	err = ext4_reserve_inode_write(handle, inode, &iloc);
++	if (err)
++		return err;
+ 	if (ext4_handle_valid(handle) &&
+ 	    EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
+ 	    !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
+@@ -5291,9 +5293,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
+ 			}
+ 		}
+ 	}
+-	if (!err)
+-		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
+-	return err;
++	return ext4_mark_iloc_dirty(handle, inode, &iloc);
+ }
+ 
+ /*
+diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
+index 4098acc701c3..796ff0eafd3c 100644
+--- a/fs/ext4/move_extent.c
++++ b/fs/ext4/move_extent.c
+@@ -60,10 +60,10 @@ ext4_double_down_write_data_sem(struct inode *first, struct inode *second)
+ {
+ 	if (first < second) {
+ 		down_write(&EXT4_I(first)->i_data_sem);
+-		down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING);
++		down_write_nested(&EXT4_I(second)->i_data_sem, I_DATA_SEM_OTHER);
+ 	} else {
+ 		down_write(&EXT4_I(second)->i_data_sem);
+-		down_write_nested(&EXT4_I(first)->i_data_sem, SINGLE_DEPTH_NESTING);
++		down_write_nested(&EXT4_I(first)->i_data_sem, I_DATA_SEM_OTHER);
+ 
+ 	}
+ }
+@@ -484,6 +484,13 @@ mext_check_arguments(struct inode *orig_inode,
+ 		return -EBUSY;
+ 	}
+ 
++	if (IS_NOQUOTA(orig_inode) || IS_NOQUOTA(donor_inode)) {
++		ext4_debug("ext4 move extent: The argument files should "
++			"not be quota files [ino:orig %lu, donor %lu]\n",
++			orig_inode->i_ino, donor_inode->i_ino);
++		return -EBUSY;
++	}
++
+ 	/* Ext4 move extent supports only extent based file */
+ 	if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
+ 		ext4_debug("ext4 move extent: orig file is not extents "
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 3ed01ec011d7..a76ca677fd1a 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1324,9 +1324,9 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
+ 		return -1;
+ 	}
+ 	if (ext4_has_feature_quota(sb)) {
+-		ext4_msg(sb, KERN_ERR, "Cannot set journaled quota options "
+-			 "when QUOTA feature is enabled");
+-		return -1;
++		ext4_msg(sb, KERN_INFO, "Journaled quota options "
++			 "ignored when QUOTA feature is enabled");
++		return 1;
+ 	}
+ 	qname = match_strdup(args);
+ 	if (!qname) {
+@@ -1689,10 +1689,10 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
+ 			return -1;
+ 		}
+ 		if (ext4_has_feature_quota(sb)) {
+-			ext4_msg(sb, KERN_ERR,
+-				 "Cannot set journaled quota options "
++			ext4_msg(sb, KERN_INFO,
++				 "Quota format mount options ignored "
+ 				 "when QUOTA feature is enabled");
+-			return -1;
++			return 1;
+ 		}
+ 		sbi->s_jquota_fmt = m->mount_opt;
+ #endif
+@@ -1753,11 +1753,11 @@ static int parse_options(char *options, struct super_block *sb,
+ #ifdef CONFIG_QUOTA
+ 	if (ext4_has_feature_quota(sb) &&
+ 	    (test_opt(sb, USRQUOTA) || test_opt(sb, GRPQUOTA))) {
+-		ext4_msg(sb, KERN_ERR, "Cannot set quota options when QUOTA "
+-			 "feature is enabled");
+-		return 0;
+-	}
+-	if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
++		ext4_msg(sb, KERN_INFO, "Quota feature enabled, usrquota and grpquota "
++			 "mount options ignored.");
++		clear_opt(sb, USRQUOTA);
++		clear_opt(sb, GRPQUOTA);
++	} else if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
+ 		if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
+ 			clear_opt(sb, USRQUOTA);
+ 
+@@ -5021,6 +5021,20 @@ static int ext4_quota_on_mount(struct super_block *sb, int type)
+ 					EXT4_SB(sb)->s_jquota_fmt, type);
+ }
+ 
++static void lockdep_set_quota_inode(struct inode *inode, int subclass)
++{
++	struct ext4_inode_info *ei = EXT4_I(inode);
++
++	/* The first argument of lockdep_set_subclass has to be
++	 * *exactly* the same as the argument to init_rwsem() --- in
++	 * this case, in init_once() --- or lockdep gets unhappy
++	 * because the name of the lock is set using the
++	 * stringification of the argument to init_rwsem().
++	 */
++	(void) ei;	/* shut up clang warning if !CONFIG_LOCKDEP */
++	lockdep_set_subclass(&ei->i_data_sem, subclass);
++}
++
+ /*
+  * Standard function to be called on quota_on
+  */
+@@ -5060,8 +5074,12 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
+ 		if (err)
+ 			return err;
+ 	}
+-
+-	return dquot_quota_on(sb, type, format_id, path);
++	lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
++	err = dquot_quota_on(sb, type, format_id, path);
++	if (err)
++		lockdep_set_quota_inode(path->dentry->d_inode,
++					     I_DATA_SEM_NORMAL);
++	return err;
+ }
+ 
+ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
+@@ -5088,8 +5106,11 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
+ 
+ 	/* Don't account quota for quota files to avoid recursion */
+ 	qf_inode->i_flags |= S_NOQUOTA;
++	lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
+ 	err = dquot_enable(qf_inode, type, format_id, flags);
+ 	iput(qf_inode);
++	if (err)
++		lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
+ 
+ 	return err;
+ }
+diff --git a/fs/f2fs/crypto_policy.c b/fs/f2fs/crypto_policy.c
+index d4a96af513c2..596f02490f27 100644
+--- a/fs/f2fs/crypto_policy.c
++++ b/fs/f2fs/crypto_policy.c
+@@ -192,7 +192,8 @@ int f2fs_inherit_context(struct inode *parent, struct inode *child,
+ 		return res;
+ 
+ 	ci = F2FS_I(parent)->i_crypt_info;
+-	BUG_ON(ci == NULL);
++	if (ci == NULL)
++		return -ENOKEY;
+ 
+ 	ctx.format = F2FS_ENCRYPTION_CONTEXT_FORMAT_V1;
+ 
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 5c06db17e41f..44802599fa67 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -67,7 +67,6 @@ static void f2fs_write_end_io(struct bio *bio)
+ 		f2fs_restore_and_release_control_page(&page);
+ 
+ 		if (unlikely(bio->bi_error)) {
+-			set_page_dirty(page);
+ 			set_bit(AS_EIO, &page->mapping->flags);
+ 			f2fs_stop_checkpoint(sbi);
+ 		}
+@@ -504,7 +503,7 @@ static int __allocate_data_blocks(struct inode *inode, loff_t offset,
+ 	struct dnode_of_data dn;
+ 	u64 start = F2FS_BYTES_TO_BLK(offset);
+ 	u64 len = F2FS_BYTES_TO_BLK(count);
+-	bool allocated;
++	bool allocated = false;
+ 	u64 end_offset;
+ 	int err = 0;
+ 
+@@ -546,7 +545,7 @@ static int __allocate_data_blocks(struct inode *inode, loff_t offset,
+ 		f2fs_put_dnode(&dn);
+ 		f2fs_unlock_op(sbi);
+ 
+-		f2fs_balance_fs(sbi, dn.node_changed);
++		f2fs_balance_fs(sbi, allocated);
+ 	}
+ 	return err;
+ 
+@@ -556,7 +555,7 @@ sync_out:
+ 	f2fs_put_dnode(&dn);
+ out:
+ 	f2fs_unlock_op(sbi);
+-	f2fs_balance_fs(sbi, dn.node_changed);
++	f2fs_balance_fs(sbi, allocated);
+ 	return err;
+ }
+ 
+@@ -650,14 +649,14 @@ get_next:
+ 	if (dn.ofs_in_node >= end_offset) {
+ 		if (allocated)
+ 			sync_inode_page(&dn);
+-		allocated = false;
+ 		f2fs_put_dnode(&dn);
+ 
+ 		if (create) {
+ 			f2fs_unlock_op(sbi);
+-			f2fs_balance_fs(sbi, dn.node_changed);
++			f2fs_balance_fs(sbi, allocated);
+ 			f2fs_lock_op(sbi);
+ 		}
++		allocated = false;
+ 
+ 		set_new_dnode(&dn, inode, NULL, NULL, 0);
+ 		err = get_dnode_of_data(&dn, pgofs, mode);
+@@ -715,7 +714,7 @@ put_out:
+ unlock_out:
+ 	if (create) {
+ 		f2fs_unlock_op(sbi);
+-		f2fs_balance_fs(sbi, dn.node_changed);
++		f2fs_balance_fs(sbi, allocated);
+ 	}
+ out:
+ 	trace_f2fs_map_blocks(inode, map, err);
+diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
+index faa7495e2d7e..30e6b6563494 100644
+--- a/fs/f2fs/dir.c
++++ b/fs/f2fs/dir.c
+@@ -892,11 +892,19 @@ out:
+ 	return err;
+ }
+ 
++static int f2fs_dir_open(struct inode *inode, struct file *filp)
++{
++	if (f2fs_encrypted_inode(inode))
++		return f2fs_get_encryption_info(inode) ? -EACCES : 0;
++	return 0;
++}
++
+ const struct file_operations f2fs_dir_operations = {
+ 	.llseek		= generic_file_llseek,
+ 	.read		= generic_read_dir,
+ 	.iterate	= f2fs_readdir,
+ 	.fsync		= f2fs_sync_file,
++	.open		= f2fs_dir_open,
+ 	.unlocked_ioctl	= f2fs_ioctl,
+ #ifdef CONFIG_COMPAT
+ 	.compat_ioctl   = f2fs_compat_ioctl,
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index ea272be62677..5a322bc00ac4 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -425,6 +425,8 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
+ 		err = f2fs_get_encryption_info(inode);
+ 		if (err)
+ 			return 0;
++		if (!f2fs_encrypted_inode(inode))
++			return -ENOKEY;
+ 	}
+ 
+ 	/* we don't need to use inline_data strictly */
+@@ -444,7 +446,9 @@ static int f2fs_file_open(struct inode *inode, struct file *filp)
+ 	if (!ret && f2fs_encrypted_inode(inode)) {
+ 		ret = f2fs_get_encryption_info(inode);
+ 		if (ret)
+-			ret = -EACCES;
++			return -EACCES;
++		if (!f2fs_encrypted_inode(inode))
++			return -ENOKEY;
+ 	}
+ 	return ret;
+ }
+diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
+index 6f944e5eb76e..7e9e38769660 100644
+--- a/fs/f2fs/namei.c
++++ b/fs/f2fs/namei.c
+@@ -980,12 +980,6 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
+ 	}
+ 	memcpy(cstr.name, sd->encrypted_path, cstr.len);
+ 
+-	/* this is broken symlink case */
+-	if (unlikely(cstr.name[0] == 0)) {
+-		res = -ENOENT;
+-		goto errout;
+-	}
+-
+ 	if ((cstr.len + sizeof(struct f2fs_encrypted_symlink_data) - 1) >
+ 								max_size) {
+ 		/* Symlink data on the disk is corrupted */
+@@ -1002,6 +996,12 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
+ 
+ 	kfree(cstr.name);
+ 
++	/* this is broken symlink case */
++	if (unlikely(pstr.name[0] == 0)) {
++		res = -ENOENT;
++		goto errout;
++	}
++
+ 	paddr = pstr.name;
+ 
+ 	/* Null-terminate the name */
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 6134832baaaf..013a62b2f8ca 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -926,9 +926,25 @@ static loff_t max_file_blocks(void)
+ 	return result;
+ }
+ 
++static int __f2fs_commit_super(struct buffer_head *bh,
++			struct f2fs_super_block *super)
++{
++	lock_buffer(bh);
++	if (super)
++		memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
++	set_buffer_uptodate(bh);
++	set_buffer_dirty(bh);
++	unlock_buffer(bh);
++
++	/* it's rare case, we can do fua all the time */
++	return __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
++}
++
+ static inline bool sanity_check_area_boundary(struct super_block *sb,
+-					struct f2fs_super_block *raw_super)
++					struct buffer_head *bh)
+ {
++	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
++					(bh->b_data + F2FS_SUPER_OFFSET);
+ 	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
+ 	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
+ 	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
+@@ -942,6 +958,10 @@ static inline bool sanity_check_area_boundary(struct super_block *sb,
+ 	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
+ 	u32 segment_count = le32_to_cpu(raw_super->segment_count);
+ 	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
++	u64 main_end_blkaddr = main_blkaddr +
++				(segment_count_main << log_blocks_per_seg);
++	u64 seg_end_blkaddr = segment0_blkaddr +
++				(segment_count << log_blocks_per_seg);
+ 
+ 	if (segment0_blkaddr != cp_blkaddr) {
+ 		f2fs_msg(sb, KERN_INFO,
+@@ -986,22 +1006,45 @@ static inline bool sanity_check_area_boundary(struct super_block *sb,
+ 		return true;
+ 	}
+ 
+-	if (main_blkaddr + (segment_count_main << log_blocks_per_seg) !=
+-		segment0_blkaddr + (segment_count << log_blocks_per_seg)) {
++	if (main_end_blkaddr > seg_end_blkaddr) {
+ 		f2fs_msg(sb, KERN_INFO,
+-			"Wrong MAIN_AREA boundary, start(%u) end(%u) blocks(%u)",
++			"Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
+ 			main_blkaddr,
+-			segment0_blkaddr + (segment_count << log_blocks_per_seg),
++			segment0_blkaddr +
++				(segment_count << log_blocks_per_seg),
+ 			segment_count_main << log_blocks_per_seg);
+ 		return true;
++	} else if (main_end_blkaddr < seg_end_blkaddr) {
++		int err = 0;
++		char *res;
++
++		/* fix in-memory information all the time */
++		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
++				segment0_blkaddr) >> log_blocks_per_seg);
++
++		if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
++			res = "internally";
++		} else {
++			err = __f2fs_commit_super(bh, NULL);
++			res = err ? "failed" : "done";
++		}
++		f2fs_msg(sb, KERN_INFO,
++			"Fix alignment : %s, start(%u) end(%u) block(%u)",
++			res, main_blkaddr,
++			segment0_blkaddr +
++				(segment_count << log_blocks_per_seg),
++			segment_count_main << log_blocks_per_seg);
++		if (err)
++			return true;
+ 	}
+-
+ 	return false;
+ }
+ 
+ static int sanity_check_raw_super(struct super_block *sb,
+-			struct f2fs_super_block *raw_super)
++				struct buffer_head *bh)
+ {
++	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
++					(bh->b_data + F2FS_SUPER_OFFSET);
+ 	unsigned int blocksize;
+ 
+ 	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
+@@ -1068,7 +1111,7 @@ static int sanity_check_raw_super(struct super_block *sb,
+ 	}
+ 
+ 	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
+-	if (sanity_check_area_boundary(sb, raw_super))
++	if (sanity_check_area_boundary(sb, bh))
+ 		return 1;
+ 
+ 	return 0;
+@@ -1134,103 +1177,87 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
+ 
+ /*
+  * Read f2fs raw super block.
+- * Because we have two copies of super block, so read the first one at first,
+- * if the first one is invalid, move to read the second one.
++ * Because we have two copies of super block, so read both of them
++ * to get the first valid one. If any one of them is broken, we pass
++ * them recovery flag back to the caller.
+  */
+ static int read_raw_super_block(struct super_block *sb,
+ 			struct f2fs_super_block **raw_super,
+ 			int *valid_super_block, int *recovery)
+ {
+-	int block = 0;
++	int block;
+ 	struct buffer_head *bh;
+-	struct f2fs_super_block *super, *buf;
++	struct f2fs_super_block *super;
+ 	int err = 0;
+ 
+ 	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
+ 	if (!super)
+ 		return -ENOMEM;
+-retry:
+-	bh = sb_bread(sb, block);
+-	if (!bh) {
+-		*recovery = 1;
+-		f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
++
++	for (block = 0; block < 2; block++) {
++		bh = sb_bread(sb, block);
++		if (!bh) {
++			f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
+ 				block + 1);
+-		err = -EIO;
+-		goto next;
+-	}
++			err = -EIO;
++			continue;
++		}
+ 
+-	buf = (struct f2fs_super_block *)(bh->b_data + F2FS_SUPER_OFFSET);
++		/* sanity checking of raw super */
++		if (sanity_check_raw_super(sb, bh)) {
++			f2fs_msg(sb, KERN_ERR,
++				"Can't find valid F2FS filesystem in %dth superblock",
++				block + 1);
++			err = -EINVAL;
++			brelse(bh);
++			continue;
++		}
+ 
+-	/* sanity checking of raw super */
+-	if (sanity_check_raw_super(sb, buf)) {
++		if (!*raw_super) {
++			memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
++							sizeof(*super));
++			*valid_super_block = block;
++			*raw_super = super;
++		}
+ 		brelse(bh);
+-		*recovery = 1;
+-		f2fs_msg(sb, KERN_ERR,
+-			"Can't find valid F2FS filesystem in %dth superblock",
+-								block + 1);
+-		err = -EINVAL;
+-		goto next;
+ 	}
+ 
+-	if (!*raw_super) {
+-		memcpy(super, buf, sizeof(*super));
+-		*valid_super_block = block;
+-		*raw_super = super;
+-	}
+-	brelse(bh);
+-
+-next:
+-	/* check the validity of the second superblock */
+-	if (block == 0) {
+-		block++;
+-		goto retry;
+-	}
++	/* Fail to read any one of the superblocks*/
++	if (err < 0)
++		*recovery = 1;
+ 
+ 	/* No valid superblock */
+-	if (!*raw_super) {
++	if (!*raw_super)
+ 		kfree(super);
+-		return err;
+-	}
++	else
++		err = 0;
+ 
+-	return 0;
++	return err;
+ }
+ 
+-static int __f2fs_commit_super(struct f2fs_sb_info *sbi, int block)
++int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
+ {
+-	struct f2fs_super_block *super = F2FS_RAW_SUPER(sbi);
+ 	struct buffer_head *bh;
+ 	int err;
+ 
+-	bh = sb_getblk(sbi->sb, block);
++	/* write back-up superblock first */
++	bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0: 1);
+ 	if (!bh)
+ 		return -EIO;
+-
+-	lock_buffer(bh);
+-	memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
+-	set_buffer_uptodate(bh);
+-	set_buffer_dirty(bh);
+-	unlock_buffer(bh);
+-
+-	/* it's rare case, we can do fua all the time */
+-	err = __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
++	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
+ 	brelse(bh);
+ 
+-	return err;
+-}
+-
+-int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
+-{
+-	int err;
+-
+-	/* write back-up superblock first */
+-	err = __f2fs_commit_super(sbi, sbi->valid_super_block ? 0 : 1);
+-
+ 	/* if we are in recovery path, skip writing valid superblock */
+ 	if (recover || err)
+ 		return err;
+ 
+ 	/* write current valid superblock */
+-	return __f2fs_commit_super(sbi, sbi->valid_super_block);
++	bh = sb_getblk(sbi->sb, sbi->valid_super_block);
++	if (!bh)
++		return -EIO;
++	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
++	brelse(bh);
++	return err;
+ }
+ 
+ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
+diff --git a/fs/fhandle.c b/fs/fhandle.c
+index d59712dfa3e7..ca3c3dd01789 100644
+--- a/fs/fhandle.c
++++ b/fs/fhandle.c
+@@ -228,7 +228,7 @@ long do_handle_open(int mountdirfd,
+ 		path_put(&path);
+ 		return fd;
+ 	}
+-	file = file_open_root(path.dentry, path.mnt, "", open_flag);
++	file = file_open_root(path.dentry, path.mnt, "", open_flag, 0);
+ 	if (IS_ERR(file)) {
+ 		put_unused_fd(fd);
+ 		retval =  PTR_ERR(file);
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 5c46ed9f3e14..fee81e8768c9 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -281,13 +281,15 @@ locked_inode_to_wb_and_lock_list(struct inode *inode)
+ 		wb_get(wb);
+ 		spin_unlock(&inode->i_lock);
+ 		spin_lock(&wb->list_lock);
+-		wb_put(wb);		/* not gonna deref it anymore */
+ 
+ 		/* i_wb may have changed inbetween, can't use inode_to_wb() */
+-		if (likely(wb == inode->i_wb))
+-			return wb;	/* @inode already has ref */
++		if (likely(wb == inode->i_wb)) {
++			wb_put(wb);	/* @inode already has ref */
++			return wb;
++		}
+ 
+ 		spin_unlock(&wb->list_lock);
++		wb_put(wb);
+ 		cpu_relax();
+ 		spin_lock(&inode->i_lock);
+ 	}
+@@ -1337,10 +1339,10 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
+  * we go e.g. from filesystem. Flusher thread uses __writeback_single_inode()
+  * and does more profound writeback list handling in writeback_sb_inodes().
+  */
+-static int
+-writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
+-		       struct writeback_control *wbc)
++static int writeback_single_inode(struct inode *inode,
++				  struct writeback_control *wbc)
+ {
++	struct bdi_writeback *wb;
+ 	int ret = 0;
+ 
+ 	spin_lock(&inode->i_lock);
+@@ -1378,7 +1380,8 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
+ 	ret = __writeback_single_inode(inode, wbc);
+ 
+ 	wbc_detach_inode(wbc);
+-	spin_lock(&wb->list_lock);
++
++	wb = inode_to_wb_and_lock_list(inode);
+ 	spin_lock(&inode->i_lock);
+ 	/*
+ 	 * If inode is clean, remove it from writeback lists. Otherwise don't
+@@ -1453,6 +1456,7 @@ static long writeback_sb_inodes(struct super_block *sb,
+ 
+ 	while (!list_empty(&wb->b_io)) {
+ 		struct inode *inode = wb_inode(wb->b_io.prev);
++		struct bdi_writeback *tmp_wb;
+ 
+ 		if (inode->i_sb != sb) {
+ 			if (work->sb) {
+@@ -1543,15 +1547,23 @@ static long writeback_sb_inodes(struct super_block *sb,
+ 			cond_resched();
+ 		}
+ 
+-
+-		spin_lock(&wb->list_lock);
++		/*
++		 * Requeue @inode if still dirty.  Be careful as @inode may
++		 * have been switched to another wb in the meantime.
++		 */
++		tmp_wb = inode_to_wb_and_lock_list(inode);
+ 		spin_lock(&inode->i_lock);
+ 		if (!(inode->i_state & I_DIRTY_ALL))
+ 			wrote++;
+-		requeue_inode(inode, wb, &wbc);
++		requeue_inode(inode, tmp_wb, &wbc);
+ 		inode_sync_complete(inode);
+ 		spin_unlock(&inode->i_lock);
+ 
++		if (unlikely(tmp_wb != wb)) {
++			spin_unlock(&tmp_wb->list_lock);
++			spin_lock(&wb->list_lock);
++		}
++
+ 		/*
+ 		 * bail out to wb_writeback() often enough to check
+ 		 * background threshold and other termination conditions.
+@@ -2338,7 +2350,6 @@ EXPORT_SYMBOL(sync_inodes_sb);
+  */
+ int write_inode_now(struct inode *inode, int sync)
+ {
+-	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
+ 	struct writeback_control wbc = {
+ 		.nr_to_write = LONG_MAX,
+ 		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
+@@ -2350,7 +2361,7 @@ int write_inode_now(struct inode *inode, int sync)
+ 		wbc.nr_to_write = 0;
+ 
+ 	might_sleep();
+-	return writeback_single_inode(inode, wb, &wbc);
++	return writeback_single_inode(inode, &wbc);
+ }
+ EXPORT_SYMBOL(write_inode_now);
+ 
+@@ -2367,7 +2378,7 @@ EXPORT_SYMBOL(write_inode_now);
+  */
+ int sync_inode(struct inode *inode, struct writeback_control *wbc)
+ {
+-	return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
++	return writeback_single_inode(inode, wbc);
+ }
+ EXPORT_SYMBOL(sync_inode);
+ 
+diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
+index 8e3ee1936c7e..c5b6b7165489 100644
+--- a/fs/fuse/cuse.c
++++ b/fs/fuse/cuse.c
+@@ -90,7 +90,7 @@ static struct list_head *cuse_conntbl_head(dev_t devt)
+ 
+ static ssize_t cuse_read_iter(struct kiocb *kiocb, struct iov_iter *to)
+ {
+-	struct fuse_io_priv io = { .async = 0, .file = kiocb->ki_filp };
++	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(kiocb->ki_filp);
+ 	loff_t pos = 0;
+ 
+ 	return fuse_direct_io(&io, to, &pos, FUSE_DIO_CUSE);
+@@ -98,7 +98,7 @@ static ssize_t cuse_read_iter(struct kiocb *kiocb, struct iov_iter *to)
+ 
+ static ssize_t cuse_write_iter(struct kiocb *kiocb, struct iov_iter *from)
+ {
+-	struct fuse_io_priv io = { .async = 0, .file = kiocb->ki_filp };
++	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(kiocb->ki_filp);
+ 	loff_t pos = 0;
+ 	/*
+ 	 * No locking or generic_write_checks(), the server is
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index b03d253ece15..416108b42412 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -528,6 +528,11 @@ static void fuse_release_user_pages(struct fuse_req *req, int write)
+ 	}
+ }
+ 
++static void fuse_io_release(struct kref *kref)
++{
++	kfree(container_of(kref, struct fuse_io_priv, refcnt));
++}
++
+ static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
+ {
+ 	if (io->err)
+@@ -585,8 +590,9 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
+ 		}
+ 
+ 		io->iocb->ki_complete(io->iocb, res, 0);
+-		kfree(io);
+ 	}
++
++	kref_put(&io->refcnt, fuse_io_release);
+ }
+ 
+ static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
+@@ -613,6 +619,7 @@ static size_t fuse_async_req_send(struct fuse_conn *fc, struct fuse_req *req,
+ 		size_t num_bytes, struct fuse_io_priv *io)
+ {
+ 	spin_lock(&io->lock);
++	kref_get(&io->refcnt);
+ 	io->size += num_bytes;
+ 	io->reqs++;
+ 	spin_unlock(&io->lock);
+@@ -691,7 +698,7 @@ static void fuse_short_read(struct fuse_req *req, struct inode *inode,
+ 
+ static int fuse_do_readpage(struct file *file, struct page *page)
+ {
+-	struct fuse_io_priv io = { .async = 0, .file = file };
++	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
+ 	struct inode *inode = page->mapping->host;
+ 	struct fuse_conn *fc = get_fuse_conn(inode);
+ 	struct fuse_req *req;
+@@ -984,7 +991,7 @@ static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
+ 	size_t res;
+ 	unsigned offset;
+ 	unsigned i;
+-	struct fuse_io_priv io = { .async = 0, .file = file };
++	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
+ 
+ 	for (i = 0; i < req->num_pages; i++)
+ 		fuse_wait_on_page_writeback(inode, req->pages[i]->index);
+@@ -1398,7 +1405,7 @@ static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
+ 
+ static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
+ {
+-	struct fuse_io_priv io = { .async = 0, .file = iocb->ki_filp };
++	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb->ki_filp);
+ 	return __fuse_direct_read(&io, to, &iocb->ki_pos);
+ }
+ 
+@@ -1406,7 +1413,7 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ {
+ 	struct file *file = iocb->ki_filp;
+ 	struct inode *inode = file_inode(file);
+-	struct fuse_io_priv io = { .async = 0, .file = file };
++	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
+ 	ssize_t res;
+ 
+ 	if (is_bad_inode(inode))
+@@ -2843,6 +2850,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
+ 	loff_t i_size;
+ 	size_t count = iov_iter_count(iter);
+ 	struct fuse_io_priv *io;
++	bool is_sync = is_sync_kiocb(iocb);
+ 
+ 	pos = offset;
+ 	inode = file->f_mapping->host;
+@@ -2863,6 +2871,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
+ 	if (!io)
+ 		return -ENOMEM;
+ 	spin_lock_init(&io->lock);
++	kref_init(&io->refcnt);
+ 	io->reqs = 1;
+ 	io->bytes = -1;
+ 	io->size = 0;
+@@ -2882,12 +2891,18 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
+ 	 * to wait on real async I/O requests, so we must submit this request
+ 	 * synchronously.
+ 	 */
+-	if (!is_sync_kiocb(iocb) && (offset + count > i_size) &&
++	if (!is_sync && (offset + count > i_size) &&
+ 	    iov_iter_rw(iter) == WRITE)
+ 		io->async = false;
+ 
+-	if (io->async && is_sync_kiocb(iocb))
++	if (io->async && is_sync) {
++		/*
++		 * Additional reference to keep io around after
++		 * calling fuse_aio_complete()
++		 */
++		kref_get(&io->refcnt);
+ 		io->done = &wait;
++	}
+ 
+ 	if (iov_iter_rw(iter) == WRITE) {
+ 		ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
+@@ -2900,14 +2915,14 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
+ 		fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
+ 
+ 		/* we have a non-extending, async request, so return */
+-		if (!is_sync_kiocb(iocb))
++		if (!is_sync)
+ 			return -EIOCBQUEUED;
+ 
+ 		wait_for_completion(&wait);
+ 		ret = fuse_get_res_by_io(io);
+ 	}
+ 
+-	kfree(io);
++	kref_put(&io->refcnt, fuse_io_release);
+ 
+ 	if (iov_iter_rw(iter) == WRITE) {
+ 		if (ret > 0)
+diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
+index ce394b5fe6b4..eddbe02c4028 100644
+--- a/fs/fuse/fuse_i.h
++++ b/fs/fuse/fuse_i.h
+@@ -22,6 +22,7 @@
+ #include <linux/rbtree.h>
+ #include <linux/poll.h>
+ #include <linux/workqueue.h>
++#include <linux/kref.h>
+ 
+ /** Max number of pages that can be used in a single read request */
+ #define FUSE_MAX_PAGES_PER_REQ 32
+@@ -243,6 +244,7 @@ struct fuse_args {
+ 
+ /** The request IO state (for asynchronous processing) */
+ struct fuse_io_priv {
++	struct kref refcnt;
+ 	int async;
+ 	spinlock_t lock;
+ 	unsigned reqs;
+@@ -256,6 +258,13 @@ struct fuse_io_priv {
+ 	struct completion *done;
+ };
+ 
++#define FUSE_IO_PRIV_SYNC(f) \
++{					\
++	.refcnt = { ATOMIC_INIT(1) },	\
++	.async = 0,			\
++	.file = f,			\
++}
++
+ /**
+  * Request flags
+  *
+diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
+index 458cf463047b..82067ca22f2b 100644
+--- a/fs/hpfs/super.c
++++ b/fs/hpfs/super.c
+@@ -15,6 +15,7 @@
+ #include <linux/sched.h>
+ #include <linux/bitmap.h>
+ #include <linux/slab.h>
++#include <linux/seq_file.h>
+ 
+ /* Mark the filesystem dirty, so that chkdsk checks it when os/2 booted */
+ 
+@@ -453,10 +454,6 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
+ 	int lowercase, eas, chk, errs, chkdsk, timeshift;
+ 	int o;
+ 	struct hpfs_sb_info *sbi = hpfs_sb(s);
+-	char *new_opts = kstrdup(data, GFP_KERNEL);
+-
+-	if (!new_opts)
+-		return -ENOMEM;
+ 
+ 	sync_filesystem(s);
+ 
+@@ -493,17 +490,44 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
+ 
+ 	if (!(*flags & MS_RDONLY)) mark_dirty(s, 1);
+ 
+-	replace_mount_options(s, new_opts);
+-
+ 	hpfs_unlock(s);
+ 	return 0;
+ 
+ out_err:
+ 	hpfs_unlock(s);
+-	kfree(new_opts);
+ 	return -EINVAL;
+ }
+ 
++static int hpfs_show_options(struct seq_file *seq, struct dentry *root)
++{
++	struct hpfs_sb_info *sbi = hpfs_sb(root->d_sb);
++
++	seq_printf(seq, ",uid=%u", from_kuid_munged(&init_user_ns, sbi->sb_uid));
++	seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, sbi->sb_gid));
++	seq_printf(seq, ",umask=%03o", (~sbi->sb_mode & 0777));
++	if (sbi->sb_lowercase)
++		seq_printf(seq, ",case=lower");
++	if (!sbi->sb_chk)
++		seq_printf(seq, ",check=none");
++	if (sbi->sb_chk == 2)
++		seq_printf(seq, ",check=strict");
++	if (!sbi->sb_err)
++		seq_printf(seq, ",errors=continue");
++	if (sbi->sb_err == 2)
++		seq_printf(seq, ",errors=panic");
++	if (!sbi->sb_chkdsk)
++		seq_printf(seq, ",chkdsk=no");
++	if (sbi->sb_chkdsk == 2)
++		seq_printf(seq, ",chkdsk=always");
++	if (!sbi->sb_eas)
++		seq_printf(seq, ",eas=no");
++	if (sbi->sb_eas == 1)
++		seq_printf(seq, ",eas=ro");
++	if (sbi->sb_timeshift)
++		seq_printf(seq, ",timeshift=%d", sbi->sb_timeshift);
++	return 0;
++}
++
+ /* Super operations */
+ 
+ static const struct super_operations hpfs_sops =
+@@ -514,7 +538,7 @@ static const struct super_operations hpfs_sops =
+ 	.put_super	= hpfs_put_super,
+ 	.statfs		= hpfs_statfs,
+ 	.remount_fs	= hpfs_remount_fs,
+-	.show_options	= generic_show_options,
++	.show_options	= hpfs_show_options,
+ };
+ 
+ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
+@@ -537,8 +561,6 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
+ 
+ 	int o;
+ 
+-	save_mount_options(s, options);
+-
+ 	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
+ 	if (!sbi) {
+ 		return -ENOMEM;
+diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
+index 5384ceb35b1c..98b3eb7d8eaf 100644
+--- a/fs/isofs/rock.c
++++ b/fs/isofs/rock.c
+@@ -203,6 +203,8 @@ int get_rock_ridge_filename(struct iso_directory_record *de,
+ 	int retnamlen = 0;
+ 	int truncate = 0;
+ 	int ret = 0;
++	char *p;
++	int len;
+ 
+ 	if (!ISOFS_SB(inode->i_sb)->s_rock)
+ 		return 0;
+@@ -267,12 +269,17 @@ repeat:
+ 					rr->u.NM.flags);
+ 				break;
+ 			}
+-			if ((strlen(retname) + rr->len - 5) >= 254) {
++			len = rr->len - 5;
++			if (retnamlen + len >= 254) {
+ 				truncate = 1;
+ 				break;
+ 			}
+-			strncat(retname, rr->u.NM.name, rr->len - 5);
+-			retnamlen += rr->len - 5;
++			p = memchr(rr->u.NM.name, '\0', len);
++			if (unlikely(p))
++				len = p - rr->u.NM.name;
++			memcpy(retname + retnamlen, rr->u.NM.name, len);
++			retnamlen += len;
++			retname[retnamlen] = '\0';
+ 			break;
+ 		case SIG('R', 'E'):
+ 			kfree(rs.buffer);
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 81e622681c82..624a57a9c4aa 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -1408,11 +1408,12 @@ out:
+ /**
+  * jbd2_mark_journal_empty() - Mark on disk journal as empty.
+  * @journal: The journal to update.
++ * @write_op: With which operation should we write the journal sb
+  *
+  * Update a journal's dynamic superblock fields to show that journal is empty.
+  * Write updated superblock to disk waiting for IO to complete.
+  */
+-static void jbd2_mark_journal_empty(journal_t *journal)
++static void jbd2_mark_journal_empty(journal_t *journal, int write_op)
+ {
+ 	journal_superblock_t *sb = journal->j_superblock;
+ 
+@@ -1430,7 +1431,7 @@ static void jbd2_mark_journal_empty(journal_t *journal)
+ 	sb->s_start    = cpu_to_be32(0);
+ 	read_unlock(&journal->j_state_lock);
+ 
+-	jbd2_write_superblock(journal, WRITE_FUA);
++	jbd2_write_superblock(journal, write_op);
+ 
+ 	/* Log is no longer empty */
+ 	write_lock(&journal->j_state_lock);
+@@ -1716,7 +1717,13 @@ int jbd2_journal_destroy(journal_t *journal)
+ 	if (journal->j_sb_buffer) {
+ 		if (!is_journal_aborted(journal)) {
+ 			mutex_lock(&journal->j_checkpoint_mutex);
+-			jbd2_mark_journal_empty(journal);
++
++			write_lock(&journal->j_state_lock);
++			journal->j_tail_sequence =
++				++journal->j_transaction_sequence;
++			write_unlock(&journal->j_state_lock);
++
++			jbd2_mark_journal_empty(journal, WRITE_FLUSH_FUA);
+ 			mutex_unlock(&journal->j_checkpoint_mutex);
+ 		} else
+ 			err = -EIO;
+@@ -1975,7 +1982,7 @@ int jbd2_journal_flush(journal_t *journal)
+ 	 * the magic code for a fully-recovered superblock.  Any future
+ 	 * commits of data to the journal will restore the current
+ 	 * s_start value. */
+-	jbd2_mark_journal_empty(journal);
++	jbd2_mark_journal_empty(journal, WRITE_FUA);
+ 	mutex_unlock(&journal->j_checkpoint_mutex);
+ 	write_lock(&journal->j_state_lock);
+ 	J_ASSERT(!journal->j_running_transaction);
+@@ -2021,7 +2028,7 @@ int jbd2_journal_wipe(journal_t *journal, int write)
+ 	if (write) {
+ 		/* Lock to make assertions happy... */
+ 		mutex_lock(&journal->j_checkpoint_mutex);
+-		jbd2_mark_journal_empty(journal);
++		jbd2_mark_journal_empty(journal, WRITE_FUA);
+ 		mutex_unlock(&journal->j_checkpoint_mutex);
+ 	}
+ 
+diff --git a/fs/namei.c b/fs/namei.c
+index 9c590e0f66e9..7824bfb89ada 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -2968,22 +2968,10 @@ no_open:
+ 		dentry = lookup_real(dir, dentry, nd->flags);
+ 		if (IS_ERR(dentry))
+ 			return PTR_ERR(dentry);
+-
+-		if (create_error) {
+-			int open_flag = op->open_flag;
+-
+-			error = create_error;
+-			if ((open_flag & O_EXCL)) {
+-				if (!dentry->d_inode)
+-					goto out;
+-			} else if (!dentry->d_inode) {
+-				goto out;
+-			} else if ((open_flag & O_TRUNC) &&
+-				   d_is_reg(dentry)) {
+-				goto out;
+-			}
+-			/* will fail later, go on to get the right error */
+-		}
++	}
++	if (create_error && !dentry->d_inode) {
++		error = create_error;
++		goto out;
+ 	}
+ looked_up:
+ 	path->dentry = dentry;
+@@ -4258,7 +4246,11 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ 	bool new_is_dir = false;
+ 	unsigned max_links = new_dir->i_sb->s_max_links;
+ 
+-	if (source == target)
++	/*
++	 * Check source == target.
++	 * On overlayfs need to look at underlying inodes.
++	 */
++	if (vfs_select_inode(old_dentry, 0) == vfs_select_inode(new_dentry, 0))
+ 		return 0;
+ 
+ 	error = may_delete(old_dir, old_dentry, is_dir);
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index 9cce67043f92..7ded17764754 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -377,7 +377,7 @@ int nfs_readdir_xdr_filler(struct page **pages, nfs_readdir_descriptor_t *desc,
+  again:
+ 	timestamp = jiffies;
+ 	gencount = nfs_inc_attr_generation_counter();
+-	error = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred, entry->cookie, pages,
++	error = NFS_PROTO(inode)->readdir(file_dentry(file), cred, entry->cookie, pages,
+ 					  NFS_SERVER(inode)->dtsize, desc->plus);
+ 	if (error < 0) {
+ 		/* We requested READDIRPLUS, but the server doesn't grok it */
+@@ -560,7 +560,7 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
+ 		count++;
+ 
+ 		if (desc->plus != 0)
+-			nfs_prime_dcache(desc->file->f_path.dentry, entry);
++			nfs_prime_dcache(file_dentry(desc->file), entry);
+ 
+ 		status = nfs_readdir_add_to_array(entry, page);
+ 		if (status != 0)
+@@ -864,7 +864,7 @@ static bool nfs_dir_mapping_need_revalidate(struct inode *dir)
+  */
+ static int nfs_readdir(struct file *file, struct dir_context *ctx)
+ {
+-	struct dentry	*dentry = file->f_path.dentry;
++	struct dentry	*dentry = file_dentry(file);
+ 	struct inode	*inode = d_inode(dentry);
+ 	nfs_readdir_descriptor_t my_desc,
+ 			*desc = &my_desc;
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 86faecf8f328..847b678af4f0 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -940,7 +940,7 @@ int nfs_open(struct inode *inode, struct file *filp)
+ {
+ 	struct nfs_open_context *ctx;
+ 
+-	ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
++	ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode);
+ 	if (IS_ERR(ctx))
+ 		return PTR_ERR(ctx);
+ 	nfs_file_set_open_context(filp, ctx);
+diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
+index 57ca1c8039c1..2a9ff14cfb3b 100644
+--- a/fs/nfs/nfs4file.c
++++ b/fs/nfs/nfs4file.c
+@@ -26,7 +26,7 @@ static int
+ nfs4_file_open(struct inode *inode, struct file *filp)
+ {
+ 	struct nfs_open_context *ctx;
+-	struct dentry *dentry = filp->f_path.dentry;
++	struct dentry *dentry = file_dentry(filp);
+ 	struct dentry *parent = NULL;
+ 	struct inode *dir;
+ 	unsigned openflags = filp->f_flags;
+@@ -57,7 +57,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
+ 	parent = dget_parent(dentry);
+ 	dir = d_inode(parent);
+ 
+-	ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
++	ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode);
+ 	err = PTR_ERR(ctx);
+ 	if (IS_ERR(ctx))
+ 		goto out;
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 4cba7865f496..f8082c7cde8b 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -878,6 +878,7 @@ nfsd4_secinfo(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 				    &exp, &dentry);
+ 	if (err)
+ 		return err;
++	fh_unlock(&cstate->current_fh);
+ 	if (d_really_is_negative(dentry)) {
+ 		exp_put(exp);
+ 		err = nfserr_noent;
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index d6ef0955a979..1600ec470ce7 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -1072,8 +1072,9 @@ nfsd4_decode_rename(struct nfsd4_compoundargs *argp, struct nfsd4_rename *rename
+ 
+ 	READ_BUF(4);
+ 	rename->rn_snamelen = be32_to_cpup(p++);
+-	READ_BUF(rename->rn_snamelen + 4);
++	READ_BUF(rename->rn_snamelen);
+ 	SAVEMEM(rename->rn_sname, rename->rn_snamelen);
++	READ_BUF(4);
+ 	rename->rn_tnamelen = be32_to_cpup(p++);
+ 	READ_BUF(rename->rn_tnamelen);
+ 	SAVEMEM(rename->rn_tname, rename->rn_tnamelen);
+@@ -1155,13 +1156,14 @@ nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclient
+ 	READ_BUF(8);
+ 	setclientid->se_callback_prog = be32_to_cpup(p++);
+ 	setclientid->se_callback_netid_len = be32_to_cpup(p++);
+-
+-	READ_BUF(setclientid->se_callback_netid_len + 4);
++	READ_BUF(setclientid->se_callback_netid_len);
+ 	SAVEMEM(setclientid->se_callback_netid_val, setclientid->se_callback_netid_len);
++	READ_BUF(4);
+ 	setclientid->se_callback_addr_len = be32_to_cpup(p++);
+ 
+-	READ_BUF(setclientid->se_callback_addr_len + 4);
++	READ_BUF(setclientid->se_callback_addr_len);
+ 	SAVEMEM(setclientid->se_callback_addr_val, setclientid->se_callback_addr_len);
++	READ_BUF(4);
+ 	setclientid->se_callback_ident = be32_to_cpup(p++);
+ 
+ 	DECODE_TAIL;
+@@ -1835,8 +1837,9 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
+ 
+ 	READ_BUF(4);
+ 	argp->taglen = be32_to_cpup(p++);
+-	READ_BUF(argp->taglen + 8);
++	READ_BUF(argp->taglen);
+ 	SAVEMEM(argp->tag, argp->taglen);
++	READ_BUF(8);
+ 	argp->minorversion = be32_to_cpup(p++);
+ 	argp->opcnt = be32_to_cpup(p++);
+ 	max_reply += 4 + (XDR_QUADLEN(argp->taglen) << 2);
+diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
+index 0cdf497c91ef..2162434728c0 100644
+--- a/fs/ocfs2/acl.c
++++ b/fs/ocfs2/acl.c
+@@ -322,3 +322,90 @@ struct posix_acl *ocfs2_iop_get_acl(struct inode *inode, int type)
+ 	brelse(di_bh);
+ 	return acl;
+ }
++
++int ocfs2_acl_chmod(struct inode *inode, struct buffer_head *bh)
++{
++	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
++	struct posix_acl *acl;
++	int ret;
++
++	if (S_ISLNK(inode->i_mode))
++		return -EOPNOTSUPP;
++
++	if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
++		return 0;
++
++	acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh);
++	if (IS_ERR(acl) || !acl)
++		return PTR_ERR(acl);
++	ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
++	if (ret)
++		return ret;
++	ret = ocfs2_set_acl(NULL, inode, NULL, ACL_TYPE_ACCESS,
++			    acl, NULL, NULL);
++	posix_acl_release(acl);
++	return ret;
++}
++
++/*
++ * Initialize the ACLs of a new inode. If parent directory has default ACL,
++ * then clone to new inode. Called from ocfs2_mknod.
++ */
++int ocfs2_init_acl(handle_t *handle,
++		   struct inode *inode,
++		   struct inode *dir,
++		   struct buffer_head *di_bh,
++		   struct buffer_head *dir_bh,
++		   struct ocfs2_alloc_context *meta_ac,
++		   struct ocfs2_alloc_context *data_ac)
++{
++	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
++	struct posix_acl *acl = NULL;
++	int ret = 0, ret2;
++	umode_t mode;
++
++	if (!S_ISLNK(inode->i_mode)) {
++		if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
++			acl = ocfs2_get_acl_nolock(dir, ACL_TYPE_DEFAULT,
++						   dir_bh);
++			if (IS_ERR(acl))
++				return PTR_ERR(acl);
++		}
++		if (!acl) {
++			mode = inode->i_mode & ~current_umask();
++			ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
++			if (ret) {
++				mlog_errno(ret);
++				goto cleanup;
++			}
++		}
++	}
++	if ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) && acl) {
++		if (S_ISDIR(inode->i_mode)) {
++			ret = ocfs2_set_acl(handle, inode, di_bh,
++					    ACL_TYPE_DEFAULT, acl,
++					    meta_ac, data_ac);
++			if (ret)
++				goto cleanup;
++		}
++		mode = inode->i_mode;
++		ret = __posix_acl_create(&acl, GFP_NOFS, &mode);
++		if (ret < 0)
++			return ret;
++
++		ret2 = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
++		if (ret2) {
++			mlog_errno(ret2);
++			ret = ret2;
++			goto cleanup;
++		}
++		if (ret > 0) {
++			ret = ocfs2_set_acl(handle, inode,
++					    di_bh, ACL_TYPE_ACCESS,
++					    acl, meta_ac, data_ac);
++		}
++	}
++cleanup:
++	posix_acl_release(acl);
++	return ret;
++}
+diff --git a/fs/ocfs2/acl.h b/fs/ocfs2/acl.h
+index 3fce68d08625..2783a75b3999 100644
+--- a/fs/ocfs2/acl.h
++++ b/fs/ocfs2/acl.h
+@@ -35,5 +35,10 @@ int ocfs2_set_acl(handle_t *handle,
+ 			 struct posix_acl *acl,
+ 			 struct ocfs2_alloc_context *meta_ac,
+ 			 struct ocfs2_alloc_context *data_ac);
++extern int ocfs2_acl_chmod(struct inode *, struct buffer_head *);
++extern int ocfs2_init_acl(handle_t *, struct inode *, struct inode *,
++			  struct buffer_head *, struct buffer_head *,
++			  struct ocfs2_alloc_context *,
++			  struct ocfs2_alloc_context *);
+ 
+ #endif /* OCFS2_ACL_H */
+diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
+index a76b9ea7722e..a2370e2c7295 100644
+--- a/fs/ocfs2/cluster/heartbeat.c
++++ b/fs/ocfs2/cluster/heartbeat.c
+@@ -1445,8 +1445,8 @@ static void o2hb_region_release(struct config_item *item)
+ 	debugfs_remove(reg->hr_debug_dir);
+ 	kfree(reg->hr_db_livenodes);
+ 	kfree(reg->hr_db_regnum);
+-	kfree(reg->hr_debug_elapsed_time);
+-	kfree(reg->hr_debug_pinned);
++	kfree(reg->hr_db_elapsed_time);
++	kfree(reg->hr_db_pinned);
+ 
+ 	spin_lock(&o2hb_live_lock);
+ 	list_del(&reg->hr_all_item);
+diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
+index e36d63ff1783..f90931335c6b 100644
+--- a/fs/ocfs2/dlm/dlmconvert.c
++++ b/fs/ocfs2/dlm/dlmconvert.c
+@@ -262,6 +262,7 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
+ 				  struct dlm_lock *lock, int flags, int type)
+ {
+ 	enum dlm_status status;
++	u8 old_owner = res->owner;
+ 
+ 	mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
+ 	     lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);
+@@ -287,6 +288,19 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
+ 		status = DLM_DENIED;
+ 		goto bail;
+ 	}
++
++	if (lock->ml.type == type && lock->ml.convert_type == LKM_IVMODE) {
++		mlog(0, "last convert request returned DLM_RECOVERING, but "
++		     "owner has already queued and sent ast to me. res %.*s, "
++		     "(cookie=%u:%llu, type=%d, conv=%d)\n",
++		     res->lockname.len, res->lockname.name,
++		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
++		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
++		     lock->ml.type, lock->ml.convert_type);
++		status = DLM_NORMAL;
++		goto bail;
++	}
++
+ 	res->state |= DLM_LOCK_RES_IN_PROGRESS;
+ 	/* move lock to local convert queue */
+ 	/* do not alter lock refcount.  switching lists. */
+@@ -316,11 +330,19 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
+ 	spin_lock(&res->spinlock);
+ 	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
+ 	lock->convert_pending = 0;
+-	/* if it failed, move it back to granted queue */
++	/* if it failed, move it back to granted queue.
++	 * if master returns DLM_NORMAL and then down before sending ast,
++	 * it may have already been moved to granted queue, reset to
++	 * DLM_RECOVERING and retry convert */
+ 	if (status != DLM_NORMAL) {
+ 		if (status != DLM_NOTQUEUED)
+ 			dlm_error(status);
+ 		dlm_revert_pending_convert(res, lock);
++	} else if ((res->state & DLM_LOCK_RES_RECOVERING) ||
++			(old_owner != res->owner)) {
++		mlog(0, "res %.*s is in recovering or has been recovered.\n",
++				res->lockname.len, res->lockname.name);
++		status = DLM_RECOVERING;
+ 	}
+ bail:
+ 	spin_unlock(&res->spinlock);
+diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
+index b94a425f0175..23d0ab881f6e 100644
+--- a/fs/ocfs2/dlm/dlmrecovery.c
++++ b/fs/ocfs2/dlm/dlmrecovery.c
+@@ -2071,7 +2071,6 @@ void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
+ 			dlm_lock_get(lock);
+ 			if (lock->convert_pending) {
+ 				/* move converting lock back to granted */
+-				BUG_ON(i != DLM_CONVERTING_LIST);
+ 				mlog(0, "node died with convert pending "
+ 				     "on %.*s. move back to granted list.\n",
+ 				     res->lockname.len, res->lockname.name);
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index 7cb38fdca229..3d60fda1cb09 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -1268,20 +1268,20 @@ bail_unlock_rw:
+ 	if (size_change)
+ 		ocfs2_rw_unlock(inode, 1);
+ bail:
+-	brelse(bh);
+ 
+ 	/* Release quota pointers in case we acquired them */
+ 	for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++)
+ 		dqput(transfer_to[qtype]);
+ 
+ 	if (!status && attr->ia_valid & ATTR_MODE) {
+-		status = posix_acl_chmod(inode, inode->i_mode);
++		status = ocfs2_acl_chmod(inode, bh);
+ 		if (status < 0)
+ 			mlog_errno(status);
+ 	}
+ 	if (inode_locked)
+ 		ocfs2_inode_unlock(inode, 1);
+ 
++	brelse(bh);
+ 	return status;
+ }
+ 
+diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
+index 6b3e87189a64..a8f1225e6d9b 100644
+--- a/fs/ocfs2/namei.c
++++ b/fs/ocfs2/namei.c
+@@ -259,7 +259,6 @@ static int ocfs2_mknod(struct inode *dir,
+ 	struct ocfs2_dir_lookup_result lookup = { NULL, };
+ 	sigset_t oldset;
+ 	int did_block_signals = 0;
+-	struct posix_acl *default_acl = NULL, *acl = NULL;
+ 	struct ocfs2_dentry_lock *dl = NULL;
+ 
+ 	trace_ocfs2_mknod(dir, dentry, dentry->d_name.len, dentry->d_name.name,
+@@ -367,12 +366,6 @@ static int ocfs2_mknod(struct inode *dir,
+ 		goto leave;
+ 	}
+ 
+-	status = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
+-	if (status) {
+-		mlog_errno(status);
+-		goto leave;
+-	}
+-
+ 	handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
+ 							    S_ISDIR(mode),
+ 							    xattr_credits));
+@@ -421,16 +414,8 @@ static int ocfs2_mknod(struct inode *dir,
+ 		inc_nlink(dir);
+ 	}
+ 
+-	if (default_acl) {
+-		status = ocfs2_set_acl(handle, inode, new_fe_bh,
+-				       ACL_TYPE_DEFAULT, default_acl,
+-				       meta_ac, data_ac);
+-	}
+-	if (!status && acl) {
+-		status = ocfs2_set_acl(handle, inode, new_fe_bh,
+-				       ACL_TYPE_ACCESS, acl,
+-				       meta_ac, data_ac);
+-	}
++	status = ocfs2_init_acl(handle, inode, dir, new_fe_bh, parent_fe_bh,
++			 meta_ac, data_ac);
+ 
+ 	if (status < 0) {
+ 		mlog_errno(status);
+@@ -472,10 +457,6 @@ static int ocfs2_mknod(struct inode *dir,
+ 	d_instantiate(dentry, inode);
+ 	status = 0;
+ leave:
+-	if (default_acl)
+-		posix_acl_release(default_acl);
+-	if (acl)
+-		posix_acl_release(acl);
+ 	if (status < 0 && did_quota_inode)
+ 		dquot_free_inode(inode);
+ 	if (handle)
+diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
+index 3eff031aaf26..9919964c5b3b 100644
+--- a/fs/ocfs2/refcounttree.c
++++ b/fs/ocfs2/refcounttree.c
+@@ -4248,20 +4248,12 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
+ 	struct inode *inode = d_inode(old_dentry);
+ 	struct buffer_head *old_bh = NULL;
+ 	struct inode *new_orphan_inode = NULL;
+-	struct posix_acl *default_acl, *acl;
+-	umode_t mode;
+ 
+ 	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
+ 		return -EOPNOTSUPP;
+ 
+-	mode = inode->i_mode;
+-	error = posix_acl_create(dir, &mode, &default_acl, &acl);
+-	if (error) {
+-		mlog_errno(error);
+-		return error;
+-	}
+ 
+-	error = ocfs2_create_inode_in_orphan(dir, mode,
++	error = ocfs2_create_inode_in_orphan(dir, inode->i_mode,
+ 					     &new_orphan_inode);
+ 	if (error) {
+ 		mlog_errno(error);
+@@ -4300,16 +4292,11 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
+ 	/* If the security isn't preserved, we need to re-initialize them. */
+ 	if (!preserve) {
+ 		error = ocfs2_init_security_and_acl(dir, new_orphan_inode,
+-						    &new_dentry->d_name,
+-						    default_acl, acl);
++						    &new_dentry->d_name);
+ 		if (error)
+ 			mlog_errno(error);
+ 	}
+ out:
+-	if (default_acl)
+-		posix_acl_release(default_acl);
+-	if (acl)
+-		posix_acl_release(acl);
+ 	if (!error) {
+ 		error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode,
+ 						       new_dentry);
+diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
+index 7d3d979f57d9..f19b7381a998 100644
+--- a/fs/ocfs2/xattr.c
++++ b/fs/ocfs2/xattr.c
+@@ -7216,12 +7216,10 @@ out:
+  */
+ int ocfs2_init_security_and_acl(struct inode *dir,
+ 				struct inode *inode,
+-				const struct qstr *qstr,
+-				struct posix_acl *default_acl,
+-				struct posix_acl *acl)
++				const struct qstr *qstr)
+ {
+-	struct buffer_head *dir_bh = NULL;
+ 	int ret = 0;
++	struct buffer_head *dir_bh = NULL;
+ 
+ 	ret = ocfs2_init_security_get(inode, dir, qstr, NULL);
+ 	if (ret) {
+@@ -7234,11 +7232,9 @@ int ocfs2_init_security_and_acl(struct inode *dir,
+ 		mlog_errno(ret);
+ 		goto leave;
+ 	}
+-
+-	if (!ret && default_acl)
+-		ret = ocfs2_iop_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
+-	if (!ret && acl)
+-		ret = ocfs2_iop_set_acl(inode, acl, ACL_TYPE_ACCESS);
++	ret = ocfs2_init_acl(NULL, inode, dir, NULL, dir_bh, NULL, NULL);
++	if (ret)
++		mlog_errno(ret);
+ 
+ 	ocfs2_inode_unlock(dir, 0);
+ 	brelse(dir_bh);
+diff --git a/fs/ocfs2/xattr.h b/fs/ocfs2/xattr.h
+index f10d5b93c366..1633cc15ea1f 100644
+--- a/fs/ocfs2/xattr.h
++++ b/fs/ocfs2/xattr.h
+@@ -94,7 +94,5 @@ int ocfs2_reflink_xattrs(struct inode *old_inode,
+ 			 bool preserve_security);
+ int ocfs2_init_security_and_acl(struct inode *dir,
+ 				struct inode *inode,
+-				const struct qstr *qstr,
+-				struct posix_acl *default_acl,
+-				struct posix_acl *acl);
++				const struct qstr *qstr);
+ #endif /* OCFS2_XATTR_H */
+diff --git a/fs/open.c b/fs/open.c
+index 55bdc75e2172..081d3d6df74b 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -840,16 +840,12 @@ EXPORT_SYMBOL(file_path);
+ int vfs_open(const struct path *path, struct file *file,
+ 	     const struct cred *cred)
+ {
+-	struct dentry *dentry = path->dentry;
+-	struct inode *inode = dentry->d_inode;
++	struct inode *inode = vfs_select_inode(path->dentry, file->f_flags);
+ 
+-	file->f_path = *path;
+-	if (dentry->d_flags & DCACHE_OP_SELECT_INODE) {
+-		inode = dentry->d_op->d_select_inode(dentry, file->f_flags);
+-		if (IS_ERR(inode))
+-			return PTR_ERR(inode);
+-	}
++	if (IS_ERR(inode))
++		return PTR_ERR(inode);
+ 
++	file->f_path = *path;
+ 	return do_dentry_open(file, inode, NULL, cred);
+ }
+ 
+@@ -992,14 +988,12 @@ struct file *filp_open(const char *filename, int flags, umode_t mode)
+ EXPORT_SYMBOL(filp_open);
+ 
+ struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt,
+-			    const char *filename, int flags)
++			    const char *filename, int flags, umode_t mode)
+ {
+ 	struct open_flags op;
+-	int err = build_open_flags(flags, 0, &op);
++	int err = build_open_flags(flags, mode, &op);
+ 	if (err)
+ 		return ERR_PTR(err);
+-	if (flags & O_CREAT)
+-		return ERR_PTR(-EINVAL);
+ 	return do_file_open_root(dentry, mnt, filename, &op);
+ }
+ EXPORT_SYMBOL(file_open_root);
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index 619ad4b016d2..4399ea804447 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -295,6 +295,37 @@ static void ovl_dentry_release(struct dentry *dentry)
+ 	}
+ }
+ 
++static struct dentry *ovl_d_real(struct dentry *dentry, struct inode *inode)
++{
++	struct dentry *real;
++
++	if (d_is_dir(dentry)) {
++		if (!inode || inode == d_inode(dentry))
++			return dentry;
++		goto bug;
++	}
++
++	real = ovl_dentry_upper(dentry);
++	if (real && (!inode || inode == d_inode(real)))
++		return real;
++
++	real = ovl_dentry_lower(dentry);
++	if (!real)
++		goto bug;
++
++	if (!inode || inode == d_inode(real))
++		return real;
++
++	/* Handle recursion */
++	if (real->d_flags & DCACHE_OP_REAL)
++		return real->d_op->d_real(real, inode);
++
++bug:
++	WARN(1, "ovl_d_real(%pd4, %s:%lu\n): real dentry not found\n", dentry,
++	     inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0);
++	return dentry;
++}
++
+ static int ovl_dentry_revalidate(struct dentry *dentry, unsigned int flags)
+ {
+ 	struct ovl_entry *oe = dentry->d_fsdata;
+@@ -339,11 +370,13 @@ static int ovl_dentry_weak_revalidate(struct dentry *dentry, unsigned int flags)
+ static const struct dentry_operations ovl_dentry_operations = {
+ 	.d_release = ovl_dentry_release,
+ 	.d_select_inode = ovl_d_select_inode,
++	.d_real = ovl_d_real,
+ };
+ 
+ static const struct dentry_operations ovl_reval_dentry_operations = {
+ 	.d_release = ovl_dentry_release,
+ 	.d_select_inode = ovl_d_select_inode,
++	.d_real = ovl_d_real,
+ 	.d_revalidate = ovl_dentry_revalidate,
+ 	.d_weak_revalidate = ovl_dentry_weak_revalidate,
+ };
+diff --git a/fs/pnode.c b/fs/pnode.c
+index c524fdddc7fb..99899705b105 100644
+--- a/fs/pnode.c
++++ b/fs/pnode.c
+@@ -198,7 +198,7 @@ static struct mount *next_group(struct mount *m, struct mount *origin)
+ 
+ /* all accesses are serialized by namespace_sem */
+ static struct user_namespace *user_ns;
+-static struct mount *last_dest, *last_source, *dest_master;
++static struct mount *last_dest, *first_source, *last_source, *dest_master;
+ static struct mountpoint *mp;
+ static struct hlist_head *list;
+ 
+@@ -221,20 +221,22 @@ static int propagate_one(struct mount *m)
+ 		type = CL_MAKE_SHARED;
+ 	} else {
+ 		struct mount *n, *p;
++		bool done;
+ 		for (n = m; ; n = p) {
+ 			p = n->mnt_master;
+-			if (p == dest_master || IS_MNT_MARKED(p)) {
+-				while (last_dest->mnt_master != p) {
+-					last_source = last_source->mnt_master;
+-					last_dest = last_source->mnt_parent;
+-				}
+-				if (!peers(n, last_dest)) {
+-					last_source = last_source->mnt_master;
+-					last_dest = last_source->mnt_parent;
+-				}
++			if (p == dest_master || IS_MNT_MARKED(p))
+ 				break;
+-			}
+ 		}
++		do {
++			struct mount *parent = last_source->mnt_parent;
++			if (last_source == first_source)
++				break;
++			done = parent->mnt_master == p;
++			if (done && peers(n, parent))
++				break;
++			last_source = last_source->mnt_master;
++		} while (!done);
++
+ 		type = CL_SLAVE;
+ 		/* beginning of peer group among the slaves? */
+ 		if (IS_MNT_SHARED(m))
+@@ -286,6 +288,7 @@ int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
+ 	 */
+ 	user_ns = current->nsproxy->mnt_ns->user_ns;
+ 	last_dest = dest_mnt;
++	first_source = source_mnt;
+ 	last_source = source_mnt;
+ 	mp = dest_mp;
+ 	list = tree_list;
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 4f764c2ac1a5..45f2162e55b2 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -955,7 +955,8 @@ static ssize_t environ_read(struct file *file, char __user *buf,
+ 	struct mm_struct *mm = file->private_data;
+ 	unsigned long env_start, env_end;
+ 
+-	if (!mm)
++	/* Ensure the process spawned far enough to have an environment. */
++	if (!mm || !mm->env_end)
+ 		return 0;
+ 
+ 	page = (char *)__get_free_page(GFP_TEMPORARY);
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index fa95ab2d3674..9d2f3e0a6360 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -1504,6 +1504,32 @@ static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
+ 	return page;
+ }
+ 
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
++					      struct vm_area_struct *vma,
++					      unsigned long addr)
++{
++	struct page *page;
++	int nid;
++
++	if (!pmd_present(pmd))
++		return NULL;
++
++	page = vm_normal_page_pmd(vma, addr, pmd);
++	if (!page)
++		return NULL;
++
++	if (PageReserved(page))
++		return NULL;
++
++	nid = page_to_nid(page);
++	if (!node_isset(nid, node_states[N_MEMORY]))
++		return NULL;
++
++	return page;
++}
++#endif
++
+ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
+ 		unsigned long end, struct mm_walk *walk)
+ {
+@@ -1513,14 +1539,14 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
+ 	pte_t *orig_pte;
+ 	pte_t *pte;
+ 
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ 	ptl = pmd_trans_huge_lock(pmd, vma);
+ 	if (ptl) {
+-		pte_t huge_pte = *(pte_t *)pmd;
+ 		struct page *page;
+ 
+-		page = can_gather_numa_stats(huge_pte, vma, addr);
++		page = can_gather_numa_stats_pmd(*pmd, vma, addr);
+ 		if (page)
+-			gather_stats(page, md, pte_dirty(huge_pte),
++			gather_stats(page, md, pmd_dirty(*pmd),
+ 				     HPAGE_PMD_SIZE/PAGE_SIZE);
+ 		spin_unlock(ptl);
+ 		return 0;
+@@ -1528,6 +1554,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
+ 
+ 	if (pmd_trans_unstable(pmd))
+ 		return 0;
++#endif
+ 	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+ 	do {
+ 		struct page *page = can_gather_numa_stats(*pte, vma, addr);
+diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
+index 2256e7e23e67..3f1190d18991 100644
+--- a/fs/proc_namespace.c
++++ b/fs/proc_namespace.c
+@@ -199,6 +199,8 @@ static int show_vfsstat(struct seq_file *m, struct vfsmount *mnt)
+ 	if (sb->s_op->show_devname) {
+ 		seq_puts(m, "device ");
+ 		err = sb->s_op->show_devname(m, mnt_path.dentry);
++		if (err)
++			goto out;
+ 	} else {
+ 		if (r->mnt_devname) {
+ 			seq_puts(m, "device ");
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index 3c3b81bb6dfe..850d17fa0aa3 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -1398,7 +1398,7 @@ static int dquot_active(const struct inode *inode)
+ static int __dquot_initialize(struct inode *inode, int type)
+ {
+ 	int cnt, init_needed = 0;
+-	struct dquot **dquots, *got[MAXQUOTAS];
++	struct dquot **dquots, *got[MAXQUOTAS] = {};
+ 	struct super_block *sb = inode->i_sb;
+ 	qsize_t rsv;
+ 	int ret = 0;
+@@ -1415,7 +1415,6 @@ static int __dquot_initialize(struct inode *inode, int type)
+ 		int rc;
+ 		struct dquot *dquot;
+ 
+-		got[cnt] = NULL;
+ 		if (type != -1 && cnt != type)
+ 			continue;
+ 		/*
+diff --git a/fs/splice.c b/fs/splice.c
+index 82bc0d64fc38..19e0b103d253 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -185,6 +185,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
+ 	unsigned int spd_pages = spd->nr_pages;
+ 	int ret, do_wakeup, page_nr;
+ 
++	if (!spd_pages)
++		return 0;
++
+ 	ret = 0;
+ 	do_wakeup = 0;
+ 	page_nr = 0;
+diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
+index 0ef7c2ed3f8a..4fa14820e2e2 100644
+--- a/fs/xfs/xfs_attr_list.c
++++ b/fs/xfs/xfs_attr_list.c
+@@ -202,8 +202,10 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
+ 					sbp->namelen,
+ 					sbp->valuelen,
+ 					&sbp->name[sbp->namelen]);
+-		if (error)
++		if (error) {
++			kmem_free(sbuf);
+ 			return error;
++		}
+ 		if (context->seen_enough)
+ 			break;
+ 		cursor->offset++;
+@@ -454,14 +456,13 @@ xfs_attr3_leaf_list_int(
+ 				args.rmtblkcnt = xfs_attr3_rmt_blocks(
+ 							args.dp->i_mount, valuelen);
+ 				retval = xfs_attr_rmtval_get(&args);
+-				if (retval)
+-					return retval;
+-				retval = context->put_listent(context,
+-						entry->flags,
+-						name_rmt->name,
+-						(int)name_rmt->namelen,
+-						valuelen,
+-						args.value);
++				if (!retval)
++					retval = context->put_listent(context,
++							entry->flags,
++							name_rmt->name,
++							(int)name_rmt->namelen,
++							valuelen,
++							args.value);
+ 				kmem_free(args.value);
+ 			} else {
+ 				retval = context->put_listent(context,
+diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
+index c30266e94806..8ef0ccbf8167 100644
+--- a/include/asm-generic/bitops/lock.h
++++ b/include/asm-generic/bitops/lock.h
+@@ -29,16 +29,16 @@ do {					\
+  * @nr: the bit to set
+  * @addr: the address to start counting from
+  *
+- * This operation is like clear_bit_unlock, however it is not atomic.
+- * It does provide release barrier semantics so it can be used to unlock
+- * a bit lock, however it would only be used if no other CPU can modify
+- * any bits in the memory until the lock is released (a good example is
+- * if the bit lock itself protects access to the other bits in the word).
++ * A weaker form of clear_bit_unlock() as used by __bit_lock_unlock(). If all
++ * the bits in the word are protected by this lock some archs can use weaker
++ * ops to safely unlock.
++ *
++ * See for example x86's implementation.
+  */
+ #define __clear_bit_unlock(nr, addr)	\
+ do {					\
+-	smp_mb();			\
+-	__clear_bit(nr, addr);		\
++	smp_mb__before_atomic();	\
++	clear_bit(nr, addr);		\
+ } while (0)
+ 
+ #endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
+diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
+index e56272c919b5..bf2d34c9d804 100644
+--- a/include/asm-generic/futex.h
++++ b/include/asm-generic/futex.h
+@@ -108,11 +108,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ 	u32 val;
+ 
+ 	preempt_disable();
+-	if (unlikely(get_user(val, uaddr) != 0))
++	if (unlikely(get_user(val, uaddr) != 0)) {
++		preempt_enable();
+ 		return -EFAULT;
++	}
+ 
+-	if (val == oldval && unlikely(put_user(newval, uaddr) != 0))
++	if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
++		preempt_enable();
+ 		return -EFAULT;
++	}
+ 
+ 	*uval = val;
+ 	preempt_enable();
+diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
+index 39e1cb201b8e..332da3ad8eb5 100644
+--- a/include/asm-generic/qspinlock.h
++++ b/include/asm-generic/qspinlock.h
+@@ -28,7 +28,30 @@
+  */
+ static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
+ {
+-	return atomic_read(&lock->val);
++	/*
++	 * queued_spin_lock_slowpath() can ACQUIRE the lock before
++	 * issuing the unordered store that sets _Q_LOCKED_VAL.
++	 *
++	 * See both smp_cond_acquire() sites for more detail.
++	 *
++	 * This however means that in code like:
++	 *
++	 *   spin_lock(A)		spin_lock(B)
++	 *   spin_unlock_wait(B)	spin_is_locked(A)
++	 *   do_something()		do_something()
++	 *
++	 * Both CPUs can end up running do_something() because the store
++	 * setting _Q_LOCKED_VAL will pass through the loads in
++	 * spin_unlock_wait() and/or spin_is_locked().
++	 *
++	 * Avoid this by issuing a full memory barrier between the spin_lock()
++	 * and the loads in spin_unlock_wait() and spin_is_locked().
++	 *
++	 * Note that regular mutual exclusion doesn't care about this
++	 * delayed store.
++	 */
++	smp_mb();
++	return atomic_read(&lock->val) & _Q_LOCKED_MASK;
+ }
+ 
+ /**
+@@ -108,6 +131,8 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
+  */
+ static inline void queued_spin_unlock_wait(struct qspinlock *lock)
+ {
++	/* See queued_spin_is_locked() */
++	smp_mb();
+ 	while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
+ 		cpu_relax();
+ }
+diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h
+index 3d1a3af5cf59..a2508a8f9a9c 100644
+--- a/include/asm-generic/siginfo.h
++++ b/include/asm-generic/siginfo.h
+@@ -17,21 +17,6 @@
+ struct siginfo;
+ void do_schedule_next_timer(struct siginfo *info);
+ 
+-#ifndef HAVE_ARCH_COPY_SIGINFO
+-
+-#include <linux/string.h>
+-
+-static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
+-{
+-	if (from->si_code < 0)
+-		memcpy(to, from, sizeof(*to));
+-	else
+-		/* _sigchld is currently the largest know union member */
+-		memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
+-}
+-
+-#endif
+-
+ extern int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from);
+ 
+ #endif
+diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
+index 461a0558bca4..cebecff536a3 100644
+--- a/include/drm/drm_cache.h
++++ b/include/drm/drm_cache.h
+@@ -39,6 +39,8 @@ static inline bool drm_arch_can_wc_memory(void)
+ {
+ #if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
+ 	return false;
++#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
++	return false;
+ #else
+ 	return true;
+ #endif
+diff --git a/include/keys/trusted-type.h b/include/keys/trusted-type.h
+index 42cf2d991bf4..4ea7e55f20b0 100644
+--- a/include/keys/trusted-type.h
++++ b/include/keys/trusted-type.h
+@@ -38,7 +38,7 @@ struct trusted_key_options {
+ 	unsigned char pcrinfo[MAX_PCRINFO_SIZE];
+ 	int pcrlock;
+ 	uint32_t hash;
+-	uint32_t digest_len;
++	uint32_t policydigest_len;
+ 	unsigned char policydigest[MAX_DIGEST_SIZE];
+ 	uint32_t policyhandle;
+ };
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 83d1926c61e4..67bc2da5d233 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -165,12 +165,13 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl);
+ void bpf_register_map_type(struct bpf_map_type_list *tl);
+ 
+ struct bpf_prog *bpf_prog_get(u32 ufd);
++struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog);
+ void bpf_prog_put(struct bpf_prog *prog);
+ void bpf_prog_put_rcu(struct bpf_prog *prog);
+ 
+ struct bpf_map *bpf_map_get_with_uref(u32 ufd);
+ struct bpf_map *__bpf_map_get(struct fd f);
+-void bpf_map_inc(struct bpf_map *map, bool uref);
++struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref);
+ void bpf_map_put_with_uref(struct bpf_map *map);
+ void bpf_map_put(struct bpf_map *map);
+ 
+diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
+index 735f9f8c4e43..5261751f6bd4 100644
+--- a/include/linux/can/dev.h
++++ b/include/linux/can/dev.h
+@@ -40,8 +40,11 @@ struct can_priv {
+ 	struct can_clock clock;
+ 
+ 	enum can_state state;
+-	u32 ctrlmode;
+-	u32 ctrlmode_supported;
++
++	/* CAN controller features - see include/uapi/linux/can/netlink.h */
++	u32 ctrlmode;		/* current options setting */
++	u32 ctrlmode_supported;	/* options that can be modified by netlink */
++	u32 ctrlmode_static;	/* static enabled options for driver/hardware */
+ 
+ 	int restart_ms;
+ 	struct timer_list restart_timer;
+@@ -108,6 +111,21 @@ static inline bool can_is_canfd_skb(const struct sk_buff *skb)
+ 	return skb->len == CANFD_MTU;
+ }
+ 
++/* helper to define static CAN controller features at device creation time */
++static inline void can_set_static_ctrlmode(struct net_device *dev,
++					   u32 static_mode)
++{
++	struct can_priv *priv = netdev_priv(dev);
++
++	/* alloc_candev() succeeded => netdev_priv() is valid at this point */
++	priv->ctrlmode = static_mode;
++	priv->ctrlmode_static = static_mode;
++
++	/* override MTU which was set by default in can_setup()? */
++	if (static_mode & CAN_CTRLMODE_FD)
++		dev->mtu = CANFD_MTU;
++}
++
+ /* get data length from can_dlc with sanitized can_dlc */
+ u8 can_dlc2len(u8 can_dlc);
+ 
+diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
+index 789471dba6fb..7fc7cb7872e3 100644
+--- a/include/linux/cgroup-defs.h
++++ b/include/linux/cgroup-defs.h
+@@ -210,6 +210,9 @@ struct css_set {
+ 	/* all css_task_iters currently walking this cset */
+ 	struct list_head task_iters;
+ 
++	/* dead and being drained, ignore for migration */
++	bool dead;
++
+ 	/* For RCU-protected deletion */
+ 	struct rcu_head rcu_head;
+ };
+@@ -439,6 +442,7 @@ struct cgroup_subsys {
+ 	int (*can_attach)(struct cgroup_taskset *tset);
+ 	void (*cancel_attach)(struct cgroup_taskset *tset);
+ 	void (*attach)(struct cgroup_taskset *tset);
++	void (*post_attach)(void);
+ 	int (*can_fork)(struct task_struct *task);
+ 	void (*cancel_fork)(struct task_struct *task);
+ 	void (*fork)(struct task_struct *task);
+diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
+index 1143e38555a4..408a60dca353 100644
+--- a/include/linux/clk-provider.h
++++ b/include/linux/clk-provider.h
+@@ -385,6 +385,7 @@ struct clk_divider {
+ #define CLK_DIVIDER_MAX_AT_ZERO		BIT(6)
+ 
+ extern const struct clk_ops clk_divider_ops;
++extern const struct clk_ops clk_divider_ro_ops;
+ 
+ unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
+ 		unsigned int val, const struct clk_div_table *table,
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index 22ab246feed3..eeae401a2412 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -199,7 +199,7 @@
+ #define unreachable() __builtin_unreachable()
+ 
+ /* Mark a function definition as prohibited from being cloned. */
+-#define __noclone	__attribute__((__noclone__))
++#define __noclone	__attribute__((__noclone__, __optimize__("no-tracer")))
+ 
+ #endif /* GCC_VERSION >= 40500 */
+ 
+diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
+index fea160ee5803..85a868ccb493 100644
+--- a/include/linux/cpuset.h
++++ b/include/linux/cpuset.h
+@@ -137,8 +137,6 @@ static inline void set_mems_allowed(nodemask_t nodemask)
+ 	task_unlock(current);
+ }
+ 
+-extern void cpuset_post_attach_flush(void);
+-
+ #else /* !CONFIG_CPUSETS */
+ 
+ static inline bool cpusets_enabled(void) { return false; }
+@@ -245,10 +243,6 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
+ 	return false;
+ }
+ 
+-static inline void cpuset_post_attach_flush(void)
+-{
+-}
+-
+ #endif /* !CONFIG_CPUSETS */
+ 
+ #endif /* _LINUX_CPUSET_H */
+diff --git a/include/linux/dcache.h b/include/linux/dcache.h
+index c4b5f4b3f8f8..96c1a2da92d7 100644
+--- a/include/linux/dcache.h
++++ b/include/linux/dcache.h
+@@ -161,6 +161,7 @@ struct dentry_operations {
+ 	struct vfsmount *(*d_automount)(struct path *);
+ 	int (*d_manage)(struct dentry *, bool);
+ 	struct inode *(*d_select_inode)(struct dentry *, unsigned);
++	struct dentry *(*d_real)(struct dentry *, struct inode *);
+ } ____cacheline_aligned;
+ 
+ /*
+@@ -227,6 +228,7 @@ struct dentry_operations {
+ #define DCACHE_MAY_FREE			0x00800000
+ #define DCACHE_FALLTHRU			0x01000000 /* Fall through to lower layer */
+ #define DCACHE_OP_SELECT_INODE		0x02000000 /* Unioned entry: dcache op selects inode */
++#define DCACHE_OP_REAL			0x08000000
+ 
+ extern seqlock_t rename_lock;
+ 
+@@ -582,4 +584,24 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper)
+ 	return upper;
+ }
+ 
++static inline struct dentry *d_real(struct dentry *dentry)
++{
++	if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
++		return dentry->d_op->d_real(dentry, NULL);
++	else
++		return dentry;
++}
++
++static inline struct inode *vfs_select_inode(struct dentry *dentry,
++					     unsigned open_flags)
++{
++	struct inode *inode = d_inode(dentry);
++
++	if (inode && unlikely(dentry->d_flags & DCACHE_OP_SELECT_INODE))
++		inode = dentry->d_op->d_select_inode(dentry, open_flags);
++
++	return inode;
++}
++
++
+ #endif	/* __LINUX_DCACHE_H */
+diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
+index ec1c61c87d89..899ab9f8549e 100644
+--- a/include/linux/device-mapper.h
++++ b/include/linux/device-mapper.h
+@@ -124,6 +124,8 @@ struct dm_dev {
+ 	char name[16];
+ };
+ 
++dev_t dm_get_dev_t(const char *path);
++
+ /*
+  * Constructors should call these functions to ensure destination devices
+  * are opened/closed correctly.
+diff --git a/include/linux/filter.h b/include/linux/filter.h
+index 43aa1f8855c7..a51a5361695f 100644
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -465,10 +465,14 @@ int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
+ void bpf_prog_destroy(struct bpf_prog *fp);
+ 
+ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
++int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk,
++		       bool locked);
+ int sk_attach_bpf(u32 ufd, struct sock *sk);
+ int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
+ int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
+ int sk_detach_filter(struct sock *sk);
++int __sk_detach_filter(struct sock *sk, bool locked);
++
+ int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
+ 		  unsigned int len);
+ 
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index ae681002100a..83c77b093ce2 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1234,6 +1234,16 @@ static inline struct inode *file_inode(const struct file *f)
+ 	return f->f_inode;
+ }
+ 
++static inline struct dentry *file_dentry(const struct file *file)
++{
++	struct dentry *dentry = file->f_path.dentry;
++
++	if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
++		return dentry->d_op->d_real(dentry, file_inode(file));
++	else
++		return dentry;
++}
++
+ static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
+ {
+ 	return locks_lock_inode_wait(file_inode(filp), fl);
+@@ -2259,7 +2269,7 @@ extern long do_sys_open(int dfd, const char __user *filename, int flags,
+ extern struct file *file_open_name(struct filename *, int, umode_t);
+ extern struct file *filp_open(const char *, int, umode_t);
+ extern struct file *file_open_root(struct dentry *, struct vfsmount *,
+-				   const char *, int);
++				   const char *, int, umode_t);
+ extern struct file * dentry_open(const struct path *, int, const struct cred *);
+ extern int filp_close(struct file *, fl_owner_t id);
+ 
+diff --git a/include/linux/hash.h b/include/linux/hash.h
+index 1afde47e1528..79c52fa81cac 100644
+--- a/include/linux/hash.h
++++ b/include/linux/hash.h
+@@ -32,12 +32,28 @@
+ #error Wordsize not 32 or 64
+ #endif
+ 
++/*
++ * The above primes are actively bad for hashing, since they are
++ * too sparse. The 32-bit one is mostly ok, the 64-bit one causes
++ * real problems. Besides, the "prime" part is pointless for the
++ * multiplicative hash.
++ *
++ * Although a random odd number will do, it turns out that the golden
++ * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
++ * properties.
++ *
++ * These are the negative, (1 - phi) = (phi^2) = (3 - sqrt(5))/2.
++ * (See Knuth vol 3, section 6.4, exercise 9.)
++ */
++#define GOLDEN_RATIO_32 0x61C88647
++#define GOLDEN_RATIO_64 0x61C8864680B583EBull
++
+ static __always_inline u64 hash_64(u64 val, unsigned int bits)
+ {
+ 	u64 hash = val;
+ 
+-#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
+-	hash = hash * GOLDEN_RATIO_PRIME_64;
++#if BITS_PER_LONG == 64
++	hash = hash * GOLDEN_RATIO_64;
+ #else
+ 	/*  Sigh, gcc can't optimise this alone like it does for 32 bits. */
+ 	u64 n = hash;
+diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
+index a338a688ee4a..dcb89e3515db 100644
+--- a/include/linux/if_bridge.h
++++ b/include/linux/if_bridge.h
+@@ -46,10 +46,6 @@ struct br_ip_list {
+ #define BR_LEARNING_SYNC	BIT(9)
+ #define BR_PROXYARP_WIFI	BIT(10)
+ 
+-/* values as per ieee8021QBridgeFdbAgingTime */
+-#define BR_MIN_AGEING_TIME	(10 * HZ)
+-#define BR_MAX_AGEING_TIME	(1000000 * HZ)
+-
+ #define BR_DEFAULT_AGEING_TIME	(300 * HZ)
+ 
+ extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
+diff --git a/include/linux/kernel.h b/include/linux/kernel.h
+index f31638c6e873..95452f72349a 100644
+--- a/include/linux/kernel.h
++++ b/include/linux/kernel.h
+@@ -635,7 +635,7 @@ do {							\
+ 
+ #define do_trace_printk(fmt, args...)					\
+ do {									\
+-	static const char *trace_printk_fmt				\
++	static const char *trace_printk_fmt __used			\
+ 		__attribute__((section("__trace_printk_fmt"))) =	\
+ 		__builtin_constant_p(fmt) ? fmt : NULL;			\
+ 									\
+@@ -679,7 +679,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
+  */
+ 
+ #define trace_puts(str) ({						\
+-	static const char *trace_printk_fmt				\
++	static const char *trace_printk_fmt __used			\
+ 		__attribute__((section("__trace_printk_fmt"))) =	\
+ 		__builtin_constant_p(str) ? str : NULL;			\
+ 									\
+@@ -701,7 +701,7 @@ extern void trace_dump_stack(int skip);
+ #define ftrace_vprintk(fmt, vargs)					\
+ do {									\
+ 	if (__builtin_constant_p(fmt)) {				\
+-		static const char *trace_printk_fmt			\
++		static const char *trace_printk_fmt __used		\
+ 		  __attribute__((section("__trace_printk_fmt"))) =	\
+ 			__builtin_constant_p(fmt) ? fmt : NULL;		\
+ 									\
+diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h
+index b288965e8101..2c14eeca46f0 100644
+--- a/include/linux/mfd/samsung/s2mps11.h
++++ b/include/linux/mfd/samsung/s2mps11.h
+@@ -173,10 +173,12 @@ enum s2mps11_regulators {
+ 
+ #define S2MPS11_LDO_VSEL_MASK	0x3F
+ #define S2MPS11_BUCK_VSEL_MASK	0xFF
++#define S2MPS11_BUCK9_VSEL_MASK	0x1F
+ #define S2MPS11_ENABLE_MASK	(0x03 << S2MPS11_ENABLE_SHIFT)
+ #define S2MPS11_ENABLE_SHIFT	0x06
+ #define S2MPS11_LDO_N_VOLTAGES	(S2MPS11_LDO_VSEL_MASK + 1)
+ #define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1)
++#define S2MPS11_BUCK9_N_VOLTAGES (S2MPS11_BUCK9_VSEL_MASK + 1)
+ #define S2MPS11_RAMP_DELAY	25000		/* uV/us */
+ 
+ #define S2MPS11_CTRL1_PWRHOLD_MASK	BIT(4)
+diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
+index 987764afa65c..f8b83792939b 100644
+--- a/include/linux/mlx5/device.h
++++ b/include/linux/mlx5/device.h
+@@ -363,6 +363,17 @@ enum {
+ 	MLX5_CAP_OFF_CMDIF_CSUM		= 46,
+ };
+ 
++enum {
++	/*
++	 * Max wqe size for rdma read is 512 bytes, so this
++	 * limits our max_sge_rd as the wqe needs to fit:
++	 * - ctrl segment (16 bytes)
++	 * - rdma segment (16 bytes)
++	 * - scatter elements (16 bytes each)
++	 */
++	MLX5_MAX_SGE_RD	= (512 - 16 - 16) / 16
++};
++
+ struct mlx5_inbox_hdr {
+ 	__be16		opcode;
+ 	u8		rsvd[4];
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index 1e3006dcf35d..46dd88e7071b 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -813,9 +813,9 @@ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+ 				 enum mlx5_port_status *status);
+ 
+-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
+-void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
+-void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
++int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
++void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);
++void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
+ 			      u8 port);
+ 
+ int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
+diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
+index 123771003e68..a3f3c71124d3 100644
+--- a/include/linux/mlx5/vport.h
++++ b/include/linux/mlx5/vport.h
+@@ -45,6 +45,8 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
+ 				     u16 vport, u8 *addr);
+ int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
+ 				      u16 vport, u8 *addr);
++int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
++int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
+ int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
+ 					   u64 *system_image_guid);
+ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 516e14944339..e4e59f9d82f1 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -456,11 +456,20 @@ static inline int page_mapcount(struct page *page)
+ 
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ int total_mapcount(struct page *page);
++int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
+ #else
+ static inline int total_mapcount(struct page *page)
+ {
+ 	return page_mapcount(page);
+ }
++static inline int page_trans_huge_mapcount(struct page *page,
++					   int *total_mapcount)
++{
++	int mapcount = page_mapcount(page);
++	if (total_mapcount)
++		*total_mapcount = mapcount;
++	return mapcount;
++}
+ #endif
+ 
+ static inline int page_count(struct page *page)
+@@ -1010,6 +1019,8 @@ static inline bool page_mapped(struct page *page)
+ 	page = compound_head(page);
+ 	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
+ 		return true;
++	if (PageHuge(page))
++		return false;
+ 	for (i = 0; i < hpage_nr_pages(page); i++) {
+ 		if (atomic_read(&page[i]._mapcount) >= 0)
+ 			return true;
+@@ -1117,6 +1128,8 @@ struct zap_details {
+ 
+ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+ 		pte_t pte);
++struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
++				pmd_t pmd);
+ 
+ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+ 		unsigned long size);
+diff --git a/include/linux/net.h b/include/linux/net.h
+index 0b4ac7da583a..25ef630f1bd6 100644
+--- a/include/linux/net.h
++++ b/include/linux/net.h
+@@ -245,7 +245,15 @@ do {								\
+ 	net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
+ #define net_info_ratelimited(fmt, ...)				\
+ 	net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
+-#if defined(DEBUG)
++#if defined(CONFIG_DYNAMIC_DEBUG)
++#define net_dbg_ratelimited(fmt, ...)					\
++do {									\
++	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);			\
++	if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) &&	\
++	    net_ratelimit())						\
++		__dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__);	\
++} while (0)
++#elif defined(DEBUG)
+ #define net_dbg_ratelimited(fmt, ...)				\
+ 	net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
+ #else
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 5440b7b705eb..6d1d8f4f759b 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -267,6 +267,7 @@ struct header_ops {
+ 	void	(*cache_update)(struct hh_cache *hh,
+ 				const struct net_device *dev,
+ 				const unsigned char *haddr);
++	bool	(*validate)(const char *ll_header, unsigned int len);
+ };
+ 
+ /* These flag bits are private to the generic network queueing
+@@ -1420,8 +1421,7 @@ enum netdev_priv_flags {
+  *	@dma:		DMA channel
+  *	@mtu:		Interface MTU value
+  *	@type:		Interface hardware type
+- *	@hard_header_len: Hardware header length, which means that this is the
+- *			  minimum size of a packet.
++ *	@hard_header_len: Maximum hardware header length.
+  *
+  *	@needed_headroom: Extra headroom the hardware may need, but not in all
+  *			  cases can this be guaranteed
+@@ -2627,6 +2627,24 @@ static inline int dev_parse_header(const struct sk_buff *skb,
+ 	return dev->header_ops->parse(skb, haddr);
+ }
+ 
++/* ll_header must have at least hard_header_len allocated */
++static inline bool dev_validate_header(const struct net_device *dev,
++				       char *ll_header, int len)
++{
++	if (likely(len >= dev->hard_header_len))
++		return true;
++
++	if (capable(CAP_SYS_RAWIO)) {
++		memset(ll_header + len, 0, dev->hard_header_len - len);
++		return true;
++	}
++
++	if (dev->header_ops && dev->header_ops->validate)
++		return dev->header_ops->validate(ll_header, len);
++
++	return false;
++}
++
+ typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
+ int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
+ static inline int unregister_gifconf(unsigned int family)
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 27716254dcc5..60042ab5d7bd 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -359,6 +359,7 @@ struct pci_dev {
+ 	unsigned int	io_window_1k:1;	/* Intel P2P bridge 1K I/O windows */
+ 	unsigned int	irq_managed:1;
+ 	unsigned int	has_secondary_link:1;
++	unsigned int	non_compliant_bars:1;	/* broken BARs; ignore them */
+ 	pci_dev_flags_t dev_flags;
+ 	atomic_t	enable_cnt;	/* pci_enable_device has been called */
+ 
+diff --git a/include/linux/platform_data/mmp_dma.h b/include/linux/platform_data/mmp_dma.h
+index 2a330ec9e2af..d1397c8ed94e 100644
+--- a/include/linux/platform_data/mmp_dma.h
++++ b/include/linux/platform_data/mmp_dma.h
+@@ -14,6 +14,7 @@
+ 
+ struct mmp_dma_platdata {
+ 	int dma_channels;
++	int nb_requestors;
+ };
+ 
+ #endif /* MMP_DMA_H */
+diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
+index 1c33dd7da4a7..4ae95f7e8597 100644
+--- a/include/linux/rculist_nulls.h
++++ b/include/linux/rculist_nulls.h
+@@ -98,6 +98,45 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
+ 	if (!is_a_nulls(first))
+ 		first->pprev = &n->next;
+ }
++
++/**
++ * hlist_nulls_add_tail_rcu
++ * @n: the element to add to the hash list.
++ * @h: the list to add to.
++ *
++ * Description:
++ * Adds the specified element to the end of the specified hlist_nulls,
++ * while permitting racing traversals.  NOTE: tail insertion requires
++ * list traversal.
++ *
++ * The caller must take whatever precautions are necessary
++ * (such as holding appropriate locks) to avoid racing
++ * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
++ * or hlist_nulls_del_rcu(), running on this same list.
++ * However, it is perfectly legal to run concurrently with
++ * the _rcu list-traversal primitives, such as
++ * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
++ * problems on Alpha CPUs.  Regardless of the type of CPU, the
++ * list-traversal primitive must be guarded by rcu_read_lock().
++ */
++static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
++					struct hlist_nulls_head *h)
++{
++	struct hlist_nulls_node *i, *last = NULL;
++
++	for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i);
++	     i = hlist_nulls_next_rcu(i))
++		last = i;
++
++	if (last) {
++		n->next = last->next;
++		n->pprev = &last->next;
++		rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
++	} else {
++		hlist_nulls_add_head_rcu(n, h);
++	}
++}
++
+ /**
+  * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
+  * @tpos:	the type * to use as a loop cursor.
+diff --git a/include/linux/signal.h b/include/linux/signal.h
+index 92557bbce7e7..d80259afb9e5 100644
+--- a/include/linux/signal.h
++++ b/include/linux/signal.h
+@@ -28,6 +28,21 @@ struct sigpending {
+ 	sigset_t signal;
+ };
+ 
++#ifndef HAVE_ARCH_COPY_SIGINFO
++
++#include <linux/string.h>
++
++static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
++{
++	if (from->si_code < 0)
++		memcpy(to, from, sizeof(*to));
++	else
++		/* _sigchld is currently the largest know union member */
++		memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
++}
++
++#endif
++
+ /*
+  * Define some primitives to manipulate sigset_t.
+  */
+diff --git a/include/linux/swap.h b/include/linux/swap.h
+index d18b65c53dbb..b974a2106dd7 100644
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -418,7 +418,7 @@ extern sector_t swapdev_block(int, pgoff_t);
+ extern int page_swapcount(struct page *);
+ extern int swp_swapcount(swp_entry_t entry);
+ extern struct swap_info_struct *page_swap_info(struct page *);
+-extern int reuse_swap_page(struct page *);
++extern bool reuse_swap_page(struct page *, int *);
+ extern int try_to_free_swap(struct page *);
+ struct backing_dev_info;
+ 
+@@ -513,8 +513,8 @@ static inline int swp_swapcount(swp_entry_t entry)
+ 	return 0;
+ }
+ 
+-#define reuse_swap_page(page) \
+-	(!PageTransCompound(page) && page_mapcount(page) == 1)
++#define reuse_swap_page(page, total_mapcount) \
++	(page_trans_huge_mapcount(page, total_mapcount) == 1)
+ 
+ static inline int try_to_free_swap(struct page *page)
+ {
+@@ -533,6 +533,10 @@ static inline swp_entry_t get_swap_page(void)
+ #ifdef CONFIG_MEMCG
+ static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
+ {
++	/* Cgroup2 doesn't have per-cgroup swappiness */
++	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
++		return vm_swappiness;
++
+ 	/* root ? */
+ 	if (mem_cgroup_disabled() || !memcg->css.parent)
+ 		return vm_swappiness;
+diff --git a/include/linux/thermal.h b/include/linux/thermal.h
+index e13a1ace50e9..4a849f19e6c9 100644
+--- a/include/linux/thermal.h
++++ b/include/linux/thermal.h
+@@ -156,6 +156,7 @@ struct thermal_attr {
+  * @trip_hyst_attrs:	attributes for trip points for sysfs: trip hysteresis
+  * @devdata:	private pointer for device private data
+  * @trips:	number of trip points the thermal zone supports
++ * @trips_disabled;	bitmap for disabled trips
+  * @passive_delay:	number of milliseconds to wait between polls when
+  *			performing passive cooling.
+  * @polling_delay:	number of milliseconds to wait between polls when
+@@ -191,6 +192,7 @@ struct thermal_zone_device {
+ 	struct thermal_attr *trip_hyst_attrs;
+ 	void *devdata;
+ 	int trips;
++	unsigned long trips_disabled;	/* bitmap for disabled trips */
+ 	int passive_delay;
+ 	int polling_delay;
+ 	int temperature;
+diff --git a/include/linux/tty.h b/include/linux/tty.h
+index d9fb4b043f56..e5b996d887ce 100644
+--- a/include/linux/tty.h
++++ b/include/linux/tty.h
+@@ -338,7 +338,6 @@ struct tty_file_private {
+ #define TTY_EXCLUSIVE 		3	/* Exclusive open mode */
+ #define TTY_DEBUG 		4	/* Debugging */
+ #define TTY_DO_WRITE_WAKEUP 	5	/* Call write_wakeup after queuing new */
+-#define TTY_OTHER_DONE		6	/* Closed pty has completed input processing */
+ #define TTY_LDISC_OPEN	 	11	/* Line discipline is open */
+ #define TTY_PTY_LOCK 		16	/* pty private */
+ #define TTY_NO_WRITE_SPLIT 	17	/* Preserve write boundaries to driver */
+@@ -464,6 +463,7 @@ extern void tty_buffer_init(struct tty_port *port);
+ extern void tty_buffer_set_lock_subclass(struct tty_port *port);
+ extern bool tty_buffer_restart_work(struct tty_port *port);
+ extern bool tty_buffer_cancel_work(struct tty_port *port);
++extern void tty_buffer_flush_work(struct tty_port *port);
+ extern speed_t tty_termios_baud_rate(struct ktermios *termios);
+ extern speed_t tty_termios_input_baud_rate(struct ktermios *termios);
+ extern void tty_termios_encode_baud_rate(struct ktermios *termios,
+@@ -589,7 +589,7 @@ static inline int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
+ 		count = ld->ops->receive_buf2(ld->tty, p, f, count);
+ 	else {
+ 		count = min_t(int, count, ld->tty->receive_room);
+-		if (count)
++		if (count && ld->ops->receive_buf)
+ 			ld->ops->receive_buf(ld->tty, p, f, count);
+ 	}
+ 	return count;
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index 89533ba38691..f3dbc217ff41 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -1066,7 +1066,7 @@ struct usbdrv_wrap {
+  *	for interfaces bound to this driver.
+  * @soft_unbind: if set to 1, the USB core will not kill URBs and disable
+  *	endpoints before calling the driver's disconnect method.
+- * @disable_hub_initiated_lpm: if set to 0, the USB core will not allow hubs
++ * @disable_hub_initiated_lpm: if set to 1, the USB core will not allow hubs
+  *	to initiate lower power link state transitions when an idle timeout
+  *	occurs.  Device-initiated USB 3.0 link PM will still be allowed.
+  *
+diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
+index 7f5f78bd15ad..245f57dbbb61 100644
+--- a/include/linux/usb_usual.h
++++ b/include/linux/usb_usual.h
+@@ -79,6 +79,8 @@
+ 		/* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */	\
+ 	US_FLAG(MAX_SECTORS_240,	0x08000000)		\
+ 		/* Sets max_sectors to 240 */			\
++	US_FLAG(NO_REPORT_LUNS,	0x10000000)			\
++		/* Cannot handle REPORT_LUNS */			\
+ 
+ #define US_FLAG(name, value)	US_FL_##name = value ,
+ enum { US_DO_ALL_FLAGS };
+diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
+index 8a0f55b6c2ba..88e3ab496e8f 100644
+--- a/include/media/videobuf2-core.h
++++ b/include/media/videobuf2-core.h
+@@ -375,6 +375,9 @@ struct vb2_ops {
+ /**
+  * struct vb2_ops - driver-specific callbacks
+  *
++ * @verify_planes_array: Verify that a given user space structure contains
++ *			enough planes for the buffer. This is called
++ *			for each dequeued buffer.
+  * @fill_user_buffer:	given a vb2_buffer fill in the userspace structure.
+  *			For V4L2 this is a struct v4l2_buffer.
+  * @fill_vb2_buffer:	given a userspace structure, fill in the vb2_buffer.
+@@ -384,6 +387,7 @@ struct vb2_ops {
+  *			the vb2_buffer struct.
+  */
+ struct vb2_buf_ops {
++	int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb);
+ 	void (*fill_user_buffer)(struct vb2_buffer *vb, void *pb);
+ 	int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb,
+ 				struct vb2_plane *planes);
+@@ -400,6 +404,9 @@ struct vb2_buf_ops {
+  * @fileio_read_once:		report EOF after reading the first buffer
+  * @fileio_write_immediately:	queue buffer after each write() call
+  * @allow_zero_bytesused:	allow bytesused == 0 to be passed to the driver
++ * @quirk_poll_must_check_waiting_for_buffers: Return POLLERR at poll when QBUF
++ *              has not been called. This is a vb1 idiom that has been adopted
++ *              also by vb2.
+  * @lock:	pointer to a mutex that protects the vb2_queue struct. The
+  *		driver can set this to a mutex to let the v4l2 core serialize
+  *		the queuing ioctls. If the driver wants to handle locking
+@@ -463,6 +470,7 @@ struct vb2_queue {
+ 	unsigned			fileio_read_once:1;
+ 	unsigned			fileio_write_immediately:1;
+ 	unsigned			allow_zero_bytesused:1;
++	unsigned		   quirk_poll_must_check_waiting_for_buffers:1;
+ 
+ 	struct mutex			*lock;
+ 	void				*owner;
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index ee6c52053aa3..791800ddd6d9 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -215,6 +215,7 @@ struct bonding {
+ 	 * ALB mode (6) - to sync the use and modifications of its hash table
+ 	 */
+ 	spinlock_t mode_lock;
++	spinlock_t stats_lock;
+ 	u8	 send_peer_notif;
+ 	u8       igmp_retrans;
+ #ifdef CONFIG_PROC_FS
+diff --git a/include/net/codel.h b/include/net/codel.h
+index 267e70210061..d168aca115cc 100644
+--- a/include/net/codel.h
++++ b/include/net/codel.h
+@@ -162,12 +162,14 @@ struct codel_vars {
+  * struct codel_stats - contains codel shared variables and stats
+  * @maxpacket:	largest packet we've seen so far
+  * @drop_count:	temp count of dropped packets in dequeue()
++ * @drop_len:	bytes of dropped packets in dequeue()
+  * ecn_mark:	number of packets we ECN marked instead of dropping
+  * ce_mark:	number of packets CE marked because sojourn time was above ce_threshold
+  */
+ struct codel_stats {
+ 	u32		maxpacket;
+ 	u32		drop_count;
++	u32		drop_len;
+ 	u32		ecn_mark;
+ 	u32		ce_mark;
+ };
+@@ -308,6 +310,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
+ 								  vars->rec_inv_sqrt);
+ 					goto end;
+ 				}
++				stats->drop_len += qdisc_pkt_len(skb);
+ 				qdisc_drop(skb, sch);
+ 				stats->drop_count++;
+ 				skb = dequeue_func(vars, sch);
+@@ -330,6 +333,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
+ 		if (params->ecn && INET_ECN_set_ce(skb)) {
+ 			stats->ecn_mark++;
+ 		} else {
++			stats->drop_len += qdisc_pkt_len(skb);
+ 			qdisc_drop(skb, sch);
+ 			stats->drop_count++;
+ 
+diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
+index 0816c872b689..a6cc576fd467 100644
+--- a/include/net/ip_vs.h
++++ b/include/net/ip_vs.h
+@@ -1588,6 +1588,23 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
+ }
+ #endif /* CONFIG_IP_VS_NFCT */
+ 
++/* Really using conntrack? */
++static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp,
++					     struct sk_buff *skb)
++{
++#ifdef CONFIG_IP_VS_NFCT
++	enum ip_conntrack_info ctinfo;
++	struct nf_conn *ct;
++
++	if (!(cp->flags & IP_VS_CONN_F_NFCT))
++		return false;
++	ct = nf_ct_get(skb, &ctinfo);
++	if (ct && !nf_ct_is_untracked(ct))
++		return true;
++#endif
++	return false;
++}
++
+ static inline int
+ ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
+ {
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 636a362a0e03..e5bba897d206 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -396,7 +396,8 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
+ 			      struct Qdisc *qdisc);
+ void qdisc_reset(struct Qdisc *qdisc);
+ void qdisc_destroy(struct Qdisc *qdisc);
+-void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
++void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
++			       unsigned int len);
+ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
+ 			  const struct Qdisc_ops *ops);
+ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
+@@ -707,6 +708,23 @@ static inline void qdisc_reset_queue(struct Qdisc *sch)
+ 	sch->qstats.backlog = 0;
+ }
+ 
++static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
++					  struct Qdisc **pold)
++{
++	struct Qdisc *old;
++
++	sch_tree_lock(sch);
++	old = *pold;
++	*pold = new;
++	if (old != NULL) {
++		qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
++		qdisc_reset(old);
++	}
++	sch_tree_unlock(sch);
++
++	return old;
++}
++
+ static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
+ 					      struct sk_buff_head *list)
+ {
+diff --git a/include/net/sock.h b/include/net/sock.h
+index f5ea148853e2..3c688ca3456d 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -630,7 +630,11 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
+ 
+ static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
+ {
+-	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
++	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
++	    sk->sk_family == AF_INET6)
++		hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
++	else
++		hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
+ }
+ 
+ static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
+diff --git a/include/rdma/ib.h b/include/rdma/ib.h
+index cf8f9e700e48..a6b93706b0fc 100644
+--- a/include/rdma/ib.h
++++ b/include/rdma/ib.h
+@@ -34,6 +34,7 @@
+ #define _RDMA_IB_H
+ 
+ #include <linux/types.h>
++#include <linux/sched.h>
+ 
+ struct ib_addr {
+ 	union {
+@@ -86,4 +87,19 @@ struct sockaddr_ib {
+ 	__u64			sib_scope_id;
+ };
+ 
++/*
++ * The IB interfaces that use write() as bi-directional ioctl() are
++ * fundamentally unsafe, since there are lots of ways to trigger "write()"
++ * calls from various contexts with elevated privileges. That includes the
++ * traditional suid executable error message writes, but also various kernel
++ * interfaces that can write to file descriptors.
++ *
++ * This function provides protection for the legacy API by restricting the
++ * calling context.
++ */
++static inline bool ib_safe_file_access(struct file *filp)
++{
++	return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
++}
++
+ #endif /* _RDMA_IB_H */
+diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
+index f63a16760ae9..a5d31f794cac 100644
+--- a/include/scsi/scsi_device.h
++++ b/include/scsi/scsi_device.h
+@@ -240,6 +240,7 @@ scmd_printk(const char *, const struct scsi_cmnd *, const char *, ...);
+ enum scsi_target_state {
+ 	STARGET_CREATED = 1,
+ 	STARGET_RUNNING,
++	STARGET_REMOVE,
+ 	STARGET_DEL,
+ };
+ 
+@@ -513,6 +514,31 @@ static inline int scsi_device_tpgs(struct scsi_device *sdev)
+ 	return sdev->inquiry ? (sdev->inquiry[5] >> 4) & 0x3 : 0;
+ }
+ 
++/**
++ * scsi_device_supports_vpd - test if a device supports VPD pages
++ * @sdev: the &struct scsi_device to test
++ *
++ * If the 'try_vpd_pages' flag is set it takes precedence.
++ * Otherwise we will assume VPD pages are supported if the
++ * SCSI level is at least SPC-3 and 'skip_vpd_pages' is not set.
++ */
++static inline int scsi_device_supports_vpd(struct scsi_device *sdev)
++{
++	/* Attempt VPD inquiry if the device blacklist explicitly calls
++	 * for it.
++	 */
++	if (sdev->try_vpd_pages)
++		return 1;
++	/*
++	 * Although VPD inquiries can go to SCSI-2 type devices,
++	 * some USB ones crash on receiving them, and the pages
++	 * we currently ask for are for SPC-3 and beyond
++	 */
++	if (sdev->scsi_level > SCSI_SPC_2 && !sdev->skip_vpd_pages)
++		return 1;
++	return 0;
++}
++
+ #define MODULE_ALIAS_SCSI_DEVICE(type) \
+ 	MODULE_ALIAS("scsi:t-" __stringify(type) "*")
+ #define SCSI_DEVICE_MODALIAS_FMT "scsi:t-0x%02x"
+diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h
+index fa341fcb5829..f5842bcd9c94 100644
+--- a/include/sound/hda_i915.h
++++ b/include/sound/hda_i915.h
+@@ -9,7 +9,7 @@
+ #ifdef CONFIG_SND_HDA_I915
+ int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
+ int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
+-int snd_hdac_get_display_clk(struct hdac_bus *bus);
++void snd_hdac_i915_set_bclk(struct hdac_bus *bus);
+ int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, int rate);
+ int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid,
+ 			   bool *audio_enabled, char *buffer, int max_bytes);
+@@ -25,9 +25,8 @@ static inline int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
+ {
+ 	return 0;
+ }
+-static inline int snd_hdac_get_display_clk(struct hdac_bus *bus)
++static inline void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
+ {
+-	return 0;
+ }
+ static inline int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid,
+ 					   int rate)
+diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
+index c21c38ce7450..93e63c56f48f 100644
+--- a/include/sound/hdaudio.h
++++ b/include/sound/hdaudio.h
+@@ -168,11 +168,13 @@ int snd_hdac_power_up(struct hdac_device *codec);
+ int snd_hdac_power_down(struct hdac_device *codec);
+ int snd_hdac_power_up_pm(struct hdac_device *codec);
+ int snd_hdac_power_down_pm(struct hdac_device *codec);
++int snd_hdac_keep_power_up(struct hdac_device *codec);
+ #else
+ static inline int snd_hdac_power_up(struct hdac_device *codec) { return 0; }
+ static inline int snd_hdac_power_down(struct hdac_device *codec) { return 0; }
+ static inline int snd_hdac_power_up_pm(struct hdac_device *codec) { return 0; }
+ static inline int snd_hdac_power_down_pm(struct hdac_device *codec) { return 0; }
++static inline int snd_hdac_keep_power_up(struct hdac_device *codec) { return 0; }
+ #endif
+ 
+ /*
+diff --git a/include/uapi/linux/if.h b/include/uapi/linux/if.h
+index 9cf2394f0bcf..752f5dc040a5 100644
+--- a/include/uapi/linux/if.h
++++ b/include/uapi/linux/if.h
+@@ -19,14 +19,20 @@
+ #ifndef _LINUX_IF_H
+ #define _LINUX_IF_H
+ 
++#include <linux/libc-compat.h>          /* for compatibility with glibc */
+ #include <linux/types.h>		/* for "__kernel_caddr_t" et al	*/
+ #include <linux/socket.h>		/* for "struct sockaddr" et al	*/
+ #include <linux/compiler.h>		/* for "__user" et al           */
+ 
++#if __UAPI_DEF_IF_IFNAMSIZ
+ #define	IFNAMSIZ	16
++#endif /* __UAPI_DEF_IF_IFNAMSIZ */
+ #define	IFALIASZ	256
+ #include <linux/hdlc/ioctl.h>
+ 
++/* For glibc compatibility. An empty enum does not compile. */
++#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && \
++    __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0
+ /**
+  * enum net_device_flags - &struct net_device flags
+  *
+@@ -68,6 +74,8 @@
+  * @IFF_ECHO: echo sent packets. Volatile.
+  */
+ enum net_device_flags {
++/* for compatibility with glibc net/if.h */
++#if __UAPI_DEF_IF_NET_DEVICE_FLAGS
+ 	IFF_UP				= 1<<0,  /* sysfs */
+ 	IFF_BROADCAST			= 1<<1,  /* volatile */
+ 	IFF_DEBUG			= 1<<2,  /* sysfs */
+@@ -84,11 +92,17 @@ enum net_device_flags {
+ 	IFF_PORTSEL			= 1<<13, /* sysfs */
+ 	IFF_AUTOMEDIA			= 1<<14, /* sysfs */
+ 	IFF_DYNAMIC			= 1<<15, /* sysfs */
++#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS */
++#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
+ 	IFF_LOWER_UP			= 1<<16, /* volatile */
+ 	IFF_DORMANT			= 1<<17, /* volatile */
+ 	IFF_ECHO			= 1<<18, /* volatile */
++#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
+ };
++#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0 */
+ 
++/* for compatibility with glibc net/if.h */
++#if __UAPI_DEF_IF_NET_DEVICE_FLAGS
+ #define IFF_UP				IFF_UP
+ #define IFF_BROADCAST			IFF_BROADCAST
+ #define IFF_DEBUG			IFF_DEBUG
+@@ -105,9 +119,13 @@ enum net_device_flags {
+ #define IFF_PORTSEL			IFF_PORTSEL
+ #define IFF_AUTOMEDIA			IFF_AUTOMEDIA
+ #define IFF_DYNAMIC			IFF_DYNAMIC
++#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS */
++
++#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
+ #define IFF_LOWER_UP			IFF_LOWER_UP
+ #define IFF_DORMANT			IFF_DORMANT
+ #define IFF_ECHO			IFF_ECHO
++#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
+ 
+ #define IFF_VOLATILE	(IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|IFF_ECHO|\
+ 		IFF_MASTER|IFF_SLAVE|IFF_RUNNING|IFF_LOWER_UP|IFF_DORMANT)
+@@ -166,6 +184,8 @@ enum {
+  *	being very small might be worth keeping for clean configuration.
+  */
+ 
++/* for compatibility with glibc net/if.h */
++#if __UAPI_DEF_IF_IFMAP
+ struct ifmap {
+ 	unsigned long mem_start;
+ 	unsigned long mem_end;
+@@ -175,6 +195,7 @@ struct ifmap {
+ 	unsigned char port;
+ 	/* 3 bytes spare */
+ };
++#endif /* __UAPI_DEF_IF_IFMAP */
+ 
+ struct if_settings {
+ 	unsigned int type;	/* Type of physical device or protocol */
+@@ -200,6 +221,8 @@ struct if_settings {
+  * remainder may be interface specific.
+  */
+ 
++/* for compatibility with glibc net/if.h */
++#if __UAPI_DEF_IF_IFREQ
+ struct ifreq {
+ #define IFHWADDRLEN	6
+ 	union
+@@ -223,6 +246,7 @@ struct ifreq {
+ 		struct	if_settings ifru_settings;
+ 	} ifr_ifru;
+ };
++#endif /* __UAPI_DEF_IF_IFREQ */
+ 
+ #define ifr_name	ifr_ifrn.ifrn_name	/* interface name 	*/
+ #define ifr_hwaddr	ifr_ifru.ifru_hwaddr	/* MAC address 		*/
+@@ -249,6 +273,8 @@ struct ifreq {
+  * must know all networks accessible).
+  */
+ 
++/* for compatibility with glibc net/if.h */
++#if __UAPI_DEF_IF_IFCONF
+ struct ifconf  {
+ 	int	ifc_len;			/* size of buffer	*/
+ 	union {
+@@ -256,6 +282,8 @@ struct ifconf  {
+ 		struct ifreq __user *ifcu_req;
+ 	} ifc_ifcu;
+ };
++#endif /* __UAPI_DEF_IF_IFCONF */
++
+ #define	ifc_buf	ifc_ifcu.ifcu_buf		/* buffer address	*/
+ #define	ifc_req	ifc_ifcu.ifcu_req		/* array of structures	*/
+ 
+diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
+index 7d024ceb075d..d5e38c73377c 100644
+--- a/include/uapi/linux/libc-compat.h
++++ b/include/uapi/linux/libc-compat.h
+@@ -51,6 +51,40 @@
+ /* We have included glibc headers... */
+ #if defined(__GLIBC__)
+ 
++/* Coordinate with glibc net/if.h header. */
++#if defined(_NET_IF_H)
++
++/* GLIBC headers included first so don't define anything
++ * that would already be defined. */
++
++#define __UAPI_DEF_IF_IFCONF 0
++#define __UAPI_DEF_IF_IFMAP 0
++#define __UAPI_DEF_IF_IFNAMSIZ 0
++#define __UAPI_DEF_IF_IFREQ 0
++/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
++#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 0
++/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
++#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
++#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
++#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
++
++#else /* _NET_IF_H */
++
++/* Linux headers included first, and we must define everything
++ * we need. The expectation is that glibc will check the
++ * __UAPI_DEF_* defines and adjust appropriately. */
++
++#define __UAPI_DEF_IF_IFCONF 1
++#define __UAPI_DEF_IF_IFMAP 1
++#define __UAPI_DEF_IF_IFNAMSIZ 1
++#define __UAPI_DEF_IF_IFREQ 1
++/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
++#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
++/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
++#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
++
++#endif /* _NET_IF_H */
++
+ /* Coordinate with glibc netinet/in.h header. */
+ #if defined(_NETINET_IN_H)
+ 
+@@ -117,6 +151,16 @@
+  * that we need. */
+ #else /* !defined(__GLIBC__) */
+ 
++/* Definitions for if.h */
++#define __UAPI_DEF_IF_IFCONF 1
++#define __UAPI_DEF_IF_IFMAP 1
++#define __UAPI_DEF_IF_IFNAMSIZ 1
++#define __UAPI_DEF_IF_IFREQ 1
++/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
++#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
++/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
++#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
++
+ /* Definitions for in.h */
+ #define __UAPI_DEF_IN_ADDR		1
+ #define __UAPI_DEF_IN_IPPROTO		1
+diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
+index c039f1d68a09..086168e18ca8 100644
+--- a/include/uapi/linux/v4l2-dv-timings.h
++++ b/include/uapi/linux/v4l2-dv-timings.h
+@@ -183,7 +183,8 @@
+ 
+ #define V4L2_DV_BT_CEA_3840X2160P24 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, \
+ 		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+@@ -191,14 +192,16 @@
+ 
+ #define V4L2_DV_BT_CEA_3840X2160P25 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+ }
+ 
+ #define V4L2_DV_BT_CEA_3840X2160P30 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, \
+ 		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+@@ -206,14 +209,16 @@
+ 
+ #define V4L2_DV_BT_CEA_3840X2160P50 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+ }
+ 
+ #define V4L2_DV_BT_CEA_3840X2160P60 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, \
+ 		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+@@ -221,7 +226,8 @@
+ 
+ #define V4L2_DV_BT_CEA_4096X2160P24 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, \
+ 		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+@@ -229,14 +235,16 @@
+ 
+ #define V4L2_DV_BT_CEA_4096X2160P25 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+ }
+ 
+ #define V4L2_DV_BT_CEA_4096X2160P30 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, \
+ 		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+@@ -244,14 +252,16 @@
+ 
+ #define V4L2_DV_BT_CEA_4096X2160P50 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+ }
+ 
+ #define V4L2_DV_BT_CEA_4096X2160P60 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, \
+ 		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+diff --git a/include/xen/page.h b/include/xen/page.h
+index 96294ac93755..9dc46cb8a0fd 100644
+--- a/include/xen/page.h
++++ b/include/xen/page.h
+@@ -15,9 +15,9 @@
+  */
+ 
+ #define xen_pfn_to_page(xen_pfn)	\
+-	((pfn_to_page(((unsigned long)(xen_pfn) << XEN_PAGE_SHIFT) >> PAGE_SHIFT)))
++	(pfn_to_page((unsigned long)(xen_pfn) >> (PAGE_SHIFT - XEN_PAGE_SHIFT)))
+ #define page_to_xen_pfn(page)		\
+-	(((page_to_pfn(page)) << PAGE_SHIFT) >> XEN_PAGE_SHIFT)
++	((page_to_pfn(page)) << (PAGE_SHIFT - XEN_PAGE_SHIFT))
+ 
+ #define XEN_PFN_PER_PAGE	(PAGE_SIZE / XEN_PAGE_SIZE)
+ 
+diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
+index 4504ca66118d..50da680c479f 100644
+--- a/kernel/bpf/helpers.c
++++ b/kernel/bpf/helpers.c
+@@ -166,7 +166,7 @@ static u64 bpf_get_current_comm(u64 r1, u64 size, u64 r3, u64 r4, u64 r5)
+ 	if (!task)
+ 		return -EINVAL;
+ 
+-	memcpy(buf, task->comm, min_t(size_t, size, sizeof(task->comm)));
++	strlcpy(buf, task->comm, min_t(size_t, size, sizeof(task->comm)));
+ 	return 0;
+ }
+ 
+diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
+index f2ece3c174a5..8f94ca1860cf 100644
+--- a/kernel/bpf/inode.c
++++ b/kernel/bpf/inode.c
+@@ -31,10 +31,10 @@ static void *bpf_any_get(void *raw, enum bpf_type type)
+ {
+ 	switch (type) {
+ 	case BPF_TYPE_PROG:
+-		atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt);
++		raw = bpf_prog_inc(raw);
+ 		break;
+ 	case BPF_TYPE_MAP:
+-		bpf_map_inc(raw, true);
++		raw = bpf_map_inc(raw, true);
+ 		break;
+ 	default:
+ 		WARN_ON_ONCE(1);
+@@ -297,7 +297,8 @@ static void *bpf_obj_do_get(const struct filename *pathname,
+ 		goto out;
+ 
+ 	raw = bpf_any_get(inode->i_private, *type);
+-	touch_atime(&path);
++	if (!IS_ERR(raw))
++		touch_atime(&path);
+ 
+ 	path_put(&path);
+ 	return raw;
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 637397059f76..aa5f39772ac4 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -201,11 +201,18 @@ struct bpf_map *__bpf_map_get(struct fd f)
+ 	return f.file->private_data;
+ }
+ 
+-void bpf_map_inc(struct bpf_map *map, bool uref)
++/* prog's and map's refcnt limit */
++#define BPF_MAX_REFCNT 32768
++
++struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
+ {
+-	atomic_inc(&map->refcnt);
++	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
++		atomic_dec(&map->refcnt);
++		return ERR_PTR(-EBUSY);
++	}
+ 	if (uref)
+ 		atomic_inc(&map->usercnt);
++	return map;
+ }
+ 
+ struct bpf_map *bpf_map_get_with_uref(u32 ufd)
+@@ -217,7 +224,7 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd)
+ 	if (IS_ERR(map))
+ 		return map;
+ 
+-	bpf_map_inc(map, true);
++	map = bpf_map_inc(map, true);
+ 	fdput(f);
+ 
+ 	return map;
+@@ -600,6 +607,15 @@ static struct bpf_prog *__bpf_prog_get(struct fd f)
+ 	return f.file->private_data;
+ }
+ 
++struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
++{
++	if (atomic_inc_return(&prog->aux->refcnt) > BPF_MAX_REFCNT) {
++		atomic_dec(&prog->aux->refcnt);
++		return ERR_PTR(-EBUSY);
++	}
++	return prog;
++}
++
+ /* called by sockets/tracing/seccomp before attaching program to an event
+  * pairs with bpf_prog_put()
+  */
+@@ -612,7 +628,7 @@ struct bpf_prog *bpf_prog_get(u32 ufd)
+ 	if (IS_ERR(prog))
+ 		return prog;
+ 
+-	atomic_inc(&prog->aux->refcnt);
++	prog = bpf_prog_inc(prog);
+ 	fdput(f);
+ 
+ 	return prog;
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 2e7f7ab739e4..2cbfba78d3db 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -239,15 +239,6 @@ static const char * const reg_type_str[] = {
+ 	[CONST_IMM]		= "imm",
+ };
+ 
+-static const struct {
+-	int map_type;
+-	int func_id;
+-} func_limit[] = {
+-	{BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call},
+-	{BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read},
+-	{BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_output},
+-};
+-
+ static void print_verifier_state(struct verifier_env *env)
+ {
+ 	enum bpf_reg_type t;
+@@ -898,24 +889,44 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
+ 
+ static int check_map_func_compatibility(struct bpf_map *map, int func_id)
+ {
+-	bool bool_map, bool_func;
+-	int i;
+-
+ 	if (!map)
+ 		return 0;
+ 
+-	for (i = 0; i < ARRAY_SIZE(func_limit); i++) {
+-		bool_map = (map->map_type == func_limit[i].map_type);
+-		bool_func = (func_id == func_limit[i].func_id);
+-		/* only when map & func pair match it can continue.
+-		 * don't allow any other map type to be passed into
+-		 * the special func;
+-		 */
+-		if (bool_func && bool_map != bool_func)
+-			return -EINVAL;
++	/* We need a two way check, first is from map perspective ... */
++	switch (map->map_type) {
++	case BPF_MAP_TYPE_PROG_ARRAY:
++		if (func_id != BPF_FUNC_tail_call)
++			goto error;
++		break;
++	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
++		if (func_id != BPF_FUNC_perf_event_read &&
++		    func_id != BPF_FUNC_perf_event_output)
++			goto error;
++		break;
++	default:
++		break;
++	}
++
++	/* ... and second from the function itself. */
++	switch (func_id) {
++	case BPF_FUNC_tail_call:
++		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
++			goto error;
++		break;
++	case BPF_FUNC_perf_event_read:
++	case BPF_FUNC_perf_event_output:
++		if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
++			goto error;
++		break;
++	default:
++		break;
+ 	}
+ 
+ 	return 0;
++error:
++	verbose("cannot pass map_type %d into func %d\n",
++		map->map_type, func_id);
++	return -EINVAL;
+ }
+ 
+ static int check_call(struct verifier_env *env, int func_id)
+@@ -1348,6 +1359,7 @@ static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
+ 	}
+ 
+ 	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
++	    BPF_SIZE(insn->code) == BPF_DW ||
+ 	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
+ 		verbose("BPF_LD_ABS uses reserved fields\n");
+ 		return -EINVAL;
+@@ -2003,7 +2015,6 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
+ 			if (IS_ERR(map)) {
+ 				verbose("fd %d is not pointing to valid bpf_map\n",
+ 					insn->imm);
+-				fdput(f);
+ 				return PTR_ERR(map);
+ 			}
+ 
+@@ -2023,15 +2034,18 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
+ 				return -E2BIG;
+ 			}
+ 
+-			/* remember this map */
+-			env->used_maps[env->used_map_cnt++] = map;
+-
+ 			/* hold the map. If the program is rejected by verifier,
+ 			 * the map will be released by release_maps() or it
+ 			 * will be used by the valid program until it's unloaded
+ 			 * and all maps are released in free_bpf_prog_info()
+ 			 */
+-			bpf_map_inc(map, false);
++			map = bpf_map_inc(map, false);
++			if (IS_ERR(map)) {
++				fdput(f);
++				return PTR_ERR(map);
++			}
++			env->used_maps[env->used_map_cnt++] = map;
++
+ 			fdput(f);
+ next_insn:
+ 			insn++;
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index d27904c193da..355cd5f2b416 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -2474,6 +2474,14 @@ static void cgroup_migrate_add_src(struct css_set *src_cset,
+ 	lockdep_assert_held(&cgroup_mutex);
+ 	lockdep_assert_held(&css_set_lock);
+ 
++	/*
++	 * If ->dead, @src_set is associated with one or more dead cgroups
++	 * and doesn't contain any migratable tasks.  Ignore it early so
++	 * that the rest of migration path doesn't get confused by it.
++	 */
++	if (src_cset->dead)
++		return;
++
+ 	src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);
+ 
+ 	if (!list_empty(&src_cset->mg_preload_node))
+@@ -2689,9 +2697,10 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
+ 				    size_t nbytes, loff_t off, bool threadgroup)
+ {
+ 	struct task_struct *tsk;
++	struct cgroup_subsys *ss;
+ 	struct cgroup *cgrp;
+ 	pid_t pid;
+-	int ret;
++	int ssid, ret;
+ 
+ 	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
+ 		return -EINVAL;
+@@ -2739,8 +2748,10 @@ out_unlock_rcu:
+ 	rcu_read_unlock();
+ out_unlock_threadgroup:
+ 	percpu_up_write(&cgroup_threadgroup_rwsem);
++	for_each_subsys(ss, ssid)
++		if (ss->post_attach)
++			ss->post_attach();
+ 	cgroup_kn_unlock(of->kn);
+-	cpuset_post_attach_flush();
+ 	return ret ?: nbytes;
+ }
+ 
+@@ -5114,6 +5125,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
+ 	__releases(&cgroup_mutex) __acquires(&cgroup_mutex)
+ {
+ 	struct cgroup_subsys_state *css;
++	struct cgrp_cset_link *link;
+ 	int ssid;
+ 
+ 	lockdep_assert_held(&cgroup_mutex);
+@@ -5134,11 +5146,18 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
+ 		return -EBUSY;
+ 
+ 	/*
+-	 * Mark @cgrp dead.  This prevents further task migration and child
+-	 * creation by disabling cgroup_lock_live_group().
++	 * Mark @cgrp and the associated csets dead.  The former prevents
++	 * further task migration and child creation by disabling
++	 * cgroup_lock_live_group().  The latter makes the csets ignored by
++	 * the migration path.
+ 	 */
+ 	cgrp->self.flags &= ~CSS_ONLINE;
+ 
++	spin_lock_bh(&css_set_lock);
++	list_for_each_entry(link, &cgrp->cset_links, cset_link)
++		link->cset->dead = true;
++	spin_unlock_bh(&css_set_lock);
++
+ 	/* initiate massacre of all css's */
+ 	for_each_css(css, ssid, cgrp)
+ 		kill_css(css);
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index 41989ab4db57..df16d0c9349f 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -58,7 +58,6 @@
+ #include <asm/uaccess.h>
+ #include <linux/atomic.h>
+ #include <linux/mutex.h>
+-#include <linux/workqueue.h>
+ #include <linux/cgroup.h>
+ #include <linux/wait.h>
+ 
+@@ -1016,7 +1015,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
+ 	}
+ }
+ 
+-void cpuset_post_attach_flush(void)
++static void cpuset_post_attach(void)
+ {
+ 	flush_workqueue(cpuset_migrate_mm_wq);
+ }
+@@ -2087,6 +2086,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
+ 	.can_attach	= cpuset_can_attach,
+ 	.cancel_attach	= cpuset_cancel_attach,
+ 	.attach		= cpuset_attach,
++	.post_attach	= cpuset_post_attach,
+ 	.bind		= cpuset_bind,
+ 	.legacy_cftypes	= files,
+ 	.early_init	= 1,
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 614614821f00..477fb6b8ca20 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -1090,6 +1090,7 @@ static void put_ctx(struct perf_event_context *ctx)
+  * function.
+  *
+  * Lock order:
++ *    cred_guard_mutex
+  *	task_struct::perf_event_mutex
+  *	  perf_event_context::mutex
+  *	    perf_event::child_mutex;
+@@ -2402,14 +2403,24 @@ static void ctx_sched_out(struct perf_event_context *ctx,
+ 			cpuctx->task_ctx = NULL;
+ 	}
+ 
+-	is_active ^= ctx->is_active; /* changed bits */
+-
++	/*
++	 * Always update time if it was set; not only when it changes.
++	 * Otherwise we can 'forget' to update time for any but the last
++	 * context we sched out. For example:
++	 *
++	 *   ctx_sched_out(.event_type = EVENT_FLEXIBLE)
++	 *   ctx_sched_out(.event_type = EVENT_PINNED)
++	 *
++	 * would only update time for the pinned events.
++	 */
+ 	if (is_active & EVENT_TIME) {
+ 		/* update (and stop) ctx time */
+ 		update_context_time(ctx);
+ 		update_cgrp_time_from_cpuctx(cpuctx);
+ 	}
+ 
++	is_active ^= ctx->is_active; /* changed bits */
++
+ 	if (!ctx->nr_active || !(is_active & EVENT_ALL))
+ 		return;
+ 
+@@ -3405,7 +3416,6 @@ static struct task_struct *
+ find_lively_task_by_vpid(pid_t vpid)
+ {
+ 	struct task_struct *task;
+-	int err;
+ 
+ 	rcu_read_lock();
+ 	if (!vpid)
+@@ -3419,16 +3429,7 @@ find_lively_task_by_vpid(pid_t vpid)
+ 	if (!task)
+ 		return ERR_PTR(-ESRCH);
+ 
+-	/* Reuse ptrace permission checks for now. */
+-	err = -EACCES;
+-	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
+-		goto errout;
+-
+ 	return task;
+-errout:
+-	put_task_struct(task);
+-	return ERR_PTR(err);
+-
+ }
+ 
+ /*
+@@ -8001,6 +8002,9 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+ 		}
+ 	}
+ 
++	/* symmetric to unaccount_event() in _free_event() */
++	account_event(event);
++
+ 	return event;
+ 
+ err_per_task:
+@@ -8347,6 +8351,24 @@ SYSCALL_DEFINE5(perf_event_open,
+ 
+ 	get_online_cpus();
+ 
++	if (task) {
++		err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
++		if (err)
++			goto err_cpus;
++
++		/*
++		 * Reuse ptrace permission checks for now.
++		 *
++		 * We must hold cred_guard_mutex across this and any potential
++		 * perf_install_in_context() call for this new event to
++		 * serialize against exec() altering our credentials (and the
++		 * perf_event_exit_task() that could imply).
++		 */
++		err = -EACCES;
++		if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
++			goto err_cred;
++	}
++
+ 	if (flags & PERF_FLAG_PID_CGROUP)
+ 		cgroup_fd = pid;
+ 
+@@ -8354,7 +8376,7 @@ SYSCALL_DEFINE5(perf_event_open,
+ 				 NULL, NULL, cgroup_fd);
+ 	if (IS_ERR(event)) {
+ 		err = PTR_ERR(event);
+-		goto err_cpus;
++		goto err_cred;
+ 	}
+ 
+ 	if (is_sampling_event(event)) {
+@@ -8364,8 +8386,6 @@ SYSCALL_DEFINE5(perf_event_open,
+ 		}
+ 	}
+ 
+-	account_event(event);
+-
+ 	/*
+ 	 * Special case software events and allow them to be part of
+ 	 * any hardware group.
+@@ -8415,11 +8435,6 @@ SYSCALL_DEFINE5(perf_event_open,
+ 		goto err_context;
+ 	}
+ 
+-	if (task) {
+-		put_task_struct(task);
+-		task = NULL;
+-	}
+-
+ 	/*
+ 	 * Look up the group leader (we will attach this event to it):
+ 	 */
+@@ -8478,6 +8493,7 @@ SYSCALL_DEFINE5(perf_event_open,
+ 					f_flags);
+ 	if (IS_ERR(event_file)) {
+ 		err = PTR_ERR(event_file);
++		event_file = NULL;
+ 		goto err_context;
+ 	}
+ 
+@@ -8516,6 +8532,11 @@ SYSCALL_DEFINE5(perf_event_open,
+ 
+ 	WARN_ON_ONCE(ctx->parent_ctx);
+ 
++	/*
++	 * This is the point on no return; we cannot fail hereafter. This is
++	 * where we start modifying current state.
++	 */
++
+ 	if (move_group) {
+ 		/*
+ 		 * See perf_event_ctx_lock() for comments on the details
+@@ -8587,6 +8608,11 @@ SYSCALL_DEFINE5(perf_event_open,
+ 		mutex_unlock(&gctx->mutex);
+ 	mutex_unlock(&ctx->mutex);
+ 
++	if (task) {
++		mutex_unlock(&task->signal->cred_guard_mutex);
++		put_task_struct(task);
++	}
++
+ 	put_online_cpus();
+ 
+ 	mutex_lock(&current->perf_event_mutex);
+@@ -8619,6 +8645,9 @@ err_alloc:
+ 	 */
+ 	if (!event_file)
+ 		free_event(event);
++err_cred:
++	if (task)
++		mutex_unlock(&task->signal->cred_guard_mutex);
+ err_cpus:
+ 	put_online_cpus();
+ err_task:
+@@ -8662,8 +8691,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
+ 	/* Mark owner so we could distinguish it from user events. */
+ 	event->owner = TASK_TOMBSTONE;
+ 
+-	account_event(event);
+-
+ 	ctx = find_get_context(event->pmu, task, event);
+ 	if (IS_ERR(ctx)) {
+ 		err = PTR_ERR(ctx);
+@@ -8905,6 +8932,9 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
+ 
+ /*
+  * When a child task exits, feed back event values to parent events.
++ *
++ * Can be called with cred_guard_mutex held when called from
++ * install_exec_creds().
+  */
+ void perf_event_exit_task(struct task_struct *child)
+ {
+diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
+index 1faad2cfdb9e..287201a5d12f 100644
+--- a/kernel/events/ring_buffer.c
++++ b/kernel/events/ring_buffer.c
+@@ -347,6 +347,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
+ 			 bool truncated)
+ {
+ 	struct ring_buffer *rb = handle->rb;
++	bool wakeup = truncated;
+ 	unsigned long aux_head;
+ 	u64 flags = 0;
+ 
+@@ -375,9 +376,16 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
+ 	aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);
+ 
+ 	if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
+-		perf_output_wakeup(handle);
++		wakeup = true;
+ 		local_add(rb->aux_watermark, &rb->aux_wakeup);
+ 	}
++
++	if (wakeup) {
++		if (truncated)
++			handle->event->pending_disable = 1;
++		perf_output_wakeup(handle);
++	}
++
+ 	handle->event = NULL;
+ 
+ 	local_set(&rb->aux_nest, 0);
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 5d6ce6413ef1..11b502159f3a 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -1212,10 +1212,20 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
+ 	if (unlikely(should_fail_futex(true)))
+ 		ret = -EFAULT;
+ 
+-	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
++	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
+ 		ret = -EFAULT;
+-	else if (curval != uval)
+-		ret = -EINVAL;
++	} else if (curval != uval) {
++		/*
++		 * If a unconditional UNLOCK_PI operation (user space did not
++		 * try the TID->0 transition) raced with a waiter setting the
++		 * FUTEX_WAITERS flag between get_user() and locking the hash
++		 * bucket lock, retry the operation.
++		 */
++		if ((FUTEX_TID_MASK & curval) == uval)
++			ret = -EAGAIN;
++		else
++			ret = -EINVAL;
++	}
+ 	if (ret) {
+ 		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+ 		return ret;
+@@ -1442,8 +1452,8 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
+ 	if (likely(&hb1->chain != &hb2->chain)) {
+ 		plist_del(&q->list, &hb1->chain);
+ 		hb_waiters_dec(hb1);
+-		plist_add(&q->list, &hb2->chain);
+ 		hb_waiters_inc(hb2);
++		plist_add(&q->list, &hb2->chain);
+ 		q->lock_ptr = &hb2->lock;
+ 	}
+ 	get_futex_key_refs(key2);
+@@ -2536,6 +2546,15 @@ retry:
+ 		if (ret == -EFAULT)
+ 			goto pi_faulted;
+ 		/*
++		 * A unconditional UNLOCK_PI op raced against a waiter
++		 * setting the FUTEX_WAITERS bit. Try again.
++		 */
++		if (ret == -EAGAIN) {
++			spin_unlock(&hb->lock);
++			put_futex_key(&key);
++			goto retry;
++		}
++		/*
+ 		 * wake_futex_pi has detected invalid state. Tell user
+ 		 * space.
+ 		 */
+diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
+index 5b9102a47ea5..c835270f0c2f 100644
+--- a/kernel/locking/mcs_spinlock.h
++++ b/kernel/locking/mcs_spinlock.h
+@@ -67,7 +67,13 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
+ 	node->locked = 0;
+ 	node->next   = NULL;
+ 
+-	prev = xchg_acquire(lock, node);
++	/*
++	 * We rely on the full barrier with global transitivity implied by the
++	 * below xchg() to order the initialization stores above against any
++	 * observation of @node. And to provide the ACQUIRE ordering associated
++	 * with a LOCK primitive.
++	 */
++	prev = xchg(lock, node);
+ 	if (likely(prev == NULL)) {
+ 		/*
+ 		 * Lock acquired, don't need to set node->locked to 1. Threads
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index b7342a24f559..b7dd5718836e 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -339,6 +339,7 @@ int hibernation_snapshot(int platform_mode)
+ 	pm_message_t msg;
+ 	int error;
+ 
++	pm_suspend_clear_flags();
+ 	error = platform_begin(platform_mode);
+ 	if (error)
+ 		goto Close;
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 41f6b2215aa8..1c1d2a00ad95 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -5630,6 +5630,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ 
+ 	case CPU_UP_PREPARE:
+ 		rq->calc_load_update = calc_load_update;
++		account_reset_rq(rq);
+ 		break;
+ 
+ 	case CPU_ONLINE:
+@@ -7801,7 +7802,7 @@ void set_curr_task(int cpu, struct task_struct *p)
+ /* task_group_lock serializes the addition/removal of task groups */
+ static DEFINE_SPINLOCK(task_group_lock);
+ 
+-static void free_sched_group(struct task_group *tg)
++static void sched_free_group(struct task_group *tg)
+ {
+ 	free_fair_sched_group(tg);
+ 	free_rt_sched_group(tg);
+@@ -7827,7 +7828,7 @@ struct task_group *sched_create_group(struct task_group *parent)
+ 	return tg;
+ 
+ err:
+-	free_sched_group(tg);
++	sched_free_group(tg);
+ 	return ERR_PTR(-ENOMEM);
+ }
+ 
+@@ -7847,17 +7848,16 @@ void sched_online_group(struct task_group *tg, struct task_group *parent)
+ }
+ 
+ /* rcu callback to free various structures associated with a task group */
+-static void free_sched_group_rcu(struct rcu_head *rhp)
++static void sched_free_group_rcu(struct rcu_head *rhp)
+ {
+ 	/* now it should be safe to free those cfs_rqs */
+-	free_sched_group(container_of(rhp, struct task_group, rcu));
++	sched_free_group(container_of(rhp, struct task_group, rcu));
+ }
+ 
+-/* Destroy runqueue etc associated with a task group */
+ void sched_destroy_group(struct task_group *tg)
+ {
+ 	/* wait for possible concurrent references to cfs_rqs complete */
+-	call_rcu(&tg->rcu, free_sched_group_rcu);
++	call_rcu(&tg->rcu, sched_free_group_rcu);
+ }
+ 
+ void sched_offline_group(struct task_group *tg)
+@@ -8318,31 +8318,26 @@ cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
+ 	if (IS_ERR(tg))
+ 		return ERR_PTR(-ENOMEM);
+ 
++	sched_online_group(tg, parent);
++
+ 	return &tg->css;
+ }
+ 
+-static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
++static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
+ {
+ 	struct task_group *tg = css_tg(css);
+-	struct task_group *parent = css_tg(css->parent);
+ 
+-	if (parent)
+-		sched_online_group(tg, parent);
+-	return 0;
++	sched_offline_group(tg);
+ }
+ 
+ static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
+ {
+ 	struct task_group *tg = css_tg(css);
+ 
+-	sched_destroy_group(tg);
+-}
+-
+-static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
+-{
+-	struct task_group *tg = css_tg(css);
+-
+-	sched_offline_group(tg);
++	/*
++	 * Relies on the RCU grace period between css_released() and this.
++	 */
++	sched_free_group(tg);
+ }
+ 
+ static void cpu_cgroup_fork(struct task_struct *task)
+@@ -8702,9 +8697,8 @@ static struct cftype cpu_files[] = {
+ 
+ struct cgroup_subsys cpu_cgrp_subsys = {
+ 	.css_alloc	= cpu_cgroup_css_alloc,
++	.css_released	= cpu_cgroup_css_released,
+ 	.css_free	= cpu_cgroup_css_free,
+-	.css_online	= cpu_cgroup_css_online,
+-	.css_offline	= cpu_cgroup_css_offline,
+ 	.fork		= cpu_cgroup_fork,
+ 	.can_attach	= cpu_cgroup_can_attach,
+ 	.attach		= cpu_cgroup_attach,
+diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
+index b2ab2ffb1adc..ab2b5fb9821d 100644
+--- a/kernel/sched/cputime.c
++++ b/kernel/sched/cputime.c
+@@ -262,21 +262,21 @@ static __always_inline bool steal_account_process_tick(void)
+ #ifdef CONFIG_PARAVIRT
+ 	if (static_key_false(&paravirt_steal_enabled)) {
+ 		u64 steal;
+-		cputime_t steal_ct;
++		unsigned long steal_jiffies;
+ 
+ 		steal = paravirt_steal_clock(smp_processor_id());
+ 		steal -= this_rq()->prev_steal_time;
+ 
+ 		/*
+-		 * cputime_t may be less precise than nsecs (eg: if it's
+-		 * based on jiffies). Lets cast the result to cputime
++		 * steal is in nsecs but our caller is expecting steal
++		 * time in jiffies. Lets cast the result to jiffies
+ 		 * granularity and account the rest on the next rounds.
+ 		 */
+-		steal_ct = nsecs_to_cputime(steal);
+-		this_rq()->prev_steal_time += cputime_to_nsecs(steal_ct);
++		steal_jiffies = nsecs_to_jiffies(steal);
++		this_rq()->prev_steal_time += jiffies_to_nsecs(steal_jiffies);
+ 
+-		account_steal_time(steal_ct);
+-		return steal_ct;
++		account_steal_time(jiffies_to_cputime(steal_jiffies));
++		return steal_jiffies;
+ 	}
+ #endif
+ 	return false;
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 56b7d4b83947..adff850e5d42 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -4459,9 +4459,17 @@ static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
+ 
+ 		/* scale is effectively 1 << i now, and >> i divides by scale */
+ 
+-		old_load = this_rq->cpu_load[i] - tickless_load;
++		old_load = this_rq->cpu_load[i];
+ 		old_load = decay_load_missed(old_load, pending_updates - 1, i);
+-		old_load += tickless_load;
++		if (tickless_load) {
++			old_load -= decay_load_missed(tickless_load, pending_updates - 1, i);
++			/*
++			 * old_load can never be a negative value because a
++			 * decayed tickless_load cannot be greater than the
++			 * original tickless_load.
++			 */
++			old_load += tickless_load;
++		}
+ 		new_load = this_load;
+ 		/*
+ 		 * Round up the averaging division if load is increasing. This
+diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
+index ef7159012cf3..b0b93fd33af9 100644
+--- a/kernel/sched/loadavg.c
++++ b/kernel/sched/loadavg.c
+@@ -99,10 +99,13 @@ long calc_load_fold_active(struct rq *this_rq)
+ static unsigned long
+ calc_load(unsigned long load, unsigned long exp, unsigned long active)
+ {
+-	load *= exp;
+-	load += active * (FIXED_1 - exp);
+-	load += 1UL << (FSHIFT - 1);
+-	return load >> FSHIFT;
++	unsigned long newload;
++
++	newload = load * exp + active * (FIXED_1 - exp);
++	if (active >= load)
++		newload += FIXED_1-1;
++
++	return newload / FIXED_1;
+ }
+ 
+ #ifdef CONFIG_NO_HZ_COMMON
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 10f16374df7f..ff87d887ff62 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1738,3 +1738,16 @@ static inline u64 irq_time_read(int cpu)
+ }
+ #endif /* CONFIG_64BIT */
+ #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
++
++static inline void account_reset_rq(struct rq *rq)
++{
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++	rq->prev_irq_time = 0;
++#endif
++#ifdef CONFIG_PARAVIRT
++	rq->prev_steal_time = 0;
++#endif
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++	rq->prev_steal_time_rq = 0;
++#endif
++}
+diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
+index 7e7746a42a62..10a1d7dc9313 100644
+--- a/kernel/sysctl_binary.c
++++ b/kernel/sysctl_binary.c
+@@ -1321,7 +1321,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
+ 	}
+ 
+ 	mnt = task_active_pid_ns(current)->proc_mnt;
+-	file = file_open_root(mnt->mnt_root, mnt, pathname, flags);
++	file = file_open_root(mnt->mnt_root, mnt, pathname, flags, 0);
+ 	result = PTR_ERR(file);
+ 	if (IS_ERR(file))
+ 		goto out_putname;
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 95181e36891a..9c143739b8d7 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -437,7 +437,7 @@ struct ring_buffer_per_cpu {
+ 	raw_spinlock_t			reader_lock;	/* serialize readers */
+ 	arch_spinlock_t			lock;
+ 	struct lock_class_key		lock_key;
+-	unsigned int			nr_pages;
++	unsigned long			nr_pages;
+ 	unsigned int			current_context;
+ 	struct list_head		*pages;
+ 	struct buffer_page		*head_page;	/* read from head */
+@@ -458,7 +458,7 @@ struct ring_buffer_per_cpu {
+ 	u64				write_stamp;
+ 	u64				read_stamp;
+ 	/* ring buffer pages to update, > 0 to add, < 0 to remove */
+-	int				nr_pages_to_update;
++	long				nr_pages_to_update;
+ 	struct list_head		new_pages; /* new pages to add */
+ 	struct work_struct		update_pages_work;
+ 	struct completion		update_done;
+@@ -1128,10 +1128,10 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
+ 	return 0;
+ }
+ 
+-static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
++static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
+ {
+-	int i;
+ 	struct buffer_page *bpage, *tmp;
++	long i;
+ 
+ 	for (i = 0; i < nr_pages; i++) {
+ 		struct page *page;
+@@ -1168,7 +1168,7 @@ free_pages:
+ }
+ 
+ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
+-			     unsigned nr_pages)
++			     unsigned long nr_pages)
+ {
+ 	LIST_HEAD(pages);
+ 
+@@ -1193,7 +1193,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
+ }
+ 
+ static struct ring_buffer_per_cpu *
+-rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
++rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
+ {
+ 	struct ring_buffer_per_cpu *cpu_buffer;
+ 	struct buffer_page *bpage;
+@@ -1293,8 +1293,9 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
+ 					struct lock_class_key *key)
+ {
+ 	struct ring_buffer *buffer;
++	long nr_pages;
+ 	int bsize;
+-	int cpu, nr_pages;
++	int cpu;
+ 
+ 	/* keep it in its own cache line */
+ 	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
+@@ -1420,12 +1421,12 @@ static inline unsigned long rb_page_write(struct buffer_page *bpage)
+ }
+ 
+ static int
+-rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
++rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
+ {
+ 	struct list_head *tail_page, *to_remove, *next_page;
+ 	struct buffer_page *to_remove_page, *tmp_iter_page;
+ 	struct buffer_page *last_page, *first_page;
+-	unsigned int nr_removed;
++	unsigned long nr_removed;
+ 	unsigned long head_bit;
+ 	int page_entries;
+ 
+@@ -1642,7 +1643,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
+ 			int cpu_id)
+ {
+ 	struct ring_buffer_per_cpu *cpu_buffer;
+-	unsigned nr_pages;
++	unsigned long nr_pages;
+ 	int cpu, err = 0;
+ 
+ 	/*
+@@ -1656,14 +1657,13 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
+ 	    !cpumask_test_cpu(cpu_id, buffer->cpumask))
+ 		return size;
+ 
+-	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
+-	size *= BUF_PAGE_SIZE;
++	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
+ 
+ 	/* we need a minimum of two pages */
+-	if (size < BUF_PAGE_SIZE * 2)
+-		size = BUF_PAGE_SIZE * 2;
++	if (nr_pages < 2)
++		nr_pages = 2;
+ 
+-	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
++	size = nr_pages * BUF_PAGE_SIZE;
+ 
+ 	/*
+ 	 * Don't succeed if resizing is disabled, as a reader might be
+@@ -4640,8 +4640,9 @@ static int rb_cpu_notify(struct notifier_block *self,
+ 	struct ring_buffer *buffer =
+ 		container_of(self, struct ring_buffer, cpu_notify);
+ 	long cpu = (long)hcpu;
+-	int cpu_i, nr_pages_same;
+-	unsigned int nr_pages;
++	long nr_pages_same;
++	int cpu_i;
++	unsigned long nr_pages;
+ 
+ 	switch (action) {
+ 	case CPU_UP_PREPARE:
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index d9293402ee68..8305cbb2d5a2 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4949,7 +4949,10 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
+ 
+ 	spd.nr_pages = i;
+ 
+-	ret = splice_to_pipe(pipe, &spd);
++	if (i)
++		ret = splice_to_pipe(pipe, &spd);
++	else
++		ret = 0;
+ out:
+ 	splice_shrink_spd(&spd);
+ 	return ret;
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 05ddc0820771..6f965864cc02 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -2095,8 +2095,13 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file)
+ 	trace_create_file("filter", 0644, file->dir, file,
+ 			  &ftrace_event_filter_fops);
+ 
+-	trace_create_file("trigger", 0644, file->dir, file,
+-			  &event_trigger_fops);
++	/*
++	 * Only event directories that can be enabled should have
++	 * triggers.
++	 */
++	if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
++		trace_create_file("trigger", 0644, file->dir, file,
++				  &event_trigger_fops);
+ 
+ 	trace_create_file("format", 0444, file->dir, call,
+ 			  &ftrace_event_format_fops);
+diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
+index e4e56589ec1d..be3222b7d72e 100644
+--- a/kernel/trace/trace_irqsoff.c
++++ b/kernel/trace/trace_irqsoff.c
+@@ -109,8 +109,12 @@ static int func_prolog_dec(struct trace_array *tr,
+ 		return 0;
+ 
+ 	local_save_flags(*flags);
+-	/* slight chance to get a false positive on tracing_cpu */
+-	if (!irqs_disabled_flags(*flags))
++	/*
++	 * Slight chance to get a false positive on tracing_cpu,
++	 * although I'm starting to think there isn't a chance.
++	 * Leave this for now just to be paranoid.
++	 */
++	if (!irqs_disabled_flags(*flags) && !preempt_count())
+ 		return 0;
+ 
+ 	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
+index 060df67dbdd1..f96f0383f6c6 100644
+--- a/kernel/trace/trace_printk.c
++++ b/kernel/trace/trace_printk.c
+@@ -296,6 +296,9 @@ static int t_show(struct seq_file *m, void *v)
+ 	const char *str = *fmt;
+ 	int i;
+ 
++	if (!*fmt)
++		return 0;
++
+ 	seq_printf(m, "0x%lx : \"", *(unsigned long *)fmt);
+ 
+ 	/*
+diff --git a/kernel/watchdog.c b/kernel/watchdog.c
+index b3ace6ebbba3..9acb29f280ec 100644
+--- a/kernel/watchdog.c
++++ b/kernel/watchdog.c
+@@ -923,6 +923,9 @@ static int proc_watchdog_common(int which, struct ctl_table *table, int write,
+ 		 * both lockup detectors are disabled if proc_watchdog_update()
+ 		 * returns an error.
+ 		 */
++		if (old == new)
++			goto out;
++
+ 		err = proc_watchdog_update();
+ 	}
+ out:
+@@ -967,7 +970,7 @@ int proc_soft_watchdog(struct ctl_table *table, int write,
+ int proc_watchdog_thresh(struct ctl_table *table, int write,
+ 			 void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+-	int err, old;
++	int err, old, new;
+ 
+ 	get_online_cpus();
+ 	mutex_lock(&watchdog_proc_mutex);
+@@ -987,6 +990,10 @@ int proc_watchdog_thresh(struct ctl_table *table, int write,
+ 	/*
+ 	 * Update the sample period. Restore on failure.
+ 	 */
++	new = ACCESS_ONCE(watchdog_thresh);
++	if (old == new)
++		goto out;
++
+ 	set_sample_period();
+ 	err = proc_watchdog_update();
+ 	if (err) {
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 7ff5dc7d2ac5..f48c80e4ba75 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -667,6 +667,35 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
+ 	 */
+ 	smp_wmb();
+ 	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
++	/*
++	 * The following mb guarantees that previous clear of a PENDING bit
++	 * will not be reordered with any speculative LOADS or STORES from
++	 * work->current_func, which is executed afterwards.  This possible
++	 * reordering can lead to a missed execution on attempt to qeueue
++	 * the same @work.  E.g. consider this case:
++	 *
++	 *   CPU#0                         CPU#1
++	 *   ----------------------------  --------------------------------
++	 *
++	 * 1  STORE event_indicated
++	 * 2  queue_work_on() {
++	 * 3    test_and_set_bit(PENDING)
++	 * 4 }                             set_..._and_clear_pending() {
++	 * 5                                 set_work_data() # clear bit
++	 * 6                                 smp_mb()
++	 * 7                               work->current_func() {
++	 * 8				      LOAD event_indicated
++	 *				   }
++	 *
++	 * Without an explicit full barrier speculative LOAD on line 8 can
++	 * be executed before CPU#0 does STORE on line 1.  If that happens,
++	 * CPU#0 observes the PENDING bit is still set and new execution of
++	 * a @work is not queued in a hope, that CPU#1 will eventually
++	 * finish the queued @work.  Meanwhile CPU#1 does not see
++	 * event_indicated is set, because speculative LOAD was executed
++	 * before actual STORE.
++	 */
++	smp_mb();
+ }
+ 
+ static void clear_work_data(struct work_struct *work)
+@@ -4527,6 +4556,17 @@ static void rebind_workers(struct worker_pool *pool)
+ 						  pool->attrs->cpumask) < 0);
+ 
+ 	spin_lock_irq(&pool->lock);
++
++	/*
++	 * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
++	 * w/o preceding DOWN_PREPARE.  Work around it.  CPU hotplug is
++	 * being reworked and this can go away in time.
++	 */
++	if (!(pool->flags & POOL_DISASSOCIATED)) {
++		spin_unlock_irq(&pool->lock);
++		return;
++	}
++
+ 	pool->flags &= ~POOL_DISASSOCIATED;
+ 
+ 	for_each_pool_worker(worker, pool) {
+diff --git a/lib/assoc_array.c b/lib/assoc_array.c
+index 03dd576e6773..59fd7c0b119c 100644
+--- a/lib/assoc_array.c
++++ b/lib/assoc_array.c
+@@ -524,7 +524,9 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
+ 			free_slot = i;
+ 			continue;
+ 		}
+-		if (ops->compare_object(assoc_array_ptr_to_leaf(ptr), index_key)) {
++		if (assoc_array_ptr_is_leaf(ptr) &&
++		    ops->compare_object(assoc_array_ptr_to_leaf(ptr),
++					index_key)) {
+ 			pr_devel("replace in slot %d\n", i);
+ 			edit->leaf_p = &node->slots[i];
+ 			edit->dead_leaf = node->slots[i];
+diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
+index abcecdc2d0f2..0710a62ad2f6 100644
+--- a/lib/lz4/lz4defs.h
++++ b/lib/lz4/lz4defs.h
+@@ -11,8 +11,7 @@
+ /*
+  * Detects 64 bits mode
+  */
+-#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) \
+-	|| defined(__ppc64__) || defined(__LP64__))
++#if defined(CONFIG_64BIT)
+ #define LZ4_ARCH64 1
+ #else
+ #define LZ4_ARCH64 0
+@@ -35,6 +34,10 @@ typedef struct _U64_S { u64 v; } U64_S;
+ 
+ #define PUT4(s, d) (A32(d) = A32(s))
+ #define PUT8(s, d) (A64(d) = A64(s))
++
++#define LZ4_READ_LITTLEENDIAN_16(d, s, p)	\
++	(d = s - A16(p))
++
+ #define LZ4_WRITE_LITTLEENDIAN_16(p, v)	\
+ 	do {	\
+ 		A16(p) = v; \
+@@ -51,10 +54,13 @@ typedef struct _U64_S { u64 v; } U64_S;
+ #define PUT8(s, d) \
+ 	put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)
+ 
+-#define LZ4_WRITE_LITTLEENDIAN_16(p, v)	\
+-	do {	\
+-		put_unaligned(v, (u16 *)(p)); \
+-		p += 2; \
++#define LZ4_READ_LITTLEENDIAN_16(d, s, p)	\
++	(d = s - get_unaligned_le16(p))
++
++#define LZ4_WRITE_LITTLEENDIAN_16(p, v)			\
++	do {						\
++		put_unaligned_le16(v, (u16 *)(p));	\
++		p += 2;					\
+ 	} while (0)
+ #endif
+ 
+@@ -140,9 +146,6 @@ typedef struct _U64_S { u64 v; } U64_S;
+ 
+ #endif
+ 
+-#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
+-	(d = s - get_unaligned_le16(p))
+-
+ #define LZ4_WILDCOPY(s, d, e)		\
+ 	do {				\
+ 		LZ4_COPYPACKET(s, d);	\
+diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
+index ec533a6c77b5..eb15e7dc7b65 100644
+--- a/lib/mpi/mpicoder.c
++++ b/lib/mpi/mpicoder.c
+@@ -128,6 +128,23 @@ leave:
+ }
+ EXPORT_SYMBOL_GPL(mpi_read_from_buffer);
+ 
++static int count_lzeros(MPI a)
++{
++	mpi_limb_t alimb;
++	int i, lzeros = 0;
++
++	for (i = a->nlimbs - 1; i >= 0; i--) {
++		alimb = a->d[i];
++		if (alimb == 0) {
++			lzeros += sizeof(mpi_limb_t);
++		} else {
++			lzeros += count_leading_zeros(alimb) / 8;
++			break;
++		}
++	}
++	return lzeros;
++}
++
+ /**
+  * mpi_read_buffer() - read MPI to a bufer provided by user (msb first)
+  *
+@@ -148,7 +165,7 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
+ 	uint8_t *p;
+ 	mpi_limb_t alimb;
+ 	unsigned int n = mpi_get_size(a);
+-	int i, lzeros = 0;
++	int i, lzeros;
+ 
+ 	if (!buf || !nbytes)
+ 		return -EINVAL;
+@@ -156,14 +173,7 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
+ 	if (sign)
+ 		*sign = a->sign;
+ 
+-	p = (void *)&a->d[a->nlimbs] - 1;
+-
+-	for (i = a->nlimbs * sizeof(alimb) - 1; i >= 0; i--, p--) {
+-		if (!*p)
+-			lzeros++;
+-		else
+-			break;
+-	}
++	lzeros = count_lzeros(a);
+ 
+ 	if (buf_len < n - lzeros) {
+ 		*nbytes = n - lzeros;
+@@ -351,7 +361,7 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
+ 	u8 *p, *p2;
+ 	mpi_limb_t alimb, alimb2;
+ 	unsigned int n = mpi_get_size(a);
+-	int i, x, y = 0, lzeros = 0, buf_len;
++	int i, x, y = 0, lzeros, buf_len;
+ 
+ 	if (!nbytes)
+ 		return -EINVAL;
+@@ -359,14 +369,7 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
+ 	if (sign)
+ 		*sign = a->sign;
+ 
+-	p = (void *)&a->d[a->nlimbs] - 1;
+-
+-	for (i = a->nlimbs * sizeof(alimb) - 1; i >= 0; i--, p--) {
+-		if (!*p)
+-			lzeros++;
+-		else
+-			break;
+-	}
++	lzeros = count_lzeros(a);
+ 
+ 	if (*nbytes < n - lzeros) {
+ 		*nbytes = n - lzeros;
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 585de54dbe8c..29fb26970fba 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -880,16 +880,8 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
+ 		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
+ 							ISOLATE_UNEVICTABLE);
+ 
+-		/*
+-		 * In case of fatal failure, release everything that might
+-		 * have been isolated in the previous iteration, and signal
+-		 * the failure back to caller.
+-		 */
+-		if (!pfn) {
+-			putback_movable_pages(&cc->migratepages);
+-			cc->nr_migratepages = 0;
++		if (!pfn)
+ 			break;
+-		}
+ 
+ 		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
+ 			break;
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index e10a4fee88d2..a82a87b3f9c6 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1257,15 +1257,9 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ 	VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
+ 	/*
+ 	 * We can only reuse the page if nobody else maps the huge page or it's
+-	 * part. We can do it by checking page_mapcount() on each sub-page, but
+-	 * it's expensive.
+-	 * The cheaper way is to check page_count() to be equal 1: every
+-	 * mapcount takes page reference reference, so this way we can
+-	 * guarantee, that the PMD is the only mapping.
+-	 * This can give false negative if somebody pinned the page, but that's
+-	 * fine.
++	 * part.
+ 	 */
+-	if (page_mapcount(page) == 1 && page_count(page) == 1) {
++	if (page_trans_huge_mapcount(page, NULL) == 1) {
+ 		pmd_t entry;
+ 		entry = pmd_mkyoung(orig_pmd);
+ 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+@@ -1919,10 +1913,9 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
+ 		 * page fault if needed.
+ 		 */
+ 		return 0;
+-	if (vma->vm_ops)
++	if (vma->vm_ops || (vm_flags & VM_NO_THP))
+ 		/* khugepaged not yet working on file or special mappings */
+ 		return 0;
+-	VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
+ 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
+ 	hend = vma->vm_end & HPAGE_PMD_MASK;
+ 	if (hstart < hend)
+@@ -2039,7 +2032,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
+ 		if (pte_write(pteval)) {
+ 			writable = true;
+ 		} else {
+-			if (PageSwapCache(page) && !reuse_swap_page(page)) {
++			if (PageSwapCache(page) &&
++			    !reuse_swap_page(page, NULL)) {
+ 				unlock_page(page);
+ 				result = SCAN_SWAP_CACHE_PAGE;
+ 				goto out;
+@@ -2310,8 +2304,7 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
+ 		return false;
+ 	if (is_vma_temporary_stack(vma))
+ 		return false;
+-	VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
+-	return true;
++	return !(vma->vm_flags & VM_NO_THP);
+ }
+ 
+ static void collapse_huge_page(struct mm_struct *mm,
+@@ -3340,6 +3333,64 @@ int total_mapcount(struct page *page)
+ }
+ 
+ /*
++ * This calculates accurately how many mappings a transparent hugepage
++ * has (unlike page_mapcount() which isn't fully accurate). This full
++ * accuracy is primarily needed to know if copy-on-write faults can
++ * reuse the page and change the mapping to read-write instead of
++ * copying them. At the same time this returns the total_mapcount too.
++ *
++ * The function returns the highest mapcount any one of the subpages
++ * has. If the return value is one, even if different processes are
++ * mapping different subpages of the transparent hugepage, they can
++ * all reuse it, because each process is reusing a different subpage.
++ *
++ * The total_mapcount is instead counting all virtual mappings of the
++ * subpages. If the total_mapcount is equal to "one", it tells the
++ * caller all mappings belong to the same "mm" and in turn the
++ * anon_vma of the transparent hugepage can become the vma->anon_vma
++ * local one as no other process may be mapping any of the subpages.
++ *
++ * It would be more accurate to replace page_mapcount() with
++ * page_trans_huge_mapcount(), however we only use
++ * page_trans_huge_mapcount() in the copy-on-write faults where we
++ * need full accuracy to avoid breaking page pinning, because
++ * page_trans_huge_mapcount() is slower than page_mapcount().
++ */
++int page_trans_huge_mapcount(struct page *page, int *total_mapcount)
++{
++	int i, ret, _total_mapcount, mapcount;
++
++	/* hugetlbfs shouldn't call it */
++	VM_BUG_ON_PAGE(PageHuge(page), page);
++
++	if (likely(!PageTransCompound(page))) {
++		mapcount = atomic_read(&page->_mapcount) + 1;
++		if (total_mapcount)
++			*total_mapcount = mapcount;
++		return mapcount;
++	}
++
++	page = compound_head(page);
++
++	_total_mapcount = ret = 0;
++	for (i = 0; i < HPAGE_PMD_NR; i++) {
++		mapcount = atomic_read(&page[i]._mapcount) + 1;
++		ret = max(ret, mapcount);
++		_total_mapcount += mapcount;
++	}
++	if (PageDoubleMap(page)) {
++		ret -= 1;
++		_total_mapcount -= HPAGE_PMD_NR;
++	}
++	mapcount = compound_mapcount(page);
++	ret += mapcount;
++	_total_mapcount += mapcount;
++	if (total_mapcount)
++		*total_mapcount = _total_mapcount;
++	return ret;
++}
++
++/*
+  * This function splits huge page into normal pages. @page can point to any
+  * subpage of huge page to split. Split doesn't change the position of @page.
+  *
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index d06cae2de783..a65ad1d59232 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -207,6 +207,7 @@ static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
+ /* "mc" and its members are protected by cgroup_mutex */
+ static struct move_charge_struct {
+ 	spinlock_t	  lock; /* for from, to */
++	struct mm_struct  *mm;
+ 	struct mem_cgroup *from;
+ 	struct mem_cgroup *to;
+ 	unsigned long flags;
+@@ -1262,7 +1263,7 @@ static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
+ 	return limit;
+ }
+ 
+-static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
++static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
+ 				     int order)
+ {
+ 	struct oom_control oc = {
+@@ -1340,6 +1341,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
+ 	}
+ unlock:
+ 	mutex_unlock(&oom_lock);
++	return chosen;
+ }
+ 
+ #if MAX_NUMNODES > 1
+@@ -4729,6 +4731,8 @@ static void __mem_cgroup_clear_mc(void)
+ 
+ static void mem_cgroup_clear_mc(void)
+ {
++	struct mm_struct *mm = mc.mm;
++
+ 	/*
+ 	 * we must clear moving_task before waking up waiters at the end of
+ 	 * task migration.
+@@ -4738,7 +4742,10 @@ static void mem_cgroup_clear_mc(void)
+ 	spin_lock(&mc.lock);
+ 	mc.from = NULL;
+ 	mc.to = NULL;
++	mc.mm = NULL;
+ 	spin_unlock(&mc.lock);
++
++	mmput(mm);
+ }
+ 
+ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
+@@ -4795,6 +4802,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
+ 		VM_BUG_ON(mc.moved_swap);
+ 
+ 		spin_lock(&mc.lock);
++		mc.mm = mm;
+ 		mc.from = from;
+ 		mc.to = memcg;
+ 		mc.flags = move_flags;
+@@ -4804,8 +4812,9 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
+ 		ret = mem_cgroup_precharge_mc(mm);
+ 		if (ret)
+ 			mem_cgroup_clear_mc();
++	} else {
++		mmput(mm);
+ 	}
+-	mmput(mm);
+ 	return ret;
+ }
+ 
+@@ -4914,11 +4923,11 @@ put:			/* get_mctgt_type() gets the page */
+ 	return ret;
+ }
+ 
+-static void mem_cgroup_move_charge(struct mm_struct *mm)
++static void mem_cgroup_move_charge(void)
+ {
+ 	struct mm_walk mem_cgroup_move_charge_walk = {
+ 		.pmd_entry = mem_cgroup_move_charge_pte_range,
+-		.mm = mm,
++		.mm = mc.mm,
+ 	};
+ 
+ 	lru_add_drain_all();
+@@ -4930,7 +4939,7 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
+ 	atomic_inc(&mc.from->moving_account);
+ 	synchronize_rcu();
+ retry:
+-	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
++	if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
+ 		/*
+ 		 * Someone who are holding the mmap_sem might be waiting in
+ 		 * waitq. So we cancel all extra charges, wake up all waiters,
+@@ -4947,23 +4956,16 @@ retry:
+ 	 * additional charge, the page walk just aborts.
+ 	 */
+ 	walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
+-	up_read(&mm->mmap_sem);
++	up_read(&mc.mm->mmap_sem);
+ 	atomic_dec(&mc.from->moving_account);
+ }
+ 
+-static void mem_cgroup_move_task(struct cgroup_taskset *tset)
++static void mem_cgroup_move_task(void)
+ {
+-	struct cgroup_subsys_state *css;
+-	struct task_struct *p = cgroup_taskset_first(tset, &css);
+-	struct mm_struct *mm = get_task_mm(p);
+-
+-	if (mm) {
+-		if (mc.to)
+-			mem_cgroup_move_charge(mm);
+-		mmput(mm);
+-	}
+-	if (mc.to)
++	if (mc.to) {
++		mem_cgroup_move_charge();
+ 		mem_cgroup_clear_mc();
++	}
+ }
+ #else	/* !CONFIG_MMU */
+ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
+@@ -4973,7 +4975,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
+ static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
+ {
+ }
+-static void mem_cgroup_move_task(struct cgroup_taskset *tset)
++static void mem_cgroup_move_task(void)
+ {
+ }
+ #endif
+@@ -5051,6 +5053,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
+ 				 char *buf, size_t nbytes, loff_t off)
+ {
+ 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
++	unsigned long nr_pages;
+ 	unsigned long high;
+ 	int err;
+ 
+@@ -5061,6 +5064,11 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
+ 
+ 	memcg->high = high;
+ 
++	nr_pages = page_counter_read(&memcg->memory);
++	if (nr_pages > high)
++		try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
++					     GFP_KERNEL, true);
++
+ 	memcg_wb_domain_size_changed(memcg);
+ 	return nbytes;
+ }
+@@ -5082,6 +5090,8 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
+ 				char *buf, size_t nbytes, loff_t off)
+ {
+ 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
++	unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES;
++	bool drained = false;
+ 	unsigned long max;
+ 	int err;
+ 
+@@ -5090,9 +5100,36 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
+ 	if (err)
+ 		return err;
+ 
+-	err = mem_cgroup_resize_limit(memcg, max);
+-	if (err)
+-		return err;
++	xchg(&memcg->memory.limit, max);
++
++	for (;;) {
++		unsigned long nr_pages = page_counter_read(&memcg->memory);
++
++		if (nr_pages <= max)
++			break;
++
++		if (signal_pending(current)) {
++			err = -EINTR;
++			break;
++		}
++
++		if (!drained) {
++			drain_all_stock(memcg);
++			drained = true;
++			continue;
++		}
++
++		if (nr_reclaims) {
++			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
++							  GFP_KERNEL, true))
++				nr_reclaims--;
++			continue;
++		}
++
++		mem_cgroup_events(memcg, MEMCG_OOM, 1);
++		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
++			break;
++	}
+ 
+ 	memcg_wb_domain_size_changed(memcg);
+ 	return nbytes;
+@@ -5210,7 +5247,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
+ 	.css_reset = mem_cgroup_css_reset,
+ 	.can_attach = mem_cgroup_can_attach,
+ 	.cancel_attach = mem_cgroup_cancel_attach,
+-	.attach = mem_cgroup_move_task,
++	.post_attach = mem_cgroup_move_task,
+ 	.bind = mem_cgroup_bind,
+ 	.dfl_cftypes = memory_files,
+ 	.legacy_cftypes = mem_cgroup_legacy_files,
+diff --git a/mm/memory.c b/mm/memory.c
+index 8132787ae4d5..c1aa0e4b4096 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -792,6 +792,46 @@ out:
+ 	return pfn_to_page(pfn);
+ }
+ 
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
++				pmd_t pmd)
++{
++	unsigned long pfn = pmd_pfn(pmd);
++
++	/*
++	 * There is no pmd_special() but there may be special pmds, e.g.
++	 * in a direct-access (dax) mapping, so let's just replicate the
++	 * !HAVE_PTE_SPECIAL case from vm_normal_page() here.
++	 */
++	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
++		if (vma->vm_flags & VM_MIXEDMAP) {
++			if (!pfn_valid(pfn))
++				return NULL;
++			goto out;
++		} else {
++			unsigned long off;
++			off = (addr - vma->vm_start) >> PAGE_SHIFT;
++			if (pfn == vma->vm_pgoff + off)
++				return NULL;
++			if (!is_cow_mapping(vma->vm_flags))
++				return NULL;
++		}
++	}
++
++	if (is_zero_pfn(pfn))
++		return NULL;
++	if (unlikely(pfn > highest_memmap_pfn))
++		return NULL;
++
++	/*
++	 * NOTE! We still have PageReserved() pages in the page tables.
++	 * eg. VDSO mappings can cause them to exist.
++	 */
++out:
++	return pfn_to_page(pfn);
++}
++#endif
++
+ /*
+  * copy one vm_area from one task to the other. Assumes the page tables
+  * already present in the new task to be cleared in the whole range
+@@ -2317,6 +2357,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ 	 * not dirty accountable.
+ 	 */
+ 	if (PageAnon(old_page) && !PageKsm(old_page)) {
++		int total_mapcount;
+ 		if (!trylock_page(old_page)) {
+ 			page_cache_get(old_page);
+ 			pte_unmap_unlock(page_table, ptl);
+@@ -2331,13 +2372,18 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ 			}
+ 			page_cache_release(old_page);
+ 		}
+-		if (reuse_swap_page(old_page)) {
+-			/*
+-			 * The page is all ours.  Move it to our anon_vma so
+-			 * the rmap code will not search our parent or siblings.
+-			 * Protected against the rmap code by the page lock.
+-			 */
+-			page_move_anon_rmap(old_page, vma, address);
++		if (reuse_swap_page(old_page, &total_mapcount)) {
++			if (total_mapcount == 1) {
++				/*
++				 * The page is all ours. Move it to
++				 * our anon_vma so the rmap code will
++				 * not search our parent or siblings.
++				 * Protected against the rmap code by
++				 * the page lock.
++				 */
++				page_move_anon_rmap(compound_head(old_page),
++						    vma, address);
++			}
+ 			unlock_page(old_page);
+ 			return wp_page_reuse(mm, vma, address, page_table, ptl,
+ 					     orig_pte, old_page, 0, 0);
+@@ -2562,7 +2608,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ 	inc_mm_counter_fast(mm, MM_ANONPAGES);
+ 	dec_mm_counter_fast(mm, MM_SWAPENTS);
+ 	pte = mk_pte(page, vma->vm_page_prot);
+-	if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
++	if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
+ 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
+ 		flags &= ~FAULT_FLAG_WRITE;
+ 		ret |= VM_FAULT_WRITE;
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 3ad0fea5c438..625741faa068 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -967,7 +967,13 @@ out:
+ 		dec_zone_page_state(page, NR_ISOLATED_ANON +
+ 				page_is_file_cache(page));
+ 		/* Soft-offlined page shouldn't go through lru cache list */
+-		if (reason == MR_MEMORY_FAILURE) {
++		if (reason == MR_MEMORY_FAILURE && rc == MIGRATEPAGE_SUCCESS) {
++			/*
++			 * With this release, we free successfully migrated
++			 * page and set PG_HWPoison on just freed page
++			 * intentionally. Although it's rather weird, it's how
++			 * HWPoison flag works at the moment.
++			 */
+ 			put_page(page);
+ 			if (!test_set_page_hwpoison(page))
+ 				num_poisoned_pages_inc();
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index 6fe7d15bd1f7..62bbf350ddf7 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -1909,7 +1909,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
+ 	if (gdtc->dirty > gdtc->bg_thresh)
+ 		return true;
+ 
+-	if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(gdtc))
++	if (wb_stat(wb, WB_RECLAIMABLE) >
++	    wb_calc_thresh(gdtc->wb, gdtc->bg_thresh))
+ 		return true;
+ 
+ 	if (mdtc) {
+@@ -1923,7 +1924,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
+ 		if (mdtc->dirty > mdtc->bg_thresh)
+ 			return true;
+ 
+-		if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(mdtc))
++		if (wb_stat(wb, WB_RECLAIMABLE) >
++		    wb_calc_thresh(mdtc->wb, mdtc->bg_thresh))
+ 			return true;
+ 	}
+ 
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 838ca8bb64f7..629ce645cffd 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -660,34 +660,28 @@ static inline void __free_one_page(struct page *page,
+ 	unsigned long combined_idx;
+ 	unsigned long uninitialized_var(buddy_idx);
+ 	struct page *buddy;
+-	unsigned int max_order = MAX_ORDER;
++	unsigned int max_order;
++
++	max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
+ 
+ 	VM_BUG_ON(!zone_is_initialized(zone));
+ 	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
+ 
+ 	VM_BUG_ON(migratetype == -1);
+-	if (is_migrate_isolate(migratetype)) {
+-		/*
+-		 * We restrict max order of merging to prevent merge
+-		 * between freepages on isolate pageblock and normal
+-		 * pageblock. Without this, pageblock isolation
+-		 * could cause incorrect freepage accounting.
+-		 */
+-		max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
+-	} else {
++	if (likely(!is_migrate_isolate(migratetype)))
+ 		__mod_zone_freepage_state(zone, 1 << order, migratetype);
+-	}
+ 
+-	page_idx = pfn & ((1 << max_order) - 1);
++	page_idx = pfn & ((1 << MAX_ORDER) - 1);
+ 
+ 	VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
+ 	VM_BUG_ON_PAGE(bad_range(zone, page), page);
+ 
++continue_merging:
+ 	while (order < max_order - 1) {
+ 		buddy_idx = __find_buddy_index(page_idx, order);
+ 		buddy = page + (buddy_idx - page_idx);
+ 		if (!page_is_buddy(page, buddy, order))
+-			break;
++			goto done_merging;
+ 		/*
+ 		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
+ 		 * merge with it and move up one order.
+@@ -704,6 +698,32 @@ static inline void __free_one_page(struct page *page,
+ 		page_idx = combined_idx;
+ 		order++;
+ 	}
++	if (max_order < MAX_ORDER) {
++		/* If we are here, it means order is >= pageblock_order.
++		 * We want to prevent merge between freepages on isolate
++		 * pageblock and normal pageblock. Without this, pageblock
++		 * isolation could cause incorrect freepage or CMA accounting.
++		 *
++		 * We don't want to hit this code for the more frequent
++		 * low-order merging.
++		 */
++		if (unlikely(has_isolate_pageblock(zone))) {
++			int buddy_mt;
++
++			buddy_idx = __find_buddy_index(page_idx, order);
++			buddy = page + (buddy_idx - page_idx);
++			buddy_mt = get_pageblock_migratetype(buddy);
++
++			if (migratetype != buddy_mt
++					&& (is_migrate_isolate(migratetype) ||
++						is_migrate_isolate(buddy_mt)))
++				goto done_merging;
++		}
++		max_order++;
++		goto continue_merging;
++	}
++
++done_merging:
+ 	set_page_order(page, order);
+ 
+ 	/*
+@@ -6194,7 +6214,7 @@ int __meminit init_per_zone_wmark_min(void)
+ 	setup_per_zone_inactive_ratio();
+ 	return 0;
+ }
+-module_init(init_per_zone_wmark_min)
++core_initcall(init_per_zone_wmark_min)
+ 
+ /*
+  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
+diff --git a/mm/page_isolation.c b/mm/page_isolation.c
+index 92c4c36501e7..31555b689eb9 100644
+--- a/mm/page_isolation.c
++++ b/mm/page_isolation.c
+@@ -289,11 +289,11 @@ struct page *alloc_migrate_target(struct page *page, unsigned long private,
+ 	 * now as a simple work-around, we use the next node for destination.
+ 	 */
+ 	if (PageHuge(page)) {
+-		nodemask_t src = nodemask_of_node(page_to_nid(page));
+-		nodemask_t dst;
+-		nodes_complement(dst, src);
++		int node = next_online_node(page_to_nid(page));
++		if (node == MAX_NUMNODES)
++			node = first_online_node;
+ 		return alloc_huge_page_node(page_hstate(compound_head(page)),
+-					    next_node(page_to_nid(page), dst));
++					    node);
+ 	}
+ 
+ 	if (PageHighMem(page))
+diff --git a/mm/slub.c b/mm/slub.c
+index d8fbd4a6ed59..2a722e141958 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -2815,6 +2815,7 @@ struct detached_freelist {
+ 	void *tail;
+ 	void *freelist;
+ 	int cnt;
++	struct kmem_cache *s;
+ };
+ 
+ /*
+@@ -2829,8 +2830,9 @@ struct detached_freelist {
+  * synchronization primitive.  Look ahead in the array is limited due
+  * to performance reasons.
+  */
+-static int build_detached_freelist(struct kmem_cache *s, size_t size,
+-				   void **p, struct detached_freelist *df)
++static inline
++int build_detached_freelist(struct kmem_cache *s, size_t size,
++			    void **p, struct detached_freelist *df)
+ {
+ 	size_t first_skipped_index = 0;
+ 	int lookahead = 3;
+@@ -2846,8 +2848,11 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
+ 	if (!object)
+ 		return 0;
+ 
++	/* Support for memcg, compiler can optimize this out */
++	df->s = cache_from_obj(s, object);
++
+ 	/* Start new detached freelist */
+-	set_freepointer(s, object, NULL);
++	set_freepointer(df->s, object, NULL);
+ 	df->page = virt_to_head_page(object);
+ 	df->tail = object;
+ 	df->freelist = object;
+@@ -2862,7 +2867,7 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
+ 		/* df->page is always set at this point */
+ 		if (df->page == virt_to_head_page(object)) {
+ 			/* Opportunity build freelist */
+-			set_freepointer(s, object, df->freelist);
++			set_freepointer(df->s, object, df->freelist);
+ 			df->freelist = object;
+ 			df->cnt++;
+ 			p[size] = NULL; /* mark object processed */
+@@ -2881,25 +2886,20 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
+ 	return first_skipped_index;
+ }
+ 
+-
+ /* Note that interrupts must be enabled when calling this function. */
+-void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
++void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
+ {
+ 	if (WARN_ON(!size))
+ 		return;
+ 
+ 	do {
+ 		struct detached_freelist df;
+-		struct kmem_cache *s;
+-
+-		/* Support for memcg */
+-		s = cache_from_obj(orig_s, p[size - 1]);
+ 
+ 		size = build_detached_freelist(s, size, p, &df);
+ 		if (unlikely(!df.page))
+ 			continue;
+ 
+-		slab_free(s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
++		slab_free(df.s, df.page, df.freelist, df.tail, df.cnt,_RET_IP_);
+ 	} while (likely(size));
+ }
+ EXPORT_SYMBOL(kmem_cache_free_bulk);
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index d2c37365e2d6..954fd8f72b79 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -916,18 +916,19 @@ out:
+  * to it.  And as a side-effect, free up its swap: because the old content
+  * on disk will never be read, and seeking back there to write new content
+  * later would only waste time away from clustering.
++ *
++ * NOTE: total_mapcount should not be relied upon by the caller if
++ * reuse_swap_page() returns false, but it may be always overwritten
++ * (see the other implementation for CONFIG_SWAP=n).
+  */
+-int reuse_swap_page(struct page *page)
++bool reuse_swap_page(struct page *page, int *total_mapcount)
+ {
+ 	int count;
+ 
+ 	VM_BUG_ON_PAGE(!PageLocked(page), page);
+ 	if (unlikely(PageKsm(page)))
+-		return 0;
+-	/* The page is part of THP and cannot be reused */
+-	if (PageTransCompound(page))
+-		return 0;
+-	count = page_mapcount(page);
++		return false;
++	count = page_trans_huge_mapcount(page, total_mapcount);
+ 	if (count <= 1 && PageSwapCache(page)) {
+ 		count += page_swapcount(page);
+ 		if (count == 1 && !PageWriteback(page)) {
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 71b1c29948db..c712b016e0ab 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2561,7 +2561,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
+ 		sc->gfp_mask |= __GFP_HIGHMEM;
+ 
+ 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
+-					requested_highidx, sc->nodemask) {
++					gfp_zone(sc->gfp_mask), sc->nodemask) {
+ 		enum zone_type classzone_idx;
+ 
+ 		if (!populated_zone(zone))
+diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
+index 2d7c4c11fc63..336149ffd07d 100644
+--- a/mm/zsmalloc.c
++++ b/mm/zsmalloc.c
+@@ -1732,10 +1732,13 @@ static struct page *isolate_source_page(struct size_class *class)
+ static unsigned long zs_can_compact(struct size_class *class)
+ {
+ 	unsigned long obj_wasted;
++	unsigned long obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
++	unsigned long obj_used = zs_stat_get(class, OBJ_USED);
+ 
+-	obj_wasted = zs_stat_get(class, OBJ_ALLOCATED) -
+-		zs_stat_get(class, OBJ_USED);
++	if (obj_allocated <= obj_used)
++		return 0;
+ 
++	obj_wasted = obj_allocated - obj_used;
+ 	obj_wasted /= get_maxobj_per_zspage(class->size,
+ 			class->pages_per_zspage);
+ 
+diff --git a/mm/zswap.c b/mm/zswap.c
+index bf14508afd64..340261946fda 100644
+--- a/mm/zswap.c
++++ b/mm/zswap.c
+@@ -170,6 +170,8 @@ static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
+ static LIST_HEAD(zswap_pools);
+ /* protects zswap_pools list modification */
+ static DEFINE_SPINLOCK(zswap_pools_lock);
++/* pool counter to provide unique names to zpool */
++static atomic_t zswap_pools_count = ATOMIC_INIT(0);
+ 
+ /* used by param callback function */
+ static bool zswap_init_started;
+@@ -565,6 +567,7 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
+ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
+ {
+ 	struct zswap_pool *pool;
++	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
+ 	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
+ 
+ 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+@@ -573,7 +576,10 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
+ 		return NULL;
+ 	}
+ 
+-	pool->zpool = zpool_create_pool(type, "zswap", gfp, &zswap_zpool_ops);
++	/* unique name for each pool specifically required by zsmalloc */
++	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
++
++	pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
+ 	if (!pool->zpool) {
+ 		pr_err("%s zpool not available\n", type);
+ 		goto error;
+diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
+index b563a3f5f2a8..2fa3be965101 100644
+--- a/net/ax25/ax25_ip.c
++++ b/net/ax25/ax25_ip.c
+@@ -228,8 +228,23 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
+ }
+ #endif
+ 
++static bool ax25_validate_header(const char *header, unsigned int len)
++{
++	ax25_digi digi;
++
++	if (!len)
++		return false;
++
++	if (header[0])
++		return true;
++
++	return ax25_addr_parse(header + 1, len - 1, NULL, NULL, &digi, NULL,
++			       NULL);
++}
++
+ const struct header_ops ax25_header_ops = {
+ 	.create = ax25_hard_header,
++	.validate = ax25_validate_header,
+ };
+ 
+ EXPORT_SYMBOL(ax25_header_ops);
+diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
+index a49c705fb86b..5f19133c5530 100644
+--- a/net/batman-adv/distributed-arp-table.c
++++ b/net/batman-adv/distributed-arp-table.c
+@@ -553,6 +553,7 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
+  * be sent to
+  * @bat_priv: the bat priv with all the soft interface information
+  * @ip_dst: ipv4 to look up in the DHT
++ * @vid: VLAN identifier
+  *
+  * An originator O is selected if and only if its DHT_ID value is one of three
+  * closest values (from the LEFT, with wrap around if needed) then the hash
+@@ -561,7 +562,8 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
+  * Returns the candidate array of size BATADV_DAT_CANDIDATE_NUM.
+  */
+ static struct batadv_dat_candidate *
+-batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
++batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst,
++			     unsigned short vid)
+ {
+ 	int select;
+ 	batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key;
+@@ -577,7 +579,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
+ 		return NULL;
+ 
+ 	dat.ip = ip_dst;
+-	dat.vid = 0;
++	dat.vid = vid;
+ 	ip_key = (batadv_dat_addr_t)batadv_hash_dat(&dat,
+ 						    BATADV_DAT_ADDR_MAX);
+ 
+@@ -597,6 +599,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
+  * @bat_priv: the bat priv with all the soft interface information
+  * @skb: payload to send
+  * @ip: the DHT key
++ * @vid: VLAN identifier
+  * @packet_subtype: unicast4addr packet subtype to use
+  *
+  * This function copies the skb with pskb_copy() and is sent as unicast packet
+@@ -607,7 +610,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
+  */
+ static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
+ 				 struct sk_buff *skb, __be32 ip,
+-				 int packet_subtype)
++				 unsigned short vid, int packet_subtype)
+ {
+ 	int i;
+ 	bool ret = false;
+@@ -616,7 +619,7 @@ static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
+ 	struct sk_buff *tmp_skb;
+ 	struct batadv_dat_candidate *cand;
+ 
+-	cand = batadv_dat_select_candidates(bat_priv, ip);
++	cand = batadv_dat_select_candidates(bat_priv, ip, vid);
+ 	if (!cand)
+ 		goto out;
+ 
+@@ -1004,7 +1007,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
+ 		ret = true;
+ 	} else {
+ 		/* Send the request to the DHT */
+-		ret = batadv_dat_send_data(bat_priv, skb, ip_dst,
++		ret = batadv_dat_send_data(bat_priv, skb, ip_dst, vid,
+ 					   BATADV_P_DAT_DHT_GET);
+ 	}
+ out:
+@@ -1132,8 +1135,8 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
+ 	/* Send the ARP reply to the candidates for both the IP addresses that
+ 	 * the node obtained from the ARP reply
+ 	 */
+-	batadv_dat_send_data(bat_priv, skb, ip_src, BATADV_P_DAT_DHT_PUT);
+-	batadv_dat_send_data(bat_priv, skb, ip_dst, BATADV_P_DAT_DHT_PUT);
++	batadv_dat_send_data(bat_priv, skb, ip_src, vid, BATADV_P_DAT_DHT_PUT);
++	batadv_dat_send_data(bat_priv, skb, ip_dst, vid, BATADV_P_DAT_DHT_PUT);
+ }
+ 
+ /**
+diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
+index e4f2646d9246..43d15d6c4b6a 100644
+--- a/net/batman-adv/routing.c
++++ b/net/batman-adv/routing.c
+@@ -104,6 +104,15 @@ static void _batadv_update_route(struct batadv_priv *bat_priv,
+ 		neigh_node = NULL;
+ 
+ 	spin_lock_bh(&orig_node->neigh_list_lock);
++	/* curr_router used earlier may not be the current orig_ifinfo->router
++	 * anymore because it was dereferenced outside of the neigh_list_lock
++	 * protected region. After the new best neighbor has replace the current
++	 * best neighbor the reference counter needs to decrease. Consequently,
++	 * the code needs to ensure the curr_router variable contains a pointer
++	 * to the replaced best neighbor.
++	 */
++	curr_router = rcu_dereference_protected(orig_ifinfo->router, true);
++
+ 	rcu_assign_pointer(orig_ifinfo->router, neigh_node);
+ 	spin_unlock_bh(&orig_node->neigh_list_lock);
+ 	batadv_orig_ifinfo_free_ref(orig_ifinfo);
+diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
+index 782fa33ec296..45bfdefa15a5 100644
+--- a/net/batman-adv/send.c
++++ b/net/batman-adv/send.c
+@@ -629,6 +629,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
+ 
+ 		if (pending) {
+ 			hlist_del(&forw_packet->list);
++			if (!forw_packet->own)
++				atomic_inc(&bat_priv->bcast_queue_left);
++
+ 			batadv_forw_packet_free(forw_packet);
+ 		}
+ 	}
+@@ -656,6 +659,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
+ 
+ 		if (pending) {
+ 			hlist_del(&forw_packet->list);
++			if (!forw_packet->own)
++				atomic_inc(&bat_priv->batman_queue_left);
++
+ 			batadv_forw_packet_free(forw_packet);
+ 		}
+ 	}
+diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
+index ac4d08de5df4..720f1a5b81ac 100644
+--- a/net/batman-adv/soft-interface.c
++++ b/net/batman-adv/soft-interface.c
+@@ -407,11 +407,17 @@ void batadv_interface_rx(struct net_device *soft_iface,
+ 	 */
+ 	nf_reset(skb);
+ 
++	if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
++		goto dropped;
++
+ 	vid = batadv_get_vid(skb, 0);
+ 	ethhdr = eth_hdr(skb);
+ 
+ 	switch (ntohs(ethhdr->h_proto)) {
+ 	case ETH_P_8021Q:
++		if (!pskb_may_pull(skb, VLAN_ETH_HLEN))
++			goto dropped;
++
+ 		vhdr = (struct vlan_ethhdr *)skb->data;
+ 
+ 		if (vhdr->h_vlan_encapsulated_proto != ethertype)
+@@ -423,8 +429,6 @@ void batadv_interface_rx(struct net_device *soft_iface,
+ 	}
+ 
+ 	/* skb->dev & skb->pkt_type are set here */
+-	if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
+-		goto dropped;
+ 	skb->protocol = eth_type_trans(skb, soft_iface);
+ 
+ 	/* should not be necessary anymore as we use skb_pull_rcsum()
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index 5a5089cb6570..1363b8ffd89c 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -5979,6 +5979,10 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
+ 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
+ 				       MGMT_STATUS_INVALID_PARAMS);
+ 
++	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
++		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
++				       MGMT_STATUS_INVALID_PARAMS);
++
+ 	flags = __le32_to_cpu(cp->flags);
+ 	timeout = __le16_to_cpu(cp->timeout);
+ 	duration = __le16_to_cpu(cp->duration);
+diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
+index 263b4de4de57..60a3dbfca8a1 100644
+--- a/net/bridge/br_ioctl.c
++++ b/net/bridge/br_ioctl.c
+@@ -21,18 +21,19 @@
+ #include <asm/uaccess.h>
+ #include "br_private.h"
+ 
+-/* called with RTNL */
+ static int get_bridge_ifindices(struct net *net, int *indices, int num)
+ {
+ 	struct net_device *dev;
+ 	int i = 0;
+ 
+-	for_each_netdev(net, dev) {
++	rcu_read_lock();
++	for_each_netdev_rcu(net, dev) {
+ 		if (i >= num)
+ 			break;
+ 		if (dev->priv_flags & IFF_EBRIDGE)
+ 			indices[i++] = dev->ifindex;
+ 	}
++	rcu_read_unlock();
+ 
+ 	return i;
+ }
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index 03661d97463c..ea9893743a0f 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -1270,6 +1270,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
+ 	struct br_ip saddr;
+ 	unsigned long max_delay;
+ 	unsigned long now = jiffies;
++	unsigned int offset = skb_transport_offset(skb);
+ 	__be32 group;
+ 	int err = 0;
+ 
+@@ -1280,14 +1281,14 @@ static int br_ip4_multicast_query(struct net_bridge *br,
+ 
+ 	group = ih->group;
+ 
+-	if (skb->len == sizeof(*ih)) {
++	if (skb->len == offset + sizeof(*ih)) {
+ 		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
+ 
+ 		if (!max_delay) {
+ 			max_delay = 10 * HZ;
+ 			group = 0;
+ 		}
+-	} else if (skb->len >= sizeof(*ih3)) {
++	} else if (skb->len >= offset + sizeof(*ih3)) {
+ 		ih3 = igmpv3_query_hdr(skb);
+ 		if (ih3->nsrcs)
+ 			goto out;
+@@ -1348,6 +1349,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
+ 	struct br_ip saddr;
+ 	unsigned long max_delay;
+ 	unsigned long now = jiffies;
++	unsigned int offset = skb_transport_offset(skb);
+ 	const struct in6_addr *group = NULL;
+ 	bool is_general_query;
+ 	int err = 0;
+@@ -1357,8 +1359,8 @@ static int br_ip6_multicast_query(struct net_bridge *br,
+ 	    (port && port->state == BR_STATE_DISABLED))
+ 		goto out;
+ 
+-	if (skb->len == sizeof(*mld)) {
+-		if (!pskb_may_pull(skb, sizeof(*mld))) {
++	if (skb->len == offset + sizeof(*mld)) {
++		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
+ 			err = -EINVAL;
+ 			goto out;
+ 		}
+@@ -1367,7 +1369,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
+ 		if (max_delay)
+ 			group = &mld->mld_mca;
+ 	} else {
+-		if (!pskb_may_pull(skb, sizeof(*mld2q))) {
++		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
+ 			err = -EINVAL;
+ 			goto out;
+ 		}
+diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
+index b3cca126b103..e2670c5baafd 100644
+--- a/net/bridge/br_stp.c
++++ b/net/bridge/br_stp.c
+@@ -568,6 +568,14 @@ int br_set_max_age(struct net_bridge *br, unsigned long val)
+ 
+ }
+ 
++/* Set time interval that dynamic forwarding entries live
++ * For pure software bridge, allow values outside the 802.1
++ * standard specification for special cases:
++ *  0 - entry never ages (all permanant)
++ *  1 - entry disappears (no persistance)
++ *
++ * Offloaded switch entries maybe more restrictive
++ */
+ int br_set_ageing_time(struct net_bridge *br, u32 ageing_time)
+ {
+ 	struct switchdev_attr attr = {
+@@ -579,11 +587,8 @@ int br_set_ageing_time(struct net_bridge *br, u32 ageing_time)
+ 	unsigned long t = clock_t_to_jiffies(ageing_time);
+ 	int err;
+ 
+-	if (t < BR_MIN_AGEING_TIME || t > BR_MAX_AGEING_TIME)
+-		return -ERANGE;
+-
+ 	err = switchdev_port_attr_set(br->dev, &attr);
+-	if (err)
++	if (err && err != -EOPNOTSUPP)
+ 		return err;
+ 
+ 	br->ageing_time = t;
+diff --git a/net/core/filter.c b/net/core/filter.c
+index bba502f7cd57..fb2951c3532d 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -1147,7 +1147,8 @@ void bpf_prog_destroy(struct bpf_prog *fp)
+ }
+ EXPORT_SYMBOL_GPL(bpf_prog_destroy);
+ 
+-static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
++static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk,
++			    bool locked)
+ {
+ 	struct sk_filter *fp, *old_fp;
+ 
+@@ -1163,10 +1164,8 @@ static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
+ 		return -ENOMEM;
+ 	}
+ 
+-	old_fp = rcu_dereference_protected(sk->sk_filter,
+-					   sock_owned_by_user(sk));
++	old_fp = rcu_dereference_protected(sk->sk_filter, locked);
+ 	rcu_assign_pointer(sk->sk_filter, fp);
+-
+ 	if (old_fp)
+ 		sk_filter_uncharge(sk, old_fp);
+ 
+@@ -1245,7 +1244,8 @@ struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
+  * occurs or there is insufficient memory for the filter a negative
+  * errno code is returned. On success the return is zero.
+  */
+-int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
++int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk,
++		       bool locked)
+ {
+ 	struct bpf_prog *prog = __get_filter(fprog, sk);
+ 	int err;
+@@ -1253,7 +1253,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
+ 	if (IS_ERR(prog))
+ 		return PTR_ERR(prog);
+ 
+-	err = __sk_attach_prog(prog, sk);
++	err = __sk_attach_prog(prog, sk, locked);
+ 	if (err < 0) {
+ 		__bpf_prog_release(prog);
+ 		return err;
+@@ -1261,7 +1261,12 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
+ 
+ 	return 0;
+ }
+-EXPORT_SYMBOL_GPL(sk_attach_filter);
++EXPORT_SYMBOL_GPL(__sk_attach_filter);
++
++int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
++{
++	return __sk_attach_filter(fprog, sk, sock_owned_by_user(sk));
++}
+ 
+ int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
+ {
+@@ -1307,7 +1312,7 @@ int sk_attach_bpf(u32 ufd, struct sock *sk)
+ 	if (IS_ERR(prog))
+ 		return PTR_ERR(prog);
+ 
+-	err = __sk_attach_prog(prog, sk);
++	err = __sk_attach_prog(prog, sk, sock_owned_by_user(sk));
+ 	if (err < 0) {
+ 		bpf_prog_put(prog);
+ 		return err;
+@@ -2105,7 +2110,7 @@ static int __init register_sk_filter_ops(void)
+ }
+ late_initcall(register_sk_filter_ops);
+ 
+-int sk_detach_filter(struct sock *sk)
++int __sk_detach_filter(struct sock *sk, bool locked)
+ {
+ 	int ret = -ENOENT;
+ 	struct sk_filter *filter;
+@@ -2113,8 +2118,7 @@ int sk_detach_filter(struct sock *sk)
+ 	if (sock_flag(sk, SOCK_FILTER_LOCKED))
+ 		return -EPERM;
+ 
+-	filter = rcu_dereference_protected(sk->sk_filter,
+-					   sock_owned_by_user(sk));
++	filter = rcu_dereference_protected(sk->sk_filter, locked);
+ 	if (filter) {
+ 		RCU_INIT_POINTER(sk->sk_filter, NULL);
+ 		sk_filter_uncharge(sk, filter);
+@@ -2123,7 +2127,12 @@ int sk_detach_filter(struct sock *sk)
+ 
+ 	return ret;
+ }
+-EXPORT_SYMBOL_GPL(sk_detach_filter);
++EXPORT_SYMBOL_GPL(__sk_detach_filter);
++
++int sk_detach_filter(struct sock *sk)
++{
++	return __sk_detach_filter(sk, sock_owned_by_user(sk));
++}
+ 
+ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
+ 		  unsigned int len)
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 8261d95dd846..482c3717a45e 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -905,6 +905,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ 	       + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
+ 	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
+ 	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
++	       + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
+ 	       + nla_total_size(1); /* IFLA_PROTO_DOWN */
+ 
+ }
+@@ -1175,14 +1176,16 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
+ 
+ static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
+ {
+-	struct rtnl_link_ifmap map = {
+-		.mem_start   = dev->mem_start,
+-		.mem_end     = dev->mem_end,
+-		.base_addr   = dev->base_addr,
+-		.irq         = dev->irq,
+-		.dma         = dev->dma,
+-		.port        = dev->if_port,
+-	};
++	struct rtnl_link_ifmap map;
++
++	memset(&map, 0, sizeof(map));
++	map.mem_start   = dev->mem_start;
++	map.mem_end     = dev->mem_end;
++	map.base_addr   = dev->base_addr;
++	map.irq         = dev->irq;
++	map.dma         = dev->dma;
++	map.port        = dev->if_port;
++
+ 	if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
+ 		return -EMSGSIZE;
+ 
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 8616d1147c93..9835d9a8a7a4 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -4427,15 +4427,16 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
+ 		__skb_push(skb, offset);
+ 		err = __vlan_insert_tag(skb, skb->vlan_proto,
+ 					skb_vlan_tag_get(skb));
+-		if (err)
++		if (err) {
++			__skb_pull(skb, offset);
+ 			return err;
++		}
++
+ 		skb->protocol = skb->vlan_proto;
+ 		skb->mac_len += VLAN_HLEN;
+-		__skb_pull(skb, offset);
+ 
+-		if (skb->ip_summed == CHECKSUM_COMPLETE)
+-			skb->csum = csum_add(skb->csum, csum_partial(skb->data
+-					+ (2 * ETH_ALEN), VLAN_HLEN, 0));
++		skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
++		__skb_pull(skb, offset);
+ 	}
+ 	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
+ 	return 0;
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index 902d606324a0..8be8f27bfacc 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -204,8 +204,6 @@ void dccp_req_err(struct sock *sk, u64 seq)
+ 	 * ICMPs are not backlogged, hence we cannot get an established
+ 	 * socket here.
+ 	 */
+-	WARN_ON(req->sk);
+-
+ 	if (!between48(seq, dccp_rsk(req)->dreq_iss, dccp_rsk(req)->dreq_gss)) {
+ 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+ 	} else {
+diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
+index 607a14f20d88..b1dc096d22f8 100644
+--- a/net/decnet/dn_route.c
++++ b/net/decnet/dn_route.c
+@@ -1034,10 +1034,13 @@ source_ok:
+ 	if (!fld.daddr) {
+ 		fld.daddr = fld.saddr;
+ 
+-		err = -EADDRNOTAVAIL;
+ 		if (dev_out)
+ 			dev_put(dev_out);
++		err = -EINVAL;
+ 		dev_out = init_net.loopback_dev;
++		if (!dev_out->dn_ptr)
++			goto out;
++		err = -EADDRNOTAVAIL;
+ 		dev_hold(dev_out);
+ 		if (!fld.daddr) {
+ 			fld.daddr =
+@@ -1110,6 +1113,8 @@ source_ok:
+ 		if (dev_out == NULL)
+ 			goto out;
+ 		dn_db = rcu_dereference_raw(dev_out->dn_ptr);
++		if (!dn_db)
++			goto e_inval;
+ 		/* Possible improvement - check all devices for local addr */
+ 		if (dn_dev_islocal(dev_out, fld.daddr)) {
+ 			dev_put(dev_out);
+@@ -1151,6 +1156,8 @@ select_source:
+ 			dev_put(dev_out);
+ 		dev_out = init_net.loopback_dev;
+ 		dev_hold(dev_out);
++		if (!dev_out->dn_ptr)
++			goto e_inval;
+ 		fld.flowidn_oif = dev_out->ifindex;
+ 		if (res.fi)
+ 			dn_fib_info_put(res.fi);
+diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
+index fa4daba8db55..d8fb47fcad05 100644
+--- a/net/dsa/dsa.c
++++ b/net/dsa/dsa.c
+@@ -935,6 +935,14 @@ static void dsa_remove_dst(struct dsa_switch_tree *dst)
+ {
+ 	int i;
+ 
++	dst->master_netdev->dsa_ptr = NULL;
++
++	/* If we used a tagging format that doesn't have an ethertype
++	 * field, make sure that all packets from this point get sent
++	 * without the tag and go through the regular receive path.
++	 */
++	wmb();
++
+ 	for (i = 0; i < dst->pd->nr_chips; i++) {
+ 		struct dsa_switch *ds = dst->ds[i];
+ 
+@@ -988,14 +996,6 @@ static int dsa_suspend(struct device *d)
+ 	struct dsa_switch_tree *dst = platform_get_drvdata(pdev);
+ 	int i, ret = 0;
+ 
+-	dst->master_netdev->dsa_ptr = NULL;
+-
+-	/* If we used a tagging format that doesn't have an ethertype
+-	 * field, make sure that all packets from this point get sent
+-	 * without the tag and go through the regular receive path.
+-	 */
+-	wmb();
+-
+ 	for (i = 0; i < dst->pd->nr_chips; i++) {
+ 		struct dsa_switch *ds = dst->ds[i];
+ 
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index f6303b17546b..0212591b0077 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -334,6 +334,9 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
+ 
+ 	ASSERT_RTNL();
+ 
++	if (in_dev->dead)
++		goto no_promotions;
++
+ 	/* 1. Deleting primary ifaddr forces deletion all secondaries
+ 	 * unless alias promotion is set
+ 	 **/
+@@ -380,6 +383,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
+ 			fib_del_ifaddr(ifa, ifa1);
+ 	}
+ 
++no_promotions:
+ 	/* 2. Unlink it */
+ 
+ 	*ifap = ifa1->ifa_next;
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index 473447593060..63566ec54794 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -280,7 +280,6 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
+ 	struct in_device *in_dev;
+ 	struct fib_result res;
+ 	struct rtable *rt;
+-	struct flowi4 fl4;
+ 	struct net *net;
+ 	int scope;
+ 
+@@ -296,14 +295,13 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
+ 
+ 	scope = RT_SCOPE_UNIVERSE;
+ 	if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
+-		fl4.flowi4_oif = 0;
+-		fl4.flowi4_iif = LOOPBACK_IFINDEX;
+-		fl4.daddr = ip_hdr(skb)->saddr;
+-		fl4.saddr = 0;
+-		fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
+-		fl4.flowi4_scope = scope;
+-		fl4.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0;
+-		fl4.flowi4_tun_key.tun_id = 0;
++		struct flowi4 fl4 = {
++			.flowi4_iif = LOOPBACK_IFINDEX,
++			.daddr = ip_hdr(skb)->saddr,
++			.flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
++			.flowi4_scope = scope,
++			.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0,
++		};
+ 		if (!fib_lookup(net, &fl4, &res, 0))
+ 			return FIB_RES_PREFSRC(net, res);
+ 	} else {
+@@ -906,7 +904,11 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
+ 	if (ifa->ifa_flags & IFA_F_SECONDARY) {
+ 		prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
+ 		if (!prim) {
+-			pr_warn("%s: bug: prim == NULL\n", __func__);
++			/* if the device has been deleted, we don't perform
++			 * address promotion
++			 */
++			if (!in_dev->dead)
++				pr_warn("%s: bug: prim == NULL\n", __func__);
+ 			return;
+ 		}
+ 		if (iprim && iprim != prim) {
+@@ -922,6 +924,9 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
+ 		subnet = 1;
+ 	}
+ 
++	if (in_dev->dead)
++		goto no_promotions;
++
+ 	/* Deletion is more complicated than add.
+ 	 * We should take care of not to delete too much :-)
+ 	 *
+@@ -997,6 +1002,7 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
+ 		}
+ 	}
+ 
++no_promotions:
+ 	if (!(ok & BRD_OK))
+ 		fib_magic(RTM_DELROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim);
+ 	if (subnet && ifa->ifa_prefixlen < 31) {
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index d97268e8ff10..2b68418c7198 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -975,6 +975,8 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
+ 			val = 65535 - 40;
+ 		if (type == RTAX_MTU && val > 65535 - 15)
+ 			val = 65535 - 15;
++		if (type == RTAX_HOPLIMIT && val > 255)
++			val = 255;
+ 		if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
+ 			return -EINVAL;
+ 		fi->fib_metrics[type - 1] = val;
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index 41ba68de46d8..d0c1e7766214 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -179,6 +179,7 @@ static __be16 tnl_flags_to_gre_flags(__be16 tflags)
+ 	return flags;
+ }
+ 
++/* Fills in tpi and returns header length to be pulled. */
+ static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
+ 			    bool *csum_err)
+ {
+@@ -238,7 +239,7 @@ static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
+ 				return -EINVAL;
+ 		}
+ 	}
+-	return iptunnel_pull_header(skb, hdr_len, tpi->proto);
++	return hdr_len;
+ }
+ 
+ static void ipgre_err(struct sk_buff *skb, u32 info,
+@@ -341,7 +342,7 @@ static void gre_err(struct sk_buff *skb, u32 info)
+ 	struct tnl_ptk_info tpi;
+ 	bool csum_err = false;
+ 
+-	if (parse_gre_header(skb, &tpi, &csum_err)) {
++	if (parse_gre_header(skb, &tpi, &csum_err) < 0) {
+ 		if (!csum_err)		/* ignore csum errors. */
+ 			return;
+ 	}
+@@ -419,6 +420,7 @@ static int gre_rcv(struct sk_buff *skb)
+ {
+ 	struct tnl_ptk_info tpi;
+ 	bool csum_err = false;
++	int hdr_len;
+ 
+ #ifdef CONFIG_NET_IPGRE_BROADCAST
+ 	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
+@@ -428,7 +430,10 @@ static int gre_rcv(struct sk_buff *skb)
+ 	}
+ #endif
+ 
+-	if (parse_gre_header(skb, &tpi, &csum_err) < 0)
++	hdr_len = parse_gre_header(skb, &tpi, &csum_err);
++	if (hdr_len < 0)
++		goto drop;
++	if (iptunnel_pull_header(skb, hdr_len, tpi.proto) < 0)
+ 		goto drop;
+ 
+ 	if (ipgre_rcv(skb, &tpi) == PACKET_RCVD)
+diff --git a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
+index c6eb42100e9a..ea91058b5f6f 100644
+--- a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
++++ b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
+@@ -108,10 +108,18 @@ static int masq_inet_event(struct notifier_block *this,
+ 			   unsigned long event,
+ 			   void *ptr)
+ {
+-	struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev;
++	struct in_device *idev = ((struct in_ifaddr *)ptr)->ifa_dev;
+ 	struct netdev_notifier_info info;
+ 
+-	netdev_notifier_info_init(&info, dev);
++	/* The masq_dev_notifier will catch the case of the device going
++	 * down.  So if the inetdev is dead and being destroyed we have
++	 * no work to do.  Otherwise this is an individual address removal
++	 * and we have to perform the flush.
++	 */
++	if (idev->dead)
++		return NOTIFY_DONE;
++
++	netdev_notifier_info_init(&info, idev->dev);
+ 	return masq_device_event(this, event, &info);
+ }
+ 
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 02c62299d717..b050cf980a57 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2045,6 +2045,18 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
+ 		 */
+ 		if (fi && res->prefixlen < 4)
+ 			fi = NULL;
++	} else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
++		   (orig_oif != dev_out->ifindex)) {
++		/* For local routes that require a particular output interface
++		 * we do not want to cache the result.  Caching the result
++		 * causes incorrect behaviour when there are multiple source
++		 * addresses on the interface, the end result being that if the
++		 * intended recipient is waiting on that interface for the
++		 * packet he won't receive it because it will be delivered on
++		 * the loopback interface and the IP_PKTINFO ipi_ifindex will
++		 * be set to the loopback interface as well.
++		 */
++		fi = NULL;
+ 	}
+ 
+ 	fnhe = NULL;
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 487ac67059e2..a7b1a905580b 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -319,8 +319,6 @@ void tcp_req_err(struct sock *sk, u32 seq, bool abort)
+ 	/* ICMPs are not backlogged, hence we cannot get
+ 	 * an established socket here.
+ 	 */
+-	WARN_ON(req->sk);
+-
+ 	if (seq != tcp_rsk(req)->snt_isn) {
+ 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+ 	} else if (abort) {
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index fda379cd600d..b6876f2f4cf2 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2625,8 +2625,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
+ 	 */
+ 	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
+ 		     skb_headroom(skb) >= 0xFFFF)) {
+-		struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
+-						   GFP_ATOMIC);
++		struct sk_buff *nskb;
++
++		skb_mstamp_get(&skb->skb_mstamp);
++		nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
+ 		err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
+ 			     -ENOBUFS;
+ 	} else {
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 95d2f198017e..56218ff388c7 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -339,8 +339,13 @@ found:
+ 
+ 		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
+ 		spin_lock(&hslot2->lock);
+-		hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
+-					 &hslot2->head);
++		if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
++			sk->sk_family == AF_INET6)
++			hlist_nulls_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
++						 &hslot2->head);
++		else
++			hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
++						 &hslot2->head);
+ 		hslot2->count++;
+ 		spin_unlock(&hslot2->lock);
+ 	}
+@@ -2082,10 +2087,14 @@ void udp_v4_early_demux(struct sk_buff *skb)
+ 		if (!in_dev)
+ 			return;
+ 
+-		ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
+-				       iph->protocol);
+-		if (!ours)
+-			return;
++		/* we are supposed to accept bcast packets */
++		if (skb->pkt_type == PACKET_MULTICAST) {
++			ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
++					       iph->protocol);
++			if (!ours)
++				return;
++		}
++
+ 		sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
+ 						   uh->source, iph->saddr, dif);
+ 	} else if (skb->pkt_type == PACKET_HOST) {
+diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
+index 2ae3c4fd8aab..41f18de5dcc2 100644
+--- a/net/ipv6/ila/ila_lwt.c
++++ b/net/ipv6/ila/ila_lwt.c
+@@ -120,8 +120,7 @@ nla_put_failure:
+ 
+ static int ila_encap_nlsize(struct lwtunnel_state *lwtstate)
+ {
+-	/* No encapsulation overhead */
+-	return 0;
++	return nla_total_size(sizeof(u64)); /* ILA_ATTR_LOCATOR */
+ }
+ 
+ static int ila_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index a163102f1803..2a6606c935cc 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1091,8 +1091,8 @@ static inline int ip6_ufo_append_data(struct sock *sk,
+ 			int getfrag(void *from, char *to, int offset, int len,
+ 			int odd, struct sk_buff *skb),
+ 			void *from, int length, int hh_len, int fragheaderlen,
+-			int transhdrlen, int mtu, unsigned int flags,
+-			const struct flowi6 *fl6)
++			int exthdrlen, int transhdrlen, int mtu,
++			unsigned int flags, const struct flowi6 *fl6)
+ 
+ {
+ 	struct sk_buff *skb;
+@@ -1117,7 +1117,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
+ 		skb_put(skb, fragheaderlen + transhdrlen);
+ 
+ 		/* initialize network header pointer */
+-		skb_reset_network_header(skb);
++		skb_set_network_header(skb, exthdrlen);
+ 
+ 		/* initialize protocol header pointer */
+ 		skb->transport_header = skb->network_header + fragheaderlen;
+@@ -1359,7 +1359,7 @@ emsgsize:
+ 	    (rt->dst.dev->features & NETIF_F_UFO) &&
+ 	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
+ 		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
+-					  hh_len, fragheaderlen,
++					  hh_len, fragheaderlen, exthdrlen,
+ 					  transhdrlen, mtu, flags, fl6);
+ 		if (err)
+ 			goto error;
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 6c5dfec7a377..3991b21e24ad 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -343,12 +343,12 @@ static int ip6_tnl_create2(struct net_device *dev)
+ 
+ 	t = netdev_priv(dev);
+ 
++	dev->rtnl_link_ops = &ip6_link_ops;
+ 	err = register_netdevice(dev);
+ 	if (err < 0)
+ 		goto out;
+ 
+ 	strcpy(t->parms.name, dev->name);
+-	dev->rtnl_link_ops = &ip6_link_ops;
+ 
+ 	dev_hold(dev);
+ 	ip6_tnl_link(ip6n, t);
+diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
+index 18f3498a6c80..e2ea31175ef9 100644
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -496,10 +496,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
+ 	IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
+ 
+ 	/* Yes, and fold redundant checksum back. 8) */
+-	if (head->ip_summed == CHECKSUM_COMPLETE)
+-		head->csum = csum_partial(skb_network_header(head),
+-					  skb_network_header_len(head),
+-					  head->csum);
++	skb_postpush_rcsum(head, skb_network_header(head),
++			   skb_network_header_len(head));
+ 
+ 	rcu_read_lock();
+ 	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index ed446639219c..18e29e2f8877 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1737,6 +1737,8 @@ static int ip6_convert_metrics(struct mx6_config *mxc,
+ 		} else {
+ 			val = nla_get_u32(nla);
+ 		}
++		if (type == RTAX_HOPLIMIT && val > 255)
++			val = 255;
+ 		if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
+ 			goto err;
+ 
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 422dd014aa2c..6794120f53b8 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -883,8 +883,8 @@ start_lookup:
+ 		flush_stack(stack, count, skb, count - 1);
+ 	} else {
+ 		if (!inner_flushed)
+-			UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
+-					 proto == IPPROTO_UDPLITE);
++			UDP6_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
++					  proto == IPPROTO_UDPLITE);
+ 		consume_skb(skb);
+ 	}
+ 	return 0;
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index ec22078b0914..42de4ccd159f 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -123,12 +123,11 @@ static int l2tp_ip_recv(struct sk_buff *skb)
+ 	struct l2tp_tunnel *tunnel = NULL;
+ 	int length;
+ 
+-	/* Point to L2TP header */
+-	optr = ptr = skb->data;
+-
+ 	if (!pskb_may_pull(skb, 4))
+ 		goto discard;
+ 
++	/* Point to L2TP header */
++	optr = ptr = skb->data;
+ 	session_id = ntohl(*((__be32 *) ptr));
+ 	ptr += 4;
+ 
+@@ -156,6 +155,9 @@ static int l2tp_ip_recv(struct sk_buff *skb)
+ 		if (!pskb_may_pull(skb, length))
+ 			goto discard;
+ 
++		/* Point to L2TP header */
++		optr = ptr = skb->data;
++		ptr += 4;
+ 		pr_debug("%s: ip recv\n", tunnel->name);
+ 		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
+ 	}
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index a2c8747d2936..9ee4ddb6b397 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -135,12 +135,11 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
+ 	struct l2tp_tunnel *tunnel = NULL;
+ 	int length;
+ 
+-	/* Point to L2TP header */
+-	optr = ptr = skb->data;
+-
+ 	if (!pskb_may_pull(skb, 4))
+ 		goto discard;
+ 
++	/* Point to L2TP header */
++	optr = ptr = skb->data;
+ 	session_id = ntohl(*((__be32 *) ptr));
+ 	ptr += 4;
+ 
+@@ -168,6 +167,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
+ 		if (!pskb_may_pull(skb, length))
+ 			goto discard;
+ 
++		/* Point to L2TP header */
++		optr = ptr = skb->data;
++		ptr += 4;
+ 		pr_debug("%s: ip recv\n", tunnel->name);
+ 		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
+ 	}
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index 8dab4e569571..bb8edb9ef506 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -626,6 +626,7 @@ static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb)
+ 	if (llc->cmsg_flags & LLC_CMSG_PKTINFO) {
+ 		struct llc_pktinfo info;
+ 
++		memset(&info, 0, sizeof(info));
+ 		info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex;
+ 		llc_pdu_decode_dsap(skb, &info.lpi_sap);
+ 		llc_pdu_decode_da(skb, info.lpi_mac);
+diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
+index 978d3bc31df7..1b33d89906e1 100644
+--- a/net/mac80211/ibss.c
++++ b/net/mac80211/ibss.c
+@@ -7,6 +7,7 @@
+  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
+  * Copyright 2009, Johannes Berg <johannes@sipsolutions.net>
+  * Copyright 2013-2014  Intel Mobile Communications GmbH
++ * Copyright(c) 2016 Intel Deutschland GmbH
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License version 2 as
+@@ -1485,14 +1486,21 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
+ 
+ 		sdata_info(sdata, "Trigger new scan to find an IBSS to join\n");
+ 
+-		num = ieee80211_ibss_setup_scan_channels(local->hw.wiphy,
+-							 &ifibss->chandef,
+-							 channels,
+-							 ARRAY_SIZE(channels));
+ 		scan_width = cfg80211_chandef_to_scan_width(&ifibss->chandef);
+-		ieee80211_request_ibss_scan(sdata, ifibss->ssid,
+-					    ifibss->ssid_len, channels, num,
+-					    scan_width);
++
++		if (ifibss->fixed_channel) {
++			num = ieee80211_ibss_setup_scan_channels(local->hw.wiphy,
++								 &ifibss->chandef,
++								 channels,
++								 ARRAY_SIZE(channels));
++			ieee80211_request_ibss_scan(sdata, ifibss->ssid,
++						    ifibss->ssid_len, channels,
++						    num, scan_width);
++		} else {
++			ieee80211_request_ibss_scan(sdata, ifibss->ssid,
++						    ifibss->ssid_len, NULL,
++						    0, scan_width);
++		}
+ 	} else {
+ 		int interval = IEEE80211_SCAN_INTERVAL;
+ 
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index c9e325d2e120..bcb0a1b64556 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -977,7 +977,10 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
+ 	if (sdata->vif.txq) {
+ 		struct txq_info *txqi = to_txq_info(sdata->vif.txq);
+ 
++		spin_lock_bh(&txqi->queue.lock);
+ 		ieee80211_purge_tx_queue(&local->hw, &txqi->queue);
++		spin_unlock_bh(&txqi->queue.lock);
++
+ 		atomic_set(&sdata->txqs_len[txqi->txq.ac], 0);
+ 	}
+ 
+@@ -1747,7 +1750,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
+ 
+ 		ret = dev_alloc_name(ndev, ndev->name);
+ 		if (ret < 0) {
+-			free_netdev(ndev);
++			ieee80211_if_free(ndev);
+ 			return ret;
+ 		}
+ 
+@@ -1833,7 +1836,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
+ 
+ 		ret = register_netdevice(ndev);
+ 		if (ret) {
+-			free_netdev(ndev);
++			ieee80211_if_free(ndev);
+ 			return ret;
+ 		}
+ 	}
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 60d093f40f1d..261df74eaf38 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -2249,7 +2249,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
+ 	struct ieee80211_local *local = rx->local;
+ 	struct ieee80211_sub_if_data *sdata = rx->sdata;
+ 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+-	u16 q, hdrlen;
++	u16 ac, q, hdrlen;
+ 
+ 	hdr = (struct ieee80211_hdr *) skb->data;
+ 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
+@@ -2318,7 +2318,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
+ 	    ether_addr_equal(sdata->vif.addr, hdr->addr3))
+ 		return RX_CONTINUE;
+ 
+-	q = ieee80211_select_queue_80211(sdata, skb, hdr);
++	ac = ieee80211_select_queue_80211(sdata, skb, hdr);
++	q = sdata->vif.hw_queue[ac];
+ 	if (ieee80211_queue_stopped(&local->hw, q)) {
+ 		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
+ 		return RX_DROP_MONITOR;
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index a4a4f89d3ba0..23ed038cf7f9 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -257,11 +257,11 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
+ }
+ 
+ /* Caller must hold local->sta_mtx */
+-static void sta_info_hash_add(struct ieee80211_local *local,
+-			      struct sta_info *sta)
++static int sta_info_hash_add(struct ieee80211_local *local,
++			     struct sta_info *sta)
+ {
+-	rhashtable_insert_fast(&local->sta_hash, &sta->hash_node,
+-			       sta_rht_params);
++	return rhashtable_insert_fast(&local->sta_hash, &sta->hash_node,
++				      sta_rht_params);
+ }
+ 
+ static void sta_deliver_ps_frames(struct work_struct *wk)
+@@ -498,11 +498,17 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
+ {
+ 	struct ieee80211_local *local = sta->local;
+ 	struct ieee80211_sub_if_data *sdata = sta->sdata;
+-	struct station_info sinfo;
++	struct station_info *sinfo;
+ 	int err = 0;
+ 
+ 	lockdep_assert_held(&local->sta_mtx);
+ 
++	sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL);
++	if (!sinfo) {
++		err = -ENOMEM;
++		goto out_err;
++	}
++
+ 	/* check if STA exists already */
+ 	if (sta_info_get_bss(sdata, sta->sta.addr)) {
+ 		err = -EEXIST;
+@@ -517,7 +523,9 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
+ 	set_sta_flag(sta, WLAN_STA_BLOCK_BA);
+ 
+ 	/* make the station visible */
+-	sta_info_hash_add(local, sta);
++	err = sta_info_hash_add(local, sta);
++	if (err)
++		goto out_drop_sta;
+ 
+ 	list_add_tail_rcu(&sta->list, &local->sta_list);
+ 
+@@ -534,10 +542,9 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
+ 	ieee80211_sta_debugfs_add(sta);
+ 	rate_control_add_sta_debugfs(sta);
+ 
+-	memset(&sinfo, 0, sizeof(sinfo));
+-	sinfo.filled = 0;
+-	sinfo.generation = local->sta_generation;
+-	cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
++	sinfo->generation = local->sta_generation;
++	cfg80211_new_sta(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
++	kfree(sinfo);
+ 
+ 	sta_dbg(sdata, "Inserted STA %pM\n", sta->sta.addr);
+ 
+@@ -552,6 +559,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
+  out_remove:
+ 	sta_info_hash_del(local, sta);
+ 	list_del_rcu(&sta->list);
++ out_drop_sta:
+ 	local->num_sta--;
+ 	synchronize_net();
+ 	__cleanup_single_sta(sta);
+@@ -898,7 +906,7 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
+ {
+ 	struct ieee80211_local *local = sta->local;
+ 	struct ieee80211_sub_if_data *sdata = sta->sdata;
+-	struct station_info sinfo = {};
++	struct station_info *sinfo;
+ 	int ret;
+ 
+ 	/*
+@@ -936,8 +944,11 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
+ 
+ 	sta_dbg(sdata, "Removed STA %pM\n", sta->sta.addr);
+ 
+-	sta_set_sinfo(sta, &sinfo);
+-	cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
++	sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
++	if (sinfo)
++		sta_set_sinfo(sta, sinfo);
++	cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
++	kfree(sinfo);
+ 
+ 	rate_control_remove_sta_debugfs(sta);
+ 	ieee80211_sta_debugfs_remove(sta);
+diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
+index b18c5ed42d95..0b80a7140cc4 100644
+--- a/net/mpls/af_mpls.c
++++ b/net/mpls/af_mpls.c
+@@ -543,6 +543,9 @@ static struct net_device *find_outdev(struct net *net,
+ 	if (!dev)
+ 		return ERR_PTR(-ENODEV);
+ 
++	if (IS_ERR(dev))
++		return dev;
++
+ 	/* The caller is holding rtnl anyways, so release the dev reference */
+ 	dev_put(dev);
+ 
+diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
+index f57b4dcdb233..4da560005b0e 100644
+--- a/net/netfilter/ipvs/ip_vs_core.c
++++ b/net/netfilter/ipvs/ip_vs_core.c
+@@ -1757,15 +1757,34 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
+ 	cp = pp->conn_in_get(ipvs, af, skb, &iph);
+ 
+ 	conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
+-	if (conn_reuse_mode && !iph.fragoffs &&
+-	    is_new_conn(skb, &iph) && cp &&
+-	    ((unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
+-	      unlikely(!atomic_read(&cp->dest->weight))) ||
+-	     unlikely(is_new_conn_expected(cp, conn_reuse_mode)))) {
+-		if (!atomic_read(&cp->n_control))
+-			ip_vs_conn_expire_now(cp);
+-		__ip_vs_conn_put(cp);
+-		cp = NULL;
++	if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
++		bool uses_ct = false, resched = false;
++
++		if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
++		    unlikely(!atomic_read(&cp->dest->weight))) {
++			resched = true;
++			uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
++		} else if (is_new_conn_expected(cp, conn_reuse_mode)) {
++			uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
++			if (!atomic_read(&cp->n_control)) {
++				resched = true;
++			} else {
++				/* Do not reschedule controlling connection
++				 * that uses conntrack while it is still
++				 * referenced by controlled connection(s).
++				 */
++				resched = !uses_ct;
++			}
++		}
++
++		if (resched) {
++			if (!atomic_read(&cp->n_control))
++				ip_vs_conn_expire_now(cp);
++			__ip_vs_conn_put(cp);
++			if (uses_ct)
++				return NF_DROP;
++			cp = NULL;
++		}
+ 	}
+ 
+ 	if (unlikely(!cp)) {
+diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
+index 1b8d594e493a..0a6eb5c0d9e9 100644
+--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
++++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
+@@ -70,10 +70,10 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
+ 	const char *dptr;
+ 	int retc;
+ 
+-	ip_vs_fill_iph_skb(p->af, skb, false, &iph);
++	retc = ip_vs_fill_iph_skb(p->af, skb, false, &iph);
+ 
+ 	/* Only useful with UDP */
+-	if (iph.protocol != IPPROTO_UDP)
++	if (!retc || iph.protocol != IPPROTO_UDP)
+ 		return -EINVAL;
+ 	/* todo: IPv6 fragments:
+ 	 *       I think this only should be done for the first fragment. /HS
+@@ -88,7 +88,7 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
+ 	dptr = skb->data + dataoff;
+ 	datalen = skb->len - dataoff;
+ 
+-	if (get_callid(dptr, dataoff, datalen, &matchoff, &matchlen))
++	if (get_callid(dptr, 0, datalen, &matchoff, &matchlen))
+ 		return -EINVAL;
+ 
+ 	/* N.B: pe_data is only set on success,
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index f60b4fdeeb8c..6b94f0bc11b8 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -1780,6 +1780,7 @@ void nf_conntrack_init_end(void)
+ 
+ int nf_conntrack_init_net(struct net *net)
+ {
++	static atomic64_t unique_id;
+ 	int ret = -ENOMEM;
+ 	int cpu;
+ 
+@@ -1802,7 +1803,8 @@ int nf_conntrack_init_net(struct net *net)
+ 	if (!net->ct.stat)
+ 		goto err_pcpu_lists;
+ 
+-	net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
++	net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%llu",
++				(u64)atomic64_inc_return(&unique_id));
+ 	if (!net->ct.slabname)
+ 		goto err_slabname;
+ 
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index f1ffb34e253f..d2bc03f0b4d7 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1305,7 +1305,7 @@ static int netlink_release(struct socket *sock)
+ 
+ 	skb_queue_purge(&sk->sk_write_queue);
+ 
+-	if (nlk->portid) {
++	if (nlk->portid && nlk->bound) {
+ 		struct netlink_notify n = {
+ 						.net = sock_net(sk),
+ 						.protocol = sk->sk_protocol,
+diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
+index 2d59df521915..879185fe183f 100644
+--- a/net/openvswitch/actions.c
++++ b/net/openvswitch/actions.c
+@@ -158,9 +158,7 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
+ 	new_mpls_lse = (__be32 *)skb_mpls_header(skb);
+ 	*new_mpls_lse = mpls->mpls_lse;
+ 
+-	if (skb->ip_summed == CHECKSUM_COMPLETE)
+-		skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse,
+-							     MPLS_HLEN, 0));
++	skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);
+ 
+ 	hdr = eth_hdr(skb);
+ 	hdr->h_proto = mpls->mpls_ethertype;
+@@ -280,7 +278,7 @@ static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
+ 	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
+ 			       mask->eth_dst);
+ 
+-	ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
++	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
+ 
+ 	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
+ 	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
+@@ -463,7 +461,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
+ 		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);
+ 
+ 		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
+-			set_ipv6_addr(skb, key->ipv6_proto, saddr, masked,
++			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
+ 				      true);
+ 			memcpy(&flow_key->ipv6.addr.src, masked,
+ 			       sizeof(flow_key->ipv6.addr.src));
+@@ -485,7 +483,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
+ 							     NULL, &flags)
+ 					       != NEXTHDR_ROUTING);
+ 
+-			set_ipv6_addr(skb, key->ipv6_proto, daddr, masked,
++			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
+ 				      recalc_csum);
+ 			memcpy(&flow_key->ipv6.addr.dst, masked,
+ 			       sizeof(flow_key->ipv6.addr.dst));
+@@ -639,7 +637,7 @@ static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *sk
+ 	/* Reconstruct the MAC header.  */
+ 	skb_push(skb, data->l2_len);
+ 	memcpy(skb->data, &data->l2_data, data->l2_len);
+-	ovs_skb_postpush_rcsum(skb, skb->data, data->l2_len);
++	skb_postpush_rcsum(skb, skb->data, data->l2_len);
+ 	skb_reset_mac_header(skb);
+ 
+ 	ovs_vport_send(vport, skb);
+diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
+index ee6ff8ffc12d..9e5b9fc805fb 100644
+--- a/net/openvswitch/conntrack.c
++++ b/net/openvswitch/conntrack.c
+@@ -320,6 +320,7 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key,
+ 	} else if (key->eth.type == htons(ETH_P_IPV6)) {
+ 		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
+ 
++		skb_orphan(skb);
+ 		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
+ 		err = nf_ct_frag6_gather(net, skb, user);
+ 		if (err)
+diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
+index 6a6adf314363..4e3972344aa6 100644
+--- a/net/openvswitch/vport-netdev.c
++++ b/net/openvswitch/vport-netdev.c
+@@ -58,7 +58,7 @@ static void netdev_port_receive(struct sk_buff *skb)
+ 		return;
+ 
+ 	skb_push(skb, ETH_HLEN);
+-	ovs_skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
++	skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
+ 	ovs_vport_receive(vport, skb, skb_tunnel_info(skb));
+ 	return;
+ error:
+diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
+index c10899cb9040..f01f28a567ad 100644
+--- a/net/openvswitch/vport.h
++++ b/net/openvswitch/vport.h
+@@ -185,13 +185,6 @@ static inline struct vport *vport_from_priv(void *priv)
+ int ovs_vport_receive(struct vport *, struct sk_buff *,
+ 		      const struct ip_tunnel_info *);
+ 
+-static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
+-				      const void *start, unsigned int len)
+-{
+-	if (skb->ip_summed == CHECKSUM_COMPLETE)
+-		skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
+-}
+-
+ static inline const char *ovs_vport_name(struct vport *vport)
+ {
+ 	return vport->dev->name;
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 992396aa635c..9cc7b512b472 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1916,6 +1916,10 @@ retry:
+ 		goto retry;
+ 	}
+ 
++	if (!dev_validate_header(dev, skb->data, len)) {
++		err = -EINVAL;
++		goto out_unlock;
++	}
+ 	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
+ 	    !packet_extra_vlan_len_allowed(dev, skb)) {
+ 		err = -EMSGSIZE;
+@@ -2326,18 +2330,6 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
+ 	sock_wfree(skb);
+ }
+ 
+-static bool ll_header_truncated(const struct net_device *dev, int len)
+-{
+-	/* net device doesn't like empty head */
+-	if (unlikely(len < dev->hard_header_len)) {
+-		net_warn_ratelimited("%s: packet size is too short (%d < %d)\n",
+-				     current->comm, len, dev->hard_header_len);
+-		return true;
+-	}
+-
+-	return false;
+-}
+-
+ static void tpacket_set_protocol(const struct net_device *dev,
+ 				 struct sk_buff *skb)
+ {
+@@ -2420,19 +2412,19 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
+ 		if (unlikely(err < 0))
+ 			return -EINVAL;
+ 	} else if (dev->hard_header_len) {
+-		if (ll_header_truncated(dev, tp_len))
+-			return -EINVAL;
++		int hdrlen = min_t(int, dev->hard_header_len, tp_len);
+ 
+ 		skb_push(skb, dev->hard_header_len);
+-		err = skb_store_bits(skb, 0, data,
+-				dev->hard_header_len);
++		err = skb_store_bits(skb, 0, data, hdrlen);
+ 		if (unlikely(err))
+ 			return err;
++		if (!dev_validate_header(dev, skb->data, hdrlen))
++			return -EINVAL;
+ 		if (!skb->protocol)
+ 			tpacket_set_protocol(dev, skb);
+ 
+-		data += dev->hard_header_len;
+-		to_write -= dev->hard_header_len;
++		data += hdrlen;
++		to_write -= hdrlen;
+ 	}
+ 
+ 	offset = offset_in_page(data);
+@@ -2763,9 +2755,6 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ 		offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
+ 		if (unlikely(offset < 0))
+ 			goto out_free;
+-	} else {
+-		if (ll_header_truncated(dev, len))
+-			goto out_free;
+ 	}
+ 
+ 	/* Returns -EFAULT on error */
+@@ -2773,6 +2762,12 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ 	if (err)
+ 		goto out_free;
+ 
++	if (sock->type == SOCK_RAW &&
++	    !dev_validate_header(dev, skb->data, len)) {
++		err = -EINVAL;
++		goto out_free;
++	}
++
+ 	sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
+ 
+ 	if (!gso_type && (len > dev->mtu + reserve + extra_len) &&
+@@ -3441,6 +3436,7 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
+ 	i->ifindex = mreq->mr_ifindex;
+ 	i->alen = mreq->mr_alen;
+ 	memcpy(i->addr, mreq->mr_address, i->alen);
++	memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
+ 	i->count = 1;
+ 	i->next = po->mclist;
+ 	po->mclist = i;
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index af1acf009866..95b560f0b253 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -744,14 +744,15 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
+ 	return 0;
+ }
+ 
+-void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
++void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
++			       unsigned int len)
+ {
+ 	const struct Qdisc_class_ops *cops;
+ 	unsigned long cl;
+ 	u32 parentid;
+ 	int drops;
+ 
+-	if (n == 0)
++	if (n == 0 && len == 0)
+ 		return;
+ 	drops = max_t(int, n, 0);
+ 	rcu_read_lock();
+@@ -774,11 +775,12 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
+ 			cops->put(sch, cl);
+ 		}
+ 		sch->q.qlen -= n;
++		sch->qstats.backlog -= len;
+ 		__qdisc_qstats_drop(sch, drops);
+ 	}
+ 	rcu_read_unlock();
+ }
+-EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
++EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
+ 
+ static void notify_and_destroy(struct net *net, struct sk_buff *skb,
+ 			       struct nlmsghdr *n, u32 clid,
+diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
+index c538d9e4a8f6..baafddf229ce 100644
+--- a/net/sched/sch_cbq.c
++++ b/net/sched/sch_cbq.c
+@@ -1624,13 +1624,8 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ 			new->reshape_fail = cbq_reshape_fail;
+ #endif
+ 	}
+-	sch_tree_lock(sch);
+-	*old = cl->q;
+-	cl->q = new;
+-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+-	qdisc_reset(*old);
+-	sch_tree_unlock(sch);
+ 
++	*old = qdisc_replace(sch, new, &cl->q);
+ 	return 0;
+ }
+ 
+@@ -1914,7 +1909,7 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
+ {
+ 	struct cbq_sched_data *q = qdisc_priv(sch);
+ 	struct cbq_class *cl = (struct cbq_class *)arg;
+-	unsigned int qlen;
++	unsigned int qlen, backlog;
+ 
+ 	if (cl->filters || cl->children || cl == &q->link)
+ 		return -EBUSY;
+@@ -1922,8 +1917,9 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
+ 	sch_tree_lock(sch);
+ 
+ 	qlen = cl->q->q.qlen;
++	backlog = cl->q->qstats.backlog;
+ 	qdisc_reset(cl->q);
+-	qdisc_tree_decrease_qlen(cl->q, qlen);
++	qdisc_tree_reduce_backlog(cl->q, qlen, backlog);
+ 
+ 	if (cl->next_alive)
+ 		cbq_deactivate_class(cl);
+diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
+index 5ffb8b8337c7..0a08c860eee4 100644
+--- a/net/sched/sch_choke.c
++++ b/net/sched/sch_choke.c
+@@ -128,8 +128,8 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
+ 		choke_zap_tail_holes(q);
+ 
+ 	qdisc_qstats_backlog_dec(sch, skb);
++	qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
+ 	qdisc_drop(skb, sch);
+-	qdisc_tree_decrease_qlen(sch, 1);
+ 	--sch->q.qlen;
+ }
+ 
+@@ -456,6 +456,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
+ 		old = q->tab;
+ 		if (old) {
+ 			unsigned int oqlen = sch->q.qlen, tail = 0;
++			unsigned dropped = 0;
+ 
+ 			while (q->head != q->tail) {
+ 				struct sk_buff *skb = q->tab[q->head];
+@@ -467,11 +468,12 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
+ 					ntab[tail++] = skb;
+ 					continue;
+ 				}
++				dropped += qdisc_pkt_len(skb);
+ 				qdisc_qstats_backlog_dec(sch, skb);
+ 				--sch->q.qlen;
+ 				qdisc_drop(skb, sch);
+ 			}
+-			qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
++			qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
+ 			q->head = 0;
+ 			q->tail = tail;
+ 		}
+diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
+index 535007d5f0b5..9b7e2980ee5c 100644
+--- a/net/sched/sch_codel.c
++++ b/net/sched/sch_codel.c
+@@ -79,12 +79,13 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
+ 
+ 	skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
+ 
+-	/* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
++	/* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
+ 	 * or HTB crashes. Defer it for next round.
+ 	 */
+ 	if (q->stats.drop_count && sch->q.qlen) {
+-		qdisc_tree_decrease_qlen(sch, q->stats.drop_count);
++		qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
+ 		q->stats.drop_count = 0;
++		q->stats.drop_len = 0;
+ 	}
+ 	if (skb)
+ 		qdisc_bstats_update(sch, skb);
+@@ -116,7 +117,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
+ {
+ 	struct codel_sched_data *q = qdisc_priv(sch);
+ 	struct nlattr *tb[TCA_CODEL_MAX + 1];
+-	unsigned int qlen;
++	unsigned int qlen, dropped = 0;
+ 	int err;
+ 
+ 	if (!opt)
+@@ -156,10 +157,11 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
+ 	while (sch->q.qlen > sch->limit) {
+ 		struct sk_buff *skb = __skb_dequeue(&sch->q);
+ 
++		dropped += qdisc_pkt_len(skb);
+ 		qdisc_qstats_backlog_dec(sch, skb);
+ 		qdisc_drop(skb, sch);
+ 	}
+-	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
++	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
+ 
+ 	sch_tree_unlock(sch);
+ 	return 0;
+diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
+index a1cd778240cd..a63e879e8975 100644
+--- a/net/sched/sch_drr.c
++++ b/net/sched/sch_drr.c
+@@ -53,9 +53,10 @@ static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
+ static void drr_purge_queue(struct drr_class *cl)
+ {
+ 	unsigned int len = cl->qdisc->q.qlen;
++	unsigned int backlog = cl->qdisc->qstats.backlog;
+ 
+ 	qdisc_reset(cl->qdisc);
+-	qdisc_tree_decrease_qlen(cl->qdisc, len);
++	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
+ }
+ 
+ static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
+@@ -226,11 +227,7 @@ static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
+ 			new = &noop_qdisc;
+ 	}
+ 
+-	sch_tree_lock(sch);
+-	drr_purge_queue(cl);
+-	*old = cl->qdisc;
+-	cl->qdisc = new;
+-	sch_tree_unlock(sch);
++	*old = qdisc_replace(sch, new, &cl->qdisc);
+ 	return 0;
+ }
+ 
+diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
+index f357f34d02d2..d0dff0cd8186 100644
+--- a/net/sched/sch_dsmark.c
++++ b/net/sched/sch_dsmark.c
+@@ -73,13 +73,7 @@ static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
+ 			new = &noop_qdisc;
+ 	}
+ 
+-	sch_tree_lock(sch);
+-	*old = p->q;
+-	p->q = new;
+-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+-	qdisc_reset(*old);
+-	sch_tree_unlock(sch);
+-
++	*old = qdisc_replace(sch, new, &p->q);
+ 	return 0;
+ }
+ 
+@@ -264,6 +258,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 		return err;
+ 	}
+ 
++	qdisc_qstats_backlog_inc(sch, skb);
+ 	sch->q.qlen++;
+ 
+ 	return NET_XMIT_SUCCESS;
+@@ -286,6 +281,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
+ 		return NULL;
+ 
+ 	qdisc_bstats_update(sch, skb);
++	qdisc_qstats_backlog_dec(sch, skb);
+ 	sch->q.qlen--;
+ 
+ 	index = skb->tc_index & (p->indices - 1);
+@@ -401,6 +397,7 @@ static void dsmark_reset(struct Qdisc *sch)
+ 
+ 	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
+ 	qdisc_reset(p->q);
++	sch->qstats.backlog = 0;
+ 	sch->q.qlen = 0;
+ }
+ 
+diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
+index 109b2322778f..3c6a47d66a04 100644
+--- a/net/sched/sch_fq.c
++++ b/net/sched/sch_fq.c
+@@ -662,6 +662,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
+ 	struct fq_sched_data *q = qdisc_priv(sch);
+ 	struct nlattr *tb[TCA_FQ_MAX + 1];
+ 	int err, drop_count = 0;
++	unsigned drop_len = 0;
+ 	u32 fq_log;
+ 
+ 	if (!opt)
+@@ -736,10 +737,11 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
+ 
+ 		if (!skb)
+ 			break;
++		drop_len += qdisc_pkt_len(skb);
+ 		kfree_skb(skb);
+ 		drop_count++;
+ 	}
+-	qdisc_tree_decrease_qlen(sch, drop_count);
++	qdisc_tree_reduce_backlog(sch, drop_count, drop_len);
+ 
+ 	sch_tree_unlock(sch);
+ 	return err;
+diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
+index 4c834e93dafb..d3fc8f9dd3d4 100644
+--- a/net/sched/sch_fq_codel.c
++++ b/net/sched/sch_fq_codel.c
+@@ -175,7 +175,7 @@ static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch)
+ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ {
+ 	struct fq_codel_sched_data *q = qdisc_priv(sch);
+-	unsigned int idx;
++	unsigned int idx, prev_backlog;
+ 	struct fq_codel_flow *flow;
+ 	int uninitialized_var(ret);
+ 
+@@ -203,6 +203,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 	if (++sch->q.qlen <= sch->limit)
+ 		return NET_XMIT_SUCCESS;
+ 
++	prev_backlog = sch->qstats.backlog;
+ 	q->drop_overlimit++;
+ 	/* Return Congestion Notification only if we dropped a packet
+ 	 * from this flow.
+@@ -211,7 +212,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 		return NET_XMIT_CN;
+ 
+ 	/* As we dropped a packet, better let upper stack know this */
+-	qdisc_tree_decrease_qlen(sch, 1);
++	qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
+ 	return NET_XMIT_SUCCESS;
+ }
+ 
+@@ -241,6 +242,7 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
+ 	struct fq_codel_flow *flow;
+ 	struct list_head *head;
+ 	u32 prev_drop_count, prev_ecn_mark;
++	unsigned int prev_backlog;
+ 
+ begin:
+ 	head = &q->new_flows;
+@@ -259,6 +261,7 @@ begin:
+ 
+ 	prev_drop_count = q->cstats.drop_count;
+ 	prev_ecn_mark = q->cstats.ecn_mark;
++	prev_backlog = sch->qstats.backlog;
+ 
+ 	skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
+ 			    dequeue);
+@@ -276,12 +279,14 @@ begin:
+ 	}
+ 	qdisc_bstats_update(sch, skb);
+ 	flow->deficit -= qdisc_pkt_len(skb);
+-	/* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
++	/* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
+ 	 * or HTB crashes. Defer it for next round.
+ 	 */
+ 	if (q->cstats.drop_count && sch->q.qlen) {
+-		qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
++		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
++					  q->cstats.drop_len);
+ 		q->cstats.drop_count = 0;
++		q->cstats.drop_len = 0;
+ 	}
+ 	return skb;
+ }
+@@ -372,11 +377,13 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
+ 	while (sch->q.qlen > sch->limit) {
+ 		struct sk_buff *skb = fq_codel_dequeue(sch);
+ 
++		q->cstats.drop_len += qdisc_pkt_len(skb);
+ 		kfree_skb(skb);
+ 		q->cstats.drop_count++;
+ 	}
+-	qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
++	qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
+ 	q->cstats.drop_count = 0;
++	q->cstats.drop_len = 0;
+ 
+ 	sch_tree_unlock(sch);
+ 	return 0;
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index 16bc83b2842a..aa4725038f94 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -159,12 +159,15 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
+ 	if (validate)
+ 		skb = validate_xmit_skb_list(skb, dev);
+ 
+-	if (skb) {
++	if (likely(skb)) {
+ 		HARD_TX_LOCK(dev, txq, smp_processor_id());
+ 		if (!netif_xmit_frozen_or_stopped(txq))
+ 			skb = dev_hard_start_xmit(skb, dev, txq, &ret);
+ 
+ 		HARD_TX_UNLOCK(dev, txq);
++	} else {
++		spin_lock(root_lock);
++		return qdisc_qlen(q);
+ 	}
+ 	spin_lock(root_lock);
+ 
+diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
+index b7ebe2c87586..d783d7cc3348 100644
+--- a/net/sched/sch_hfsc.c
++++ b/net/sched/sch_hfsc.c
+@@ -895,9 +895,10 @@ static void
+ hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
+ {
+ 	unsigned int len = cl->qdisc->q.qlen;
++	unsigned int backlog = cl->qdisc->qstats.backlog;
+ 
+ 	qdisc_reset(cl->qdisc);
+-	qdisc_tree_decrease_qlen(cl->qdisc, len);
++	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
+ }
+ 
+ static void
+@@ -1215,11 +1216,7 @@ hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ 			new = &noop_qdisc;
+ 	}
+ 
+-	sch_tree_lock(sch);
+-	hfsc_purge_queue(sch, cl);
+-	*old = cl->qdisc;
+-	cl->qdisc = new;
+-	sch_tree_unlock(sch);
++	*old = qdisc_replace(sch, new, &cl->qdisc);
+ 	return 0;
+ }
+ 
+diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
+index 86b04e31e60b..13d6f83ec491 100644
+--- a/net/sched/sch_hhf.c
++++ b/net/sched/sch_hhf.c
+@@ -382,6 +382,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 	struct hhf_sched_data *q = qdisc_priv(sch);
+ 	enum wdrr_bucket_idx idx;
+ 	struct wdrr_bucket *bucket;
++	unsigned int prev_backlog;
+ 
+ 	idx = hhf_classify(skb, sch);
+ 
+@@ -409,6 +410,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 	if (++sch->q.qlen <= sch->limit)
+ 		return NET_XMIT_SUCCESS;
+ 
++	prev_backlog = sch->qstats.backlog;
+ 	q->drop_overlimit++;
+ 	/* Return Congestion Notification only if we dropped a packet from this
+ 	 * bucket.
+@@ -417,7 +419,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 		return NET_XMIT_CN;
+ 
+ 	/* As we dropped a packet, better let upper stack know this. */
+-	qdisc_tree_decrease_qlen(sch, 1);
++	qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
+ 	return NET_XMIT_SUCCESS;
+ }
+ 
+@@ -527,7 +529,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
+ {
+ 	struct hhf_sched_data *q = qdisc_priv(sch);
+ 	struct nlattr *tb[TCA_HHF_MAX + 1];
+-	unsigned int qlen;
++	unsigned int qlen, prev_backlog;
+ 	int err;
+ 	u64 non_hh_quantum;
+ 	u32 new_quantum = q->quantum;
+@@ -577,12 +579,14 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
+ 	}
+ 
+ 	qlen = sch->q.qlen;
++	prev_backlog = sch->qstats.backlog;
+ 	while (sch->q.qlen > sch->limit) {
+ 		struct sk_buff *skb = hhf_dequeue(sch);
+ 
+ 		kfree_skb(skb);
+ 	}
+-	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
++	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
++				  prev_backlog - sch->qstats.backlog);
+ 
+ 	sch_tree_unlock(sch);
+ 	return 0;
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index 15ccd7f8fb2a..87b02ed3d5f2 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -600,6 +600,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 		htb_activate(q, cl);
+ 	}
+ 
++	qdisc_qstats_backlog_inc(sch, skb);
+ 	sch->q.qlen++;
+ 	return NET_XMIT_SUCCESS;
+ }
+@@ -889,6 +890,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
+ ok:
+ 		qdisc_bstats_update(sch, skb);
+ 		qdisc_unthrottled(sch);
++		qdisc_qstats_backlog_dec(sch, skb);
+ 		sch->q.qlen--;
+ 		return skb;
+ 	}
+@@ -955,6 +957,7 @@ static unsigned int htb_drop(struct Qdisc *sch)
+ 			unsigned int len;
+ 			if (cl->un.leaf.q->ops->drop &&
+ 			    (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
++				sch->qstats.backlog -= len;
+ 				sch->q.qlen--;
+ 				if (!cl->un.leaf.q->q.qlen)
+ 					htb_deactivate(q, cl);
+@@ -984,12 +987,12 @@ static void htb_reset(struct Qdisc *sch)
+ 			}
+ 			cl->prio_activity = 0;
+ 			cl->cmode = HTB_CAN_SEND;
+-
+ 		}
+ 	}
+ 	qdisc_watchdog_cancel(&q->watchdog);
+ 	__skb_queue_purge(&q->direct_queue);
+ 	sch->q.qlen = 0;
++	sch->qstats.backlog = 0;
+ 	memset(q->hlevel, 0, sizeof(q->hlevel));
+ 	memset(q->row_mask, 0, sizeof(q->row_mask));
+ 	for (i = 0; i < TC_HTB_NUMPRIO; i++)
+@@ -1163,14 +1166,7 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ 				     cl->common.classid)) == NULL)
+ 		return -ENOBUFS;
+ 
+-	sch_tree_lock(sch);
+-	*old = cl->un.leaf.q;
+-	cl->un.leaf.q = new;
+-	if (*old != NULL) {
+-		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+-		qdisc_reset(*old);
+-	}
+-	sch_tree_unlock(sch);
++	*old = qdisc_replace(sch, new, &cl->un.leaf.q);
+ 	return 0;
+ }
+ 
+@@ -1272,7 +1268,6 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
+ {
+ 	struct htb_sched *q = qdisc_priv(sch);
+ 	struct htb_class *cl = (struct htb_class *)arg;
+-	unsigned int qlen;
+ 	struct Qdisc *new_q = NULL;
+ 	int last_child = 0;
+ 
+@@ -1292,9 +1287,11 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
+ 	sch_tree_lock(sch);
+ 
+ 	if (!cl->level) {
+-		qlen = cl->un.leaf.q->q.qlen;
++		unsigned int qlen = cl->un.leaf.q->q.qlen;
++		unsigned int backlog = cl->un.leaf.q->qstats.backlog;
++
+ 		qdisc_reset(cl->un.leaf.q);
+-		qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
++		qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog);
+ 	}
+ 
+ 	/* delete from hash and active; remainder in destroy_class */
+@@ -1428,10 +1425,11 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
+ 		sch_tree_lock(sch);
+ 		if (parent && !parent->level) {
+ 			unsigned int qlen = parent->un.leaf.q->q.qlen;
++			unsigned int backlog = parent->un.leaf.q->qstats.backlog;
+ 
+ 			/* turn parent into inner node */
+ 			qdisc_reset(parent->un.leaf.q);
+-			qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
++			qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog);
+ 			qdisc_destroy(parent->un.leaf.q);
+ 			if (parent->prio_activity)
+ 				htb_deactivate(q, parent);
+diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
+index 4e904ca0af9d..bcdd54bb101c 100644
+--- a/net/sched/sch_multiq.c
++++ b/net/sched/sch_multiq.c
+@@ -218,7 +218,8 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
+ 		if (q->queues[i] != &noop_qdisc) {
+ 			struct Qdisc *child = q->queues[i];
+ 			q->queues[i] = &noop_qdisc;
+-			qdisc_tree_decrease_qlen(child, child->q.qlen);
++			qdisc_tree_reduce_backlog(child, child->q.qlen,
++						  child->qstats.backlog);
+ 			qdisc_destroy(child);
+ 		}
+ 	}
+@@ -238,8 +239,9 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
+ 				q->queues[i] = child;
+ 
+ 				if (old != &noop_qdisc) {
+-					qdisc_tree_decrease_qlen(old,
+-								 old->q.qlen);
++					qdisc_tree_reduce_backlog(old,
++								  old->q.qlen,
++								  old->qstats.backlog);
+ 					qdisc_destroy(old);
+ 				}
+ 				sch_tree_unlock(sch);
+@@ -303,13 +305,7 @@ static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ 	if (new == NULL)
+ 		new = &noop_qdisc;
+ 
+-	sch_tree_lock(sch);
+-	*old = q->queues[band];
+-	q->queues[band] = new;
+-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+-	qdisc_reset(*old);
+-	sch_tree_unlock(sch);
+-
++	*old = qdisc_replace(sch, new, &q->queues[band]);
+ 	return 0;
+ }
+ 
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index 5abd1d9de989..4befe97a9034 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -395,6 +395,25 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+ 	sch->q.qlen++;
+ }
+ 
++/* netem can't properly corrupt a megapacket (like we get from GSO), so instead
++ * when we statistically choose to corrupt one, we instead segment it, returning
++ * the first packet to be corrupted, and re-enqueue the remaining frames
++ */
++static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
++{
++	struct sk_buff *segs;
++	netdev_features_t features = netif_skb_features(skb);
++
++	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
++
++	if (IS_ERR_OR_NULL(segs)) {
++		qdisc_reshape_fail(skb, sch);
++		return NULL;
++	}
++	consume_skb(skb);
++	return segs;
++}
++
+ /*
+  * Insert one skb into qdisc.
+  * Note: parent depends on return value to account for queue length.
+@@ -407,7 +426,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 	/* We don't fill cb now as skb_unshare() may invalidate it */
+ 	struct netem_skb_cb *cb;
+ 	struct sk_buff *skb2;
++	struct sk_buff *segs = NULL;
++	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
++	int nb = 0;
+ 	int count = 1;
++	int rc = NET_XMIT_SUCCESS;
+ 
+ 	/* Random duplication */
+ 	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
+@@ -453,10 +476,23 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 	 * do it now in software before we mangle it.
+ 	 */
+ 	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
++		if (skb_is_gso(skb)) {
++			segs = netem_segment(skb, sch);
++			if (!segs)
++				return NET_XMIT_DROP;
++		} else {
++			segs = skb;
++		}
++
++		skb = segs;
++		segs = segs->next;
++
+ 		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
+ 		    (skb->ip_summed == CHECKSUM_PARTIAL &&
+-		     skb_checksum_help(skb)))
+-			return qdisc_drop(skb, sch);
++		     skb_checksum_help(skb))) {
++			rc = qdisc_drop(skb, sch);
++			goto finish_segs;
++		}
+ 
+ 		skb->data[prandom_u32() % skb_headlen(skb)] ^=
+ 			1<<(prandom_u32() % 8);
+@@ -516,6 +552,27 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 		sch->qstats.requeues++;
+ 	}
+ 
++finish_segs:
++	if (segs) {
++		while (segs) {
++			skb2 = segs->next;
++			segs->next = NULL;
++			qdisc_skb_cb(segs)->pkt_len = segs->len;
++			last_len = segs->len;
++			rc = qdisc_enqueue(segs, sch);
++			if (rc != NET_XMIT_SUCCESS) {
++				if (net_xmit_drop_count(rc))
++					qdisc_qstats_drop(sch);
++			} else {
++				nb++;
++				len += last_len;
++			}
++			segs = skb2;
++		}
++		sch->q.qlen += nb;
++		if (nb > 1)
++			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
++	}
+ 	return NET_XMIT_SUCCESS;
+ }
+ 
+@@ -598,7 +655,8 @@ deliver:
+ 				if (unlikely(err != NET_XMIT_SUCCESS)) {
+ 					if (net_xmit_drop_count(err)) {
+ 						qdisc_qstats_drop(sch);
+-						qdisc_tree_decrease_qlen(sch, 1);
++						qdisc_tree_reduce_backlog(sch, 1,
++									  qdisc_pkt_len(skb));
+ 					}
+ 				}
+ 				goto tfifo_dequeue;
+@@ -1037,15 +1095,7 @@ static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ {
+ 	struct netem_sched_data *q = qdisc_priv(sch);
+ 
+-	sch_tree_lock(sch);
+-	*old = q->qdisc;
+-	q->qdisc = new;
+-	if (*old) {
+-		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+-		qdisc_reset(*old);
+-	}
+-	sch_tree_unlock(sch);
+-
++	*old = qdisc_replace(sch, new, &q->qdisc);
+ 	return 0;
+ }
+ 
+diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
+index b783a446d884..71ae3b9629f9 100644
+--- a/net/sched/sch_pie.c
++++ b/net/sched/sch_pie.c
+@@ -183,7 +183,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
+ {
+ 	struct pie_sched_data *q = qdisc_priv(sch);
+ 	struct nlattr *tb[TCA_PIE_MAX + 1];
+-	unsigned int qlen;
++	unsigned int qlen, dropped = 0;
+ 	int err;
+ 
+ 	if (!opt)
+@@ -232,10 +232,11 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
+ 	while (sch->q.qlen > sch->limit) {
+ 		struct sk_buff *skb = __skb_dequeue(&sch->q);
+ 
++		dropped += qdisc_pkt_len(skb);
+ 		qdisc_qstats_backlog_dec(sch, skb);
+ 		qdisc_drop(skb, sch);
+ 	}
+-	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
++	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
+ 
+ 	sch_tree_unlock(sch);
+ 	return 0;
+diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
+index ba6487f2741f..fee1b15506b2 100644
+--- a/net/sched/sch_prio.c
++++ b/net/sched/sch_prio.c
+@@ -191,7 +191,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
+ 		struct Qdisc *child = q->queues[i];
+ 		q->queues[i] = &noop_qdisc;
+ 		if (child != &noop_qdisc) {
+-			qdisc_tree_decrease_qlen(child, child->q.qlen);
++			qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog);
+ 			qdisc_destroy(child);
+ 		}
+ 	}
+@@ -210,8 +210,9 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
+ 				q->queues[i] = child;
+ 
+ 				if (old != &noop_qdisc) {
+-					qdisc_tree_decrease_qlen(old,
+-								 old->q.qlen);
++					qdisc_tree_reduce_backlog(old,
++								  old->q.qlen,
++								  old->qstats.backlog);
+ 					qdisc_destroy(old);
+ 				}
+ 				sch_tree_unlock(sch);
+@@ -268,13 +269,7 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ 	if (new == NULL)
+ 		new = &noop_qdisc;
+ 
+-	sch_tree_lock(sch);
+-	*old = q->queues[band];
+-	q->queues[band] = new;
+-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+-	qdisc_reset(*old);
+-	sch_tree_unlock(sch);
+-
++	*old = qdisc_replace(sch, new, &q->queues[band]);
+ 	return 0;
+ }
+ 
+diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
+index 3dc3a6e56052..8d2d8d953432 100644
+--- a/net/sched/sch_qfq.c
++++ b/net/sched/sch_qfq.c
+@@ -220,9 +220,10 @@ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
+ static void qfq_purge_queue(struct qfq_class *cl)
+ {
+ 	unsigned int len = cl->qdisc->q.qlen;
++	unsigned int backlog = cl->qdisc->qstats.backlog;
+ 
+ 	qdisc_reset(cl->qdisc);
+-	qdisc_tree_decrease_qlen(cl->qdisc, len);
++	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
+ }
+ 
+ static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
+@@ -617,11 +618,7 @@ static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
+ 			new = &noop_qdisc;
+ 	}
+ 
+-	sch_tree_lock(sch);
+-	qfq_purge_queue(cl);
+-	*old = cl->qdisc;
+-	cl->qdisc = new;
+-	sch_tree_unlock(sch);
++	*old = qdisc_replace(sch, new, &cl->qdisc);
+ 	return 0;
+ }
+ 
+diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
+index 6c0534cc7758..8c0508c0e287 100644
+--- a/net/sched/sch_red.c
++++ b/net/sched/sch_red.c
+@@ -210,7 +210,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
+ 	q->flags = ctl->flags;
+ 	q->limit = ctl->limit;
+ 	if (child) {
+-		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
++		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
++					  q->qdisc->qstats.backlog);
+ 		qdisc_destroy(q->qdisc);
+ 		q->qdisc = child;
+ 	}
+@@ -313,12 +314,7 @@ static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ 	if (new == NULL)
+ 		new = &noop_qdisc;
+ 
+-	sch_tree_lock(sch);
+-	*old = q->qdisc;
+-	q->qdisc = new;
+-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+-	qdisc_reset(*old);
+-	sch_tree_unlock(sch);
++	*old = qdisc_replace(sch, new, &q->qdisc);
+ 	return 0;
+ }
+ 
+diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
+index 5bbb6332ec57..c69611640fa5 100644
+--- a/net/sched/sch_sfb.c
++++ b/net/sched/sch_sfb.c
+@@ -510,7 +510,8 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
+ 
+ 	sch_tree_lock(sch);
+ 
+-	qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
++	qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
++				  q->qdisc->qstats.backlog);
+ 	qdisc_destroy(q->qdisc);
+ 	q->qdisc = child;
+ 
+@@ -606,12 +607,7 @@ static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ 	if (new == NULL)
+ 		new = &noop_qdisc;
+ 
+-	sch_tree_lock(sch);
+-	*old = q->qdisc;
+-	q->qdisc = new;
+-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+-	qdisc_reset(*old);
+-	sch_tree_unlock(sch);
++	*old = qdisc_replace(sch, new, &q->qdisc);
+ 	return 0;
+ }
+ 
+diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
+index 3abab534eb5c..498f0a2cb47f 100644
+--- a/net/sched/sch_sfq.c
++++ b/net/sched/sch_sfq.c
+@@ -346,7 +346,7 @@ static int
+ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ {
+ 	struct sfq_sched_data *q = qdisc_priv(sch);
+-	unsigned int hash;
++	unsigned int hash, dropped;
+ 	sfq_index x, qlen;
+ 	struct sfq_slot *slot;
+ 	int uninitialized_var(ret);
+@@ -461,7 +461,7 @@ enqueue:
+ 		return NET_XMIT_SUCCESS;
+ 
+ 	qlen = slot->qlen;
+-	sfq_drop(sch);
++	dropped = sfq_drop(sch);
+ 	/* Return Congestion Notification only if we dropped a packet
+ 	 * from this flow.
+ 	 */
+@@ -469,7 +469,7 @@ enqueue:
+ 		return NET_XMIT_CN;
+ 
+ 	/* As we dropped a packet, better let upper stack know this */
+-	qdisc_tree_decrease_qlen(sch, 1);
++	qdisc_tree_reduce_backlog(sch, 1, dropped);
+ 	return NET_XMIT_SUCCESS;
+ }
+ 
+@@ -537,6 +537,7 @@ static void sfq_rehash(struct Qdisc *sch)
+ 	struct sfq_slot *slot;
+ 	struct sk_buff_head list;
+ 	int dropped = 0;
++	unsigned int drop_len = 0;
+ 
+ 	__skb_queue_head_init(&list);
+ 
+@@ -565,6 +566,7 @@ static void sfq_rehash(struct Qdisc *sch)
+ 			if (x >= SFQ_MAX_FLOWS) {
+ drop:
+ 				qdisc_qstats_backlog_dec(sch, skb);
++				drop_len += qdisc_pkt_len(skb);
+ 				kfree_skb(skb);
+ 				dropped++;
+ 				continue;
+@@ -594,7 +596,7 @@ drop:
+ 		}
+ 	}
+ 	sch->q.qlen -= dropped;
+-	qdisc_tree_decrease_qlen(sch, dropped);
++	qdisc_tree_reduce_backlog(sch, dropped, drop_len);
+ }
+ 
+ static void sfq_perturbation(unsigned long arg)
+@@ -618,7 +620,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
+ 	struct sfq_sched_data *q = qdisc_priv(sch);
+ 	struct tc_sfq_qopt *ctl = nla_data(opt);
+ 	struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
+-	unsigned int qlen;
++	unsigned int qlen, dropped = 0;
+ 	struct red_parms *p = NULL;
+ 
+ 	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
+@@ -667,8 +669,8 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
+ 
+ 	qlen = sch->q.qlen;
+ 	while (sch->q.qlen > q->limit)
+-		sfq_drop(sch);
+-	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
++		dropped += sfq_drop(sch);
++	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
+ 
+ 	del_timer(&q->perturb_timer);
+ 	if (q->perturb_period) {
+diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
+index a4afde14e865..c2fbde742f37 100644
+--- a/net/sched/sch_tbf.c
++++ b/net/sched/sch_tbf.c
+@@ -160,6 +160,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
+ 	struct tbf_sched_data *q = qdisc_priv(sch);
+ 	struct sk_buff *segs, *nskb;
+ 	netdev_features_t features = netif_skb_features(skb);
++	unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
+ 	int ret, nb;
+ 
+ 	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+@@ -172,6 +173,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
+ 		nskb = segs->next;
+ 		segs->next = NULL;
+ 		qdisc_skb_cb(segs)->pkt_len = segs->len;
++		len += segs->len;
+ 		ret = qdisc_enqueue(segs, q->qdisc);
+ 		if (ret != NET_XMIT_SUCCESS) {
+ 			if (net_xmit_drop_count(ret))
+@@ -183,7 +185,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
+ 	}
+ 	sch->q.qlen += nb;
+ 	if (nb > 1)
+-		qdisc_tree_decrease_qlen(sch, 1 - nb);
++		qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
+ 	consume_skb(skb);
+ 	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
+ }
+@@ -399,7 +401,8 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
+ 
+ 	sch_tree_lock(sch);
+ 	if (child) {
+-		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
++		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
++					  q->qdisc->qstats.backlog);
+ 		qdisc_destroy(q->qdisc);
+ 		q->qdisc = child;
+ 	}
+@@ -502,13 +505,7 @@ static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ 	if (new == NULL)
+ 		new = &noop_qdisc;
+ 
+-	sch_tree_lock(sch);
+-	*old = q->qdisc;
+-	q->qdisc = new;
+-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+-	qdisc_reset(*old);
+-	sch_tree_unlock(sch);
+-
++	*old = qdisc_replace(sch, new, &q->qdisc);
+ 	return 0;
+ }
+ 
+diff --git a/net/socket.c b/net/socket.c
+index c044d1e8508c..db13ae893dce 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -2240,31 +2240,31 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
+ 		cond_resched();
+ 	}
+ 
+-out_put:
+-	fput_light(sock->file, fput_needed);
+-
+ 	if (err == 0)
+-		return datagrams;
++		goto out_put;
+ 
+-	if (datagrams != 0) {
++	if (datagrams == 0) {
++		datagrams = err;
++		goto out_put;
++	}
++
++	/*
++	 * We may return less entries than requested (vlen) if the
++	 * sock is non block and there aren't enough datagrams...
++	 */
++	if (err != -EAGAIN) {
+ 		/*
+-		 * We may return less entries than requested (vlen) if the
+-		 * sock is non block and there aren't enough datagrams...
++		 * ... or  if recvmsg returns an error after we
++		 * received some datagrams, where we record the
++		 * error to return on the next call or if the
++		 * app asks about it using getsockopt(SO_ERROR).
+ 		 */
+-		if (err != -EAGAIN) {
+-			/*
+-			 * ... or  if recvmsg returns an error after we
+-			 * received some datagrams, where we record the
+-			 * error to return on the next call or if the
+-			 * app asks about it using getsockopt(SO_ERROR).
+-			 */
+-			sock->sk->sk_err = -err;
+-		}
+-
+-		return datagrams;
++		sock->sk->sk_err = -err;
+ 	}
++out_put:
++	fput_light(sock->file, fput_needed);
+ 
+-	return err;
++	return datagrams;
+ }
+ 
+ SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg,
+diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
+index 273bc3a35425..008c25d1b9f9 100644
+--- a/net/sunrpc/cache.c
++++ b/net/sunrpc/cache.c
+@@ -1182,14 +1182,14 @@ int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
+ 	}
+ 
+ 	crq->q.reader = 0;
+-	crq->item = cache_get(h);
+ 	crq->buf = buf;
+ 	crq->len = 0;
+ 	crq->readers = 0;
+ 	spin_lock(&queue_lock);
+-	if (test_bit(CACHE_PENDING, &h->flags))
++	if (test_bit(CACHE_PENDING, &h->flags)) {
++		crq->item = cache_get(h);
+ 		list_add_tail(&crq->q.list, &detail->queue);
+-	else
++	} else
+ 		/* Lost a race, no longer PENDING, so don't enqueue */
+ 		ret = -EAGAIN;
+ 	spin_unlock(&queue_lock);
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index bbe65dcb9738..c93e67beaea7 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -1789,27 +1789,8 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ 	else if (sk->sk_shutdown & RCV_SHUTDOWN)
+ 		err = 0;
+ 
+-	if (copied > 0) {
+-		/* We only do these additional bookkeeping/notification steps
+-		 * if we actually copied something out of the queue pair
+-		 * instead of just peeking ahead.
+-		 */
+-
+-		if (!(flags & MSG_PEEK)) {
+-			/* If the other side has shutdown for sending and there
+-			 * is nothing more to read, then modify the socket
+-			 * state.
+-			 */
+-			if (vsk->peer_shutdown & SEND_SHUTDOWN) {
+-				if (vsock_stream_has_data(vsk) <= 0) {
+-					sk->sk_state = SS_UNCONNECTED;
+-					sock_set_flag(sk, SOCK_DONE);
+-					sk->sk_state_change(sk);
+-				}
+-			}
+-		}
++	if (copied > 0)
+ 		err = copied;
+-	}
+ 
+ out:
+ 	release_sock(sk);
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 711cb7ad6ae0..ab62d305b48b 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -13201,7 +13201,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
+ 	struct wireless_dev *wdev;
+ 	struct cfg80211_beacon_registration *reg, *tmp;
+ 
+-	if (state != NETLINK_URELEASE)
++	if (state != NETLINK_URELEASE || notify->protocol != NETLINK_GENERIC)
+ 		return NOTIFY_DONE;
+ 
+ 	rcu_read_lock();
+diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
+index 7ecd04c21360..997ff7b2509b 100644
+--- a/net/x25/x25_facilities.c
++++ b/net/x25/x25_facilities.c
+@@ -277,6 +277,7 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
+ 
+ 	memset(&theirs, 0, sizeof(theirs));
+ 	memcpy(new, ours, sizeof(*new));
++	memset(dte, 0, sizeof(*dte));
+ 
+ 	len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask);
+ 	if (len < 0)
+diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
+index ad7f5b3f9b61..1c4ad477ce93 100644
+--- a/net/xfrm/xfrm_input.c
++++ b/net/xfrm/xfrm_input.c
+@@ -292,12 +292,15 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
+ 		XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
+ 
+ 		skb_dst_force(skb);
++		dev_hold(skb->dev);
+ 
+ 		nexthdr = x->type->input(x, skb);
+ 
+ 		if (nexthdr == -EINPROGRESS)
+ 			return 0;
+ resume:
++		dev_put(skb->dev);
++
+ 		spin_lock(&x->lock);
+ 		if (nexthdr <= 0) {
+ 			if (nexthdr == -EBADMSG) {
+diff --git a/samples/bpf/trace_output_kern.c b/samples/bpf/trace_output_kern.c
+index 8d8d1ec429eb..9b96f4fb8cea 100644
+--- a/samples/bpf/trace_output_kern.c
++++ b/samples/bpf/trace_output_kern.c
+@@ -18,7 +18,6 @@ int bpf_prog1(struct pt_regs *ctx)
+ 		u64 cookie;
+ 	} data;
+ 
+-	memset(&data, 0, sizeof(data));
+ 	data.pid = bpf_get_current_pid_tgid();
+ 	data.cookie = 0x12345678;
+ 
+diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
+index f9e47a70509c..53449a6ff6aa 100644
+--- a/scripts/Makefile.extrawarn
++++ b/scripts/Makefile.extrawarn
+@@ -24,6 +24,7 @@ warning-1 += $(call cc-option, -Wmissing-prototypes)
+ warning-1 += -Wold-style-definition
+ warning-1 += $(call cc-option, -Wmissing-include-dirs)
+ warning-1 += $(call cc-option, -Wunused-but-set-variable)
++warning-1 += $(call cc-option, -Wunused-const-variable)
+ warning-1 += $(call cc-disable-warning, missing-field-initializers)
+ warning-1 += $(call cc-disable-warning, sign-compare)
+ 
+diff --git a/scripts/coccinelle/iterators/use_after_iter.cocci b/scripts/coccinelle/iterators/use_after_iter.cocci
+index f085f5968c52..ce8cc9c006e5 100644
+--- a/scripts/coccinelle/iterators/use_after_iter.cocci
++++ b/scripts/coccinelle/iterators/use_after_iter.cocci
+@@ -123,7 +123,7 @@ list_remove_head(x,c,...)
+ |
+ sizeof(<+...c...+>)
+ |
+-&c->member
++ &c->member
+ |
+ c = E
+ |
+diff --git a/scripts/gdb/linux/modules.py b/scripts/gdb/linux/modules.py
+index 25db8cff44a2..0a35d6dbfb80 100644
+--- a/scripts/gdb/linux/modules.py
++++ b/scripts/gdb/linux/modules.py
+@@ -73,10 +73,11 @@ class LxLsmod(gdb.Command):
+                 "        " if utils.get_long_type().sizeof == 8 else ""))
+ 
+         for module in module_list():
++            layout = module['core_layout']
+             gdb.write("{address} {name:<19} {size:>8}  {ref}".format(
+-                address=str(module['module_core']).split()[0],
++                address=str(layout['base']).split()[0],
+                 name=module['name'].string(),
+-                size=str(module['core_size']),
++                size=str(layout['size']),
+                 ref=str(module['refcnt']['counter'])))
+ 
+             source_list = module['source_list']
+diff --git a/scripts/gdb/linux/symbols.py b/scripts/gdb/linux/symbols.py
+index 627750cb420d..9a0f8923f67c 100644
+--- a/scripts/gdb/linux/symbols.py
++++ b/scripts/gdb/linux/symbols.py
+@@ -108,7 +108,7 @@ lx-symbols command."""
+ 
+     def load_module_symbols(self, module):
+         module_name = module['name'].string()
+-        module_addr = str(module['module_core']).split()[0]
++        module_addr = str(module['core_layout']['base']).split()[0]
+ 
+         module_file = self._get_module_file(module_name)
+         if not module_file and not self.module_files_updated:
+diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
+index d79cba4ce3eb..ebced77deb9c 100644
+--- a/scripts/kconfig/Makefile
++++ b/scripts/kconfig/Makefile
+@@ -96,13 +96,15 @@ savedefconfig: $(obj)/conf
+ defconfig: $(obj)/conf
+ ifeq ($(KBUILD_DEFCONFIG),)
+ 	$< $(silent) --defconfig $(Kconfig)
+-else ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG)),)
++else
++ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG)),)
+ 	@$(kecho) "*** Default configuration is based on '$(KBUILD_DEFCONFIG)'"
+ 	$(Q)$< $(silent) --defconfig=arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG) $(Kconfig)
+ else
+ 	@$(kecho) "*** Default configuration is based on target '$(KBUILD_DEFCONFIG)'"
+ 	$(Q)$(MAKE) -f $(srctree)/Makefile $(KBUILD_DEFCONFIG)
+ endif
++endif
+ 
+ %_defconfig: $(obj)/conf
+ 	$(Q)$< $(silent) --defconfig=arch/$(SRCARCH)/configs/$@ $(Kconfig)
+diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
+index 0b7dc2fd7bac..dd243d2abd87 100644
+--- a/scripts/kconfig/confdata.c
++++ b/scripts/kconfig/confdata.c
+@@ -267,10 +267,8 @@ int conf_read_simple(const char *name, int def)
+ 		if (in)
+ 			goto load;
+ 		sym_add_change_count(1);
+-		if (!sym_defconfig_list) {
+-			sym_calc_value(modules_sym);
++		if (!sym_defconfig_list)
+ 			return 1;
+-		}
+ 
+ 		for_all_defaults(sym_defconfig_list, prop) {
+ 			if (expr_calc_value(prop->visible.expr) == no ||
+@@ -403,7 +401,6 @@ setsym:
+ 	}
+ 	free(line);
+ 	fclose(in);
+-	sym_calc_value(modules_sym);
+ 	return 0;
+ }
+ 
+@@ -414,8 +411,12 @@ int conf_read(const char *name)
+ 
+ 	sym_set_change_count(0);
+ 
+-	if (conf_read_simple(name, S_DEF_USER))
++	if (conf_read_simple(name, S_DEF_USER)) {
++		sym_calc_value(modules_sym);
+ 		return 1;
++	}
++
++	sym_calc_value(modules_sym);
+ 
+ 	for_all_symbols(i, sym) {
+ 		sym_calc_value(sym);
+@@ -846,6 +847,7 @@ static int conf_split_config(void)
+ 
+ 	name = conf_get_autoconfig_name();
+ 	conf_read_simple(name, S_DEF_AUTO);
++	sym_calc_value(modules_sym);
+ 
+ 	if (chdir("include/config"))
+ 		return 1;
+diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
+index 161dd0d67da8..a9155077feef 100644
+--- a/scripts/mod/file2alias.c
++++ b/scripts/mod/file2alias.c
+@@ -371,6 +371,49 @@ static void do_usb_table(void *symval, unsigned long size,
+ 		do_usb_entry_multi(symval + i, mod);
+ }
+ 
++static void do_of_entry_multi(void *symval, struct module *mod)
++{
++	char alias[500];
++	int len;
++	char *tmp;
++
++	DEF_FIELD_ADDR(symval, of_device_id, name);
++	DEF_FIELD_ADDR(symval, of_device_id, type);
++	DEF_FIELD_ADDR(symval, of_device_id, compatible);
++
++	len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
++		      (*type)[0] ? *type : "*");
++
++	if (compatible[0])
++		sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
++			*compatible);
++
++	/* Replace all whitespace with underscores */
++	for (tmp = alias; tmp && *tmp; tmp++)
++		if (isspace(*tmp))
++			*tmp = '_';
++
++	buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"%s\");\n", alias);
++	strcat(alias, "C");
++	add_wildcard(alias);
++	buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"%s\");\n", alias);
++}
++
++static void do_of_table(void *symval, unsigned long size,
++			struct module *mod)
++{
++	unsigned int i;
++	const unsigned long id_size = SIZE_of_device_id;
++
++	device_id_check(mod->name, "of", size, id_size, symval);
++
++	/* Leave last one: it's the terminator. */
++	size -= id_size;
++
++	for (i = 0; i < size; i += id_size)
++		do_of_entry_multi(symval + i, mod);
++}
++
+ /* Looks like: hid:bNvNpN */
+ static int do_hid_entry(const char *filename,
+ 			     void *symval, char *alias)
+@@ -684,30 +727,6 @@ static int do_pcmcia_entry(const char *filename,
+ }
+ ADD_TO_DEVTABLE("pcmcia", pcmcia_device_id, do_pcmcia_entry);
+ 
+-static int do_of_entry (const char *filename, void *symval, char *alias)
+-{
+-	int len;
+-	char *tmp;
+-	DEF_FIELD_ADDR(symval, of_device_id, name);
+-	DEF_FIELD_ADDR(symval, of_device_id, type);
+-	DEF_FIELD_ADDR(symval, of_device_id, compatible);
+-
+-	len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
+-		      (*type)[0] ? *type : "*");
+-
+-	if (compatible[0])
+-		sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
+-			*compatible);
+-
+-	/* Replace all whitespace with underscores */
+-	for (tmp = alias; tmp && *tmp; tmp++)
+-		if (isspace (*tmp))
+-			*tmp = '_';
+-
+-	return 1;
+-}
+-ADD_TO_DEVTABLE("of", of_device_id, do_of_entry);
+-
+ static int do_vio_entry(const char *filename, void *symval,
+ 		char *alias)
+ {
+@@ -1348,6 +1367,8 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
+ 	/* First handle the "special" cases */
+ 	if (sym_is(name, namelen, "usb"))
+ 		do_usb_table(symval, sym->st_size, mod);
++	if (sym_is(name, namelen, "of"))
++		do_of_table(symval, sym->st_size, mod);
+ 	else if (sym_is(name, namelen, "pnp"))
+ 		do_pnp_device_entry(symval, sym->st_size, mod);
+ 	else if (sym_is(name, namelen, "pnp_card"))
+diff --git a/scripts/package/mkspec b/scripts/package/mkspec
+index 71004daefe31..fe44d68e9344 100755
+--- a/scripts/package/mkspec
++++ b/scripts/package/mkspec
+@@ -131,11 +131,11 @@ echo 'rm -rf $RPM_BUILD_ROOT'
+ echo ""
+ echo "%post"
+ echo "if [ -x /sbin/installkernel -a -r /boot/vmlinuz-$KERNELRELEASE -a -r /boot/System.map-$KERNELRELEASE ]; then"
+-echo "cp /boot/vmlinuz-$KERNELRELEASE /boot/vmlinuz-$KERNELRELEASE-rpm"
+-echo "cp /boot/System.map-$KERNELRELEASE /boot/System.map-$KERNELRELEASE-rpm"
++echo "cp /boot/vmlinuz-$KERNELRELEASE /boot/.vmlinuz-$KERNELRELEASE-rpm"
++echo "cp /boot/System.map-$KERNELRELEASE /boot/.System.map-$KERNELRELEASE-rpm"
+ echo "rm -f /boot/vmlinuz-$KERNELRELEASE /boot/System.map-$KERNELRELEASE"
+-echo "/sbin/installkernel $KERNELRELEASE /boot/vmlinuz-$KERNELRELEASE-rpm /boot/System.map-$KERNELRELEASE-rpm"
+-echo "rm -f /boot/vmlinuz-$KERNELRELEASE-rpm /boot/System.map-$KERNELRELEASE-rpm"
++echo "/sbin/installkernel $KERNELRELEASE /boot/.vmlinuz-$KERNELRELEASE-rpm /boot/.System.map-$KERNELRELEASE-rpm"
++echo "rm -f /boot/.vmlinuz-$KERNELRELEASE-rpm /boot/.System.map-$KERNELRELEASE-rpm"
+ echo "fi"
+ echo ""
+ echo "%files"
+diff --git a/security/keys/trusted.c b/security/keys/trusted.c
+index 0dcab20cdacd..90d61751ff12 100644
+--- a/security/keys/trusted.c
++++ b/security/keys/trusted.c
+@@ -744,6 +744,7 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
+ 	unsigned long handle;
+ 	unsigned long lock;
+ 	unsigned long token_mask = 0;
++	unsigned int digest_len;
+ 	int i;
+ 	int tpm2;
+ 
+@@ -752,7 +753,6 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
+ 		return tpm2;
+ 
+ 	opt->hash = tpm2 ? HASH_ALGO_SHA256 : HASH_ALGO_SHA1;
+-	opt->digest_len = hash_digest_size[opt->hash];
+ 
+ 	while ((p = strsep(&c, " \t"))) {
+ 		if (*p == '\0' || *p == ' ' || *p == '\t')
+@@ -812,8 +812,6 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
+ 			for (i = 0; i < HASH_ALGO__LAST; i++) {
+ 				if (!strcmp(args[0].from, hash_algo_name[i])) {
+ 					opt->hash = i;
+-					opt->digest_len =
+-						hash_digest_size[opt->hash];
+ 					break;
+ 				}
+ 			}
+@@ -825,13 +823,14 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
+ 			}
+ 			break;
+ 		case Opt_policydigest:
+-			if (!tpm2 ||
+-			    strlen(args[0].from) != (2 * opt->digest_len))
++			digest_len = hash_digest_size[opt->hash];
++			if (!tpm2 || strlen(args[0].from) != (2 * digest_len))
+ 				return -EINVAL;
+ 			res = hex2bin(opt->policydigest, args[0].from,
+-				      opt->digest_len);
++				      digest_len);
+ 			if (res < 0)
+ 				return -EINVAL;
++			opt->policydigest_len = digest_len;
+ 			break;
+ 		case Opt_policyhandle:
+ 			if (!tpm2)
+diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
+index 6b5a811e01a5..3a9b66c6e09c 100644
+--- a/sound/core/pcm_lib.c
++++ b/sound/core/pcm_lib.c
+@@ -322,7 +322,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
+ 			char name[16];
+ 			snd_pcm_debug_name(substream, name, sizeof(name));
+ 			pcm_err(substream->pcm,
+-				"BUG: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
++				"invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
+ 				name, pos, runtime->buffer_size,
+ 				runtime->period_size);
+ 		}
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index dca817fc7894..e5e7e4368996 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -1041,8 +1041,8 @@ static int snd_timer_s_start(struct snd_timer * timer)
+ 		njiff += timer->sticks - priv->correction;
+ 		priv->correction = 0;
+ 	}
+-	priv->last_expires = priv->tlist.expires = njiff;
+-	add_timer(&priv->tlist);
++	priv->last_expires = njiff;
++	mod_timer(&priv->tlist, njiff);
+ 	return 0;
+ }
+ 
+diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c
+index e361024eabb6..d1a4d6973330 100644
+--- a/sound/hda/hdac_device.c
++++ b/sound/hda/hdac_device.c
+@@ -611,6 +611,22 @@ int snd_hdac_power_up_pm(struct hdac_device *codec)
+ }
+ EXPORT_SYMBOL_GPL(snd_hdac_power_up_pm);
+ 
++/* like snd_hdac_power_up_pm(), but only increment the pm count when
++ * already powered up.  Returns -1 if not powered up, 1 if incremented
++ * or 0 if unchanged.  Only used in hdac_regmap.c
++ */
++int snd_hdac_keep_power_up(struct hdac_device *codec)
++{
++	if (!atomic_inc_not_zero(&codec->in_pm)) {
++		int ret = pm_runtime_get_if_in_use(&codec->dev);
++		if (!ret)
++			return -1;
++		if (ret < 0)
++			return 0;
++	}
++	return 1;
++}
++
+ /**
+  * snd_hdac_power_down_pm - power down the codec
+  * @codec: the codec object
+diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
+index f6854dbd7d8d..69ead7150a5c 100644
+--- a/sound/hda/hdac_i915.c
++++ b/sound/hda/hdac_i915.c
+@@ -20,6 +20,7 @@
+ #include <sound/core.h>
+ #include <sound/hdaudio.h>
+ #include <sound/hda_i915.h>
++#include <sound/hda_register.h>
+ 
+ static struct i915_audio_component *hdac_acomp;
+ 
+@@ -97,26 +98,65 @@ int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
+ }
+ EXPORT_SYMBOL_GPL(snd_hdac_display_power);
+ 
++#define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \
++				((pci)->device == 0x0c0c) || \
++				((pci)->device == 0x0d0c) || \
++				((pci)->device == 0x160c))
++
+ /**
+- * snd_hdac_get_display_clk - Get CDCLK in kHz
++ * snd_hdac_i915_set_bclk - Reprogram BCLK for HSW/BDW
+  * @bus: HDA core bus
+  *
+- * This function is supposed to be used only by a HD-audio controller
+- * driver that needs the interaction with i915 graphics.
++ * Intel HSW/BDW display HDA controller is in GPU. Both its power and link BCLK
++ * depends on GPU. Two Extended Mode registers EM4 (M value) and EM5 (N Value)
++ * are used to convert CDClk (Core Display Clock) to 24MHz BCLK:
++ * BCLK = CDCLK * M / N
++ * The values will be lost when the display power well is disabled and need to
++ * be restored to avoid abnormal playback speed.
+  *
+- * This function queries CDCLK value in kHz from the graphics driver and
+- * returns the value.  A negative code is returned in error.
++ * Call this function at initializing and changing power well, as well as
++ * at ELD notifier for the hotplug.
+  */
+-int snd_hdac_get_display_clk(struct hdac_bus *bus)
++void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
+ {
+ 	struct i915_audio_component *acomp = bus->audio_component;
++	struct pci_dev *pci = to_pci_dev(bus->dev);
++	int cdclk_freq;
++	unsigned int bclk_m, bclk_n;
++
++	if (!acomp || !acomp->ops || !acomp->ops->get_cdclk_freq)
++		return; /* only for i915 binding */
++	if (!CONTROLLER_IN_GPU(pci))
++		return; /* only HSW/BDW */
++
++	cdclk_freq = acomp->ops->get_cdclk_freq(acomp->dev);
++	switch (cdclk_freq) {
++	case 337500:
++		bclk_m = 16;
++		bclk_n = 225;
++		break;
++
++	case 450000:
++	default: /* default CDCLK 450MHz */
++		bclk_m = 4;
++		bclk_n = 75;
++		break;
++
++	case 540000:
++		bclk_m = 4;
++		bclk_n = 90;
++		break;
++
++	case 675000:
++		bclk_m = 8;
++		bclk_n = 225;
++		break;
++	}
+ 
+-	if (!acomp || !acomp->ops)
+-		return -ENODEV;
+-
+-	return acomp->ops->get_cdclk_freq(acomp->dev);
++	snd_hdac_chip_writew(bus, HSW_EM4, bclk_m);
++	snd_hdac_chip_writew(bus, HSW_EM5, bclk_n);
+ }
+-EXPORT_SYMBOL_GPL(snd_hdac_get_display_clk);
++EXPORT_SYMBOL_GPL(snd_hdac_i915_set_bclk);
+ 
+ /* There is a fixed mapping between audio pin node and display port
+  * on current Intel platforms:
+diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
+index eb8f7c30cb09..bdbcd6b75ff6 100644
+--- a/sound/hda/hdac_regmap.c
++++ b/sound/hda/hdac_regmap.c
+@@ -21,13 +21,16 @@
+ #include <sound/hdaudio.h>
+ #include <sound/hda_regmap.h>
+ 
+-#ifdef CONFIG_PM
+-#define codec_is_running(codec)				\
+-	(atomic_read(&(codec)->in_pm) ||		\
+-	 !pm_runtime_suspended(&(codec)->dev))
+-#else
+-#define codec_is_running(codec)		true
+-#endif
++static int codec_pm_lock(struct hdac_device *codec)
++{
++	return snd_hdac_keep_power_up(codec);
++}
++
++static void codec_pm_unlock(struct hdac_device *codec, int lock)
++{
++	if (lock == 1)
++		snd_hdac_power_down_pm(codec);
++}
+ 
+ #define get_verb(reg)	(((reg) >> 8) & 0xfff)
+ 
+@@ -238,20 +241,28 @@ static int hda_reg_read(void *context, unsigned int reg, unsigned int *val)
+ 	struct hdac_device *codec = context;
+ 	int verb = get_verb(reg);
+ 	int err;
++	int pm_lock = 0;
+ 
+-	if (!codec_is_running(codec) && verb != AC_VERB_GET_POWER_STATE)
+-		return -EAGAIN;
++	if (verb != AC_VERB_GET_POWER_STATE) {
++		pm_lock = codec_pm_lock(codec);
++		if (pm_lock < 0)
++			return -EAGAIN;
++	}
+ 	reg |= (codec->addr << 28);
+-	if (is_stereo_amp_verb(reg))
+-		return hda_reg_read_stereo_amp(codec, reg, val);
+-	if (verb == AC_VERB_GET_PROC_COEF)
+-		return hda_reg_read_coef(codec, reg, val);
++	if (is_stereo_amp_verb(reg)) {
++		err = hda_reg_read_stereo_amp(codec, reg, val);
++		goto out;
++	}
++	if (verb == AC_VERB_GET_PROC_COEF) {
++		err = hda_reg_read_coef(codec, reg, val);
++		goto out;
++	}
+ 	if ((verb & 0x700) == AC_VERB_SET_AMP_GAIN_MUTE)
+ 		reg &= ~AC_AMP_FAKE_MUTE;
+ 
+ 	err = snd_hdac_exec_verb(codec, reg, 0, val);
+ 	if (err < 0)
+-		return err;
++		goto out;
+ 	/* special handling for asymmetric reads */
+ 	if (verb == AC_VERB_GET_POWER_STATE) {
+ 		if (*val & AC_PWRST_ERROR)
+@@ -259,7 +270,9 @@ static int hda_reg_read(void *context, unsigned int reg, unsigned int *val)
+ 		else /* take only the actual state */
+ 			*val = (*val >> 4) & 0x0f;
+ 	}
+-	return 0;
++ out:
++	codec_pm_unlock(codec, pm_lock);
++	return err;
+ }
+ 
+ static int hda_reg_write(void *context, unsigned int reg, unsigned int val)
+@@ -267,6 +280,7 @@ static int hda_reg_write(void *context, unsigned int reg, unsigned int val)
+ 	struct hdac_device *codec = context;
+ 	unsigned int verb;
+ 	int i, bytes, err;
++	int pm_lock = 0;
+ 
+ 	if (codec->caps_overwriting)
+ 		return 0;
+@@ -275,14 +289,21 @@ static int hda_reg_write(void *context, unsigned int reg, unsigned int val)
+ 	reg |= (codec->addr << 28);
+ 	verb = get_verb(reg);
+ 
+-	if (!codec_is_running(codec) && verb != AC_VERB_SET_POWER_STATE)
+-		return codec->lazy_cache ? 0 : -EAGAIN;
++	if (verb != AC_VERB_SET_POWER_STATE) {
++		pm_lock = codec_pm_lock(codec);
++		if (pm_lock < 0)
++			return codec->lazy_cache ? 0 : -EAGAIN;
++	}
+ 
+-	if (is_stereo_amp_verb(reg))
+-		return hda_reg_write_stereo_amp(codec, reg, val);
++	if (is_stereo_amp_verb(reg)) {
++		err = hda_reg_write_stereo_amp(codec, reg, val);
++		goto out;
++	}
+ 
+-	if (verb == AC_VERB_SET_PROC_COEF)
+-		return hda_reg_write_coef(codec, reg, val);
++	if (verb == AC_VERB_SET_PROC_COEF) {
++		err = hda_reg_write_coef(codec, reg, val);
++		goto out;
++	}
+ 
+ 	switch (verb & 0xf00) {
+ 	case AC_VERB_SET_AMP_GAIN_MUTE:
+@@ -319,10 +340,12 @@ static int hda_reg_write(void *context, unsigned int reg, unsigned int val)
+ 		reg |= (verb + i) << 8 | ((val >> (8 * i)) & 0xff);
+ 		err = snd_hdac_exec_verb(codec, reg, 0, NULL);
+ 		if (err < 0)
+-			return err;
++			goto out;
+ 	}
+ 
+-	return 0;
++ out:
++	codec_pm_unlock(codec, pm_lock);
++	return err;
+ }
+ 
+ static const struct regmap_config hda_regmap_cfg = {
+diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
+index 7ca5b89f088a..dfaf1a93fb8a 100644
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -826,7 +826,7 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
+ 				   bool allow_powerdown)
+ {
+ 	hda_nid_t nid, changed = 0;
+-	int i, state;
++	int i, state, power;
+ 
+ 	for (i = 0; i < path->depth; i++) {
+ 		nid = path->path[i];
+@@ -838,7 +838,9 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
+ 			state = AC_PWRST_D0;
+ 		else
+ 			state = AC_PWRST_D3;
+-		if (!snd_hda_check_power_state(codec, nid, state)) {
++		power = snd_hda_codec_read(codec, nid, 0,
++					   AC_VERB_GET_POWER_STATE, 0);
++		if (power != (state | (state << 4))) {
+ 			snd_hda_codec_write(codec, nid, 0,
+ 					    AC_VERB_SET_POWER_STATE, state);
+ 			changed = nid;
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index e5240cb3749f..c0b772bb49af 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -857,50 +857,6 @@ static int param_set_xint(const char *val, const struct kernel_param *kp)
+ #define azx_del_card_list(chip) /* NOP */
+ #endif /* CONFIG_PM */
+ 
+-/* Intel HSW/BDW display HDA controller is in GPU. Both its power and link BCLK
+- * depends on GPU. Two Extended Mode registers EM4 (M value) and EM5 (N Value)
+- * are used to convert CDClk (Core Display Clock) to 24MHz BCLK:
+- * BCLK = CDCLK * M / N
+- * The values will be lost when the display power well is disabled and need to
+- * be restored to avoid abnormal playback speed.
+- */
+-static void haswell_set_bclk(struct hda_intel *hda)
+-{
+-	struct azx *chip = &hda->chip;
+-	int cdclk_freq;
+-	unsigned int bclk_m, bclk_n;
+-
+-	if (!hda->need_i915_power)
+-		return;
+-
+-	cdclk_freq = snd_hdac_get_display_clk(azx_bus(chip));
+-	switch (cdclk_freq) {
+-	case 337500:
+-		bclk_m = 16;
+-		bclk_n = 225;
+-		break;
+-
+-	case 450000:
+-	default: /* default CDCLK 450MHz */
+-		bclk_m = 4;
+-		bclk_n = 75;
+-		break;
+-
+-	case 540000:
+-		bclk_m = 4;
+-		bclk_n = 90;
+-		break;
+-
+-	case 675000:
+-		bclk_m = 8;
+-		bclk_n = 225;
+-		break;
+-	}
+-
+-	azx_writew(chip, HSW_EM4, bclk_m);
+-	azx_writew(chip, HSW_EM5, bclk_n);
+-}
+-
+ #if defined(CONFIG_PM_SLEEP) || defined(SUPPORT_VGA_SWITCHEROO)
+ /*
+  * power management
+@@ -958,7 +914,7 @@ static int azx_resume(struct device *dev)
+ 	if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
+ 		&& hda->need_i915_power) {
+ 		snd_hdac_display_power(azx_bus(chip), true);
+-		haswell_set_bclk(hda);
++		snd_hdac_i915_set_bclk(azx_bus(chip));
+ 	}
+ 	if (chip->msi)
+ 		if (pci_enable_msi(pci) < 0)
+@@ -1058,7 +1014,7 @@ static int azx_runtime_resume(struct device *dev)
+ 		bus = azx_bus(chip);
+ 		if (hda->need_i915_power) {
+ 			snd_hdac_display_power(bus, true);
+-			haswell_set_bclk(hda);
++			snd_hdac_i915_set_bclk(bus);
+ 		} else {
+ 			/* toggle codec wakeup bit for STATESTS read */
+ 			snd_hdac_set_codec_wakeup(bus, true);
+@@ -1796,12 +1752,8 @@ static int azx_first_init(struct azx *chip)
+ 	/* initialize chip */
+ 	azx_init_pci(chip);
+ 
+-	if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
+-		struct hda_intel *hda;
+-
+-		hda = container_of(chip, struct hda_intel, chip);
+-		haswell_set_bclk(hda);
+-	}
++	if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
++		snd_hdac_i915_set_bclk(bus);
+ 
+ 	hda_intel_init_chip(chip, (probe_only[dev] & 2) == 0);
+ 
+@@ -2232,6 +2184,9 @@ static const struct pci_device_id azx_ids[] = {
+ 	/* Broxton-P(Apollolake) */
+ 	{ PCI_DEVICE(0x8086, 0x5a98),
+ 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
++	/* Broxton-T */
++	{ PCI_DEVICE(0x8086, 0x1a98),
++	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
+ 	/* Haswell */
+ 	{ PCI_DEVICE(0x8086, 0x0a0c),
+ 	  .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
+diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c
+index 64e0d1d81ca5..9739fce9e032 100644
+--- a/sound/pci/hda/hda_sysfs.c
++++ b/sound/pci/hda/hda_sysfs.c
+@@ -141,14 +141,6 @@ static int reconfig_codec(struct hda_codec *codec)
+ 	err = snd_hda_codec_configure(codec);
+ 	if (err < 0)
+ 		goto error;
+-	/* rebuild PCMs */
+-	err = snd_hda_codec_build_pcms(codec);
+-	if (err < 0)
+-		goto error;
+-	/* rebuild mixers */
+-	err = snd_hda_codec_build_controls(codec);
+-	if (err < 0)
+-		goto error;
+ 	err = snd_card_register(codec->card);
+  error:
+ 	snd_hda_power_down(codec);
+diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
+index c1c855a6c0af..80bbadc83721 100644
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -174,8 +174,12 @@ static void cs_automute(struct hda_codec *codec)
+ 	snd_hda_gen_update_outputs(codec);
+ 
+ 	if (spec->gpio_eapd_hp || spec->gpio_eapd_speaker) {
+-		spec->gpio_data = spec->gen.hp_jack_present ?
+-			spec->gpio_eapd_hp : spec->gpio_eapd_speaker;
++		if (spec->gen.automute_speaker)
++			spec->gpio_data = spec->gen.hp_jack_present ?
++				spec->gpio_eapd_hp : spec->gpio_eapd_speaker;
++		else
++			spec->gpio_data =
++				spec->gpio_eapd_hp | spec->gpio_eapd_speaker;
+ 		snd_hda_codec_write(codec, 0x01, 0,
+ 				    AC_VERB_SET_GPIO_DATA, spec->gpio_data);
+ 	}
+@@ -357,6 +361,7 @@ static int cs_parse_auto_config(struct hda_codec *codec)
+ {
+ 	struct cs_spec *spec = codec->spec;
+ 	int err;
++	int i;
+ 
+ 	err = snd_hda_parse_pin_defcfg(codec, &spec->gen.autocfg, NULL, 0);
+ 	if (err < 0)
+@@ -366,6 +371,19 @@ static int cs_parse_auto_config(struct hda_codec *codec)
+ 	if (err < 0)
+ 		return err;
+ 
++	/* keep the ADCs powered up when it's dynamically switchable */
++	if (spec->gen.dyn_adc_switch) {
++		unsigned int done = 0;
++		for (i = 0; i < spec->gen.input_mux.num_items; i++) {
++			int idx = spec->gen.dyn_adc_idx[i];
++			if (done & (1 << idx))
++				continue;
++			snd_hda_gen_fix_pin_power(codec,
++						  spec->gen.adc_nids[idx]);
++			done |= 1 << idx;
++		}
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 6122b8ca872f..56fefbd85782 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -204,8 +204,13 @@ static void cx_auto_reboot_notify(struct hda_codec *codec)
+ {
+ 	struct conexant_spec *spec = codec->spec;
+ 
+-	if (codec->core.vendor_id != 0x14f150f2)
++	switch (codec->core.vendor_id) {
++	case 0x14f150f2: /* CX20722 */
++	case 0x14f150f4: /* CX20724 */
++		break;
++	default:
+ 		return;
++	}
+ 
+ 	/* Turn the CX20722 codec into D3 to avoid spurious noises
+ 	   from the internal speaker during (and after) reboot */
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index bcbc4ee10130..c98e404afbe0 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -152,13 +152,17 @@ struct hdmi_spec {
+ 	struct hda_pcm_stream pcm_playback;
+ 
+ 	/* i915/powerwell (Haswell+/Valleyview+) specific */
++	bool use_acomp_notifier; /* use i915 eld_notify callback for hotplug */
+ 	struct i915_audio_component_audio_ops i915_audio_ops;
+ 	bool i915_bound; /* was i915 bound in this driver? */
+ };
+ 
+ #ifdef CONFIG_SND_HDA_I915
+-#define codec_has_acomp(codec) \
+-	((codec)->bus->core.audio_component != NULL)
++static inline bool codec_has_acomp(struct hda_codec *codec)
++{
++	struct hdmi_spec *spec = codec->spec;
++	return spec->use_acomp_notifier;
++}
+ #else
+ #define codec_has_acomp(codec)	false
+ #endif
+@@ -1562,6 +1566,7 @@ static void update_eld(struct hda_codec *codec,
+ 			   eld->eld_size) != 0)
+ 			eld_changed = true;
+ 
++	pin_eld->monitor_present = eld->monitor_present;
+ 	pin_eld->eld_valid = eld->eld_valid;
+ 	pin_eld->eld_size = eld->eld_size;
+ 	if (eld->eld_valid)
+@@ -1618,6 +1623,8 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
+ 
+ 	mutex_lock(&per_pin->lock);
+ 	pin_eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE);
++	eld->monitor_present = pin_eld->monitor_present;
++
+ 	if (pin_eld->monitor_present)
+ 		eld->eld_valid  = !!(present & AC_PINSENSE_ELDV);
+ 	else
+@@ -1665,11 +1672,10 @@ static void sync_eld_via_acomp(struct hda_codec *codec,
+ 	int size;
+ 
+ 	mutex_lock(&per_pin->lock);
++	eld->monitor_present = false;
+ 	size = snd_hdac_acomp_get_eld(&codec->bus->core, per_pin->pin_nid,
+ 				      &eld->monitor_present, eld->eld_buffer,
+ 				      ELD_MAX_SIZE);
+-	if (size < 0)
+-		goto unlock;
+ 	if (size > 0) {
+ 		size = min(size, ELD_MAX_SIZE);
+ 		if (snd_hdmi_parse_eld(codec, &eld->info,
+@@ -1873,7 +1879,8 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
+ 
+ 	/* Call sync_audio_rate to set the N/CTS/M manually if necessary */
+ 	/* Todo: add DP1.2 MST audio support later */
+-	snd_hdac_sync_audio_rate(&codec->bus->core, pin_nid, runtime->rate);
++	if (codec_has_acomp(codec))
++		snd_hdac_sync_audio_rate(&codec->bus->core, pin_nid, runtime->rate);
+ 
+ 	non_pcm = check_non_pcm_per_cvt(codec, cvt_nid);
+ 	mutex_lock(&per_pin->lock);
+@@ -2432,6 +2439,10 @@ static void intel_pin_eld_notify(void *audio_ptr, int port)
+ 	struct hda_codec *codec = audio_ptr;
+ 	int pin_nid = port + 0x04;
+ 
++	/* we assume only from port-B to port-D */
++	if (port < 1 || port > 3)
++		return;
++
+ 	/* skip notification during system suspend (but not in runtime PM);
+ 	 * the state will be updated at resume
+ 	 */
+@@ -2441,6 +2452,7 @@ static void intel_pin_eld_notify(void *audio_ptr, int port)
+ 	if (atomic_read(&(codec)->core.in_pm))
+ 		return;
+ 
++	snd_hdac_i915_set_bclk(&codec->bus->core);
+ 	check_presence_and_report(codec, pin_nid);
+ }
+ 
+@@ -2456,11 +2468,24 @@ static int patch_generic_hdmi(struct hda_codec *codec)
+ 	codec->spec = spec;
+ 	hdmi_array_init(spec, 4);
+ 
+-	/* Try to bind with i915 for any Intel codecs (if not done yet) */
+-	if (!codec_has_acomp(codec) &&
+-	    (codec->core.vendor_id >> 16) == 0x8086)
+-		if (!snd_hdac_i915_init(&codec->bus->core))
+-			spec->i915_bound = true;
++#ifdef CONFIG_SND_HDA_I915
++	/* Try to bind with i915 for Intel HSW+ codecs (if not done yet) */
++	if ((codec->core.vendor_id >> 16) == 0x8086 &&
++	    is_haswell_plus(codec)) {
++#if 0
++		/* on-demand binding leads to an unbalanced refcount when
++		 * both i915 and hda drivers are probed concurrently;
++		 * disabled temporarily for now
++		 */
++		if (!codec->bus->core.audio_component)
++			if (!snd_hdac_i915_init(&codec->bus->core))
++				spec->i915_bound = true;
++#endif
++		/* use i915 audio component notifier for hotplug */
++		if (codec->bus->core.audio_component)
++			spec->use_acomp_notifier = true;
++	}
++#endif
+ 
+ 	if (is_haswell_plus(codec)) {
+ 		intel_haswell_enable_all_pins(codec, true);
+@@ -3659,6 +3684,7 @@ HDA_CODEC_ENTRY(0x10de0070, "GPU 70 HDMI/DP",	patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0071, "GPU 71 HDMI/DP",	patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0072, "GPU 72 HDMI/DP",	patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de007d, "GPU 7d HDMI/DP",	patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0082, "GPU 82 HDMI/DP",	patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0083, "GPU 83 HDMI/DP",	patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI",	patch_nvhdmi_2ch),
+ HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP",	patch_via_hdmi),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 93d2156b6241..d53c25e7a1c1 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -335,6 +335,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
+ 	case 0x10ec0283:
+ 	case 0x10ec0286:
+ 	case 0x10ec0288:
++	case 0x10ec0295:
+ 	case 0x10ec0298:
+ 		alc_update_coef_idx(codec, 0x10, 1<<9, 0);
+ 		break;
+@@ -342,6 +343,11 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
+ 	case 0x10ec0293:
+ 		alc_update_coef_idx(codec, 0xa, 1<<13, 0);
+ 		break;
++	case 0x10ec0234:
++	case 0x10ec0274:
++	case 0x10ec0294:
++		alc_update_coef_idx(codec, 0x10, 1<<15, 0);
++		break;
+ 	case 0x10ec0662:
+ 		if ((coef & 0x00f0) == 0x0030)
+ 			alc_update_coef_idx(codec, 0x4, 1<<10, 0); /* EAPD Ctrl */
+@@ -902,6 +908,7 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = {
+ 	{ 0x10ec0298, 0x1028, 0, "ALC3266" },
+ 	{ 0x10ec0256, 0x1028, 0, "ALC3246" },
+ 	{ 0x10ec0225, 0x1028, 0, "ALC3253" },
++	{ 0x10ec0295, 0x1028, 0, "ALC3254" },
+ 	{ 0x10ec0670, 0x1025, 0, "ALC669X" },
+ 	{ 0x10ec0676, 0x1025, 0, "ALC679X" },
+ 	{ 0x10ec0282, 0x1043, 0, "ALC3229" },
+@@ -2647,6 +2654,7 @@ enum {
+ 	ALC269_TYPE_ALC255,
+ 	ALC269_TYPE_ALC256,
+ 	ALC269_TYPE_ALC225,
++	ALC269_TYPE_ALC294,
+ };
+ 
+ /*
+@@ -2677,6 +2685,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
+ 	case ALC269_TYPE_ALC255:
+ 	case ALC269_TYPE_ALC256:
+ 	case ALC269_TYPE_ALC225:
++	case ALC269_TYPE_ALC294:
+ 		ssids = alc269_ssids;
+ 		break;
+ 	default:
+@@ -3690,6 +3699,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
+ 		alc_process_coef_fw(codec, coef0668);
+ 		break;
+ 	case 0x10ec0225:
++	case 0x10ec0295:
+ 		alc_process_coef_fw(codec, coef0225);
+ 		break;
+ 	}
+@@ -3790,6 +3800,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
+ 		snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
+ 		break;
+ 	case 0x10ec0225:
++	case 0x10ec0295:
+ 		alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x31<<10);
+ 		snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
+ 		alc_process_coef_fw(codec, coef0225);
+@@ -3847,6 +3858,7 @@ static void alc_headset_mode_default(struct hda_codec *codec)
+ 
+ 	switch (codec->core.vendor_id) {
+ 	case 0x10ec0225:
++	case 0x10ec0295:
+ 		alc_process_coef_fw(codec, coef0225);
+ 		break;
+ 	case 0x10ec0255:
+@@ -3950,6 +3962,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
+ 		alc_process_coef_fw(codec, coef0688);
+ 		break;
+ 	case 0x10ec0225:
++	case 0x10ec0295:
+ 		alc_process_coef_fw(codec, coef0225);
+ 		break;
+ 	}
+@@ -4031,6 +4044,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
+ 		alc_process_coef_fw(codec, coef0688);
+ 		break;
+ 	case 0x10ec0225:
++	case 0x10ec0295:
+ 		alc_process_coef_fw(codec, coef0225);
+ 		break;
+ 	}
+@@ -4114,6 +4128,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
+ 		is_ctia = (val & 0x1c02) == 0x1c02;
+ 		break;
+ 	case 0x10ec0225:
++	case 0x10ec0295:
+ 		alc_process_coef_fw(codec, coef0225);
+ 		msleep(800);
+ 		val = alc_read_coef_idx(codec, 0x46);
+@@ -4759,6 +4774,8 @@ enum {
+ 	ALC255_FIXUP_DELL_SPK_NOISE,
+ 	ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 	ALC280_FIXUP_HP_HEADSET_MIC,
++	ALC221_FIXUP_HP_FRONT_MIC,
++	ALC292_FIXUP_TPT460,
+ };
+ 
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -5401,6 +5418,19 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC269_FIXUP_HEADSET_MIC,
+ 	},
++	[ALC221_FIXUP_HP_FRONT_MIC] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x19, 0x02a19020 }, /* Front Mic */
++			{ }
++		},
++	},
++	[ALC292_FIXUP_TPT460] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc_fixup_tpt440_dock,
++		.chained = true,
++		.chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE,
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -5434,6 +5464,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13),
++	SND_PCI_QUIRK(0x1028, 0x0669, "Dell Optiplex 9020m", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK),
+ 	SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+@@ -5443,8 +5474,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
+ 	SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
+ 	SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
+-	SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
++	SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ 	SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
++	SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ 	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
+@@ -5506,6 +5538,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
++	SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+ 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+@@ -5554,8 +5587,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
+-	SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
++	SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
+ 	SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
++	SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+ 	SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
+@@ -5566,6 +5600,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
++	SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
+ 	SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+@@ -5648,6 +5683,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ 	{.id = ALC283_FIXUP_SENSE_COMBO_JACK, .name = "alc283-sense-combo"},
+ 	{.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"},
+ 	{.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
++	{.id = ALC292_FIXUP_TPT460, .name = "tpt460"},
+ 	{}
+ };
+ #define ALC225_STANDARD_PINS \
+@@ -5684,6 +5720,9 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ 		{0x14, 0x90170110},
+ 		{0x21, 0x02211020}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
++		{0x14, 0x90170130},
++		{0x21, 0x02211040}),
++	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 		{0x12, 0x90a60140},
+ 		{0x14, 0x90170110},
+ 		{0x21, 0x02211020}),
+@@ -6006,8 +6045,14 @@ static int patch_alc269(struct hda_codec *codec)
+ 		alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
+ 		break;
+ 	case 0x10ec0225:
++	case 0x10ec0295:
+ 		spec->codec_variant = ALC269_TYPE_ALC225;
+ 		break;
++	case 0x10ec0234:
++	case 0x10ec0274:
++	case 0x10ec0294:
++		spec->codec_variant = ALC269_TYPE_ALC294;
++		break;
+ 	}
+ 
+ 	if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) {
+@@ -6405,6 +6450,8 @@ enum {
+ 	ALC668_FIXUP_AUTO_MUTE,
+ 	ALC668_FIXUP_DELL_DISABLE_AAMIX,
+ 	ALC668_FIXUP_DELL_XPS13,
++	ALC662_FIXUP_ASUS_Nx50,
++	ALC668_FIXUP_ASUS_Nx51,
+ };
+ 
+ static const struct hda_fixup alc662_fixups[] = {
+@@ -6645,6 +6692,21 @@ static const struct hda_fixup alc662_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc_fixup_bass_chmap,
+ 	},
++	[ALC662_FIXUP_ASUS_Nx50] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc_fixup_auto_mute_via_amp,
++		.chained = true,
++		.chain_id = ALC662_FIXUP_BASS_1A
++	},
++	[ALC668_FIXUP_ASUS_Nx51] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{0x1a, 0x90170151}, /* bass speaker */
++			{}
++		},
++		.chained = true,
++		.chain_id = ALC662_FIXUP_BASS_CHMAP,
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+@@ -6667,10 +6729,14 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
+-	SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A),
++	SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
++	SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
+ 	SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
++	SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50),
+ 	SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
+ 	SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
++	SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
++	SND_PCI_QUIRK(0x1043, 0x17bd, "ASUS N751", ALC668_FIXUP_ASUS_Nx51),
+ 	SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16),
+ 	SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
+ 	SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
+@@ -6901,6 +6967,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
+ 	HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269),
+ 	HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269),
+ 	HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
++	HDA_CODEC_ENTRY(0x10ec0234, "ALC234", patch_alc269),
+ 	HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269),
+ 	HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269),
+ 	HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269),
+@@ -6911,6 +6978,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
+ 	HDA_CODEC_ENTRY(0x10ec0269, "ALC269", patch_alc269),
+ 	HDA_CODEC_ENTRY(0x10ec0270, "ALC270", patch_alc269),
+ 	HDA_CODEC_ENTRY(0x10ec0272, "ALC272", patch_alc662),
++	HDA_CODEC_ENTRY(0x10ec0274, "ALC274", patch_alc269),
+ 	HDA_CODEC_ENTRY(0x10ec0275, "ALC275", patch_alc269),
+ 	HDA_CODEC_ENTRY(0x10ec0276, "ALC276", patch_alc269),
+ 	HDA_CODEC_ENTRY(0x10ec0280, "ALC280", patch_alc269),
+@@ -6923,6 +6991,8 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
+ 	HDA_CODEC_ENTRY(0x10ec0290, "ALC290", patch_alc269),
+ 	HDA_CODEC_ENTRY(0x10ec0292, "ALC292", patch_alc269),
+ 	HDA_CODEC_ENTRY(0x10ec0293, "ALC293", patch_alc269),
++	HDA_CODEC_ENTRY(0x10ec0294, "ALC294", patch_alc269),
++	HDA_CODEC_ENTRY(0x10ec0295, "ALC295", patch_alc269),
+ 	HDA_CODEC_ENTRY(0x10ec0298, "ALC298", patch_alc269),
+ 	HDA_CODEC_REV_ENTRY(0x10ec0861, 0x100340, "ALC660", patch_alc861),
+ 	HDA_CODEC_ENTRY(0x10ec0660, "ALC660-VD", patch_alc861vd),
+diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
+index 42bcbac801a3..ccdab29a8b66 100644
+--- a/sound/pci/intel8x0.c
++++ b/sound/pci/intel8x0.c
+@@ -2879,6 +2879,7 @@ static void intel8x0_measure_ac97_clock(struct intel8x0 *chip)
+ 
+ static struct snd_pci_quirk intel8x0_clock_list[] = {
+ 	SND_PCI_QUIRK(0x0e11, 0x008a, "AD1885", 41000),
++	SND_PCI_QUIRK(0x1014, 0x0581, "AD1981B", 48000),
+ 	SND_PCI_QUIRK(0x1028, 0x00be, "AD1885", 44100),
+ 	SND_PCI_QUIRK(0x1028, 0x0177, "AD1980", 48000),
+ 	SND_PCI_QUIRK(0x1028, 0x01ad, "AD1981B", 48000),
+diff --git a/sound/pci/pcxhr/pcxhr_core.c b/sound/pci/pcxhr/pcxhr_core.c
+index c5194f5b150a..d7e71f309299 100644
+--- a/sound/pci/pcxhr/pcxhr_core.c
++++ b/sound/pci/pcxhr/pcxhr_core.c
+@@ -1341,5 +1341,6 @@ irqreturn_t pcxhr_threaded_irq(int irq, void *dev_id)
+ 	}
+ 
+ 	pcxhr_msg_thread(mgr);
++	mutex_unlock(&mgr->lock);
+ 	return IRQ_HANDLED;
+ }
+diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
+index 11d032cdc658..48dbb2fdeb09 100644
+--- a/sound/soc/codecs/rt5640.c
++++ b/sound/soc/codecs/rt5640.c
+@@ -359,7 +359,7 @@ static const DECLARE_TLV_DB_RANGE(bst_tlv,
+ 
+ /* Interface data select */
+ static const char * const rt5640_data_select[] = {
+-	"Normal", "left copy to right", "right copy to left", "Swap"};
++	"Normal", "Swap", "left copy to right", "right copy to left"};
+ 
+ static SOC_ENUM_SINGLE_DECL(rt5640_if1_dac_enum, RT5640_DIG_INF_DATA,
+ 			    RT5640_IF1_DAC_SEL_SFT, rt5640_data_select);
+diff --git a/sound/soc/codecs/rt5640.h b/sound/soc/codecs/rt5640.h
+index 83a7150ddc24..f84231e7d1dd 100644
+--- a/sound/soc/codecs/rt5640.h
++++ b/sound/soc/codecs/rt5640.h
+@@ -442,39 +442,39 @@
+ #define RT5640_IF1_DAC_SEL_MASK			(0x3 << 14)
+ #define RT5640_IF1_DAC_SEL_SFT			14
+ #define RT5640_IF1_DAC_SEL_NOR			(0x0 << 14)
+-#define RT5640_IF1_DAC_SEL_L2R			(0x1 << 14)
+-#define RT5640_IF1_DAC_SEL_R2L			(0x2 << 14)
+-#define RT5640_IF1_DAC_SEL_SWAP			(0x3 << 14)
++#define RT5640_IF1_DAC_SEL_SWAP			(0x1 << 14)
++#define RT5640_IF1_DAC_SEL_L2R			(0x2 << 14)
++#define RT5640_IF1_DAC_SEL_R2L			(0x3 << 14)
+ #define RT5640_IF1_ADC_SEL_MASK			(0x3 << 12)
+ #define RT5640_IF1_ADC_SEL_SFT			12
+ #define RT5640_IF1_ADC_SEL_NOR			(0x0 << 12)
+-#define RT5640_IF1_ADC_SEL_L2R			(0x1 << 12)
+-#define RT5640_IF1_ADC_SEL_R2L			(0x2 << 12)
+-#define RT5640_IF1_ADC_SEL_SWAP			(0x3 << 12)
++#define RT5640_IF1_ADC_SEL_SWAP			(0x1 << 12)
++#define RT5640_IF1_ADC_SEL_L2R			(0x2 << 12)
++#define RT5640_IF1_ADC_SEL_R2L			(0x3 << 12)
+ #define RT5640_IF2_DAC_SEL_MASK			(0x3 << 10)
+ #define RT5640_IF2_DAC_SEL_SFT			10
+ #define RT5640_IF2_DAC_SEL_NOR			(0x0 << 10)
+-#define RT5640_IF2_DAC_SEL_L2R			(0x1 << 10)
+-#define RT5640_IF2_DAC_SEL_R2L			(0x2 << 10)
+-#define RT5640_IF2_DAC_SEL_SWAP			(0x3 << 10)
++#define RT5640_IF2_DAC_SEL_SWAP			(0x1 << 10)
++#define RT5640_IF2_DAC_SEL_L2R			(0x2 << 10)
++#define RT5640_IF2_DAC_SEL_R2L			(0x3 << 10)
+ #define RT5640_IF2_ADC_SEL_MASK			(0x3 << 8)
+ #define RT5640_IF2_ADC_SEL_SFT			8
+ #define RT5640_IF2_ADC_SEL_NOR			(0x0 << 8)
+-#define RT5640_IF2_ADC_SEL_L2R			(0x1 << 8)
+-#define RT5640_IF2_ADC_SEL_R2L			(0x2 << 8)
+-#define RT5640_IF2_ADC_SEL_SWAP			(0x3 << 8)
++#define RT5640_IF2_ADC_SEL_SWAP			(0x1 << 8)
++#define RT5640_IF2_ADC_SEL_L2R			(0x2 << 8)
++#define RT5640_IF2_ADC_SEL_R2L			(0x3 << 8)
+ #define RT5640_IF3_DAC_SEL_MASK			(0x3 << 6)
+ #define RT5640_IF3_DAC_SEL_SFT			6
+ #define RT5640_IF3_DAC_SEL_NOR			(0x0 << 6)
+-#define RT5640_IF3_DAC_SEL_L2R			(0x1 << 6)
+-#define RT5640_IF3_DAC_SEL_R2L			(0x2 << 6)
+-#define RT5640_IF3_DAC_SEL_SWAP			(0x3 << 6)
++#define RT5640_IF3_DAC_SEL_SWAP			(0x1 << 6)
++#define RT5640_IF3_DAC_SEL_L2R			(0x2 << 6)
++#define RT5640_IF3_DAC_SEL_R2L			(0x3 << 6)
+ #define RT5640_IF3_ADC_SEL_MASK			(0x3 << 4)
+ #define RT5640_IF3_ADC_SEL_SFT			4
+ #define RT5640_IF3_ADC_SEL_NOR			(0x0 << 4)
+-#define RT5640_IF3_ADC_SEL_L2R			(0x1 << 4)
+-#define RT5640_IF3_ADC_SEL_R2L			(0x2 << 4)
+-#define RT5640_IF3_ADC_SEL_SWAP			(0x3 << 4)
++#define RT5640_IF3_ADC_SEL_SWAP			(0x1 << 4)
++#define RT5640_IF3_ADC_SEL_L2R			(0x2 << 4)
++#define RT5640_IF3_ADC_SEL_R2L			(0x3 << 4)
+ 
+ /* REC Left Mixer Control 1 (0x3b) */
+ #define RT5640_G_HP_L_RM_L_MASK			(0x7 << 13)
+diff --git a/sound/soc/codecs/ssm4567.c b/sound/soc/codecs/ssm4567.c
+index e619d5651b09..080c78e88e10 100644
+--- a/sound/soc/codecs/ssm4567.c
++++ b/sound/soc/codecs/ssm4567.c
+@@ -352,6 +352,11 @@ static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable)
+ 	regcache_cache_only(ssm4567->regmap, !enable);
+ 
+ 	if (enable) {
++		ret = regmap_write(ssm4567->regmap, SSM4567_REG_SOFT_RESET,
++			0x00);
++		if (ret)
++			return ret;
++
+ 		ret = regmap_update_bits(ssm4567->regmap,
+ 			SSM4567_REG_POWER_CTRL,
+ 			SSM4567_POWER_SPWDN, 0x00);
+diff --git a/sound/soc/samsung/s3c-i2s-v2.c b/sound/soc/samsung/s3c-i2s-v2.c
+index df65c5b494b1..b6ab3fc5789e 100644
+--- a/sound/soc/samsung/s3c-i2s-v2.c
++++ b/sound/soc/samsung/s3c-i2s-v2.c
+@@ -709,7 +709,7 @@ static int s3c2412_i2s_resume(struct snd_soc_dai *dai)
+ #endif
+ 
+ int s3c_i2sv2_register_component(struct device *dev, int id,
+-			   struct snd_soc_component_driver *cmp_drv,
++			   const struct snd_soc_component_driver *cmp_drv,
+ 			   struct snd_soc_dai_driver *dai_drv)
+ {
+ 	struct snd_soc_dai_ops *ops = (struct snd_soc_dai_ops *)dai_drv->ops;
+diff --git a/sound/soc/samsung/s3c-i2s-v2.h b/sound/soc/samsung/s3c-i2s-v2.h
+index 90abab364b49..d0684145ed1f 100644
+--- a/sound/soc/samsung/s3c-i2s-v2.h
++++ b/sound/soc/samsung/s3c-i2s-v2.h
+@@ -101,7 +101,7 @@ extern int s3c_i2sv2_probe(struct snd_soc_dai *dai,
+  * soc core.
+  */
+ extern int s3c_i2sv2_register_component(struct device *dev, int id,
+-					struct snd_soc_component_driver *cmp_drv,
++					const struct snd_soc_component_driver *cmp_drv,
+ 					struct snd_soc_dai_driver *dai_drv);
+ 
+ #endif /* __SND_SOC_S3C24XX_S3C_I2SV2_I2S_H */
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index 581175a51ecf..5e811dc02fb9 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -2188,6 +2188,13 @@ static ssize_t dapm_widget_show_component(struct snd_soc_component *cmpnt,
+ 	int count = 0;
+ 	char *state = "not set";
+ 
++	/* card won't be set for the dummy component, as a spot fix
++	 * we're checking for that case specifically here but in future
++	 * we will ensure that the dummy component looks like others.
++	 */
++	if (!cmpnt->card)
++		return 0;
++
+ 	list_for_each_entry(w, &cmpnt->card->widgets, list) {
+ 		if (w->dapm != dapm)
+ 			continue;
+diff --git a/sound/usb/clock.c b/sound/usb/clock.c
+index 2ed260b10f6d..7ccbcaf6a147 100644
+--- a/sound/usb/clock.c
++++ b/sound/usb/clock.c
+@@ -285,6 +285,8 @@ static int set_sample_rate_v1(struct snd_usb_audio *chip, int iface,
+ 	unsigned char data[3];
+ 	int err, crate;
+ 
++	if (get_iface_desc(alts)->bNumEndpoints < 1)
++		return -EINVAL;
+ 	ep = get_endpoint(alts, 0)->bEndpointAddress;
+ 
+ 	/* if endpoint doesn't have sampling rate control, bail out */
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 7b1cb365ffab..c07a7eda42a2 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -438,6 +438,9 @@ exit_clear:
+  *
+  * New endpoints will be added to chip->ep_list and must be freed by
+  * calling snd_usb_endpoint_free().
++ *
++ * For SND_USB_ENDPOINT_TYPE_SYNC, the caller needs to guarantee that
++ * bNumEndpoints > 1 beforehand.
+  */
+ struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
+ 					      struct usb_host_interface *alts,
+diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
+index ddca6547399b..1f8fb0d904e0 100644
+--- a/sound/usb/mixer_maps.c
++++ b/sound/usb/mixer_maps.c
+@@ -349,6 +349,16 @@ static struct usbmix_name_map bose_companion5_map[] = {
+ };
+ 
+ /*
++ * Dell usb dock with ALC4020 codec had a firmware problem where it got
++ * screwed up when zero volume is passed; just skip it as a workaround
++ */
++static const struct usbmix_name_map dell_alc4020_map[] = {
++	{ 16, NULL },
++	{ 19, NULL },
++	{ 0 }
++};
++
++/*
+  * Control map entries
+  */
+ 
+@@ -431,6 +441,10 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
+ 		.map = aureon_51_2_map,
+ 	},
+ 	{
++		.id = USB_ID(0x0bda, 0x4014),
++		.map = dell_alc4020_map,
++	},
++	{
+ 		.id = USB_ID(0x0dba, 0x1000),
+ 		.map = mbox1_map,
+ 	},
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index 279025650568..f6c3bf79af9a 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -1519,7 +1519,11 @@ static int snd_microii_spdif_default_get(struct snd_kcontrol *kcontrol,
+ 
+ 	/* use known values for that card: interface#1 altsetting#1 */
+ 	iface = usb_ifnum_to_if(chip->dev, 1);
++	if (!iface || iface->num_altsetting < 2)
++		return -EINVAL;
+ 	alts = &iface->altsetting[1];
++	if (get_iface_desc(alts)->bNumEndpoints < 1)
++		return -EINVAL;
+ 	ep = get_endpoint(alts, 0)->bEndpointAddress;
+ 
+ 	err = snd_usb_ctl_msg(chip->dev,
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index 9245f52d43bd..44d178ee9177 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -159,6 +159,8 @@ static int init_pitch_v1(struct snd_usb_audio *chip, int iface,
+ 	unsigned char data[1];
+ 	int err;
+ 
++	if (get_iface_desc(alts)->bNumEndpoints < 1)
++		return -EINVAL;
+ 	ep = get_endpoint(alts, 0)->bEndpointAddress;
+ 
+ 	data[0] = 1;
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index c458d60d5030..db11ecf0b74d 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -150,6 +150,7 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
+ 		usb_audio_err(chip, "cannot memdup\n");
+ 		return -ENOMEM;
+ 	}
++	INIT_LIST_HEAD(&fp->list);
+ 	if (fp->nr_rates > MAX_NR_RATES) {
+ 		kfree(fp);
+ 		return -EINVAL;
+@@ -167,19 +168,20 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
+ 	stream = (fp->endpoint & USB_DIR_IN)
+ 		? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
+ 	err = snd_usb_add_audio_stream(chip, stream, fp);
+-	if (err < 0) {
+-		kfree(fp);
+-		kfree(rate_table);
+-		return err;
+-	}
++	if (err < 0)
++		goto error;
+ 	if (fp->iface != get_iface_desc(&iface->altsetting[0])->bInterfaceNumber ||
+ 	    fp->altset_idx >= iface->num_altsetting) {
+-		kfree(fp);
+-		kfree(rate_table);
+-		return -EINVAL;
++		err = -EINVAL;
++		goto error;
+ 	}
+ 	alts = &iface->altsetting[fp->altset_idx];
+ 	altsd = get_iface_desc(alts);
++	if (altsd->bNumEndpoints < 1) {
++		err = -EINVAL;
++		goto error;
++	}
++
+ 	fp->protocol = altsd->bInterfaceProtocol;
+ 
+ 	if (fp->datainterval == 0)
+@@ -190,6 +192,12 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
+ 	snd_usb_init_pitch(chip, fp->iface, alts, fp);
+ 	snd_usb_init_sample_rate(chip, fp->iface, alts, fp, fp->rate_max);
+ 	return 0;
++
++ error:
++	list_del(&fp->list); /* unlink for avoiding double-free */
++	kfree(fp);
++	kfree(rate_table);
++	return err;
+ }
+ 
+ static int create_auto_pcm_quirk(struct snd_usb_audio *chip,
+@@ -462,6 +470,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
+ 	fp->ep_attr = get_endpoint(alts, 0)->bmAttributes;
+ 	fp->datainterval = 0;
+ 	fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize);
++	INIT_LIST_HEAD(&fp->list);
+ 
+ 	switch (fp->maxpacksize) {
+ 	case 0x120:
+@@ -485,6 +494,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
+ 		? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
+ 	err = snd_usb_add_audio_stream(chip, stream, fp);
+ 	if (err < 0) {
++		list_del(&fp->list); /* unlink for avoiding double-free */
+ 		kfree(fp);
+ 		return err;
+ 	}
+@@ -1121,12 +1131,18 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+ 	switch (chip->usb_id) {
+ 	case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema  */
+ 	case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
++	case USB_ID(0x045E, 0x076E): /* MS Lifecam HD-5001 */
+ 	case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
+ 	case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
+ 	case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
++	case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
+ 	case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
+ 	case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
++	case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
+ 	case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
++	case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
++	case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
++	case USB_ID(0x1de7, 0x0114): /* Phoenix Audio MT202pcs */
+ 	case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
+ 		return true;
+ 	}
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index c4dc577ab1bd..8e9548bc1f1a 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -314,7 +314,9 @@ static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits,
+ /*
+  * add this endpoint to the chip instance.
+  * if a stream with the same endpoint already exists, append to it.
+- * if not, create a new pcm stream.
++ * if not, create a new pcm stream. note, fp is added to the substream
++ * fmt_list and will be freed on the chip instance release. do not free
++ * fp or do remove it from the substream fmt_list to avoid double-free.
+  */
+ int snd_usb_add_audio_stream(struct snd_usb_audio *chip,
+ 			     int stream,
+@@ -675,6 +677,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
+ 					* (fp->maxpacksize & 0x7ff);
+ 		fp->attributes = parse_uac_endpoint_attributes(chip, alts, protocol, iface_no);
+ 		fp->clock = clock;
++		INIT_LIST_HEAD(&fp->list);
+ 
+ 		/* some quirks for attributes here */
+ 
+@@ -723,6 +726,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
+ 		dev_dbg(&dev->dev, "%u:%d: add audio endpoint %#x\n", iface_no, altno, fp->endpoint);
+ 		err = snd_usb_add_audio_stream(chip, stream, fp);
+ 		if (err < 0) {
++			list_del(&fp->list); /* unlink for avoiding double-free */
+ 			kfree(fp->rate_table);
+ 			kfree(fp->chmap);
+ 			kfree(fp);
+diff --git a/tools/hv/Makefile b/tools/hv/Makefile
+index a8ab79556926..a8c4644022a6 100644
+--- a/tools/hv/Makefile
++++ b/tools/hv/Makefile
+@@ -5,6 +5,8 @@ PTHREAD_LIBS = -lpthread
+ WARNINGS = -Wall -Wextra
+ CFLAGS = $(WARNINGS) -g $(PTHREAD_LIBS) $(shell getconf LFS_CFLAGS)
+ 
++CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
++
+ all: hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon
+ %: %.c
+ 	$(CC) $(CFLAGS) -o $@ $^
+diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
+index 0144b3d1bb77..88cccea3ca99 100644
+--- a/tools/lib/traceevent/parse-filter.c
++++ b/tools/lib/traceevent/parse-filter.c
+@@ -1164,11 +1164,11 @@ process_filter(struct event_format *event, struct filter_arg **parg,
+ 		current_op = current_exp;
+ 
+ 	ret = collapse_tree(current_op, parg, error_str);
++	/* collapse_tree() may free current_op, and updates parg accordingly */
++	current_op = NULL;
+ 	if (ret < 0)
+ 		goto fail;
+ 
+-	*parg = current_op;
+-
+ 	free(token);
+ 	return 0;
+ 
+diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
+index 52ef7a9d50aa..14d9e8ffaff7 100644
+--- a/tools/perf/Documentation/perf-stat.txt
++++ b/tools/perf/Documentation/perf-stat.txt
+@@ -69,6 +69,14 @@ report::
+ --scale::
+ 	scale/normalize counter values
+ 
++-d::
++--detailed::
++	print more detailed statistics, can be specified up to 3 times
++
++	   -d:          detailed events, L1 and LLC data cache
++        -d -d:     more detailed events, dTLB and iTLB events
++     -d -d -d:     very detailed events, adding prefetch events
++
+ -r::
+ --repeat=<n>::
+ 	repeat command and print average + stddev (max: 100). 0 means forever.
+diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
+index 08c09ad755d2..7bb47424bc49 100644
+--- a/tools/perf/ui/browsers/hists.c
++++ b/tools/perf/ui/browsers/hists.c
+@@ -302,7 +302,7 @@ static void callchain_node__init_have_children(struct callchain_node *node,
+ 	chain = list_entry(node->val.next, struct callchain_list, list);
+ 	chain->has_children = has_sibling;
+ 
+-	if (node->val.next != node->val.prev) {
++	if (!list_empty(&node->val)) {
+ 		chain = list_entry(node->val.prev, struct callchain_list, list);
+ 		chain->has_children = !RB_EMPTY_ROOT(&node->rb_root);
+ 	}
+@@ -844,7 +844,7 @@ next:
+ 	return row - first_row;
+ }
+ 
+-static int hist_browser__show_callchain(struct hist_browser *browser,
++static int hist_browser__show_callchain_graph(struct hist_browser *browser,
+ 					struct rb_root *root, int level,
+ 					unsigned short row, u64 total,
+ 					print_callchain_entry_fn print,
+@@ -898,7 +898,7 @@ static int hist_browser__show_callchain(struct hist_browser *browser,
+ 			else
+ 				new_total = total;
+ 
+-			row += hist_browser__show_callchain(browser, &child->rb_root,
++			row += hist_browser__show_callchain_graph(browser, &child->rb_root,
+ 							    new_level, row, new_total,
+ 							    print, arg, is_output_full);
+ 		}
+@@ -910,6 +910,43 @@ out:
+ 	return row - first_row;
+ }
+ 
++static int hist_browser__show_callchain(struct hist_browser *browser,
++					struct hist_entry *entry, int level,
++					unsigned short row,
++					print_callchain_entry_fn print,
++					struct callchain_print_arg *arg,
++					check_output_full_fn is_output_full)
++{
++	u64 total = hists__total_period(entry->hists);
++	int printed;
++
++	if (callchain_param.mode == CHAIN_GRAPH_REL) {
++		if (symbol_conf.cumulate_callchain)
++			total = entry->stat_acc->period;
++		else
++			total = entry->stat.period;
++	}
++
++	if (callchain_param.mode == CHAIN_FLAT) {
++		printed = hist_browser__show_callchain_flat(browser,
++						&entry->sorted_chain, row, total,
++						print, arg, is_output_full);
++	} else if (callchain_param.mode == CHAIN_FOLDED) {
++		printed = hist_browser__show_callchain_folded(browser,
++						&entry->sorted_chain, row, total,
++						print, arg, is_output_full);
++	} else {
++		printed = hist_browser__show_callchain_graph(browser,
++						&entry->sorted_chain, level, row, total,
++						print, arg, is_output_full);
++	}
++
++	if (arg->is_current_entry)
++		browser->he_selection = entry;
++
++	return printed;
++}
++
+ struct hpp_arg {
+ 	struct ui_browser *b;
+ 	char folded_sign;
+@@ -1084,38 +1121,14 @@ static int hist_browser__show_entry(struct hist_browser *browser,
+ 		--row_offset;
+ 
+ 	if (folded_sign == '-' && row != browser->b.rows) {
+-		u64 total = hists__total_period(entry->hists);
+ 		struct callchain_print_arg arg = {
+ 			.row_offset = row_offset,
+ 			.is_current_entry = current_entry,
+ 		};
+ 
+-		if (callchain_param.mode == CHAIN_GRAPH_REL) {
+-			if (symbol_conf.cumulate_callchain)
+-				total = entry->stat_acc->period;
+-			else
+-				total = entry->stat.period;
+-		}
+-
+-		if (callchain_param.mode == CHAIN_FLAT) {
+-			printed += hist_browser__show_callchain_flat(browser,
+-					&entry->sorted_chain, row, total,
++		printed += hist_browser__show_callchain(browser, entry, 1, row,
+ 					hist_browser__show_callchain_entry, &arg,
+ 					hist_browser__check_output_full);
+-		} else if (callchain_param.mode == CHAIN_FOLDED) {
+-			printed += hist_browser__show_callchain_folded(browser,
+-					&entry->sorted_chain, row, total,
+-					hist_browser__show_callchain_entry, &arg,
+-					hist_browser__check_output_full);
+-		} else {
+-			printed += hist_browser__show_callchain(browser,
+-					&entry->sorted_chain, 1, row, total,
+-					hist_browser__show_callchain_entry, &arg,
+-					hist_browser__check_output_full);
+-		}
+-
+-		if (arg.is_current_entry)
+-			browser->he_selection = entry;
+ 	}
+ 
+ 	return printed;
+@@ -1380,15 +1393,11 @@ do_offset:
+ static int hist_browser__fprintf_callchain(struct hist_browser *browser,
+ 					   struct hist_entry *he, FILE *fp)
+ {
+-	u64 total = hists__total_period(he->hists);
+ 	struct callchain_print_arg arg  = {
+ 		.fp = fp,
+ 	};
+ 
+-	if (symbol_conf.cumulate_callchain)
+-		total = he->stat_acc->period;
+-
+-	hist_browser__show_callchain(browser, &he->sorted_chain, 1, 0, total,
++	hist_browser__show_callchain(browser, he, 1, 0,
+ 				     hist_browser__fprintf_callchain_entry, &arg,
+ 				     hist_browser__check_dump_full);
+ 	return arg.printed;
+@@ -2320,10 +2329,12 @@ skip_annotation:
+ 			 *
+ 			 * See hist_browser__show_entry.
+ 			 */
+-			nr_options += add_script_opt(browser,
+-						     &actions[nr_options],
+-						     &options[nr_options],
+-						     NULL, browser->selection->sym);
++			if (sort__has_sym && browser->selection->sym) {
++				nr_options += add_script_opt(browser,
++							     &actions[nr_options],
++							     &options[nr_options],
++							     NULL, browser->selection->sym);
++			}
+ 		}
+ 		nr_options += add_script_opt(browser, &actions[nr_options],
+ 					     &options[nr_options], NULL, NULL);
+diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
+index 85155e91b61b..7bad5c3fa7b7 100644
+--- a/tools/perf/util/event.c
++++ b/tools/perf/util/event.c
+@@ -282,7 +282,7 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
+ 		strcpy(execname, "");
+ 
+ 		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
+-		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n",
++		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
+ 		       &event->mmap2.start, &event->mmap2.len, prot,
+ 		       &event->mmap2.pgoff, &event->mmap2.maj,
+ 		       &event->mmap2.min,
+diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
+index d81f13de2476..a7eb0eae9938 100644
+--- a/tools/perf/util/evlist.c
++++ b/tools/perf/util/evlist.c
+@@ -1181,12 +1181,12 @@ void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
+ 	 */
+ 	if (cpus != evlist->cpus) {
+ 		cpu_map__put(evlist->cpus);
+-		evlist->cpus = cpus;
++		evlist->cpus = cpu_map__get(cpus);
+ 	}
+ 
+ 	if (threads != evlist->threads) {
+ 		thread_map__put(evlist->threads);
+-		evlist->threads = threads;
++		evlist->threads = thread_map__get(threads);
+ 	}
+ 
+ 	perf_evlist__propagate_maps(evlist);
+diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
+index 8e75434bd01c..4d8037a3d8a4 100644
+--- a/tools/perf/util/evsel.h
++++ b/tools/perf/util/evsel.h
+@@ -93,10 +93,8 @@ struct perf_evsel {
+ 	const char		*unit;
+ 	struct event_format	*tp_format;
+ 	off_t			id_offset;
+-	union {
+-		void		*priv;
+-		u64		db_id;
+-	};
++	void			*priv;
++	u64			db_id;
+ 	struct cgroup_sel	*cgrp;
+ 	void			*handler;
+ 	struct cpu_map		*cpus;
+diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
+index 05d815851be1..4e1590ba8902 100644
+--- a/tools/perf/util/intel-pt.c
++++ b/tools/perf/util/intel-pt.c
+@@ -1127,7 +1127,7 @@ static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
+ 		pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n",
+ 		       ret);
+ 
+-	if (pt->synth_opts.callchain)
++	if (pt->synth_opts.last_branch)
+ 		intel_pt_reset_last_branch_rb(ptq);
+ 
+ 	return ret;
+diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
+index 813d9b272c81..48a1c5e7dc0d 100644
+--- a/tools/perf/util/parse-events.c
++++ b/tools/perf/util/parse-events.c
+@@ -2101,11 +2101,11 @@ char *parse_events_formats_error_string(char *additional_terms)
+ 
+ 	/* valid terms */
+ 	if (additional_terms) {
+-		if (!asprintf(&str, "valid terms: %s,%s",
+-			      additional_terms, static_terms))
++		if (asprintf(&str, "valid terms: %s,%s",
++			     additional_terms, static_terms) < 0)
+ 			goto fail;
+ 	} else {
+-		if (!asprintf(&str, "valid terms: %s", static_terms))
++		if (asprintf(&str, "valid terms: %s", static_terms) < 0)
+ 			goto fail;
+ 	}
+ 	return str;
+diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
+index b597bcc8fc78..37b4f5070353 100644
+--- a/tools/perf/util/pmu.c
++++ b/tools/perf/util/pmu.c
+@@ -284,13 +284,12 @@ static int pmu_aliases_parse(char *dir, struct list_head *head)
+ {
+ 	struct dirent *evt_ent;
+ 	DIR *event_dir;
+-	int ret = 0;
+ 
+ 	event_dir = opendir(dir);
+ 	if (!event_dir)
+ 		return -EINVAL;
+ 
+-	while (!ret && (evt_ent = readdir(event_dir))) {
++	while ((evt_ent = readdir(event_dir))) {
+ 		char path[PATH_MAX];
+ 		char *name = evt_ent->d_name;
+ 		FILE *file;
+@@ -306,17 +305,19 @@ static int pmu_aliases_parse(char *dir, struct list_head *head)
+ 
+ 		snprintf(path, PATH_MAX, "%s/%s", dir, name);
+ 
+-		ret = -EINVAL;
+ 		file = fopen(path, "r");
+-		if (!file)
+-			break;
++		if (!file) {
++			pr_debug("Cannot open %s\n", path);
++			continue;
++		}
+ 
+-		ret = perf_pmu__new_alias(head, dir, name, file);
++		if (perf_pmu__new_alias(head, dir, name, file) < 0)
++			pr_debug("Cannot set up %s\n", name);
+ 		fclose(file);
+ 	}
+ 
+ 	closedir(event_dir);
+-	return ret;
++	return 0;
+ }
+ 
+ /*
+diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
+index 1833103768cb..c8680984d2d6 100644
+--- a/tools/perf/util/setup.py
++++ b/tools/perf/util/setup.py
+@@ -22,6 +22,7 @@ cflags = getenv('CFLAGS', '').split()
+ # switch off several checks (need to be at the end of cflags list)
+ cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]
+ 
++src_perf  = getenv('srctree') + '/tools/perf'
+ build_lib = getenv('PYTHON_EXTBUILD_LIB')
+ build_tmp = getenv('PYTHON_EXTBUILD_TMP')
+ libtraceevent = getenv('LIBTRACEEVENT')
+@@ -30,6 +31,9 @@ libapikfs = getenv('LIBAPI')
+ ext_sources = [f.strip() for f in file('util/python-ext-sources')
+ 				if len(f.strip()) > 0 and f[0] != '#']
+ 
++# use full paths with source files
++ext_sources = map(lambda x: '%s/%s' % (src_perf, x) , ext_sources)
++
+ perf = Extension('perf',
+ 		  sources = ext_sources,
+ 		  include_dirs = ['util/include'],
+diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
+index ec722346e6ff..16892a7ca27e 100644
+--- a/tools/perf/util/sort.c
++++ b/tools/perf/util/sort.c
+@@ -2272,6 +2272,9 @@ static char *prefix_if_not_in(const char *pre, char *str)
+ 
+ static char *setup_overhead(char *keys)
+ {
++	if (sort__mode == SORT_MODE__DIFF)
++		return keys;
++
+ 	keys = prefix_if_not_in("overhead", keys);
+ 
+ 	if (symbol_conf.cumulate_callchain)
+diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
+index ea6064696fe4..a7b9022b5c8f 100644
+--- a/virt/kvm/arm/arch_timer.c
++++ b/virt/kvm/arm/arch_timer.c
+@@ -86,6 +86,8 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
+ 	vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
+ 	vcpu->arch.timer_cpu.armed = false;
+ 
++	WARN_ON(!kvm_timer_should_fire(vcpu));
++
+ 	/*
+ 	 * If the vcpu is blocked we want to wake it up so that it will see
+ 	 * the timer has expired when entering the guest.
+@@ -93,10 +95,46 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
+ 	kvm_vcpu_kick(vcpu);
+ }
+ 
++static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu)
++{
++	cycle_t cval, now;
++
++	cval = vcpu->arch.timer_cpu.cntv_cval;
++	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
++
++	if (now < cval) {
++		u64 ns;
++
++		ns = cyclecounter_cyc2ns(timecounter->cc,
++					 cval - now,
++					 timecounter->mask,
++					 &timecounter->frac);
++		return ns;
++	}
++
++	return 0;
++}
++
+ static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
+ {
+ 	struct arch_timer_cpu *timer;
++	struct kvm_vcpu *vcpu;
++	u64 ns;
++
+ 	timer = container_of(hrt, struct arch_timer_cpu, timer);
++	vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);
++
++	/*
++	 * Check that the timer has really expired from the guest's
++	 * PoV (NTP on the host may have forced it to expire
++	 * early). If we should have slept longer, restart it.
++	 */
++	ns = kvm_timer_compute_delta(vcpu);
++	if (unlikely(ns)) {
++		hrtimer_forward_now(hrt, ns_to_ktime(ns));
++		return HRTIMER_RESTART;
++	}
++
+ 	queue_work(wqueue, &timer->expired);
+ 	return HRTIMER_NORESTART;
+ }
+@@ -170,8 +208,6 @@ static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
+ void kvm_timer_schedule(struct kvm_vcpu *vcpu)
+ {
+ 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+-	u64 ns;
+-	cycle_t cval, now;
+ 
+ 	BUG_ON(timer_is_armed(timer));
+ 
+@@ -191,14 +227,7 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu)
+ 		return;
+ 
+ 	/*  The timer has not yet expired, schedule a background timer */
+-	cval = timer->cntv_cval;
+-	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+-
+-	ns = cyclecounter_cyc2ns(timecounter->cc,
+-				 cval - now,
+-				 timecounter->mask,
+-				 &timecounter->frac);
+-	timer_arm(timer, ns);
++	timer_arm(timer, kvm_timer_compute_delta(vcpu));
+ }
+ 
+ void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 9102ae172d2a..298473707f17 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -537,6 +537,16 @@ static struct kvm *kvm_create_vm(unsigned long type)
+ 	if (!kvm)
+ 		return ERR_PTR(-ENOMEM);
+ 
++	spin_lock_init(&kvm->mmu_lock);
++	atomic_inc(&current->mm->mm_count);
++	kvm->mm = current->mm;
++	kvm_eventfd_init(kvm);
++	mutex_init(&kvm->lock);
++	mutex_init(&kvm->irq_lock);
++	mutex_init(&kvm->slots_lock);
++	atomic_set(&kvm->users_count, 1);
++	INIT_LIST_HEAD(&kvm->devices);
++
+ 	r = kvm_arch_init_vm(kvm, type);
+ 	if (r)
+ 		goto out_err_no_disable;
+@@ -569,16 +579,6 @@ static struct kvm *kvm_create_vm(unsigned long type)
+ 			goto out_err;
+ 	}
+ 
+-	spin_lock_init(&kvm->mmu_lock);
+-	kvm->mm = current->mm;
+-	atomic_inc(&kvm->mm->mm_count);
+-	kvm_eventfd_init(kvm);
+-	mutex_init(&kvm->lock);
+-	mutex_init(&kvm->irq_lock);
+-	mutex_init(&kvm->slots_lock);
+-	atomic_set(&kvm->users_count, 1);
+-	INIT_LIST_HEAD(&kvm->devices);
+-
+ 	r = kvm_init_mmu_notifier(kvm);
+ 	if (r)
+ 		goto out_err;
+@@ -603,6 +603,7 @@ out_err_no_disable:
+ 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
+ 		kvm_free_memslots(kvm, kvm->memslots[i]);
+ 	kvm_arch_free_vm(kvm);
++	mmdrop(current->mm);
+ 	return ERR_PTR(r);
+ }
+ 


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [gentoo-commits] proj/linux-patches:4.5 commit in: /
@ 2016-06-02 19:12 Mike Pagano
  0 siblings, 0 replies; 12+ messages in thread
From: Mike Pagano @ 2016-06-02 19:12 UTC (permalink / raw
  To: gentoo-commits

commit:     b897bcdfc22bddef00a1c5dd7684a5ad72ca3d75
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jun  2 19:12:47 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jun  2 19:12:47 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b897bcdf

Replace full 4.5.6 patch with incremental.

 1005_linux-4.5.6.patch | 34160 +++++------------------------------------------
 1 file changed, 2980 insertions(+), 31180 deletions(-)

diff --git a/1005_linux-4.5.6.patch b/1005_linux-4.5.6.patch
index 339ef1c..4b62797 100644
--- a/1005_linux-4.5.6.patch
+++ b/1005_linux-4.5.6.patch
@@ -1,71 +1,3 @@
-diff --git a/Documentation/cgroup-v2.txt b/Documentation/cgroup-v2.txt
-index ff49cf901148..81eb378210c6 100644
---- a/Documentation/cgroup-v2.txt
-+++ b/Documentation/cgroup-v2.txt
-@@ -1368,6 +1368,12 @@ system than killing the group.  Otherwise, memory.max is there to
- limit this type of spillover and ultimately contain buggy or even
- malicious applications.
- 
-+Setting the original memory.limit_in_bytes below the current usage was
-+subject to a race condition, where concurrent charges could cause the
-+limit setting to fail. memory.max on the other hand will first set the
-+limit to prevent new charges, and then reclaim and OOM kill until the
-+new limit is met - or the task writing to memory.max is killed.
-+
- The combined memory+swap accounting and limiting is replaced by real
- control over swap space.
- 
-diff --git a/Documentation/devicetree/bindings/ata/ahci-platform.txt b/Documentation/devicetree/bindings/ata/ahci-platform.txt
-index c2340eeeb97f..c000832a7fb9 100644
---- a/Documentation/devicetree/bindings/ata/ahci-platform.txt
-+++ b/Documentation/devicetree/bindings/ata/ahci-platform.txt
-@@ -30,6 +30,10 @@ Optional properties:
- - target-supply     : regulator for SATA target power
- - phys              : reference to the SATA PHY node
- - phy-names         : must be "sata-phy"
-+- ports-implemented : Mask that indicates which ports that the HBA supports
-+		      are available for software to use. Useful if PORTS_IMPL
-+		      is not programmed by the BIOS, which is true with
-+		      some embedded SOC's.
- 
- Required properties when using sub-nodes:
- - #address-cells    : number of cells to encode an address
-diff --git a/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt
-index 08a4a32c8eb0..0326154c7925 100644
---- a/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt
-+++ b/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt
-@@ -134,12 +134,12 @@ mfio80		ddr_debug, mips_trace_data, mips_debug
- mfio81		dreq0, mips_trace_data, eth_debug
- mfio82		dreq1, mips_trace_data, eth_debug
- mfio83		mips_pll_lock, mips_trace_data, usb_debug
--mfio84		sys_pll_lock, mips_trace_data, usb_debug
--mfio85		wifi_pll_lock, mips_trace_data, sdhost_debug
--mfio86		bt_pll_lock, mips_trace_data, sdhost_debug
--mfio87		rpu_v_pll_lock, dreq2, socif_debug
--mfio88		rpu_l_pll_lock, dreq3, socif_debug
--mfio89		audio_pll_lock, dreq4, dreq5
-+mfio84		audio_pll_lock, mips_trace_data, usb_debug
-+mfio85		rpu_v_pll_lock, mips_trace_data, sdhost_debug
-+mfio86		rpu_l_pll_lock, mips_trace_data, sdhost_debug
-+mfio87		sys_pll_lock, dreq2, socif_debug
-+mfio88		wifi_pll_lock, dreq3, socif_debug
-+mfio89		bt_pll_lock, dreq4, dreq5
- tck
- trstn
- tdi
-diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
-index 9a53c929f017..21e4b4861331 100644
---- a/Documentation/kernel-parameters.txt
-+++ b/Documentation/kernel-parameters.txt
-@@ -4016,6 +4016,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
- 					sector if the number is odd);
- 				i = IGNORE_DEVICE (don't bind to this
- 					device);
-+				j = NO_REPORT_LUNS (don't use report luns
-+					command, uas only);
- 				l = NOT_LOCKABLE (don't try to lock and
- 					unlock ejectable media);
- 				m = MAX_SECTORS_64 (don't transfer more
 diff --git a/Documentation/serial/tty.txt b/Documentation/serial/tty.txt
 index bc3842dc323a..e2dea3dc4307 100644
 --- a/Documentation/serial/tty.txt
@@ -80,430 +12,14 @@ index bc3842dc323a..e2dea3dc4307 100644
  TTY_NO_WRITE_SPLIT	Prevent driver from splitting up writes into
  			smaller chunks.
  
-diff --git a/MAINTAINERS b/MAINTAINERS
-index 6ee06ea47be4..77e4c10b4c06 100644
---- a/MAINTAINERS
-+++ b/MAINTAINERS
-@@ -228,13 +228,13 @@ F:	kernel/sys_ni.c
- 
- ABIT UGURU 1,2 HARDWARE MONITOR DRIVER
- M:	Hans de Goede <hdegoede@redhat.com>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	drivers/hwmon/abituguru.c
- 
- ABIT UGURU 3 HARDWARE MONITOR DRIVER
- M:	Alistair John Strachan <alistair@devzero.co.uk>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	drivers/hwmon/abituguru3.c
- 
-@@ -386,14 +386,14 @@ F:	Documentation/devicetree/bindings/net/ieee802154/adf7242.txt
- 
- ADM1025 HARDWARE MONITOR DRIVER
- M:	Jean Delvare <jdelvare@suse.com>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	Documentation/hwmon/adm1025
- F:	drivers/hwmon/adm1025.c
- 
- ADM1029 HARDWARE MONITOR DRIVER
- M:	Corentin Labbe <clabbe.montjoie@gmail.com>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	drivers/hwmon/adm1029.c
- 
-@@ -438,7 +438,7 @@ F:	drivers/video/backlight/adp8860_bl.c
- 
- ADS1015 HARDWARE MONITOR DRIVER
- M:	Dirk Eibach <eibach@gdsys.de>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	Documentation/hwmon/ads1015
- F:	drivers/hwmon/ads1015.c
-@@ -451,7 +451,7 @@ F:	drivers/macintosh/therm_adt746x.c
- 
- ADT7475 HARDWARE MONITOR DRIVER
- M:	Jean Delvare <jdelvare@suse.com>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	Documentation/hwmon/adt7475
- F:	drivers/hwmon/adt7475.c
-@@ -628,7 +628,7 @@ F:	include/linux/ccp.h
- 
- AMD FAM15H PROCESSOR POWER MONITORING DRIVER
- M:	Huang Rui <ray.huang@amd.com>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Supported
- F:	Documentation/hwmon/fam15h_power
- F:	drivers/hwmon/fam15h_power.c
-@@ -786,7 +786,7 @@ F:	drivers/input/mouse/bcm5974.c
- 
- APPLE SMC DRIVER
- M:	Henrik Rydberg <rydberg@bitmath.org>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Odd fixes
- F:	drivers/hwmon/applesmc.c
- 
-@@ -1825,7 +1825,7 @@ F:	include/media/i2c/as3645a.h
- 
- ASC7621 HARDWARE MONITOR DRIVER
- M:	George Joseph <george.joseph@fairview5.com>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	Documentation/hwmon/asc7621
- F:	drivers/hwmon/asc7621.c
-@@ -1918,7 +1918,7 @@ F:	drivers/net/wireless/ath/carl9170/
- 
- ATK0110 HWMON DRIVER
- M:	Luca Tettamanti <kronos.it@gmail.com>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	drivers/hwmon/asus_atk0110.c
- 
-@@ -3037,7 +3037,7 @@ F:	mm/swap_cgroup.c
- 
- CORETEMP HARDWARE MONITORING DRIVER
- M:	Fenghua Yu <fenghua.yu@intel.com>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	Documentation/hwmon/coretemp
- F:	drivers/hwmon/coretemp.c
-@@ -3625,7 +3625,7 @@ T:	git git://git.infradead.org/users/vkoul/slave-dma.git
- 
- DME1737 HARDWARE MONITOR DRIVER
- M:	Juerg Haefliger <juergh@gmail.com>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	Documentation/hwmon/dme1737
- F:	drivers/hwmon/dme1737.c
-@@ -4163,8 +4163,8 @@ F:	Documentation/efi-stub.txt
- F:	arch/ia64/kernel/efi.c
- F:	arch/x86/boot/compressed/eboot.[ch]
- F:	arch/x86/include/asm/efi.h
--F:	arch/x86/platform/efi/*
--F:	drivers/firmware/efi/*
-+F:	arch/x86/platform/efi/
-+F:	drivers/firmware/efi/
- F:	include/linux/efi*.h
- 
- EFI VARIABLE FILESYSTEM
-@@ -4322,7 +4322,7 @@ F:	include/video/exynos_mipi*
- 
- F71805F HARDWARE MONITORING DRIVER
- M:	Jean Delvare <jdelvare@suse.com>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	Documentation/hwmon/f71805f
- F:	drivers/hwmon/f71805f.c
-@@ -4401,7 +4401,7 @@ F:	fs/*
- 
- FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER
- M:	Riku Voipio <riku.voipio@iki.fi>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	drivers/hwmon/f75375s.c
- F:	include/linux/f75375s.h
-@@ -4958,8 +4958,8 @@ F:	drivers/media/usb/hackrf/
- HARDWARE MONITORING
- M:	Jean Delvare <jdelvare@suse.com>
- M:	Guenter Roeck <linux@roeck-us.net>
--L:	lm-sensors@lm-sensors.org
--W:	http://www.lm-sensors.org/
-+L:	linux-hwmon@vger.kernel.org
-+W:	http://hwmon.wiki.kernel.org/
- T:	quilt http://jdelvare.nerim.net/devel/linux/jdelvare-hwmon/
- T:	git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
- S:	Maintained
-@@ -5484,7 +5484,7 @@ F:	drivers/usb/atm/ueagle-atm.c
- 
- INA209 HARDWARE MONITOR DRIVER
- M:	Guenter Roeck <linux@roeck-us.net>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	Documentation/hwmon/ina209
- F:	Documentation/devicetree/bindings/i2c/ina209.txt
-@@ -5492,7 +5492,7 @@ F:	drivers/hwmon/ina209.c
- 
- INA2XX HARDWARE MONITOR DRIVER
- M:	Guenter Roeck <linux@roeck-us.net>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	Documentation/hwmon/ina2xx
- F:	drivers/hwmon/ina2xx.c
-@@ -5985,7 +5985,7 @@ F:	drivers/isdn/hardware/eicon/
- 
- IT87 HARDWARE MONITORING DRIVER
- M:	Jean Delvare <jdelvare@suse.com>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	Documentation/hwmon/it87
- F:	drivers/hwmon/it87.c
-@@ -6021,7 +6021,7 @@ F:	drivers/media/dvb-frontends/ix2505v*
- 
- JC42.4 TEMPERATURE SENSOR DRIVER
- M:	Guenter Roeck <linux@roeck-us.net>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	drivers/hwmon/jc42.c
- F:	Documentation/hwmon/jc42
-@@ -6071,14 +6071,14 @@ F:	drivers/tty/serial/jsm/
- 
- K10TEMP HARDWARE MONITORING DRIVER
- M:	Clemens Ladisch <clemens@ladisch.de>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	Documentation/hwmon/k10temp
- F:	drivers/hwmon/k10temp.c
- 
- K8TEMP HARDWARE MONITORING DRIVER
- M:	Rudolf Marek <r.marek@assembler.cz>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	Documentation/hwmon/k8temp
- F:	drivers/hwmon/k8temp.c
-@@ -6605,27 +6605,27 @@ F:	net/llc/
- 
- LM73 HARDWARE MONITOR DRIVER
- M:	Guillaume Ligneul <guillaume.ligneul@gmail.com>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	drivers/hwmon/lm73.c
- 
- LM78 HARDWARE MONITOR DRIVER
- M:	Jean Delvare <jdelvare@suse.com>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	Documentation/hwmon/lm78
- F:	drivers/hwmon/lm78.c
- 
- LM83 HARDWARE MONITOR DRIVER
- M:	Jean Delvare <jdelvare@suse.com>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	Documentation/hwmon/lm83
- F:	drivers/hwmon/lm83.c
- 
- LM90 HARDWARE MONITOR DRIVER
- M:	Jean Delvare <jdelvare@suse.com>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	Documentation/hwmon/lm90
- F:	Documentation/devicetree/bindings/hwmon/lm90.txt
-@@ -6633,7 +6633,7 @@ F:	drivers/hwmon/lm90.c
- 
- LM95234 HARDWARE MONITOR DRIVER
- M:	Guenter Roeck <linux@roeck-us.net>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	Documentation/hwmon/lm95234
- F:	drivers/hwmon/lm95234.c
-@@ -6700,7 +6700,7 @@ F:	drivers/scsi/sym53c8xx_2/
- 
- LTC4261 HARDWARE MONITOR DRIVER
- M:	Guenter Roeck <linux@roeck-us.net>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	Documentation/hwmon/ltc4261
- F:	drivers/hwmon/ltc4261.c
-@@ -6870,28 +6870,28 @@ F:	include/uapi/linux/matroxfb.h
- 
- MAX16065 HARDWARE MONITOR DRIVER
- M:	Guenter Roeck <linux@roeck-us.net>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	Documentation/hwmon/max16065
- F:	drivers/hwmon/max16065.c
- 
- MAX20751 HARDWARE MONITOR DRIVER
- M:	Guenter Roeck <linux@roeck-us.net>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	Documentation/hwmon/max20751
- F:	drivers/hwmon/max20751.c
- 
- MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER
- M:	"Hans J. Koch" <hjk@hansjkoch.de>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	Documentation/hwmon/max6650
- F:	drivers/hwmon/max6650.c
- 
- MAX6697 HARDWARE MONITOR DRIVER
- M:	Guenter Roeck <linux@roeck-us.net>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	Documentation/hwmon/max6697
- F:	Documentation/devicetree/bindings/i2c/max6697.txt
-@@ -7455,7 +7455,7 @@ F:	drivers/scsi/NCR_D700.*
- 
- NCT6775 HARDWARE MONITOR DRIVER
- M:	Guenter Roeck <linux@roeck-us.net>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	Documentation/hwmon/nct6775
- F:	drivers/hwmon/nct6775.c
-@@ -8235,7 +8235,7 @@ F:	drivers/video/logo/logo_parisc*
- 
- PC87360 HARDWARE MONITORING DRIVER
- M:	Jim Cromie <jim.cromie@gmail.com>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	Documentation/hwmon/pc87360
- F:	drivers/hwmon/pc87360.c
-@@ -8247,7 +8247,7 @@ F:	drivers/char/pc8736x_gpio.c
- 
- PC87427 HARDWARE MONITORING DRIVER
- M:	Jean Delvare <jdelvare@suse.com>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	Documentation/hwmon/pc87427
- F:	drivers/hwmon/pc87427.c
-@@ -8601,8 +8601,8 @@ F:	drivers/rtc/rtc-puv3.c
- 
- PMBUS HARDWARE MONITORING DRIVERS
- M:	Guenter Roeck <linux@roeck-us.net>
--L:	lm-sensors@lm-sensors.org
--W:	http://www.lm-sensors.org/
-+L:	linux-hwmon@vger.kernel.org
-+W:	http://hwmon.wiki.kernel.org/
- W:	http://www.roeck-us.net/linux/drivers/
- T:	git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
- S:	Maintained
-@@ -8807,7 +8807,7 @@ F:	drivers/media/usb/pwc/*
- 
- PWM FAN DRIVER
- M:	Kamil Debski <k.debski@samsung.com>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Supported
- F:	Documentation/devicetree/bindings/hwmon/pwm-fan.txt
- F:	Documentation/hwmon/pwm-fan
-@@ -10113,28 +10113,28 @@ F:	Documentation/devicetree/bindings/media/i2c/nokia,smia.txt
- 
- SMM665 HARDWARE MONITOR DRIVER
- M:	Guenter Roeck <linux@roeck-us.net>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	Documentation/hwmon/smm665
- F:	drivers/hwmon/smm665.c
- 
- SMSC EMC2103 HARDWARE MONITOR DRIVER
- M:	Steve Glendinning <steve.glendinning@shawell.net>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	Documentation/hwmon/emc2103
- F:	drivers/hwmon/emc2103.c
- 
- SMSC SCH5627 HARDWARE MONITOR DRIVER
- M:	Hans de Goede <hdegoede@redhat.com>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Supported
- F:	Documentation/hwmon/sch5627
- F:	drivers/hwmon/sch5627.c
- 
- SMSC47B397 HARDWARE MONITOR DRIVER
- M:	Jean Delvare <jdelvare@suse.com>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	Documentation/hwmon/smsc47b397
- F:	drivers/hwmon/smsc47b397.c
-@@ -11067,7 +11067,7 @@ F:	include/linux/mmc/sh_mobile_sdhi.h
- 
- TMP401 HARDWARE MONITOR DRIVER
- M:	Guenter Roeck <linux@roeck-us.net>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	Documentation/hwmon/tmp401
- F:	drivers/hwmon/tmp401.c
-@@ -11812,14 +11812,14 @@ F:	Documentation/networking/vrf.txt
- 
- VT1211 HARDWARE MONITOR DRIVER
- M:	Juerg Haefliger <juergh@gmail.com>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	Documentation/hwmon/vt1211
- F:	drivers/hwmon/vt1211.c
- 
- VT8231 HARDWARE MONITOR DRIVER
- M:	Roger Lucas <vt8231@hiddenengine.co.uk>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	drivers/hwmon/vt8231.c
- 
-@@ -11838,21 +11838,21 @@ F:	drivers/w1/
- 
- W83791D HARDWARE MONITORING DRIVER
- M:	Marc Hulsman <m.hulsman@tudelft.nl>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	Documentation/hwmon/w83791d
- F:	drivers/hwmon/w83791d.c
- 
- W83793 HARDWARE MONITORING DRIVER
- M:	Rudolf Marek <r.marek@assembler.cz>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	Documentation/hwmon/w83793
- F:	drivers/hwmon/w83793.c
- 
- W83795 HARDWARE MONITORING DRIVER
- M:	Jean Delvare <jdelvare@suse.com>
--L:	lm-sensors@lm-sensors.org
-+L:	linux-hwmon@vger.kernel.org
- S:	Maintained
- F:	drivers/hwmon/w83795.c
- 
 diff --git a/Makefile b/Makefile
-index 7b3ecdcdc6c1..07a1786f695a 100644
+index a23df411d393..07a1786f695a 100644
 --- a/Makefile
 +++ b/Makefile
 @@ -1,6 +1,6 @@
  VERSION = 4
  PATCHLEVEL = 5
--SUBLEVEL = 0
+-SUBLEVEL = 5
 +SUBLEVEL = 6
  EXTRAVERSION =
  NAME = Blurry Fish Butt
@@ -521,294 +37,6 @@ index 7b3ecdcdc6c1..07a1786f695a 100644
  endif
  
  ifdef CONFIG_FRAME_POINTER
-diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
-index 44a578c10732..ab5d5701e11d 100644
---- a/arch/arc/boot/dts/axs10x_mb.dtsi
-+++ b/arch/arc/boot/dts/axs10x_mb.dtsi
-@@ -47,6 +47,14 @@
- 			clocks = <&apbclk>;
- 			clock-names = "stmmaceth";
- 			max-speed = <100>;
-+			mdio0 {
-+				#address-cells = <1>;
-+				#size-cells = <0>;
-+				compatible = "snps,dwmac-mdio";
-+				phy1: ethernet-phy@1 {
-+					reg = <1>;
-+				};
-+			};
- 		};
- 
- 		ehci@0x40000 {
-diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
-index 57c1f33844d4..0352fb8d21b9 100644
---- a/arch/arc/include/asm/bitops.h
-+++ b/arch/arc/include/asm/bitops.h
-@@ -35,21 +35,6 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
- 									\
- 	m += nr >> 5;							\
- 									\
--	/*								\
--	 * ARC ISA micro-optimization:					\
--	 *								\
--	 * Instructions dealing with bitpos only consider lower 5 bits	\
--	 * e.g (x << 33) is handled like (x << 1) by ASL instruction	\
--	 *  (mem pointer still needs adjustment to point to next word)	\
--	 *								\
--	 * Hence the masking to clamp @nr arg can be elided in general.	\
--	 *								\
--	 * However if @nr is a constant (above assumed in a register),	\
--	 * and greater than 31, gcc can optimize away (x << 33) to 0,	\
--	 * as overflow, given the 32-bit ISA. Thus masking needs to be	\
--	 * done for const @nr, but no code is generated due to gcc	\
--	 * const prop.							\
--	 */								\
- 	nr &= 0x1f;							\
- 									\
- 	__asm__ __volatile__(						\
-diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
-index 694ece8a0243..cb69299a492e 100644
---- a/arch/arc/include/asm/io.h
-+++ b/arch/arc/include/asm/io.h
-@@ -13,6 +13,15 @@
- #include <asm/byteorder.h>
- #include <asm/page.h>
- 
-+#ifdef CONFIG_ISA_ARCV2
-+#include <asm/barrier.h>
-+#define __iormb()		rmb()
-+#define __iowmb()		wmb()
-+#else
-+#define __iormb()		do { } while (0)
-+#define __iowmb()		do { } while (0)
-+#endif
-+
- extern void __iomem *ioremap(unsigned long physaddr, unsigned long size);
- extern void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
- 				  unsigned long flags);
-@@ -22,6 +31,15 @@ extern void iounmap(const void __iomem *addr);
- #define ioremap_wc(phy, sz)		ioremap(phy, sz)
- #define ioremap_wt(phy, sz)		ioremap(phy, sz)
- 
-+/*
-+ * io{read,write}{16,32}be() macros
-+ */
-+#define ioread16be(p)		({ u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
-+#define ioread32be(p)		({ u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
-+
-+#define iowrite16be(v,p)	({ __iowmb(); __raw_writew((__force u16)cpu_to_be16(v), p); })
-+#define iowrite32be(v,p)	({ __iowmb(); __raw_writel((__force u32)cpu_to_be32(v), p); })
-+
- /* Change struct page to physical address */
- #define page_to_phys(page)		(page_to_pfn(page) << PAGE_SHIFT)
- 
-@@ -99,15 +117,6 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
- 
- }
- 
--#ifdef CONFIG_ISA_ARCV2
--#include <asm/barrier.h>
--#define __iormb()		rmb()
--#define __iowmb()		wmb()
--#else
--#define __iormb()		do { } while (0)
--#define __iowmb()		do { } while (0)
--#endif
--
- /*
-  * MMIO can also get buffered/optimized in micro-arch, so barriers needed
-  * Based on ARM model for the typical use case
-@@ -129,15 +138,23 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
- #define writel(v,c)		({ __iowmb(); writel_relaxed(v,c); })
- 
- /*
-- * Relaxed API for drivers which can handle any ordering themselves
-+ * Relaxed API for drivers which can handle barrier ordering themselves
-+ *
-+ * Also these are defined to perform little endian accesses.
-+ * To provide the typical device register semantics of fixed endian,
-+ * swap the byte order for Big Endian
-+ *
-+ * http://lkml.kernel.org/r/201603100845.30602.arnd@arndb.de
-  */
- #define readb_relaxed(c)	__raw_readb(c)
--#define readw_relaxed(c)	__raw_readw(c)
--#define readl_relaxed(c)	__raw_readl(c)
-+#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
-+					__raw_readw(c)); __r; })
-+#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
-+					__raw_readl(c)); __r; })
- 
- #define writeb_relaxed(v,c)	__raw_writeb(v,c)
--#define writew_relaxed(v,c)	__raw_writew(v,c)
--#define writel_relaxed(v,c)	__raw_writel(v,c)
-+#define writew_relaxed(v,c)	__raw_writew((__force u16) cpu_to_le16(v),c)
-+#define writel_relaxed(v,c)	__raw_writel((__force u32) cpu_to_le32(v),c)
- 
- #include <asm-generic/io.h>
- 
-diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
-index 1fafaad516ba..97471d62d5e4 100644
---- a/arch/arm/boot/dts/am33xx.dtsi
-+++ b/arch/arm/boot/dts/am33xx.dtsi
-@@ -860,7 +860,7 @@
- 			ti,no-idle-on-init;
- 			reg = <0x50000000 0x2000>;
- 			interrupts = <100>;
--			dmas = <&edma 52>;
-+			dmas = <&edma 52 0>;
- 			dma-names = "rxtx";
- 			gpmc,num-cs = <7>;
- 			gpmc,num-waitpins = <2>;
-diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
-index 92068fbf8b57..6bd38a28e26c 100644
---- a/arch/arm/boot/dts/am4372.dtsi
-+++ b/arch/arm/boot/dts/am4372.dtsi
-@@ -207,7 +207,7 @@
- 			ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 5>,
- 				   <&edma_tptc2 0>;
- 
--			ti,edma-memcpy-channels = <32 33>;
-+			ti,edma-memcpy-channels = <58 59>;
- 		};
- 
- 		edma_tptc0: tptc@49800000 {
-@@ -884,7 +884,7 @@
- 		gpmc: gpmc@50000000 {
- 			compatible = "ti,am3352-gpmc";
- 			ti,hwmods = "gpmc";
--			dmas = <&edma 52>;
-+			dmas = <&edma 52 0>;
- 			dma-names = "rxtx";
- 			clocks = <&l3s_gclk>;
- 			clock-names = "fck";
-diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
-index d580e2b70f9a..637dc5dbc8ac 100644
---- a/arch/arm/boot/dts/am43x-epos-evm.dts
-+++ b/arch/arm/boot/dts/am43x-epos-evm.dts
-@@ -792,3 +792,8 @@
- 	tx-num-evt = <32>;
- 	rx-num-evt = <32>;
- };
-+
-+&synctimer_32kclk {
-+	assigned-clocks = <&mux_synctimer32k_ck>;
-+	assigned-clock-parents = <&clkdiv32k_ick>;
-+};
-diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi
-index 7ccce7529b0c..cc952cf8ec30 100644
---- a/arch/arm/boot/dts/armada-375.dtsi
-+++ b/arch/arm/boot/dts/armada-375.dtsi
-@@ -529,7 +529,7 @@
- 			};
- 
- 			sata@a0000 {
--				compatible = "marvell,orion-sata";
-+				compatible = "marvell,armada-370-sata";
- 				reg = <0xa0000 0x5000>;
- 				interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
- 				clocks = <&gateclk 14>, <&gateclk 20>;
-diff --git a/arch/arm/boot/dts/armada-385-linksys.dtsi b/arch/arm/boot/dts/armada-385-linksys.dtsi
-index 3710755c6d76..85d2c377c332 100644
---- a/arch/arm/boot/dts/armada-385-linksys.dtsi
-+++ b/arch/arm/boot/dts/armada-385-linksys.dtsi
-@@ -117,7 +117,7 @@
- 			};
- 
- 			/* USB part of the eSATA/USB 2.0 port */
--			usb@50000 {
-+			usb@58000 {
- 				status = "okay";
- 			};
- 
-diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
-index ff888d21c786..f3e2b96c06a3 100644
---- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
-+++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
-@@ -303,6 +303,7 @@
- 		regulator-name = "mmc0-card-supply";
- 		regulator-min-microvolt = <3300000>;
- 		regulator-max-microvolt = <3300000>;
-+		regulator-always-on;
- 	};
- 
- 	gpio_keys {
-diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
-index 569026e8f96c..da84e65b56ef 100644
---- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts
-+++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
-@@ -268,5 +268,6 @@
- 		regulator-min-microvolt = <3300000>;
- 		regulator-max-microvolt = <3300000>;
- 		vin-supply = <&vcc_3v3_reg>;
-+		regulator-always-on;
- 	};
- };
-diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
-index 0827d594b1f0..cd0cd5fd09a3 100644
---- a/arch/arm/boot/dts/at91sam9x5.dtsi
-+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
-@@ -106,7 +106,7 @@
- 
- 			pmc: pmc@fffffc00 {
- 				compatible = "atmel,at91sam9x5-pmc", "syscon";
--				reg = <0xfffffc00 0x100>;
-+				reg = <0xfffffc00 0x200>;
- 				interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
- 				interrupt-controller;
- 				#address-cells = <1>;
-diff --git a/arch/arm/boot/dts/pxa3xx.dtsi b/arch/arm/boot/dts/pxa3xx.dtsi
-index cf6998a0804d..564341af7e97 100644
---- a/arch/arm/boot/dts/pxa3xx.dtsi
-+++ b/arch/arm/boot/dts/pxa3xx.dtsi
-@@ -30,7 +30,7 @@
- 			reg = <0x43100000 90>;
- 			interrupts = <45>;
- 			clocks = <&clks CLK_NAND>;
--			dmas = <&pdma 97>;
-+			dmas = <&pdma 97 3>;
- 			dma-names = "data";
- 			#address-cells = <1>;
- 			#size-cells = <1>;	
-diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
-index ed521e85e208..e8bc7e8bedd2 100644
---- a/arch/arm/boot/dts/qcom-apq8064.dtsi
-+++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
-@@ -665,7 +665,7 @@
- 		};
- 
- 		sata0: sata@29000000 {
--			compatible		= "generic-ahci";
-+			compatible		= "qcom,apq8064-ahci", "generic-ahci";
- 			status			= "disabled";
- 			reg			= <0x29000000 0x180>;
- 			interrupts		= <GIC_SPI 209 IRQ_TYPE_NONE>;
-@@ -687,6 +687,7 @@
- 
- 			phys			= <&sata_phy0>;
- 			phy-names		= "sata-phy";
-+			ports-implemented	= <0x1>;
- 		};
- 
- 		/* Temporary fixed regulator */
-diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
-index 7d0cba6f1cc5..c86ea8aac203 100644
---- a/arch/arm/kernel/setup.c
-+++ b/arch/arm/kernel/setup.c
-@@ -430,11 +430,13 @@ static void __init patch_aeabi_idiv(void)
- 	pr_info("CPU: div instructions available: patching division code\n");
- 
- 	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
-+	asm ("" : "+g" (fn_addr));
- 	((u32 *)fn_addr)[0] = udiv_instruction();
- 	((u32 *)fn_addr)[1] = bx_lr_instruction();
- 	flush_icache_range(fn_addr, fn_addr + 8);
- 
- 	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
-+	asm ("" : "+g" (fn_addr));
- 	((u32 *)fn_addr)[0] = sdiv_instruction();
- 	((u32 *)fn_addr)[1] = bx_lr_instruction();
- 	flush_icache_range(fn_addr, fn_addr + 8);
 diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
 index aba61fd3697a..88323ffb1ee0 100644
 --- a/arch/arm/kvm/mmu.c
@@ -849,216 +77,6 @@ index aba61fd3697a..88323ffb1ee0 100644
  	return 0;
  }
  
-diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
-index 47905a50e075..318394ed5c7a 100644
---- a/arch/arm/mach-cns3xxx/pcie.c
-+++ b/arch/arm/mach-cns3xxx/pcie.c
-@@ -220,13 +220,13 @@ static void cns3xxx_write_config(struct cns3xxx_pcie *cnspci,
- 	u32 mask = (0x1ull << (size * 8)) - 1;
- 	int shift = (where % 4) * 8;
- 
--	v = readl_relaxed(base + (where & 0xffc));
-+	v = readl_relaxed(base);
- 
- 	v &= ~(mask << shift);
- 	v |= (val & mask) << shift;
- 
--	writel_relaxed(v, base + (where & 0xffc));
--	readl_relaxed(base + (where & 0xffc));
-+	writel_relaxed(v, base);
-+	readl_relaxed(base);
- }
- 
- static void __init cns3xxx_pcie_hw_init(struct cns3xxx_pcie *cnspci)
-diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
-index 652a0bb11578..5189bcecad12 100644
---- a/arch/arm/mach-exynos/Kconfig
-+++ b/arch/arm/mach-exynos/Kconfig
-@@ -27,6 +27,7 @@ menuconfig ARCH_EXYNOS
- 	select S5P_DEV_MFC
- 	select SRAM
- 	select THERMAL
-+	select THERMAL_OF
- 	select MFD_SYSCON
- 	select CLKSRC_EXYNOS_MCT
- 	select POWER_RESET
-diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
-index 7c21760f590f..875a2bab64f6 100644
---- a/arch/arm/mach-exynos/pm_domains.c
-+++ b/arch/arm/mach-exynos/pm_domains.c
-@@ -92,7 +92,7 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
- 			if (IS_ERR(pd->clk[i]))
- 				break;
- 
--			if (IS_ERR(pd->clk[i]))
-+			if (IS_ERR(pd->pclk[i]))
- 				continue; /* Skip on first power up */
- 			if (clk_set_parent(pd->clk[i], pd->pclk[i]))
- 				pr_err("%s: error setting parent to clock%d\n",
-diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
-index aa7b379e2661..2a3db0bd9e15 100644
---- a/arch/arm/mach-omap2/cpuidle34xx.c
-+++ b/arch/arm/mach-omap2/cpuidle34xx.c
-@@ -34,6 +34,7 @@
- #include "pm.h"
- #include "control.h"
- #include "common.h"
-+#include "soc.h"
- 
- /* Mach specific information to be recorded in the C-state driver_data */
- struct omap3_idle_statedata {
-@@ -315,6 +316,69 @@ static struct cpuidle_driver omap3_idle_driver = {
- 	.safe_state_index = 0,
- };
- 
-+/*
-+ * Numbers based on measurements made in October 2009 for PM optimized kernel
-+ * with CPU freq enabled on device Nokia N900. Assumes OPP2 (main idle OPP,
-+ * and worst case latencies).
-+ */
-+static struct cpuidle_driver omap3430_idle_driver = {
-+	.name             = "omap3430_idle",
-+	.owner            = THIS_MODULE,
-+	.states = {
-+		{
-+			.enter		  = omap3_enter_idle_bm,
-+			.exit_latency	  = 110 + 162,
-+			.target_residency = 5,
-+			.name		  = "C1",
-+			.desc		  = "MPU ON + CORE ON",
-+		},
-+		{
-+			.enter		  = omap3_enter_idle_bm,
-+			.exit_latency	  = 106 + 180,
-+			.target_residency = 309,
-+			.name		  = "C2",
-+			.desc		  = "MPU ON + CORE ON",
-+		},
-+		{
-+			.enter		  = omap3_enter_idle_bm,
-+			.exit_latency	  = 107 + 410,
-+			.target_residency = 46057,
-+			.name		  = "C3",
-+			.desc		  = "MPU RET + CORE ON",
-+		},
-+		{
-+			.enter		  = omap3_enter_idle_bm,
-+			.exit_latency	  = 121 + 3374,
-+			.target_residency = 46057,
-+			.name		  = "C4",
-+			.desc		  = "MPU OFF + CORE ON",
-+		},
-+		{
-+			.enter		  = omap3_enter_idle_bm,
-+			.exit_latency	  = 855 + 1146,
-+			.target_residency = 46057,
-+			.name		  = "C5",
-+			.desc		  = "MPU RET + CORE RET",
-+		},
-+		{
-+			.enter		  = omap3_enter_idle_bm,
-+			.exit_latency	  = 7580 + 4134,
-+			.target_residency = 484329,
-+			.name		  = "C6",
-+			.desc		  = "MPU OFF + CORE RET",
-+		},
-+		{
-+			.enter		  = omap3_enter_idle_bm,
-+			.exit_latency	  = 7505 + 15274,
-+			.target_residency = 484329,
-+			.name		  = "C7",
-+			.desc		  = "MPU OFF + CORE OFF",
-+		},
-+	},
-+	.state_count = ARRAY_SIZE(omap3_idle_data),
-+	.safe_state_index = 0,
-+};
-+
- /* Public functions */
- 
- /**
-@@ -333,5 +397,8 @@ int __init omap3_idle_init(void)
- 	if (!mpu_pd || !core_pd || !per_pd || !cam_pd)
- 		return -ENODEV;
- 
--	return cpuidle_register(&omap3_idle_driver, NULL);
-+	if (cpu_is_omap3430())
-+		return cpuidle_register(&omap3430_idle_driver, NULL);
-+	else
-+		return cpuidle_register(&omap3_idle_driver, NULL);
- }
-diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
-index 3c87e40650cf..9821be6dfd5e 100644
---- a/arch/arm/mach-omap2/io.c
-+++ b/arch/arm/mach-omap2/io.c
-@@ -368,6 +368,7 @@ void __init omap5_map_io(void)
- void __init dra7xx_map_io(void)
- {
- 	iotable_init(dra7xx_io_desc, ARRAY_SIZE(dra7xx_io_desc));
-+	omap_barriers_init();
- }
- #endif
- /*
-diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
-index b6d62e4cdfdd..2af6ff63e3b4 100644
---- a/arch/arm/mach-omap2/omap_hwmod.c
-+++ b/arch/arm/mach-omap2/omap_hwmod.c
-@@ -1416,9 +1416,7 @@ static void _enable_sysc(struct omap_hwmod *oh)
- 	    (sf & SYSC_HAS_CLOCKACTIVITY))
- 		_set_clockactivity(oh, oh->class->sysc->clockact, &v);
- 
--	/* If the cached value is the same as the new value, skip the write */
--	if (oh->_sysc_cache != v)
--		_write_sysconfig(v, oh);
-+	_write_sysconfig(v, oh);
- 
- 	/*
- 	 * Set the autoidle bit only after setting the smartidle bit
-@@ -1481,7 +1479,9 @@ static void _idle_sysc(struct omap_hwmod *oh)
- 		_set_master_standbymode(oh, idlemode, &v);
- 	}
- 
--	_write_sysconfig(v, oh);
-+	/* If the cached value is the same as the new value, skip the write */
-+	if (oh->_sysc_cache != v)
-+		_write_sysconfig(v, oh);
- }
- 
- /**
-diff --git a/arch/arm/mach-prima2/Kconfig b/arch/arm/mach-prima2/Kconfig
-index f998eb1c698e..0cf4426183cf 100644
---- a/arch/arm/mach-prima2/Kconfig
-+++ b/arch/arm/mach-prima2/Kconfig
-@@ -2,6 +2,7 @@ menuconfig ARCH_SIRF
- 	bool "CSR SiRF"
- 	depends on ARCH_MULTI_V7
- 	select ARCH_HAS_RESET_CONTROLLER
-+	select RESET_CONTROLLER
- 	select ARCH_REQUIRE_GPIOLIB
- 	select GENERIC_IRQ_CHIP
- 	select NO_IOPORT_MAP
-diff --git a/arch/arm/mach-socfpga/headsmp.S b/arch/arm/mach-socfpga/headsmp.S
-index 5d94b7a2fb10..c160fa3007e9 100644
---- a/arch/arm/mach-socfpga/headsmp.S
-+++ b/arch/arm/mach-socfpga/headsmp.S
-@@ -13,6 +13,7 @@
- #include <asm/assembler.h>
- 
- 	.arch	armv7-a
-+	.arm
- 
- ENTRY(secondary_trampoline)
- 	/* CPU1 will always fetch from 0x0 when it is brought out of reset.
-diff --git a/arch/arm64/include/asm/opcodes.h b/arch/arm64/include/asm/opcodes.h
-index 4e603ea36ad3..123f45d92cd1 100644
---- a/arch/arm64/include/asm/opcodes.h
-+++ b/arch/arm64/include/asm/opcodes.h
-@@ -1 +1,5 @@
-+#ifdef CONFIG_CPU_BIG_ENDIAN
-+#define CONFIG_CPU_ENDIAN_BE8 CONFIG_CPU_BIG_ENDIAN
-+#endif
-+
- #include <../../arm/include/asm/opcodes.h>
 diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
 index 5c25b831273d..9786f770088d 100644
 --- a/arch/arm64/include/asm/pgtable-hwdef.h
@@ -1072,18 +90,9 @@ index 5c25b831273d..9786f770088d 100644
  #define PMD_SECT_RDONLY		(_AT(pmdval_t, 1) << 7)		/* AP[2] */
  #define PMD_SECT_S		(_AT(pmdval_t, 3) << 8)
 diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
-index 819aff5d593f..137d3bf88e2a 100644
+index 7273210782d5..137d3bf88e2a 100644
 --- a/arch/arm64/include/asm/pgtable.h
 +++ b/arch/arm64/include/asm/pgtable.h
-@@ -279,7 +279,7 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
- static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
- 			      pte_t *ptep, pte_t pte)
- {
--	if (pte_valid(pte)) {
-+	if (pte_present(pte)) {
- 		if (pte_sw_dirty(pte) && pte_write(pte))
- 			pte_val(pte) &= ~PTE_RDONLY;
- 		else
 @@ -356,6 +356,7 @@ static inline pgprot_t mk_sect_prot(pgprot_t prot)
  #define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
  #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -1144,14 +153,6 @@ index 819aff5d593f..137d3bf88e2a 100644
  {
  	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
  }
-@@ -649,6 +664,7 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
-  *	bits 0-1:	present (must be zero)
-  *	bits 2-7:	swap type
-  *	bits 8-57:	swap offset
-+ *	bit  58:	PTE_PROT_NONE (must be zero)
-  */
- #define __SWP_TYPE_SHIFT	2
- #define __SWP_TYPE_BITS		6
 diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
 index 212ae6361d8b..a5f234039616 100644
 --- a/arch/arm64/kernel/cpuinfo.c
@@ -1166,58 +167,6 @@ index 212ae6361d8b..a5f234039616 100644
  };
  
  static const char *const compat_hwcap2_str[] = {
-diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
-index c536c9e307b9..0931155f6406 100644
---- a/arch/arm64/kernel/debug-monitors.c
-+++ b/arch/arm64/kernel/debug-monitors.c
-@@ -186,20 +186,21 @@ static void clear_regs_spsr_ss(struct pt_regs *regs)
- 
- /* EL1 Single Step Handler hooks */
- static LIST_HEAD(step_hook);
--static DEFINE_RWLOCK(step_hook_lock);
-+static DEFINE_SPINLOCK(step_hook_lock);
- 
- void register_step_hook(struct step_hook *hook)
- {
--	write_lock(&step_hook_lock);
--	list_add(&hook->node, &step_hook);
--	write_unlock(&step_hook_lock);
-+	spin_lock(&step_hook_lock);
-+	list_add_rcu(&hook->node, &step_hook);
-+	spin_unlock(&step_hook_lock);
- }
- 
- void unregister_step_hook(struct step_hook *hook)
- {
--	write_lock(&step_hook_lock);
--	list_del(&hook->node);
--	write_unlock(&step_hook_lock);
-+	spin_lock(&step_hook_lock);
-+	list_del_rcu(&hook->node);
-+	spin_unlock(&step_hook_lock);
-+	synchronize_rcu();
- }
- 
- /*
-@@ -213,15 +214,15 @@ static int call_step_hook(struct pt_regs *regs, unsigned int esr)
- 	struct step_hook *hook;
- 	int retval = DBG_HOOK_ERROR;
- 
--	read_lock(&step_hook_lock);
-+	rcu_read_lock();
- 
--	list_for_each_entry(hook, &step_hook, node)	{
-+	list_for_each_entry_rcu(hook, &step_hook, node)	{
- 		retval = hook->fn(regs, esr);
- 		if (retval == DBG_HOOK_HANDLED)
- 			break;
- 	}
- 
--	read_unlock(&step_hook_lock);
-+	rcu_read_unlock();
- 
- 	return retval;
- }
 diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
 index 4d1ac81870d2..e9e0e6db73f6 100644
 --- a/arch/arm64/kvm/inject_fault.c
@@ -1292,108 +241,6 @@ index abe2a9542b3a..a26e3acea6a9 100644
  /*
   * The kernel tried to access some page that wasn't present.
   */
-diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
-index a865d2a04f75..5de673ac9cb1 100644
---- a/arch/ia64/include/asm/io.h
-+++ b/arch/ia64/include/asm/io.h
-@@ -433,6 +433,7 @@ static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned lo
- 	return ioremap(phys_addr, size);
- }
- #define ioremap_cache ioremap_cache
-+#define ioremap_uc ioremap_nocache
- 
- 
- /*
-diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c
-index bdeed9d13c6f..433c4b9a9f0a 100644
---- a/arch/mips/alchemy/devboards/db1000.c
-+++ b/arch/mips/alchemy/devboards/db1000.c
-@@ -503,15 +503,15 @@ int __init db1000_dev_setup(void)
- 	if (board == BCSR_WHOAMI_DB1500) {
- 		c0 = AU1500_GPIO2_INT;
- 		c1 = AU1500_GPIO5_INT;
--		d0 = AU1500_GPIO0_INT;
--		d1 = AU1500_GPIO3_INT;
-+		d0 = 0;	/* GPIO number, NOT irq! */
-+		d1 = 3; /* GPIO number, NOT irq! */
- 		s0 = AU1500_GPIO1_INT;
- 		s1 = AU1500_GPIO4_INT;
- 	} else if (board == BCSR_WHOAMI_DB1100) {
- 		c0 = AU1100_GPIO2_INT;
- 		c1 = AU1100_GPIO5_INT;
--		d0 = AU1100_GPIO0_INT;
--		d1 = AU1100_GPIO3_INT;
-+		d0 = 0; /* GPIO number, NOT irq! */
-+		d1 = 3; /* GPIO number, NOT irq! */
- 		s0 = AU1100_GPIO1_INT;
- 		s1 = AU1100_GPIO4_INT;
- 
-@@ -545,15 +545,15 @@ int __init db1000_dev_setup(void)
- 	} else if (board == BCSR_WHOAMI_DB1000) {
- 		c0 = AU1000_GPIO2_INT;
- 		c1 = AU1000_GPIO5_INT;
--		d0 = AU1000_GPIO0_INT;
--		d1 = AU1000_GPIO3_INT;
-+		d0 = 0; /* GPIO number, NOT irq! */
-+		d1 = 3; /* GPIO number, NOT irq! */
- 		s0 = AU1000_GPIO1_INT;
- 		s1 = AU1000_GPIO4_INT;
- 		platform_add_devices(db1000_devs, ARRAY_SIZE(db1000_devs));
- 	} else if ((board == BCSR_WHOAMI_PB1500) ||
- 		   (board == BCSR_WHOAMI_PB1500R2)) {
- 		c0 = AU1500_GPIO203_INT;
--		d0 = AU1500_GPIO201_INT;
-+		d0 = 1; /* GPIO number, NOT irq! */
- 		s0 = AU1500_GPIO202_INT;
- 		twosocks = 0;
- 		flashsize = 64;
-@@ -566,7 +566,7 @@ int __init db1000_dev_setup(void)
- 		 */
- 	} else if (board == BCSR_WHOAMI_PB1100) {
- 		c0 = AU1100_GPIO11_INT;
--		d0 = AU1100_GPIO9_INT;
-+		d0 = 9; /* GPIO number, NOT irq! */
- 		s0 = AU1100_GPIO10_INT;
- 		twosocks = 0;
- 		flashsize = 64;
-@@ -583,7 +583,6 @@ int __init db1000_dev_setup(void)
- 	} else
- 		return 0; /* unknown board, no further dev setup to do */
- 
--	irq_set_irq_type(d0, IRQ_TYPE_EDGE_BOTH);
- 	irq_set_irq_type(c0, IRQ_TYPE_LEVEL_LOW);
- 	irq_set_irq_type(s0, IRQ_TYPE_LEVEL_LOW);
- 
-@@ -597,7 +596,6 @@ int __init db1000_dev_setup(void)
- 		c0, d0, /*s0*/0, 0, 0);
- 
- 	if (twosocks) {
--		irq_set_irq_type(d1, IRQ_TYPE_EDGE_BOTH);
- 		irq_set_irq_type(c1, IRQ_TYPE_LEVEL_LOW);
- 		irq_set_irq_type(s1, IRQ_TYPE_LEVEL_LOW);
- 
-diff --git a/arch/mips/alchemy/devboards/db1550.c b/arch/mips/alchemy/devboards/db1550.c
-index b518f029f5e7..1c01d6eadb08 100644
---- a/arch/mips/alchemy/devboards/db1550.c
-+++ b/arch/mips/alchemy/devboards/db1550.c
-@@ -514,7 +514,7 @@ static void __init db1550_devices(void)
- 		AU1000_PCMCIA_MEM_PHYS_ADDR  + 0x000400000 - 1,
- 		AU1000_PCMCIA_IO_PHYS_ADDR,
- 		AU1000_PCMCIA_IO_PHYS_ADDR   + 0x000010000 - 1,
--		AU1550_GPIO3_INT, AU1550_GPIO0_INT,
-+		AU1550_GPIO3_INT, 0,
- 		/*AU1550_GPIO21_INT*/0, 0, 0);
- 
- 	db1x_register_pcmcia_socket(
-@@ -524,7 +524,7 @@ static void __init db1550_devices(void)
- 		AU1000_PCMCIA_MEM_PHYS_ADDR  + 0x004400000 - 1,
- 		AU1000_PCMCIA_IO_PHYS_ADDR   + 0x004000000,
- 		AU1000_PCMCIA_IO_PHYS_ADDR   + 0x004010000 - 1,
--		AU1550_GPIO5_INT, AU1550_GPIO1_INT,
-+		AU1550_GPIO5_INT, 1,
- 		/*AU1550_GPIO22_INT*/0, 0, 1);
- 
- 	platform_device_register(&db1550_nand_dev);
 diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
 index f6b12790716c..942b8f6bf35b 100644
 --- a/arch/mips/include/asm/kvm_host.h
@@ -1407,76 +254,6 @@ index f6b12790716c..942b8f6bf35b 100644
  void kvm_mips_init_count(struct kvm_vcpu *vcpu);
  int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
  int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
-diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
-index 490cea569d57..5c62065cbf22 100644
---- a/arch/mips/kernel/unaligned.c
-+++ b/arch/mips/kernel/unaligned.c
-@@ -885,7 +885,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
- {
- 	union mips_instruction insn;
- 	unsigned long value;
--	unsigned int res;
-+	unsigned int res, preempted;
- 	unsigned long origpc;
- 	unsigned long orig31;
- 	void __user *fault_addr = NULL;
-@@ -1226,27 +1226,36 @@ static void emulate_load_store_insn(struct pt_regs *regs,
- 			if (!access_ok(VERIFY_READ, addr, sizeof(*fpr)))
- 				goto sigbus;
- 
--			/*
--			 * Disable preemption to avoid a race between copying
--			 * state from userland, migrating to another CPU and
--			 * updating the hardware vector register below.
--			 */
--			preempt_disable();
--
--			res = __copy_from_user_inatomic(fpr, addr,
--							sizeof(*fpr));
--			if (res)
--				goto fault;
--
--			/*
--			 * Update the hardware register if it is in use by the
--			 * task in this quantum, in order to avoid having to
--			 * save & restore the whole vector context.
--			 */
--			if (test_thread_flag(TIF_USEDMSA))
--				write_msa_wr(wd, fpr, df);
-+			do {
-+				/*
-+				 * If we have live MSA context keep track of
-+				 * whether we get preempted in order to avoid
-+				 * the register context we load being clobbered
-+				 * by the live context as it's saved during
-+				 * preemption. If we don't have live context
-+				 * then it can't be saved to clobber the value
-+				 * we load.
-+				 */
-+				preempted = test_thread_flag(TIF_USEDMSA);
-+
-+				res = __copy_from_user_inatomic(fpr, addr,
-+								sizeof(*fpr));
-+				if (res)
-+					goto fault;
- 
--			preempt_enable();
-+				/*
-+				 * Update the hardware register if it is in use
-+				 * by the task in this quantum, in order to
-+				 * avoid having to save & restore the whole
-+				 * vector context.
-+				 */
-+				preempt_disable();
-+				if (test_thread_flag(TIF_USEDMSA)) {
-+					write_msa_wr(wd, fpr, df);
-+					preempted = 0;
-+				}
-+				preempt_enable();
-+			} while (preempted);
- 			break;
- 
- 		case msa_st_op:
 diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
 index b37954cc880d..b8b7860ec1a8 100644
 --- a/arch/mips/kvm/emulate.c
@@ -1633,29432 +410,3431 @@ index ad988000563f..1664589d4746 100644
  		break;
  	case KVM_REG_MIPS_CP0_CAUSE:
  		/*
-diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
-index 14f655cf542e..86ed37671ef5 100644
---- a/arch/parisc/Kconfig
-+++ b/arch/parisc/Kconfig
-@@ -29,6 +29,7 @@ config PARISC
- 	select TTY # Needed for pdc_cons.c
- 	select HAVE_DEBUG_STACKOVERFLOW
- 	select HAVE_ARCH_AUDITSYSCALL
-+	select HAVE_ARCH_SECCOMP_FILTER
- 	select ARCH_NO_COHERENT_DMA_MMAP
- 
- 	help
-diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h
-index 0448a2c8eafb..3387307cc33e 100644
---- a/arch/parisc/include/asm/compat.h
-+++ b/arch/parisc/include/asm/compat.h
-@@ -183,6 +183,13 @@ typedef struct compat_siginfo {
- 			int _band;      /* POLL_IN, POLL_OUT, POLL_MSG */
- 			int _fd;
- 		} _sigpoll;
-+
-+		/* SIGSYS */
-+		struct {
-+			compat_uptr_t _call_addr; /* calling user insn */
-+			int _syscall;	/* triggering system call number */
-+			compat_uint_t _arch;	/* AUDIT_ARCH_* of syscall */
-+		} _sigsys;
- 	} _sifields;
- } compat_siginfo_t;
- 
-diff --git a/arch/parisc/include/asm/syscall.h b/arch/parisc/include/asm/syscall.h
-index a5eba95d87fe..637ce8d6f375 100644
---- a/arch/parisc/include/asm/syscall.h
-+++ b/arch/parisc/include/asm/syscall.h
-@@ -39,6 +39,19 @@ static inline void syscall_get_arguments(struct task_struct *tsk,
+diff --git a/arch/x86/kernel/cpu/perf_event_intel_pt.c b/arch/x86/kernel/cpu/perf_event_intel_pt.c
+index c0bbd1033b7c..a5286d0bbb43 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel_pt.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_pt.c
+@@ -695,6 +695,7 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
+ 
+ 	/* clear STOP and INT from current entry */
+ 	buf->topa_index[buf->stop_pos]->stop = 0;
++	buf->topa_index[buf->stop_pos]->intr = 0;
+ 	buf->topa_index[buf->intr_pos]->intr = 0;
+ 
+ 	/* how many pages till the STOP marker */
+@@ -719,6 +720,7 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
+ 	buf->intr_pos = idx;
+ 
+ 	buf->topa_index[buf->stop_pos]->stop = 1;
++	buf->topa_index[buf->stop_pos]->intr = 1;
+ 	buf->topa_index[buf->intr_pos]->intr = 1;
+ 
+ 	return 0;
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 6525e926f566..2e1fd586b895 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -509,6 +509,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+ 			do_cpuid_1_ent(&entry[i], function, idx);
+ 			if (idx == 1) {
+ 				entry[i].eax &= kvm_supported_word10_x86_features;
++				cpuid_mask(&entry[i].eax, 10);
+ 				entry[i].ebx = 0;
+ 				if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
+ 					entry[i].ebx =
+diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
+index 3f8c732117ec..c146f3c262c3 100644
+--- a/arch/x86/kvm/mtrr.c
++++ b/arch/x86/kvm/mtrr.c
+@@ -44,8 +44,6 @@ static bool msr_mtrr_valid(unsigned msr)
+ 	case MSR_MTRRdefType:
+ 	case MSR_IA32_CR_PAT:
+ 		return true;
+-	case 0x2f8:
+-		return true;
  	}
+ 	return false;
  }
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 539062e24de1..60946a5d3812 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -5021,8 +5021,8 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
+ 		vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
  
-+static inline void syscall_set_return_value(struct task_struct *task,
-+					    struct pt_regs *regs,
-+					    int error, long val)
-+{
-+	regs->gr[28] = error ? error : val;
-+}
-+
-+static inline void syscall_rollback(struct task_struct *task,
-+				    struct pt_regs *regs)
-+{
-+	/* do nothing */
-+}
-+
- static inline int syscall_get_arch(void)
- {
- 	int arch = AUDIT_ARCH_PARISC;
-diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
-index 0abdd4c607ed..1960b87c1c8b 100644
---- a/arch/parisc/include/asm/uaccess.h
-+++ b/arch/parisc/include/asm/uaccess.h
-@@ -76,6 +76,7 @@ struct exception_table_entry {
-  */
- struct exception_data {
- 	unsigned long fault_ip;
-+	unsigned long fault_gp;
- 	unsigned long fault_space;
- 	unsigned long fault_addr;
- };
-diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
-index d2f62570a7b1..78d30d2ea2d8 100644
---- a/arch/parisc/kernel/asm-offsets.c
-+++ b/arch/parisc/kernel/asm-offsets.c
-@@ -299,6 +299,7 @@ int main(void)
+ 	cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
+-	vmx_set_cr0(vcpu, cr0); /* enter rmode */
+ 	vmx->vcpu.arch.cr0 = cr0;
++	vmx_set_cr0(vcpu, cr0); /* enter rmode */
+ 	vmx_set_cr4(vcpu, 0);
+ 	vmx_set_efer(vcpu, 0);
+ 	vmx_fpu_activate(vcpu);
+diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
+index beac4dfdade6..349b8ce92bf2 100644
+--- a/arch/x86/pci/xen.c
++++ b/arch/x86/pci/xen.c
+@@ -491,8 +491,11 @@ int __init pci_xen_initial_domain(void)
  #endif
- 	BLANK();
- 	DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
-+	DEFINE(EXCDATA_GP, offsetof(struct exception_data, fault_gp));
- 	DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
- 	DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr));
- 	BLANK();
-diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
-index 568b2c61ea02..3cad8aadc69e 100644
---- a/arch/parisc/kernel/parisc_ksyms.c
-+++ b/arch/parisc/kernel/parisc_ksyms.c
-@@ -47,11 +47,11 @@ EXPORT_SYMBOL(__cmpxchg_u64);
- EXPORT_SYMBOL(lclear_user);
- EXPORT_SYMBOL(lstrnlen_user);
- 
--/* Global fixups */
--extern void fixup_get_user_skip_1(void);
--extern void fixup_get_user_skip_2(void);
--extern void fixup_put_user_skip_1(void);
--extern void fixup_put_user_skip_2(void);
-+/* Global fixups - defined as int to avoid creation of function pointers */
-+extern int fixup_get_user_skip_1;
-+extern int fixup_get_user_skip_2;
-+extern int fixup_put_user_skip_1;
-+extern int fixup_put_user_skip_2;
- EXPORT_SYMBOL(fixup_get_user_skip_1);
- EXPORT_SYMBOL(fixup_get_user_skip_2);
- EXPORT_SYMBOL(fixup_put_user_skip_1);
-diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
-index ce0b2b4075c7..8fb81a391599 100644
---- a/arch/parisc/kernel/ptrace.c
-+++ b/arch/parisc/kernel/ptrace.c
-@@ -270,7 +270,8 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
- long do_syscall_trace_enter(struct pt_regs *regs)
- {
- 	/* Do the secure computing check first. */
--	secure_computing_strict(regs->gr[20]);
-+	if (secure_computing() == -1)
-+		return -1;
- 
- 	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
- 	    tracehook_report_syscall_entry(regs)) {
-@@ -296,7 +297,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
- 			regs->gr[23] & 0xffffffff);
- 
- out:
--	return regs->gr[20];
+ 	__acpi_register_gsi = acpi_register_gsi_xen;
+ 	__acpi_unregister_gsi = NULL;
+-	/* Pre-allocate legacy irqs */
+-	for (irq = 0; irq < nr_legacy_irqs(); irq++) {
 +	/*
-+	 * Sign extend the syscall number to 64bit since it may have been
-+	 * modified by a compat ptrace call
++	 * Pre-allocate the legacy IRQs.  Use NR_LEGACY_IRQS here
++	 * because we don't have a PIC and thus nr_legacy_irqs() is zero.
 +	 */
-+	return (int) ((u32) regs->gr[20]);
- }
- 
- void do_syscall_trace_exit(struct pt_regs *regs)
-diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c
-index 984abbee71ca..c342b2e17492 100644
---- a/arch/parisc/kernel/signal32.c
-+++ b/arch/parisc/kernel/signal32.c
-@@ -371,6 +371,11 @@ copy_siginfo_to_user32 (compat_siginfo_t __user *to, const siginfo_t *from)
- 			val = (compat_int_t)from->si_int;
- 			err |= __put_user(val, &to->si_int);
- 			break;
-+		case __SI_SYS >> 16:
-+			err |= __put_user(ptr_to_compat(from->si_call_addr), &to->si_call_addr);
-+			err |= __put_user(from->si_syscall, &to->si_syscall);
-+			err |= __put_user(from->si_arch, &to->si_arch);
-+			break;
- 		}
- 	}
- 	return err;
-diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
-index fbafa0d0e2bf..57b4836b7ecd 100644
---- a/arch/parisc/kernel/syscall.S
-+++ b/arch/parisc/kernel/syscall.S
-@@ -329,6 +329,7 @@ tracesys_next:
- 
- 	ldo     -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1      /* get task ptr */
- 	LDREG	TI_TASK(%r1), %r1
-+	LDREG   TASK_PT_GR28(%r1), %r28		/* Restore return value */
- 	LDREG   TASK_PT_GR26(%r1), %r26		/* Restore the users args */
- 	LDREG   TASK_PT_GR25(%r1), %r25
- 	LDREG   TASK_PT_GR24(%r1), %r24
-@@ -342,7 +343,8 @@ tracesys_next:
- 	stw     %r21, -56(%r30)                 /* 6th argument */
- #endif
++	for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
+ 		int trigger, polarity;
  
--	comiclr,>>=	__NR_Linux_syscalls, %r20, %r0
-+	cmpib,COND(=),n -1,%r20,tracesys_exit /* seccomp may have returned -1 */
-+	comiclr,>>	__NR_Linux_syscalls, %r20, %r0
- 	b,n	.Ltracesys_nosys
- 
- 	LDREGX  %r20(%r19), %r19
-diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
-index 553b09855cfd..77e2262c97f6 100644
---- a/arch/parisc/kernel/traps.c
-+++ b/arch/parisc/kernel/traps.c
-@@ -798,6 +798,9 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
- 
- 	    if (fault_space == 0 && !faulthandler_disabled())
- 	    {
-+		/* Clean up and return if in exception table. */
-+		if (fixup_exception(regs))
-+			return;
- 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
- 		parisc_terminate("Kernel Fault", regs, code, fault_address);
- 	    }
-diff --git a/arch/parisc/lib/fixup.S b/arch/parisc/lib/fixup.S
-index 536ef66bb94b..1052b747e011 100644
---- a/arch/parisc/lib/fixup.S
-+++ b/arch/parisc/lib/fixup.S
-@@ -26,6 +26,7 @@
+ 		if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
+diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
+index cd2c3d6d40e0..993fd31394c8 100644
+--- a/drivers/acpi/device_pm.c
++++ b/drivers/acpi/device_pm.c
+@@ -319,6 +319,7 @@ int acpi_device_fix_up_power(struct acpi_device *device)
  
- #ifdef CONFIG_SMP
- 	.macro  get_fault_ip t1 t2
-+	loadgp
- 	addil LT%__per_cpu_offset,%r27
- 	LDREG RT%__per_cpu_offset(%r1),\t1
- 	/* t2 = smp_processor_id() */
-@@ -40,14 +41,19 @@
- 	LDREG RT%exception_data(%r1),\t1
- 	/* t1 = this_cpu_ptr(&exception_data) */
- 	add,l \t1,\t2,\t1
-+	/* %r27 = t1->fault_gp - restore gp */
-+	LDREG EXCDATA_GP(\t1), %r27
- 	/* t1 = t1->fault_ip */
- 	LDREG EXCDATA_IP(\t1), \t1
- 	.endm
- #else
- 	.macro  get_fault_ip t1 t2
-+	loadgp
- 	/* t1 = this_cpu_ptr(&exception_data) */
- 	addil LT%exception_data,%r27
- 	LDREG RT%exception_data(%r1),\t2
-+	/* %r27 = t2->fault_gp - restore gp */
-+	LDREG EXCDATA_GP(\t2), %r27
- 	/* t1 = t2->fault_ip */
- 	LDREG EXCDATA_IP(\t2), \t1
- 	.endm
-diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
-index a762864ec92e..f9064449908a 100644
---- a/arch/parisc/mm/fault.c
-+++ b/arch/parisc/mm/fault.c
-@@ -151,6 +151,7 @@ int fixup_exception(struct pt_regs *regs)
- 		struct exception_data *d;
- 		d = this_cpu_ptr(&exception_data);
- 		d->fault_ip = regs->iaoq[0];
-+		d->fault_gp = regs->gr[27];
- 		d->fault_space = regs->isr;
- 		d->fault_addr = regs->ior;
- 
-diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
-index e4396a7d0f7c..4afe66aa1400 100644
---- a/arch/powerpc/include/asm/word-at-a-time.h
-+++ b/arch/powerpc/include/asm/word-at-a-time.h
-@@ -82,7 +82,7 @@ static inline unsigned long create_zero_mask(unsigned long bits)
- 	    "andc	%1,%1,%2\n\t"
- 	    "popcntd	%0,%1"
- 		: "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask)
--		: "r" (bits));
-+		: "b" (bits));
- 
- 	return leading_zero_bits;
+ 	return ret;
  }
-diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
-index 8dde19962a5b..f63c96cd3608 100644
---- a/arch/powerpc/include/uapi/asm/cputable.h
-+++ b/arch/powerpc/include/uapi/asm/cputable.h
-@@ -31,6 +31,7 @@
- #define PPC_FEATURE_PSERIES_PERFMON_COMPAT \
- 					0x00000040
- 
-+/* Reserved - do not use		0x00000004 */
- #define PPC_FEATURE_TRUE_LE		0x00000002
- #define PPC_FEATURE_PPC_LE		0x00000001
- 
-diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
-index 3c5736e52a14..54ed9c7663e6 100644
---- a/arch/powerpc/kernel/process.c
-+++ b/arch/powerpc/kernel/process.c
-@@ -854,7 +854,7 @@ void restore_tm_state(struct pt_regs *regs)
- static inline void save_sprs(struct thread_struct *t)
- {
- #ifdef CONFIG_ALTIVEC
--	if (cpu_has_feature(cpu_has_feature(CPU_FTR_ALTIVEC)))
-+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
- 		t->vrsave = mfspr(SPRN_VRSAVE);
- #endif
- #ifdef CONFIG_PPC_BOOK3S_64
-diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
-index 7030b035905d..a15fe1d4e84a 100644
---- a/arch/powerpc/kernel/prom.c
-+++ b/arch/powerpc/kernel/prom.c
-@@ -148,23 +148,25 @@ static struct ibm_pa_feature {
- 	unsigned long	cpu_features;	/* CPU_FTR_xxx bit */
- 	unsigned long	mmu_features;	/* MMU_FTR_xxx bit */
- 	unsigned int	cpu_user_ftrs;	/* PPC_FEATURE_xxx bit */
-+	unsigned int	cpu_user_ftrs2;	/* PPC_FEATURE2_xxx bit */
- 	unsigned char	pabyte;		/* byte number in ibm,pa-features */
- 	unsigned char	pabit;		/* bit number (big-endian) */
- 	unsigned char	invert;		/* if 1, pa bit set => clear feature */
- } ibm_pa_features[] __initdata = {
--	{0, 0, PPC_FEATURE_HAS_MMU,	0, 0, 0},
--	{0, 0, PPC_FEATURE_HAS_FPU,	0, 1, 0},
--	{CPU_FTR_CTRL, 0, 0,		0, 3, 0},
--	{CPU_FTR_NOEXECUTE, 0, 0,	0, 6, 0},
--	{CPU_FTR_NODSISRALIGN, 0, 0,	1, 1, 1},
--	{0, MMU_FTR_CI_LARGE_PAGE, 0,	1, 2, 0},
--	{CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
-+	{0, 0, PPC_FEATURE_HAS_MMU, 0,		0, 0, 0},
-+	{0, 0, PPC_FEATURE_HAS_FPU, 0,		0, 1, 0},
-+	{CPU_FTR_CTRL, 0, 0, 0,			0, 3, 0},
-+	{CPU_FTR_NOEXECUTE, 0, 0, 0,		0, 6, 0},
-+	{CPU_FTR_NODSISRALIGN, 0, 0, 0,		1, 1, 1},
-+	{0, MMU_FTR_CI_LARGE_PAGE, 0, 0,		1, 2, 0},
-+	{CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 0, 5, 0, 0},
- 	/*
--	 * If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n),
--	 * we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP
--	 * which is 0 if the kernel doesn't support TM.
-+	 * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
-+	 * we don't want to turn on TM here, so we use the *_COMP versions
-+	 * which are 0 if the kernel doesn't support TM.
- 	 */
--	{CPU_FTR_TM_COMP, 0, 0,		22, 0, 0},
-+	{CPU_FTR_TM_COMP, 0, 0,
-+	 PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
- };
++EXPORT_SYMBOL_GPL(acpi_device_fix_up_power);
  
- static void __init scan_features(unsigned long node, const unsigned char *ftrs,
-@@ -195,10 +197,12 @@ static void __init scan_features(unsigned long node, const unsigned char *ftrs,
- 		if (bit ^ fp->invert) {
- 			cur_cpu_spec->cpu_features |= fp->cpu_features;
- 			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
-+			cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
- 			cur_cpu_spec->mmu_features |= fp->mmu_features;
- 		} else {
- 			cur_cpu_spec->cpu_features &= ~fp->cpu_features;
- 			cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
-+			cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
- 			cur_cpu_spec->mmu_features &= ~fp->mmu_features;
- 		}
- 	}
-diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
-index 744e24bcb85c..4a811ca7ac9d 100644
---- a/arch/powerpc/mm/hugetlbpage.c
-+++ b/arch/powerpc/mm/hugetlbpage.c
-@@ -414,13 +414,13 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
+ int acpi_device_update_power(struct acpi_device *device, int *state_p)
  {
- 	struct hugepd_freelist **batchp;
+diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
+index 67da6fb72274..c45fdc49ff28 100644
+--- a/drivers/acpi/osl.c
++++ b/drivers/acpi/osl.c
+@@ -135,7 +135,7 @@ static struct osi_linux {
+ 	unsigned int	enable:1;
+ 	unsigned int	dmi:1;
+ 	unsigned int	cmdline:1;
+-	unsigned int	default_disabling:1;
++	u8		default_disabling;
+ } osi_linux = {0, 0, 0, 0};
+ 
+ static u32 acpi_osi_handler(acpi_string interface, u32 supported)
+@@ -1713,10 +1713,13 @@ void __init acpi_osi_setup(char *str)
+ 	if (*str == '!') {
+ 		str++;
+ 		if (*str == '\0') {
+-			osi_linux.default_disabling = 1;
++			/* Do not override acpi_osi=!* */
++			if (!osi_linux.default_disabling)
++				osi_linux.default_disabling =
++					ACPI_DISABLE_ALL_VENDOR_STRINGS;
+ 			return;
+ 		} else if (*str == '*') {
+-			acpi_update_interfaces(ACPI_DISABLE_ALL_STRINGS);
++			osi_linux.default_disabling = ACPI_DISABLE_ALL_STRINGS;
+ 			for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
+ 				osi = &osi_setup_entries[i];
+ 				osi->enable = false;
+@@ -1789,10 +1792,13 @@ static void __init acpi_osi_setup_late(void)
+ 	acpi_status status;
  
--	batchp = this_cpu_ptr(&hugepd_freelist_cur);
-+	batchp = &get_cpu_var(hugepd_freelist_cur);
+ 	if (osi_linux.default_disabling) {
+-		status = acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
++		status = acpi_update_interfaces(osi_linux.default_disabling);
  
- 	if (atomic_read(&tlb->mm->mm_users) < 2 ||
- 	    cpumask_equal(mm_cpumask(tlb->mm),
- 			  cpumask_of(smp_processor_id()))) {
- 		kmem_cache_free(hugepte_cache, hugepte);
--        put_cpu_var(hugepd_freelist_cur);
-+		put_cpu_var(hugepd_freelist_cur);
- 		return;
+ 		if (ACPI_SUCCESS(status))
+-			printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors\n");
++			printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors%s\n",
++				osi_linux.default_disabling ==
++				ACPI_DISABLE_ALL_STRINGS ?
++				" and feature groups" : "");
  	}
  
-diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
-index c873e682b67f..6dafabb6ae1a 100644
---- a/arch/s390/include/asm/pci.h
-+++ b/arch/s390/include/asm/pci.h
-@@ -45,7 +45,8 @@ struct zpci_fmb {
- 	u64 rpcit_ops;
- 	u64 dma_rbytes;
- 	u64 dma_wbytes;
--} __packed __aligned(16);
-+	u64 pad[2];
-+} __packed __aligned(128);
- 
- enum zpci_state {
- 	ZPCI_FN_STATE_RESERVED,
-diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
-index cd5a191381b9..c920b81be5bb 100644
---- a/arch/s390/kernel/entry.S
-+++ b/arch/s390/kernel/entry.S
-@@ -1199,114 +1199,12 @@ cleanup_critical:
- 	.quad	.Lpsw_idle_lpsw
- 
- .Lcleanup_save_fpu_regs:
--	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
--	bor	%r14
--	clg	%r9,BASED(.Lcleanup_save_fpu_regs_done)
--	jhe	5f
--	clg	%r9,BASED(.Lcleanup_save_fpu_regs_fp)
--	jhe	4f
--	clg	%r9,BASED(.Lcleanup_save_fpu_regs_vx_high)
--	jhe	3f
--	clg	%r9,BASED(.Lcleanup_save_fpu_regs_vx_low)
--	jhe	2f
--	clg	%r9,BASED(.Lcleanup_save_fpu_fpc_end)
--	jhe	1f
--	lg	%r2,__LC_CURRENT
--	aghi	%r2,__TASK_thread
--0:	# Store floating-point controls
--	stfpc	__THREAD_FPU_fpc(%r2)
--1:	# Load register save area and check if VX is active
--	lg	%r3,__THREAD_FPU_regs(%r2)
--	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
--	jz	4f			  # no VX -> store FP regs
--2:	# Store vector registers (V0-V15)
--	VSTM	%v0,%v15,0,%r3		  # vstm 0,15,0(3)
--3:	# Store vector registers (V16-V31)
--	VSTM	%v16,%v31,256,%r3	  # vstm 16,31,256(3)
--	j	5f			  # -> done, set CIF_FPU flag
--4:	# Store floating-point registers
--	std	0,0(%r3)
--	std	1,8(%r3)
--	std	2,16(%r3)
--	std	3,24(%r3)
--	std	4,32(%r3)
--	std	5,40(%r3)
--	std	6,48(%r3)
--	std	7,56(%r3)
--	std	8,64(%r3)
--	std	9,72(%r3)
--	std	10,80(%r3)
--	std	11,88(%r3)
--	std	12,96(%r3)
--	std	13,104(%r3)
--	std	14,112(%r3)
--	std	15,120(%r3)
--5:	# Set CIF_FPU flag
--	oi	__LC_CPU_FLAGS+7,_CIF_FPU
--	lg	%r9,48(%r11)		# return from save_fpu_regs
-+	larl	%r9,save_fpu_regs
- 	br	%r14
--.Lcleanup_save_fpu_fpc_end:
--	.quad	.Lsave_fpu_regs_fpc_end
--.Lcleanup_save_fpu_regs_vx_low:
--	.quad	.Lsave_fpu_regs_vx_low
--.Lcleanup_save_fpu_regs_vx_high:
--	.quad	.Lsave_fpu_regs_vx_high
--.Lcleanup_save_fpu_regs_fp:
--	.quad	.Lsave_fpu_regs_fp
--.Lcleanup_save_fpu_regs_done:
--	.quad	.Lsave_fpu_regs_done
- 
- .Lcleanup_load_fpu_regs:
--	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
--	bnor	%r14
--	clg	%r9,BASED(.Lcleanup_load_fpu_regs_done)
--	jhe	1f
--	clg	%r9,BASED(.Lcleanup_load_fpu_regs_fp)
--	jhe	2f
--	clg	%r9,BASED(.Lcleanup_load_fpu_regs_vx_high)
--	jhe	3f
--	clg	%r9,BASED(.Lcleanup_load_fpu_regs_vx)
--	jhe	4f
--	lg	%r4,__LC_CURRENT
--	aghi	%r4,__TASK_thread
--	lfpc	__THREAD_FPU_fpc(%r4)
--	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
--	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
--	jz	2f				# -> no VX, load FP regs
--4:	# Load V0 ..V15 registers
--	VLM	%v0,%v15,0,%r4
--3:	# Load V16..V31 registers
--	VLM	%v16,%v31,256,%r4
--	j	1f
--2:	# Load floating-point registers
--	ld	0,0(%r4)
--	ld	1,8(%r4)
--	ld	2,16(%r4)
--	ld	3,24(%r4)
--	ld	4,32(%r4)
--	ld	5,40(%r4)
--	ld	6,48(%r4)
--	ld	7,56(%r4)
--	ld	8,64(%r4)
--	ld	9,72(%r4)
--	ld	10,80(%r4)
--	ld	11,88(%r4)
--	ld	12,96(%r4)
--	ld	13,104(%r4)
--	ld	14,112(%r4)
--	ld	15,120(%r4)
--1:	# Clear CIF_FPU bit
--	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU
--	lg	%r9,48(%r11)		# return from load_fpu_regs
-+	larl	%r9,load_fpu_regs
- 	br	%r14
--.Lcleanup_load_fpu_regs_vx:
--	.quad	.Lload_fpu_regs_vx
--.Lcleanup_load_fpu_regs_vx_high:
--	.quad	.Lload_fpu_regs_vx_high
--.Lcleanup_load_fpu_regs_fp:
--	.quad	.Lload_fpu_regs_fp
--.Lcleanup_load_fpu_regs_done:
--	.quad	.Lload_fpu_regs_done
+ 	for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
+diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
+index 80783dcb7f57..aba31210c802 100644
+--- a/drivers/bluetooth/hci_vhci.c
++++ b/drivers/bluetooth/hci_vhci.c
+@@ -50,6 +50,7 @@ struct vhci_data {
+ 	wait_queue_head_t read_wait;
+ 	struct sk_buff_head readq;
  
- /*
-  * Integer constants
-diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
-index 9220db5c996a..93fc63ef6e95 100644
---- a/arch/s390/kernel/setup.c
-+++ b/arch/s390/kernel/setup.c
-@@ -327,6 +327,7 @@ static void __init setup_lowcore(void)
- 		+ PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
- 	lc->current_task = (unsigned long) init_thread_union.thread_info.task;
- 	lc->thread_info = (unsigned long) &init_thread_union;
-+	lc->lpp = LPP_MAGIC;
- 	lc->machine_flags = S390_lowcore.machine_flags;
- 	lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
- 	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
-diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
-index 13dab0c1645c..3776aca22082 100644
---- a/arch/s390/mm/gup.c
-+++ b/arch/s390/mm/gup.c
-@@ -20,9 +20,9 @@
- static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
- 		unsigned long end, int write, struct page **pages, int *nr)
- {
-+	struct page *head, *page;
- 	unsigned long mask;
- 	pte_t *ptep, pte;
--	struct page *page;
- 
- 	mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
- 
-@@ -37,12 +37,14 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
- 			return 0;
- 		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
- 		page = pte_page(pte);
--		if (!page_cache_get_speculative(page))
-+		head = compound_head(page);
-+		if (!page_cache_get_speculative(head))
- 			return 0;
- 		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
--			put_page(page);
-+			put_page(head);
- 			return 0;
- 		}
-+		VM_BUG_ON_PAGE(compound_head(page) != head, page);
- 		pages[*nr] = page;
- 		(*nr)++;
++	struct mutex open_mutex;
+ 	struct delayed_work open_timeout;
+ };
  
-diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
-index 8f19c8f9d660..8f75edc998ff 100644
---- a/arch/s390/pci/pci.c
-+++ b/arch/s390/pci/pci.c
-@@ -864,8 +864,11 @@ static inline int barsize(u8 size)
+@@ -87,12 +88,15 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
+ 	return 0;
+ }
  
- static int zpci_mem_init(void)
+-static int vhci_create_device(struct vhci_data *data, __u8 opcode)
++static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
  {
-+	BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
-+		     __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));
-+
- 	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
--				16, 0, NULL);
-+					   __alignof__(struct zpci_fmb), 0, NULL);
- 	if (!zdev_fmb_cache)
- 		goto error_fmb;
- 
-diff --git a/arch/sh/mm/kmap.c b/arch/sh/mm/kmap.c
-index ec29e14ec5a8..bf25d7c79a2d 100644
---- a/arch/sh/mm/kmap.c
-+++ b/arch/sh/mm/kmap.c
-@@ -36,6 +36,7 @@ void *kmap_coherent(struct page *page, unsigned long addr)
- 
- 	BUG_ON(!test_bit(PG_dcache_clean, &page->flags));
- 
-+	preempt_disable();
- 	pagefault_disable();
+ 	struct hci_dev *hdev;
+ 	struct sk_buff *skb;
+ 	__u8 dev_type;
  
- 	idx = FIX_CMAP_END -
-@@ -64,4 +65,5 @@ void kunmap_coherent(void *kvaddr)
- 	}
- 
- 	pagefault_enable();
-+	preempt_enable();
- }
-diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
-index b821b13d343a..8a6b57108ac2 100644
---- a/arch/um/drivers/mconsole_kern.c
-+++ b/arch/um/drivers/mconsole_kern.c
-@@ -133,7 +133,7 @@ void mconsole_proc(struct mc_request *req)
- 	ptr += strlen("proc");
- 	ptr = skip_spaces(ptr);
- 
--	file = file_open_root(mnt->mnt_root, mnt, ptr, O_RDONLY);
-+	file = file_open_root(mnt->mnt_root, mnt, ptr, O_RDONLY, 0);
- 	if (IS_ERR(file)) {
- 		mconsole_reply(req, "Failed to open file", 1, 0);
- 		printk(KERN_ERR "open /proc/%s: %ld\n", ptr, PTR_ERR(file));
-diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index c46662f64c39..3bf45a0cd69e 100644
---- a/arch/x86/Kconfig
-+++ b/arch/x86/Kconfig
-@@ -1160,22 +1160,23 @@ config MICROCODE
- 	bool "CPU microcode loading support"
- 	default y
- 	depends on CPU_SUP_AMD || CPU_SUP_INTEL
--	depends on BLK_DEV_INITRD
- 	select FW_LOADER
- 	---help---
--
- 	  If you say Y here, you will be able to update the microcode on
--	  certain Intel and AMD processors. The Intel support is for the
--	  IA32 family, e.g. Pentium Pro, Pentium II, Pentium III, Pentium 4,
--	  Xeon etc. The AMD support is for families 0x10 and later. You will
--	  obviously need the actual microcode binary data itself which is not
--	  shipped with the Linux kernel.
--
--	  This option selects the general module only, you need to select
--	  at least one vendor specific module as well.
--
--	  To compile this driver as a module, choose M here: the module
--	  will be called microcode.
-+	  Intel and AMD processors. The Intel support is for the IA32 family,
-+	  e.g. Pentium Pro, Pentium II, Pentium III, Pentium 4, Xeon etc. The
-+	  AMD support is for families 0x10 and later. You will obviously need
-+	  the actual microcode binary data itself which is not shipped with
-+	  the Linux kernel.
-+
-+	  The preferred method to load microcode from a detached initrd is described
-+	  in Documentation/x86/early-microcode.txt. For that you need to enable
-+	  CONFIG_BLK_DEV_INITRD in order for the loader to be able to scan the
-+	  initrd for microcode blobs.
-+
-+	  In addition, you can build-in the microcode into the kernel. For that you
-+	  need to enable FIRMWARE_IN_KERNEL and add the vendor-supplied microcode
-+	  to the CONFIG_EXTRA_FIRMWARE config option.
- 
- config MICROCODE_INTEL
- 	bool "Intel microcode loading support"
-diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c
-index a841e9765bd6..8381c09d2870 100644
---- a/arch/x86/crypto/sha-mb/sha1_mb.c
-+++ b/arch/x86/crypto/sha-mb/sha1_mb.c
-@@ -453,10 +453,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
- 
- 			req = cast_mcryptd_ctx_to_req(req_ctx);
- 			if (irqs_disabled())
--				rctx->complete(&req->base, ret);
-+				req_ctx->complete(&req->base, ret);
- 			else {
- 				local_bh_disable();
--				rctx->complete(&req->base, ret);
-+				req_ctx->complete(&req->base, ret);
- 				local_bh_enable();
- 			}
- 		}
-diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
-index 03663740c866..1a4477cedc49 100644
---- a/arch/x86/entry/common.c
-+++ b/arch/x86/entry/common.c
-@@ -268,6 +268,7 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
- /* Called with IRQs disabled. */
- __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
- {
-+	struct thread_info *ti = pt_regs_to_thread_info(regs);
- 	u32 cached_flags;
- 
- 	if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
-@@ -275,12 +276,22 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
- 
- 	lockdep_sys_exit();
- 
--	cached_flags =
--		READ_ONCE(pt_regs_to_thread_info(regs)->flags);
-+	cached_flags = READ_ONCE(ti->flags);
- 
- 	if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
- 		exit_to_usermode_loop(regs, cached_flags);
- 
-+#ifdef CONFIG_COMPAT
-+	/*
-+	 * Compat syscalls set TS_COMPAT.  Make sure we clear it before
-+	 * returning to user mode.  We need to clear it *after* signal
-+	 * handling, because syscall restart has a fixup for compat
-+	 * syscalls.  The fixup is exercised by the ptrace_syscall_32
-+	 * selftest.
-+	 */
-+	ti->status &= ~TS_COMPAT;
-+#endif
++	if (data->hdev)
++		return -EBADFD;
 +
- 	user_enter();
- }
- 
-@@ -332,14 +343,6 @@ __visible inline void syscall_return_slowpath(struct pt_regs *regs)
- 	if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
- 		syscall_slow_exit_work(regs, cached_flags);
- 
--#ifdef CONFIG_COMPAT
--	/*
--	 * Compat syscalls set TS_COMPAT.  Make sure we clear it before
--	 * returning to user mode.
--	 */
--	ti->status &= ~TS_COMPAT;
--#endif
--
- 	local_irq_disable();
- 	prepare_exit_to_usermode(regs);
- }
-diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
-index c80f6b6f3da2..e8c4fba52d3d 100644
---- a/arch/x86/include/asm/apic.h
-+++ b/arch/x86/include/asm/apic.h
-@@ -644,8 +644,8 @@ static inline void entering_irq(void)
+ 	/* bits 0-1 are dev_type (BR/EDR or AMP) */
+ 	dev_type = opcode & 0x03;
  
- static inline void entering_ack_irq(void)
- {
--	ack_APIC_irq();
- 	entering_irq();
-+	ack_APIC_irq();
+@@ -151,6 +155,17 @@ static int vhci_create_device(struct vhci_data *data, __u8 opcode)
+ 	return 0;
  }
  
- static inline void ipi_entering_ack_irq(void)
-diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
-index f8a29d2c97b0..e6a8613fbfb0 100644
---- a/arch/x86/include/asm/hugetlb.h
-+++ b/arch/x86/include/asm/hugetlb.h
-@@ -4,6 +4,7 @@
- #include <asm/page.h>
- #include <asm-generic/hugetlb.h>
- 
-+#define hugepages_supported() cpu_has_pse
- 
- static inline int is_hugepage_only_range(struct mm_struct *mm,
- 					 unsigned long addr,
-diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
-index 1815b736269d..84b3d194a958 100644
---- a/arch/x86/include/asm/hw_irq.h
-+++ b/arch/x86/include/asm/hw_irq.h
-@@ -141,6 +141,7 @@ struct irq_alloc_info {
- struct irq_cfg {
- 	unsigned int		dest_apicid;
- 	u8			vector;
-+	u8			old_vector;
- };
- 
- extern struct irq_cfg *irq_cfg(unsigned int irq);
-diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
-index 44adbb819041..f8dba2097c40 100644
---- a/arch/x86/include/asm/kvm_host.h
-+++ b/arch/x86/include/asm/kvm_host.h
-@@ -42,7 +42,7 @@
- 
- #define KVM_PIO_PAGE_OFFSET 1
- #define KVM_COALESCED_MMIO_PAGE_OFFSET 2
--#define KVM_HALT_POLL_NS_DEFAULT 500000
-+#define KVM_HALT_POLL_NS_DEFAULT 400000
- 
- #define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS
- 
-diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
-index 1e1b07a5a738..9d3a96c4da78 100644
---- a/arch/x86/include/asm/microcode.h
-+++ b/arch/x86/include/asm/microcode.h
-@@ -3,6 +3,7 @@
- 
- #include <asm/cpu.h>
- #include <linux/earlycpio.h>
-+#include <linux/initrd.h>
- 
- #define native_rdmsr(msr, val1, val2)			\
- do {							\
-@@ -143,4 +144,29 @@ static inline void reload_early_microcode(void)			{ }
- static inline bool
- get_builtin_firmware(struct cpio_data *cd, const char *name)	{ return false; }
- #endif
-+
-+static inline unsigned long get_initrd_start(void)
++static int vhci_create_device(struct vhci_data *data, __u8 opcode)
 +{
-+#ifdef CONFIG_BLK_DEV_INITRD
-+	return initrd_start;
-+#else
-+	return 0;
-+#endif
-+}
++	int err;
 +
-+static inline unsigned long get_initrd_start_addr(void)
-+{
-+#ifdef CONFIG_BLK_DEV_INITRD
-+#ifdef CONFIG_X86_32
-+	unsigned long *initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
++	mutex_lock(&data->open_mutex);
++	err = __vhci_create_device(data, opcode);
++	mutex_unlock(&data->open_mutex);
 +
-+	return (unsigned long)__pa_nodebug(*initrd_start_p);
-+#else
-+	return get_initrd_start();
-+#endif
-+#else /* CONFIG_BLK_DEV_INITRD */
-+	return 0;
-+#endif
++	return err;
 +}
 +
- #endif /* _ASM_X86_MICROCODE_H */
-diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
-index 7bcb861a04e5..5a2ed3ed2f26 100644
---- a/arch/x86/include/asm/perf_event.h
-+++ b/arch/x86/include/asm/perf_event.h
-@@ -165,6 +165,7 @@ struct x86_pmu_capability {
- #define GLOBAL_STATUS_ASIF				BIT_ULL(60)
- #define GLOBAL_STATUS_COUNTERS_FROZEN			BIT_ULL(59)
- #define GLOBAL_STATUS_LBRS_FROZEN			BIT_ULL(58)
-+#define GLOBAL_STATUS_TRACE_TOPAPMI			BIT_ULL(55)
+ static inline ssize_t vhci_get_user(struct vhci_data *data,
+ 				    struct iov_iter *from)
+ {
+@@ -189,11 +204,6 @@ static inline ssize_t vhci_get_user(struct vhci_data *data,
+ 		break;
  
- /*
-  * IBS cpuid feature detection
-diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
-index 8b2d4bea9962..39171b3646bb 100644
---- a/arch/x86/include/asm/xen/hypervisor.h
-+++ b/arch/x86/include/asm/xen/hypervisor.h
-@@ -62,4 +62,6 @@ void xen_arch_register_cpu(int num);
- void xen_arch_unregister_cpu(int num);
- #endif
+ 	case HCI_VENDOR_PKT:
+-		if (data->hdev) {
+-			kfree_skb(skb);
+-			return -EBADFD;
+-		}
+-
+ 		cancel_delayed_work_sync(&data->open_timeout);
  
-+extern void xen_set_iopl_mask(unsigned mask);
-+
- #endif /* _ASM_X86_XEN_HYPERVISOR_H */
-diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
-index 3b670df4ba7b..ef495511f019 100644
---- a/arch/x86/kernel/apic/vector.c
-+++ b/arch/x86/kernel/apic/vector.c
-@@ -213,6 +213,7 @@ update:
- 	 */
- 	cpumask_and(d->old_domain, d->old_domain, cpu_online_mask);
- 	d->move_in_progress = !cpumask_empty(d->old_domain);
-+	d->cfg.old_vector = d->move_in_progress ? d->cfg.vector : 0;
- 	d->cfg.vector = vector;
- 	cpumask_copy(d->domain, vector_cpumask);
- success:
-@@ -255,7 +256,8 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
- 	struct irq_desc *desc;
- 	int cpu, vector;
- 
--	BUG_ON(!data->cfg.vector);
-+	if (!data->cfg.vector)
-+		return;
+ 		opcode = *((__u8 *) skb->data);
+@@ -320,6 +330,7 @@ static int vhci_open(struct inode *inode, struct file *file)
+ 	skb_queue_head_init(&data->readq);
+ 	init_waitqueue_head(&data->read_wait);
  
- 	vector = data->cfg.vector;
- 	for_each_cpu_and(cpu, data->domain, cpu_online_mask)
-@@ -655,46 +657,97 @@ void irq_complete_move(struct irq_cfg *cfg)
- }
++	mutex_init(&data->open_mutex);
+ 	INIT_DELAYED_WORK(&data->open_timeout, vhci_open_timeout);
  
- /*
-- * Called with @desc->lock held and interrupts disabled.
-+ * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
-  */
- void irq_force_complete_move(struct irq_desc *desc)
+ 	file->private_data = data;
+@@ -333,15 +344,18 @@ static int vhci_open(struct inode *inode, struct file *file)
+ static int vhci_release(struct inode *inode, struct file *file)
  {
- 	struct irq_data *irqdata = irq_desc_get_irq_data(desc);
- 	struct apic_chip_data *data = apic_chip_data(irqdata);
- 	struct irq_cfg *cfg = data ? &data->cfg : NULL;
-+	unsigned int cpu;
+ 	struct vhci_data *data = file->private_data;
+-	struct hci_dev *hdev = data->hdev;
++	struct hci_dev *hdev;
  
- 	if (!cfg)
- 		return;
+ 	cancel_delayed_work_sync(&data->open_timeout);
  
--	__irq_complete_move(cfg, cfg->vector);
--
- 	/*
- 	 * This is tricky. If the cleanup of @data->old_domain has not been
- 	 * done yet, then the following setaffinity call will fail with
- 	 * -EBUSY. This can leave the interrupt in a stale state.
- 	 *
--	 * The cleanup cannot make progress because we hold @desc->lock. So in
--	 * case @data->old_domain is not yet cleaned up, we need to drop the
--	 * lock and acquire it again. @desc cannot go away, because the
--	 * hotplug code holds the sparse irq lock.
-+	 * All CPUs are stuck in stop machine with interrupts disabled so
-+	 * calling __irq_complete_move() would be completely pointless.
- 	 */
- 	raw_spin_lock(&vector_lock);
--	/* Clean out all offline cpus (including ourself) first. */
-+	/*
-+	 * Clean out all offline cpus (including the outgoing one) from the
-+	 * old_domain mask.
-+	 */
- 	cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
--	while (!cpumask_empty(data->old_domain)) {
-+
-+	/*
-+	 * If move_in_progress is cleared and the old_domain mask is empty,
-+	 * then there is nothing to cleanup. fixup_irqs() will take care of
-+	 * the stale vectors on the outgoing cpu.
-+	 */
-+	if (!data->move_in_progress && cpumask_empty(data->old_domain)) {
- 		raw_spin_unlock(&vector_lock);
--		raw_spin_unlock(&desc->lock);
--		cpu_relax();
--		raw_spin_lock(&desc->lock);
-+		return;
-+	}
++	hdev = data->hdev;
 +
-+	/*
-+	 * 1) The interrupt is in move_in_progress state. That means that we
-+	 *    have not seen an interrupt since the io_apic was reprogrammed to
-+	 *    the new vector.
-+	 *
-+	 * 2) The interrupt has fired on the new vector, but the cleanup IPIs
-+	 *    have not been processed yet.
-+	 */
-+	if (data->move_in_progress) {
- 		/*
--		 * Reevaluate apic_chip_data. It might have been cleared after
--		 * we dropped @desc->lock.
-+		 * In theory there is a race:
-+		 *
-+		 * set_ioapic(new_vector) <-- Interrupt is raised before update
-+		 *			      is effective, i.e. it's raised on
-+		 *			      the old vector.
-+		 *
-+		 * So if the target cpu cannot handle that interrupt before
-+		 * the old vector is cleaned up, we get a spurious interrupt
-+		 * and in the worst case the ioapic irq line becomes stale.
-+		 *
-+		 * But in case of cpu hotplug this should be a non issue
-+		 * because if the affinity update happens right before all
-+		 * cpus rendevouz in stop machine, there is no way that the
-+		 * interrupt can be blocked on the target cpu because all cpus
-+		 * loops first with interrupts enabled in stop machine, so the
-+		 * old vector is not yet cleaned up when the interrupt fires.
-+		 *
-+		 * So the only way to run into this issue is if the delivery
-+		 * of the interrupt on the apic/system bus would be delayed
-+		 * beyond the point where the target cpu disables interrupts
-+		 * in stop machine. I doubt that it can happen, but at least
-+		 * there is a theroretical chance. Virtualization might be
-+		 * able to expose this, but AFAICT the IOAPIC emulation is not
-+		 * as stupid as the real hardware.
-+		 *
-+		 * Anyway, there is nothing we can do about that at this point
-+		 * w/o refactoring the whole fixup_irq() business completely.
-+		 * We print at least the irq number and the old vector number,
-+		 * so we have the necessary information when a problem in that
-+		 * area arises.
- 		 */
--		data = apic_chip_data(irqdata);
--		if (!data)
--			return;
--		raw_spin_lock(&vector_lock);
-+		pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
-+			irqdata->irq, cfg->old_vector);
+ 	if (hdev) {
+ 		hci_unregister_dev(hdev);
+ 		hci_free_dev(hdev);
  	}
-+	/*
-+	 * If old_domain is not empty, then other cpus still have the irq
-+	 * descriptor set in their vector array. Clean it up.
-+	 */
-+	for_each_cpu(cpu, data->old_domain)
-+		per_cpu(vector_irq, cpu)[cfg->old_vector] = VECTOR_UNUSED;
-+
-+	/* Cleanup the left overs of the (half finished) move */
-+	cpumask_clear(data->old_domain);
-+	data->move_in_progress = 0;
- 	raw_spin_unlock(&vector_lock);
- }
- #endif
-diff --git a/arch/x86/kernel/cpu/mcheck/mce-genpool.c b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
-index 0a850100c594..2658e2af74ec 100644
---- a/arch/x86/kernel/cpu/mcheck/mce-genpool.c
-+++ b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
-@@ -29,7 +29,7 @@ static char gen_pool_buf[MCE_POOLSZ];
- void mce_gen_pool_process(void)
- {
- 	struct llist_node *head;
--	struct mce_evt_llist *node;
-+	struct mce_evt_llist *node, *tmp;
- 	struct mce *mce;
- 
- 	head = llist_del_all(&mce_event_llist);
-@@ -37,7 +37,7 @@ void mce_gen_pool_process(void)
- 		return;
- 
- 	head = llist_reverse_order(head);
--	llist_for_each_entry(node, head, llnode) {
-+	llist_for_each_entry_safe(node, tmp, head, llnode) {
- 		mce = &node->mce;
- 		atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
- 		gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node));
-diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
-index 2c5aaf8c2e2f..05538582a809 100644
---- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
-+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
-@@ -385,6 +385,9 @@ static void intel_thermal_interrupt(void)
- {
- 	__u64 msr_val;
  
-+	if (static_cpu_has(X86_FEATURE_HWP))
-+		wrmsrl_safe(MSR_HWP_STATUS, 0);
-+
- 	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
- 
- 	/* Check for violation of core thermal thresholds*/
-diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
-index ee81c544ee0d..4f4735bd8698 100644
---- a/arch/x86/kernel/cpu/microcode/intel.c
-+++ b/arch/x86/kernel/cpu/microcode/intel.c
-@@ -551,10 +551,14 @@ scan_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
- 	cd.data = NULL;
- 	cd.size = 0;
- 
--	cd = find_cpio_data(p, (void *)start, size, &offset);
--	if (!cd.data) {
-+	/* try built-in microcode if no initrd */
-+	if (!size) {
- 		if (!load_builtin_intel_microcode(&cd))
- 			return UCODE_ERROR;
-+	} else {
-+		cd = find_cpio_data(p, (void *)start, size, &offset);
-+		if (!cd.data)
-+			return UCODE_ERROR;
- 	}
++	skb_queue_purge(&data->readq);
+ 	file->private_data = NULL;
+ 	kfree(data);
  
- 	return get_matching_model_microcode(0, start, cd.data, cd.size,
-@@ -690,7 +694,7 @@ int __init save_microcode_in_initrd_intel(void)
- 	if (count == 0)
- 		return ret;
+diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
+index 9f4df8f645f8..dd2856b5633c 100644
+--- a/drivers/clk/bcm/clk-bcm2835.c
++++ b/drivers/clk/bcm/clk-bcm2835.c
+@@ -1078,10 +1078,12 @@ static void bcm2835_pll_divider_off(struct clk_hw *hw)
+ 	struct bcm2835_cprman *cprman = divider->cprman;
+ 	const struct bcm2835_pll_divider_data *data = divider->data;
  
--	copy_initrd_ptrs(mc_saved, mc_saved_in_initrd, initrd_start, count);
-+	copy_initrd_ptrs(mc_saved, mc_saved_in_initrd, get_initrd_start(), count);
- 	ret = save_microcode(&mc_saved_data, mc_saved, count);
- 	if (ret)
- 		pr_err("Cannot save microcode patches from initrd.\n");
-@@ -728,16 +732,20 @@ void __init load_ucode_intel_bsp(void)
- 	struct boot_params *p;
- 
- 	p	= (struct boot_params *)__pa_nodebug(&boot_params);
--	start	= p->hdr.ramdisk_image;
- 	size	= p->hdr.ramdisk_size;
- 
--	_load_ucode_intel_bsp(
--			(struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
--			(unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
--			start, size);
-+	/*
-+	 * Set start only if we have an initrd image. We cannot use initrd_start
-+	 * because it is not set that early yet.
-+	 */
-+	start	= (size ? p->hdr.ramdisk_image : 0);
-+
-+	_load_ucode_intel_bsp((struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
-+			      (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
-+			      start, size);
- #else
--	start	= boot_params.hdr.ramdisk_image + PAGE_OFFSET;
- 	size	= boot_params.hdr.ramdisk_size;
-+	start	= (size ? boot_params.hdr.ramdisk_image + PAGE_OFFSET : 0);
++	spin_lock(&cprman->regs_lock);
+ 	cprman_write(cprman, data->cm_reg,
+ 		     (cprman_read(cprman, data->cm_reg) &
+ 		      ~data->load_mask) | data->hold_mask);
+ 	cprman_write(cprman, data->a2w_reg, A2W_PLL_CHANNEL_DISABLE);
++	spin_unlock(&cprman->regs_lock);
+ }
  
- 	_load_ucode_intel_bsp(&mc_saved_data, mc_saved_in_initrd, start, size);
- #endif
-@@ -748,20 +756,14 @@ void load_ucode_intel_ap(void)
- 	struct mc_saved_data *mc_saved_data_p;
- 	struct ucode_cpu_info uci;
- 	unsigned long *mc_saved_in_initrd_p;
--	unsigned long initrd_start_addr;
- 	enum ucode_state ret;
- #ifdef CONFIG_X86_32
--	unsigned long *initrd_start_p;
- 
--	mc_saved_in_initrd_p =
--		(unsigned long *)__pa_nodebug(mc_saved_in_initrd);
-+	mc_saved_in_initrd_p = (unsigned long *)__pa_nodebug(mc_saved_in_initrd);
- 	mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
--	initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
--	initrd_start_addr = (unsigned long)__pa_nodebug(*initrd_start_p);
- #else
--	mc_saved_data_p = &mc_saved_data;
- 	mc_saved_in_initrd_p = mc_saved_in_initrd;
--	initrd_start_addr = initrd_start;
-+	mc_saved_data_p = &mc_saved_data;
- #endif
+ static int bcm2835_pll_divider_on(struct clk_hw *hw)
+@@ -1090,12 +1092,14 @@ static int bcm2835_pll_divider_on(struct clk_hw *hw)
+ 	struct bcm2835_cprman *cprman = divider->cprman;
+ 	const struct bcm2835_pll_divider_data *data = divider->data;
  
- 	/*
-@@ -773,7 +775,7 @@ void load_ucode_intel_ap(void)
++	spin_lock(&cprman->regs_lock);
+ 	cprman_write(cprman, data->a2w_reg,
+ 		     cprman_read(cprman, data->a2w_reg) &
+ 		     ~A2W_PLL_CHANNEL_DISABLE);
  
- 	collect_cpu_info_early(&uci);
- 	ret = load_microcode(mc_saved_data_p, mc_saved_in_initrd_p,
--			     initrd_start_addr, &uci);
-+			     get_initrd_start_addr(), &uci);
+ 	cprman_write(cprman, data->cm_reg,
+ 		     cprman_read(cprman, data->cm_reg) & ~data->hold_mask);
++	spin_unlock(&cprman->regs_lock);
  
- 	if (ret != UCODE_OK)
- 		return;
-diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
-index 1b443db2db50..6532f5b40646 100644
---- a/arch/x86/kernel/cpu/perf_event.c
-+++ b/arch/x86/kernel/cpu/perf_event.c
-@@ -596,6 +596,19 @@ void x86_pmu_disable_all(void)
- 	}
+ 	return 0;
  }
- 
-+/*
-+ * There may be PMI landing after enabled=0. The PMI hitting could be before or
-+ * after disable_all.
-+ *
-+ * If PMI hits before disable_all, the PMU will be disabled in the NMI handler.
-+ * It will not be re-enabled in the NMI handler again, because enabled=0. After
-+ * handling the NMI, disable_all will be called, which will not change the
-+ * state either. If PMI hits after disable_all, the PMU is already disabled
-+ * before entering NMI handler. The NMI handler will not change the state
-+ * either.
-+ *
-+ * So either situation is harmless.
-+ */
- static void x86_pmu_disable(struct pmu *pmu)
+diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
+index 8cc9b2868b41..5f56d6aae31d 100644
+--- a/drivers/clk/qcom/gcc-msm8916.c
++++ b/drivers/clk/qcom/gcc-msm8916.c
+@@ -2346,6 +2346,7 @@ static struct clk_branch gcc_crypto_ahb_clk = {
+ 				"pcnoc_bfdcd_clk_src",
+ 			},
+ 			.num_parents = 1,
++			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -2381,6 +2382,7 @@ static struct clk_branch gcc_crypto_clk = {
+ 				"crypto_clk_src",
+ 			},
+ 			.num_parents = 1,
++			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
+index f7e0d8d4c3da..8f50a02ff68d 100644
+--- a/drivers/crypto/caam/jr.c
++++ b/drivers/crypto/caam/jr.c
+@@ -248,7 +248,7 @@ static void caam_jr_dequeue(unsigned long devarg)
+ struct device *caam_jr_alloc(void)
  {
- 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
-diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
-index 7bb61e32fb29..98be6d6d32fa 100644
---- a/arch/x86/kernel/cpu/perf_event.h
-+++ b/arch/x86/kernel/cpu/perf_event.h
-@@ -586,6 +586,7 @@ struct x86_pmu {
- 			pebs_broken	:1,
- 			pebs_prec_dist	:1;
- 	int		pebs_record_size;
-+	int		pebs_buffer_size;
- 	void		(*drain_pebs)(struct pt_regs *regs);
- 	struct event_constraint *pebs_constraints;
- 	void		(*pebs_aliases)(struct perf_event *event);
-@@ -904,6 +905,8 @@ void intel_pmu_lbr_init_skl(void);
- 
- void intel_pmu_lbr_init_knl(void);
- 
-+void intel_pmu_pebs_data_source_nhm(void);
-+
- int intel_pmu_setup_lbr_filter(struct perf_event *event);
- 
- void intel_pt_interrupt(void);
-diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
-index fed2ab1f1065..760aec1e8f82 100644
---- a/arch/x86/kernel/cpu/perf_event_intel.c
-+++ b/arch/x86/kernel/cpu/perf_event_intel.c
-@@ -1502,7 +1502,15 @@ static __initconst const u64 knl_hw_cache_extra_regs
- };
+ 	struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL;
+-	struct device *dev = NULL;
++	struct device *dev = ERR_PTR(-ENODEV);
+ 	int min_tfm_cnt	= INT_MAX;
+ 	int tfm_cnt;
  
- /*
-- * Use from PMIs where the LBRs are already disabled.
-+ * Used from PMIs where the LBRs are already disabled.
-+ *
-+ * This function could be called consecutively. It is required to remain in
-+ * disabled state if called consecutively.
-+ *
-+ * During consecutive calls, the same disable value will be written to related
-+ * registers, so the PMU state remains unchanged. hw.state in
-+ * intel_bts_disable_local will remain PERF_HES_STOPPED too in consecutive
-+ * calls.
-  */
- static void __intel_pmu_disable_all(void)
- {
-@@ -1884,6 +1892,16 @@ again:
- 	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
- 		handled++;
- 		x86_pmu.drain_pebs(regs);
-+		/*
-+		 * There are cases where, even though, the PEBS ovfl bit is set
-+		 * in GLOBAL_OVF_STATUS, the PEBS events may also have their
-+		 * overflow bits set for their counters. We must clear them
-+		 * here because they have been processed as exact samples in
-+		 * the drain_pebs() routine. They must not be processed again
-+		 * in the for_each_bit_set() loop for regular samples below.
-+		 */
-+		status &= ~cpuc->pebs_enabled;
-+		status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
+diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+index a19ee127edca..e72fea737a0d 100644
+--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
++++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+@@ -35,6 +35,7 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
+ 	unsigned int todo;
+ 	struct sg_mapping_iter mi, mo;
+ 	unsigned int oi, oo; /* offset for in and out */
++	unsigned long flags;
+ 
+ 	if (areq->nbytes == 0)
+ 		return 0;
+@@ -49,7 +50,7 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
+ 		return -EINVAL;
  	}
  
- 	/*
-@@ -1929,7 +1947,10 @@ again:
- 		goto again;
- 
- done:
--	__intel_pmu_enable_all(0, true);
-+	/* Only restore PMU state when it's active. See x86_pmu_disable(). */
-+	if (cpuc->enabled)
-+		__intel_pmu_enable_all(0, true);
-+
- 	/*
- 	 * Only unmask the NMI after the overflow counters
- 	 * have been reset. This avoids spurious NMIs on
-@@ -3396,6 +3417,7 @@ __init int intel_pmu_init(void)
- 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
- 			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
- 
-+		intel_pmu_pebs_data_source_nhm();
- 		x86_add_quirk(intel_nehalem_quirk);
- 
- 		pr_cont("Nehalem events, ");
-@@ -3459,6 +3481,7 @@ __init int intel_pmu_init(void)
- 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
- 			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
- 
-+		intel_pmu_pebs_data_source_nhm();
- 		pr_cont("Westmere events, ");
- 		break;
+-	spin_lock_bh(&ss->slock);
++	spin_lock_irqsave(&ss->slock, flags);
  
-diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
-index 10602f0a438f..955140140fd4 100644
---- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
-+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
-@@ -51,7 +51,8 @@ union intel_x86_pebs_dse {
- #define OP_LH (P(OP, LOAD) | P(LVL, HIT))
- #define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))
- 
--static const u64 pebs_data_source[] = {
-+/* Version for Sandy Bridge and later */
-+static u64 pebs_data_source[] = {
- 	P(OP, LOAD) | P(LVL, MISS) | P(LVL, L3) | P(SNOOP, NA),/* 0x00:ukn L3 */
- 	OP_LH | P(LVL, L1)  | P(SNOOP, NONE),	/* 0x01: L1 local */
- 	OP_LH | P(LVL, LFB) | P(SNOOP, NONE),	/* 0x02: LFB hit */
-@@ -70,6 +71,14 @@ static const u64 pebs_data_source[] = {
- 	OP_LH | P(LVL, UNC) | P(SNOOP, NONE), /* 0x0f: uncached */
- };
+ 	for (i = 0; i < op->keylen; i += 4)
+ 		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
+@@ -117,7 +118,7 @@ release_ss:
+ 	sg_miter_stop(&mi);
+ 	sg_miter_stop(&mo);
+ 	writel(0, ss->base + SS_CTL);
+-	spin_unlock_bh(&ss->slock);
++	spin_unlock_irqrestore(&ss->slock, flags);
+ 	return err;
+ }
  
-+/* Patch up minor differences in the bits */
-+void __init intel_pmu_pebs_data_source_nhm(void)
-+{
-+	pebs_data_source[0x05] = OP_LH | P(LVL, L3)  | P(SNOOP, HIT);
-+	pebs_data_source[0x06] = OP_LH | P(LVL, L3)  | P(SNOOP, HITM);
-+	pebs_data_source[0x07] = OP_LH | P(LVL, L3)  | P(SNOOP, HITM);
-+}
-+
- static u64 precise_store_data(u64 status)
- {
- 	union intel_x86_pebs_dse dse;
-@@ -269,7 +278,7 @@ static int alloc_pebs_buffer(int cpu)
- 	if (!x86_pmu.pebs)
- 		return 0;
+@@ -149,6 +150,7 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
+ 	unsigned int ob = 0;	/* offset in buf */
+ 	unsigned int obo = 0;	/* offset in bufo*/
+ 	unsigned int obl = 0;	/* length of data in bufo */
++	unsigned long flags;
  
--	buffer = kzalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL, node);
-+	buffer = kzalloc_node(x86_pmu.pebs_buffer_size, GFP_KERNEL, node);
- 	if (unlikely(!buffer))
- 		return -ENOMEM;
+ 	if (areq->nbytes == 0)
+ 		return 0;
+@@ -181,7 +183,7 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
+ 	if (no_chunk == 1)
+ 		return sun4i_ss_opti_poll(areq);
  
-@@ -286,7 +295,7 @@ static int alloc_pebs_buffer(int cpu)
- 		per_cpu(insn_buffer, cpu) = ibuffer;
- 	}
+-	spin_lock_bh(&ss->slock);
++	spin_lock_irqsave(&ss->slock, flags);
  
--	max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;
-+	max = x86_pmu.pebs_buffer_size / x86_pmu.pebs_record_size;
- 
- 	ds->pebs_buffer_base = (u64)(unsigned long)buffer;
- 	ds->pebs_index = ds->pebs_buffer_base;
-@@ -1319,6 +1328,7 @@ void __init intel_ds_init(void)
- 
- 	x86_pmu.bts  = boot_cpu_has(X86_FEATURE_BTS);
- 	x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
-+	x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
- 	if (x86_pmu.pebs) {
- 		char pebs_type = x86_pmu.intel_cap.pebs_trap ?  '+' : '-';
- 		int format = x86_pmu.intel_cap.pebs_format;
-@@ -1327,6 +1337,14 @@ void __init intel_ds_init(void)
- 		case 0:
- 			printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
- 			x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
-+			/*
-+			 * Using >PAGE_SIZE buffers makes the WRMSR to
-+			 * PERF_GLOBAL_CTRL in intel_pmu_enable_all()
-+			 * mysteriously hang on Core2.
-+			 *
-+			 * As a workaround, we don't do this.
-+			 */
-+			x86_pmu.pebs_buffer_size = PAGE_SIZE;
- 			x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
- 			break;
- 
-diff --git a/arch/x86/kernel/cpu/perf_event_intel_pt.c b/arch/x86/kernel/cpu/perf_event_intel_pt.c
-index c0bbd1033b7c..a5286d0bbb43 100644
---- a/arch/x86/kernel/cpu/perf_event_intel_pt.c
-+++ b/arch/x86/kernel/cpu/perf_event_intel_pt.c
-@@ -695,6 +695,7 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
- 
- 	/* clear STOP and INT from current entry */
- 	buf->topa_index[buf->stop_pos]->stop = 0;
-+	buf->topa_index[buf->stop_pos]->intr = 0;
- 	buf->topa_index[buf->intr_pos]->intr = 0;
- 
- 	/* how many pages till the STOP marker */
-@@ -719,6 +720,7 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
- 	buf->intr_pos = idx;
- 
- 	buf->topa_index[buf->stop_pos]->stop = 1;
-+	buf->topa_index[buf->stop_pos]->intr = 1;
- 	buf->topa_index[buf->intr_pos]->intr = 1;
- 
- 	return 0;
-diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
-index 33acb884ccf1..4547b2cca71b 100644
---- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
-+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
-@@ -2875,11 +2875,13 @@ static struct intel_uncore_type bdx_uncore_sbox = {
- 	.format_group		= &hswep_uncore_sbox_format_group,
- };
+ 	for (i = 0; i < op->keylen; i += 4)
+ 		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
+@@ -308,7 +310,7 @@ release_ss:
+ 	sg_miter_stop(&mi);
+ 	sg_miter_stop(&mo);
+ 	writel(0, ss->base + SS_CTL);
+-	spin_unlock_bh(&ss->slock);
++	spin_unlock_irqrestore(&ss->slock, flags);
  
-+#define BDX_MSR_UNCORE_SBOX	3
-+
- static struct intel_uncore_type *bdx_msr_uncores[] = {
- 	&bdx_uncore_ubox,
- 	&bdx_uncore_cbox,
--	&bdx_uncore_sbox,
- 	&hswep_uncore_pcu,
-+	&bdx_uncore_sbox,
- 	NULL,
+ 	return err;
+ }
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index aae05547b924..b7ee8d30147d 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -835,6 +835,16 @@ struct talitos_ahash_req_ctx {
+ 	struct scatterlist *psrc;
  };
  
-@@ -2888,6 +2890,10 @@ void bdx_uncore_cpu_init(void)
- 	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
- 		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
- 	uncore_msr_uncores = bdx_msr_uncores;
++struct talitos_export_state {
++	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
++	u8 buf[HASH_MAX_BLOCK_SIZE];
++	unsigned int swinit;
++	unsigned int first;
++	unsigned int last;
++	unsigned int to_hash_later;
++	unsigned int nbuf;
++};
 +
-+	/* BDX-DE doesn't have SBOX */
-+	if (boot_cpu_data.x86_model == 86)
-+		uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
- }
- 
- static struct intel_uncore_type bdx_uncore_ha = {
-diff --git a/arch/x86/kernel/cpu/perf_event_knc.c b/arch/x86/kernel/cpu/perf_event_knc.c
-index 5b0c232d1ee6..b931095e86d4 100644
---- a/arch/x86/kernel/cpu/perf_event_knc.c
-+++ b/arch/x86/kernel/cpu/perf_event_knc.c
-@@ -263,7 +263,9 @@ again:
- 		goto again;
- 
- done:
--	knc_pmu_enable_all(0);
-+	/* Only restore PMU state when it's active. See x86_pmu_disable(). */
-+	if (cpuc->enabled)
-+		knc_pmu_enable_all(0);
- 
- 	return handled;
- }
-diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
-index 37dae792dbbe..589b3193f102 100644
---- a/arch/x86/kernel/ioport.c
-+++ b/arch/x86/kernel/ioport.c
-@@ -96,9 +96,14 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
- SYSCALL_DEFINE1(iopl, unsigned int, level)
+ static int aead_setkey(struct crypto_aead *authenc,
+ 		       const u8 *key, unsigned int keylen)
  {
- 	struct pt_regs *regs = current_pt_regs();
--	unsigned int old = (regs->flags >> 12) & 3;
- 	struct thread_struct *t = &current->thread;
+@@ -1981,6 +1991,46 @@ static int ahash_digest(struct ahash_request *areq)
+ 	return ahash_process_req(areq, areq->nbytes);
+ }
  
-+	/*
-+	 * Careful: the IOPL bits in regs->flags are undefined under Xen PV
-+	 * and changing them has no effect.
-+	 */
-+	unsigned int old = t->iopl >> X86_EFLAGS_IOPL_BIT;
++static int ahash_export(struct ahash_request *areq, void *out)
++{
++	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
++	struct talitos_export_state *export = out;
 +
- 	if (level > 3)
- 		return -EINVAL;
- 	/* Trying to gain more privileges? */
-@@ -106,8 +111,9 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
- 		if (!capable(CAP_SYS_RAWIO))
- 			return -EPERM;
- 	}
--	regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
--	t->iopl = level << 12;
-+	regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) |
-+		(level << X86_EFLAGS_IOPL_BIT);
-+	t->iopl = level << X86_EFLAGS_IOPL_BIT;
- 	set_iopl_mask(t->iopl);
- 
- 	return 0;
-diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
-index b9d99e0f82c4..9f751876066f 100644
---- a/arch/x86/kernel/process_64.c
-+++ b/arch/x86/kernel/process_64.c
-@@ -48,6 +48,7 @@
- #include <asm/syscalls.h>
- #include <asm/debugreg.h>
- #include <asm/switch_to.h>
-+#include <asm/xen/hypervisor.h>
- 
- asmlinkage extern void ret_from_fork(void);
- 
-@@ -411,6 +412,17 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
- 		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
- 		__switch_to_xtra(prev_p, next_p, tss);
- 
-+#ifdef CONFIG_XEN
-+	/*
-+	 * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
-+	 * current_pt_regs()->flags may not match the current task's
-+	 * intended IOPL.  We need to switch it manually.
-+	 */
-+	if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
-+		     prev->iopl != next->iopl))
-+		xen_set_iopl_mask(next->iopl);
-+#endif
++	memcpy(export->hw_context, req_ctx->hw_context,
++	       req_ctx->hw_context_size);
++	memcpy(export->buf, req_ctx->buf, req_ctx->nbuf);
++	export->swinit = req_ctx->swinit;
++	export->first = req_ctx->first;
++	export->last = req_ctx->last;
++	export->to_hash_later = req_ctx->to_hash_later;
++	export->nbuf = req_ctx->nbuf;
 +
- 	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
- 		/*
- 		 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
-diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c
-index b285d4e8c68e..5da924bbf0a0 100644
---- a/arch/x86/kernel/sysfb_efi.c
-+++ b/arch/x86/kernel/sysfb_efi.c
-@@ -106,14 +106,24 @@ static int __init efifb_set_system(const struct dmi_system_id *id)
- 					continue;
- 				for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- 					resource_size_t start, end;
-+					unsigned long flags;
++	return 0;
++}
 +
-+					flags = pci_resource_flags(dev, i);
-+					if (!(flags & IORESOURCE_MEM))
-+						continue;
++static int ahash_import(struct ahash_request *areq, const void *in)
++{
++	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
++	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
++	const struct talitos_export_state *export = in;
 +
-+					if (flags & IORESOURCE_UNSET)
-+						continue;
++	memset(req_ctx, 0, sizeof(*req_ctx));
++	req_ctx->hw_context_size =
++		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
++			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
++			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
++	memcpy(req_ctx->hw_context, export->hw_context,
++	       req_ctx->hw_context_size);
++	memcpy(req_ctx->buf, export->buf, export->nbuf);
++	req_ctx->swinit = export->swinit;
++	req_ctx->first = export->first;
++	req_ctx->last = export->last;
++	req_ctx->to_hash_later = export->to_hash_later;
++	req_ctx->nbuf = export->nbuf;
 +
-+					if (pci_resource_len(dev, i) == 0)
-+						continue;
- 
- 					start = pci_resource_start(dev, i);
--					if (start == 0)
--						break;
- 					end = pci_resource_end(dev, i);
- 					if (screen_info.lfb_base >= start &&
- 					    screen_info.lfb_base < end) {
- 						found_bar = 1;
-+						break;
- 					}
- 				}
- 			}
-diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
-index 92ae6acac8a7..6aa0f4d9eea6 100644
---- a/arch/x86/kernel/tsc_msr.c
-+++ b/arch/x86/kernel/tsc_msr.c
-@@ -92,7 +92,7 @@ unsigned long try_msr_calibrate_tsc(void)
- 
- 	if (freq_desc_tables[cpu_index].msr_plat) {
- 		rdmsr(MSR_PLATFORM_INFO, lo, hi);
--		ratio = (lo >> 8) & 0x1f;
-+		ratio = (lo >> 8) & 0xff;
- 	} else {
- 		rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
- 		ratio = (hi >> 8) & 0x1f;
-diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
-index 6525e926f566..2e1fd586b895 100644
---- a/arch/x86/kvm/cpuid.c
-+++ b/arch/x86/kvm/cpuid.c
-@@ -509,6 +509,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
- 			do_cpuid_1_ent(&entry[i], function, idx);
- 			if (idx == 1) {
- 				entry[i].eax &= kvm_supported_word10_x86_features;
-+				cpuid_mask(&entry[i].eax, 10);
- 				entry[i].ebx = 0;
- 				if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
- 					entry[i].ebx =
-diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
-index b0ea42b78ccd..ab5318727579 100644
---- a/arch/x86/kvm/i8254.c
-+++ b/arch/x86/kvm/i8254.c
-@@ -245,7 +245,7 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
- 		 * PIC is being reset.  Handle it gracefully here
- 		 */
- 		atomic_inc(&ps->pending);
--	else if (value > 0)
-+	else if (value > 0 && ps->reinject)
- 		/* in this case, we had multiple outstanding pit interrupts
- 		 * that we needed to inject.  Reinject
- 		 */
-@@ -288,7 +288,9 @@ static void pit_do_work(struct kthread_work *work)
- 	 * last one has been acked.
- 	 */
- 	spin_lock(&ps->inject_lock);
--	if (ps->irq_ack) {
-+	if (!ps->reinject)
-+		inject = 1;
-+	else if (ps->irq_ack) {
- 		ps->irq_ack = 0;
- 		inject = 1;
- 	}
-@@ -317,10 +319,10 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
- 	struct kvm_kpit_state *ps = container_of(data, struct kvm_kpit_state, timer);
- 	struct kvm_pit *pt = ps->kvm->arch.vpit;
- 
--	if (ps->reinject || !atomic_read(&ps->pending)) {
-+	if (ps->reinject)
- 		atomic_inc(&ps->pending);
--		queue_kthread_work(&pt->worker, &pt->expired);
--	}
++	return 0;
++}
 +
-+	queue_kthread_work(&pt->worker, &pt->expired);
+ struct keyhash_result {
+ 	struct completion completion;
+ 	int err;
+@@ -2458,6 +2508,7 @@ static struct talitos_alg_template driver_algs[] = {
+ 	{	.type = CRYPTO_ALG_TYPE_AHASH,
+ 		.alg.hash = {
+ 			.halg.digestsize = MD5_DIGEST_SIZE,
++			.halg.statesize = sizeof(struct talitos_export_state),
+ 			.halg.base = {
+ 				.cra_name = "md5",
+ 				.cra_driver_name = "md5-talitos",
+@@ -2473,6 +2524,7 @@ static struct talitos_alg_template driver_algs[] = {
+ 	{	.type = CRYPTO_ALG_TYPE_AHASH,
+ 		.alg.hash = {
+ 			.halg.digestsize = SHA1_DIGEST_SIZE,
++			.halg.statesize = sizeof(struct talitos_export_state),
+ 			.halg.base = {
+ 				.cra_name = "sha1",
+ 				.cra_driver_name = "sha1-talitos",
+@@ -2488,6 +2540,7 @@ static struct talitos_alg_template driver_algs[] = {
+ 	{	.type = CRYPTO_ALG_TYPE_AHASH,
+ 		.alg.hash = {
+ 			.halg.digestsize = SHA224_DIGEST_SIZE,
++			.halg.statesize = sizeof(struct talitos_export_state),
+ 			.halg.base = {
+ 				.cra_name = "sha224",
+ 				.cra_driver_name = "sha224-talitos",
+@@ -2503,6 +2556,7 @@ static struct talitos_alg_template driver_algs[] = {
+ 	{	.type = CRYPTO_ALG_TYPE_AHASH,
+ 		.alg.hash = {
+ 			.halg.digestsize = SHA256_DIGEST_SIZE,
++			.halg.statesize = sizeof(struct talitos_export_state),
+ 			.halg.base = {
+ 				.cra_name = "sha256",
+ 				.cra_driver_name = "sha256-talitos",
+@@ -2518,6 +2572,7 @@ static struct talitos_alg_template driver_algs[] = {
+ 	{	.type = CRYPTO_ALG_TYPE_AHASH,
+ 		.alg.hash = {
+ 			.halg.digestsize = SHA384_DIGEST_SIZE,
++			.halg.statesize = sizeof(struct talitos_export_state),
+ 			.halg.base = {
+ 				.cra_name = "sha384",
+ 				.cra_driver_name = "sha384-talitos",
+@@ -2533,6 +2588,7 @@ static struct talitos_alg_template driver_algs[] = {
+ 	{	.type = CRYPTO_ALG_TYPE_AHASH,
+ 		.alg.hash = {
+ 			.halg.digestsize = SHA512_DIGEST_SIZE,
++			.halg.statesize = sizeof(struct talitos_export_state),
+ 			.halg.base = {
+ 				.cra_name = "sha512",
+ 				.cra_driver_name = "sha512-talitos",
+@@ -2548,6 +2604,7 @@ static struct talitos_alg_template driver_algs[] = {
+ 	{	.type = CRYPTO_ALG_TYPE_AHASH,
+ 		.alg.hash = {
+ 			.halg.digestsize = MD5_DIGEST_SIZE,
++			.halg.statesize = sizeof(struct talitos_export_state),
+ 			.halg.base = {
+ 				.cra_name = "hmac(md5)",
+ 				.cra_driver_name = "hmac-md5-talitos",
+@@ -2563,6 +2620,7 @@ static struct talitos_alg_template driver_algs[] = {
+ 	{	.type = CRYPTO_ALG_TYPE_AHASH,
+ 		.alg.hash = {
+ 			.halg.digestsize = SHA1_DIGEST_SIZE,
++			.halg.statesize = sizeof(struct talitos_export_state),
+ 			.halg.base = {
+ 				.cra_name = "hmac(sha1)",
+ 				.cra_driver_name = "hmac-sha1-talitos",
+@@ -2578,6 +2636,7 @@ static struct talitos_alg_template driver_algs[] = {
+ 	{	.type = CRYPTO_ALG_TYPE_AHASH,
+ 		.alg.hash = {
+ 			.halg.digestsize = SHA224_DIGEST_SIZE,
++			.halg.statesize = sizeof(struct talitos_export_state),
+ 			.halg.base = {
+ 				.cra_name = "hmac(sha224)",
+ 				.cra_driver_name = "hmac-sha224-talitos",
+@@ -2593,6 +2652,7 @@ static struct talitos_alg_template driver_algs[] = {
+ 	{	.type = CRYPTO_ALG_TYPE_AHASH,
+ 		.alg.hash = {
+ 			.halg.digestsize = SHA256_DIGEST_SIZE,
++			.halg.statesize = sizeof(struct talitos_export_state),
+ 			.halg.base = {
+ 				.cra_name = "hmac(sha256)",
+ 				.cra_driver_name = "hmac-sha256-talitos",
+@@ -2608,6 +2668,7 @@ static struct talitos_alg_template driver_algs[] = {
+ 	{	.type = CRYPTO_ALG_TYPE_AHASH,
+ 		.alg.hash = {
+ 			.halg.digestsize = SHA384_DIGEST_SIZE,
++			.halg.statesize = sizeof(struct talitos_export_state),
+ 			.halg.base = {
+ 				.cra_name = "hmac(sha384)",
+ 				.cra_driver_name = "hmac-sha384-talitos",
+@@ -2623,6 +2684,7 @@ static struct talitos_alg_template driver_algs[] = {
+ 	{	.type = CRYPTO_ALG_TYPE_AHASH,
+ 		.alg.hash = {
+ 			.halg.digestsize = SHA512_DIGEST_SIZE,
++			.halg.statesize = sizeof(struct talitos_export_state),
+ 			.halg.base = {
+ 				.cra_name = "hmac(sha512)",
+ 				.cra_driver_name = "hmac-sha512-talitos",
+@@ -2814,6 +2876,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
+ 		t_alg->algt.alg.hash.finup = ahash_finup;
+ 		t_alg->algt.alg.hash.digest = ahash_digest;
+ 		t_alg->algt.alg.hash.setkey = ahash_setkey;
++		t_alg->algt.alg.hash.import = ahash_import;
++		t_alg->algt.alg.hash.export = ahash_export;
  
- 	if (ps->is_periodic) {
- 		hrtimer_add_expires_ns(&ps->timer, ps->period);
-diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
-index 3f8c732117ec..c146f3c262c3 100644
---- a/arch/x86/kvm/mtrr.c
-+++ b/arch/x86/kvm/mtrr.c
-@@ -44,8 +44,6 @@ static bool msr_mtrr_valid(unsigned msr)
- 	case MSR_MTRRdefType:
- 	case MSR_IA32_CR_PAT:
- 		return true;
--	case 0x2f8:
--		return true;
- 	}
- 	return false;
- }
-diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
-index 9bd8f44baded..60946a5d3812 100644
---- a/arch/x86/kvm/vmx.c
-+++ b/arch/x86/kvm/vmx.c
-@@ -2702,8 +2702,15 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
- 	} else
- 		vmx->nested.nested_vmx_ept_caps = 0;
+ 		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
+ 		    !strncmp(alg->cra_name, "hmac", 4)) {
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index 03022f6420d7..a09841abae50 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -1541,7 +1541,7 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
  
-+	/*
-+	 * Old versions of KVM use the single-context version without
-+	 * checking for support, so declare that it is supported even
-+	 * though it is treated as global context.  The alternative is
-+	 * not failing the single-context invvpid, and it is worse.
-+	 */
- 	if (enable_vpid)
- 		vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
-+				VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |
- 				VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
- 	else
- 		vmx->nested.nested_vmx_vpid_caps = 0;
-@@ -5014,8 +5021,8 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
- 		vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
+ 	if (dev->use_fast_reg) {
+ 		state.sg = idb_sg;
+-		sg_set_buf(idb_sg, req->indirect_desc, idb_len);
++		sg_init_one(idb_sg, req->indirect_desc, idb_len);
+ 		idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
+ #ifdef CONFIG_NEED_SG_DMA_LENGTH
+ 		idb_sg->dma_length = idb_sg->length;	      /* hack^2 */
+diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
+index f2261ab54701..18663d4edae5 100644
+--- a/drivers/input/misc/pwm-beeper.c
++++ b/drivers/input/misc/pwm-beeper.c
+@@ -20,21 +20,40 @@
+ #include <linux/platform_device.h>
+ #include <linux/pwm.h>
+ #include <linux/slab.h>
++#include <linux/workqueue.h>
  
- 	cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
--	vmx_set_cr0(vcpu, cr0); /* enter rmode */
- 	vmx->vcpu.arch.cr0 = cr0;
-+	vmx_set_cr0(vcpu, cr0); /* enter rmode */
- 	vmx_set_cr4(vcpu, 0);
- 	vmx_set_efer(vcpu, 0);
- 	vmx_fpu_activate(vcpu);
-@@ -7398,6 +7405,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
- 	if (!(types & (1UL << type))) {
- 		nested_vmx_failValid(vcpu,
- 				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
-+		skip_emulated_instruction(vcpu);
- 		return 1;
- 	}
+ struct pwm_beeper {
+ 	struct input_dev *input;
+ 	struct pwm_device *pwm;
++	struct work_struct work;
+ 	unsigned long period;
+ };
  
-@@ -7456,6 +7464,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
- 	if (!(types & (1UL << type))) {
- 		nested_vmx_failValid(vcpu,
- 			VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
-+		skip_emulated_instruction(vcpu);
- 		return 1;
- 	}
+ #define HZ_TO_NANOSECONDS(x) (1000000000UL/(x))
  
-@@ -7472,12 +7481,17 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
- 	}
++static void __pwm_beeper_set(struct pwm_beeper *beeper)
++{
++	unsigned long period = beeper->period;
++
++	if (period) {
++		pwm_config(beeper->pwm, period / 2, period);
++		pwm_enable(beeper->pwm);
++	} else
++		pwm_disable(beeper->pwm);
++}
++
++static void pwm_beeper_work(struct work_struct *work)
++{
++	struct pwm_beeper *beeper =
++		container_of(work, struct pwm_beeper, work);
++
++	__pwm_beeper_set(beeper);
++}
++
+ static int pwm_beeper_event(struct input_dev *input,
+ 			    unsigned int type, unsigned int code, int value)
+ {
+-	int ret = 0;
+ 	struct pwm_beeper *beeper = input_get_drvdata(input);
+-	unsigned long period;
  
- 	switch (type) {
-+	case VMX_VPID_EXTENT_SINGLE_CONTEXT:
-+		/*
-+		 * Old versions of KVM use the single-context version so we
-+		 * have to support it; just treat it the same as all-context.
-+		 */
- 	case VMX_VPID_EXTENT_ALL_CONTEXT:
- 		__vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
- 		nested_vmx_succeed(vcpu);
- 		break;
- 	default:
--		/* Trap single context invalidation invvpid calls */
-+		/* Trap individual address invalidation invvpid calls */
- 		BUG_ON(1);
- 		break;
- 	}
-diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index eaf6ee8c28b8..ac4963c38aa3 100644
---- a/arch/x86/kvm/x86.c
-+++ b/arch/x86/kvm/x86.c
-@@ -697,7 +697,6 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
- 		if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
- 			return 1;
+ 	if (type != EV_SND || value < 0)
+ 		return -EINVAL;
+@@ -49,22 +68,31 @@ static int pwm_beeper_event(struct input_dev *input,
+ 		return -EINVAL;
  	}
--	kvm_put_guest_xcr0(vcpu);
- 	vcpu->arch.xcr0 = xcr0;
  
- 	if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
-@@ -2752,6 +2751,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
- 	}
+-	if (value == 0) {
+-		pwm_disable(beeper->pwm);
+-	} else {
+-		period = HZ_TO_NANOSECONDS(value);
+-		ret = pwm_config(beeper->pwm, period / 2, period);
+-		if (ret)
+-			return ret;
+-		ret = pwm_enable(beeper->pwm);
+-		if (ret)
+-			return ret;
+-		beeper->period = period;
+-	}
++	if (value == 0)
++		beeper->period = 0;
++	else
++		beeper->period = HZ_TO_NANOSECONDS(value);
++
++	schedule_work(&beeper->work);
  
- 	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
-+	vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
+ 	return 0;
  }
  
- void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
-@@ -6073,12 +6073,10 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
- 	}
- 
- 	/* try to inject new event if pending */
--	if (vcpu->arch.nmi_pending) {
--		if (kvm_x86_ops->nmi_allowed(vcpu)) {
--			--vcpu->arch.nmi_pending;
--			vcpu->arch.nmi_injected = true;
--			kvm_x86_ops->set_nmi(vcpu);
--		}
-+	if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
-+		--vcpu->arch.nmi_pending;
-+		vcpu->arch.nmi_injected = true;
-+		kvm_x86_ops->set_nmi(vcpu);
- 	} else if (kvm_cpu_has_injectable_intr(vcpu)) {
- 		/*
- 		 * Because interrupts can be injected asynchronously, we are
-@@ -6547,10 +6545,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
- 		if (inject_pending_event(vcpu, req_int_win) != 0)
- 			req_immediate_exit = true;
- 		/* enable NMI/IRQ window open exits if needed */
--		else if (vcpu->arch.nmi_pending)
--			kvm_x86_ops->enable_nmi_window(vcpu);
--		else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
--			kvm_x86_ops->enable_irq_window(vcpu);
-+		else {
-+			if (vcpu->arch.nmi_pending)
-+				kvm_x86_ops->enable_nmi_window(vcpu);
-+			if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
-+				kvm_x86_ops->enable_irq_window(vcpu);
-+		}
- 
- 		if (kvm_lapic_enabled(vcpu)) {
- 			update_cr8_intercept(vcpu);
-@@ -6568,8 +6568,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
- 	kvm_x86_ops->prepare_guest_switch(vcpu);
- 	if (vcpu->fpu_active)
- 		kvm_load_guest_fpu(vcpu);
--	kvm_load_guest_xcr0(vcpu);
--
- 	vcpu->mode = IN_GUEST_MODE;
- 
- 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
-@@ -6592,6 +6590,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
- 		goto cancel_injection;
- 	}
- 
-+	kvm_load_guest_xcr0(vcpu);
++static void pwm_beeper_stop(struct pwm_beeper *beeper)
++{
++	cancel_work_sync(&beeper->work);
 +
- 	if (req_immediate_exit)
- 		smp_send_reschedule(vcpu->cpu);
- 
-@@ -6641,6 +6641,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
- 	vcpu->mode = OUTSIDE_GUEST_MODE;
- 	smp_wmb();
- 
-+	kvm_put_guest_xcr0(vcpu);
++	if (beeper->period)
++		pwm_disable(beeper->pwm);
++}
 +
- 	/* Interrupt is enabled by handle_external_intr() */
- 	kvm_x86_ops->handle_external_intr(vcpu);
- 
-@@ -7288,7 +7290,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
- 	 * and assume host would use all available bits.
- 	 * Guest xcr0 would be loaded later.
- 	 */
--	kvm_put_guest_xcr0(vcpu);
- 	vcpu->guest_fpu_loaded = 1;
- 	__kernel_fpu_begin();
- 	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
-@@ -7297,8 +7298,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
- 
- void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
- {
--	kvm_put_guest_xcr0(vcpu);
--
- 	if (!vcpu->guest_fpu_loaded) {
- 		vcpu->fpu_counter = 0;
- 		return;
-diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
-index 637ab34ed632..ddb2244b06a1 100644
---- a/arch/x86/mm/kmmio.c
-+++ b/arch/x86/mm/kmmio.c
-@@ -33,7 +33,7 @@
- struct kmmio_fault_page {
- 	struct list_head list;
- 	struct kmmio_fault_page *release_next;
--	unsigned long page; /* location of the fault page */
-+	unsigned long addr; /* the requested address */
- 	pteval_t old_presence; /* page presence prior to arming */
- 	bool armed;
- 
-@@ -70,9 +70,16 @@ unsigned int kmmio_count;
- static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
- static LIST_HEAD(kmmio_probes);
- 
--static struct list_head *kmmio_page_list(unsigned long page)
-+static struct list_head *kmmio_page_list(unsigned long addr)
- {
--	return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
-+	unsigned int l;
-+	pte_t *pte = lookup_address(addr, &l);
++static void pwm_beeper_close(struct input_dev *input)
++{
++	struct pwm_beeper *beeper = input_get_drvdata(input);
 +
-+	if (!pte)
-+		return NULL;
-+	addr &= page_level_mask(l);
++	pwm_beeper_stop(beeper);
++}
 +
-+	return &kmmio_page_table[hash_long(addr, KMMIO_PAGE_HASH_BITS)];
- }
- 
- /* Accessed per-cpu */
-@@ -98,15 +105,19 @@ static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
- }
- 
- /* You must be holding RCU read lock. */
--static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
-+static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
- {
- 	struct list_head *head;
- 	struct kmmio_fault_page *f;
-+	unsigned int l;
-+	pte_t *pte = lookup_address(addr, &l);
- 
--	page &= PAGE_MASK;
--	head = kmmio_page_list(page);
-+	if (!pte)
-+		return NULL;
-+	addr &= page_level_mask(l);
-+	head = kmmio_page_list(addr);
- 	list_for_each_entry_rcu(f, head, list) {
--		if (f->page == page)
-+		if (f->addr == addr)
- 			return f;
- 	}
- 	return NULL;
-@@ -137,10 +148,10 @@ static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
- static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
+ static int pwm_beeper_probe(struct platform_device *pdev)
  {
- 	unsigned int level;
--	pte_t *pte = lookup_address(f->page, &level);
-+	pte_t *pte = lookup_address(f->addr, &level);
- 
- 	if (!pte) {
--		pr_err("no pte for page 0x%08lx\n", f->page);
-+		pr_err("no pte for addr 0x%08lx\n", f->addr);
- 		return -1;
+ 	unsigned long pwm_id = (unsigned long)dev_get_platdata(&pdev->dev);
+@@ -87,6 +115,8 @@ static int pwm_beeper_probe(struct platform_device *pdev)
+ 		goto err_free;
  	}
  
-@@ -156,7 +167,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
- 		return -1;
- 	}
++	INIT_WORK(&beeper->work, pwm_beeper_work);
++
+ 	beeper->input = input_allocate_device();
+ 	if (!beeper->input) {
+ 		dev_err(&pdev->dev, "Failed to allocate input device\n");
+@@ -106,6 +136,7 @@ static int pwm_beeper_probe(struct platform_device *pdev)
+ 	beeper->input->sndbit[0] = BIT(SND_TONE) | BIT(SND_BELL);
  
--	__flush_tlb_one(f->page);
-+	__flush_tlb_one(f->addr);
- 	return 0;
- }
+ 	beeper->input->event = pwm_beeper_event;
++	beeper->input->close = pwm_beeper_close;
  
-@@ -176,12 +187,12 @@ static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
- 	int ret;
- 	WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
- 	if (f->armed) {
--		pr_warning("double-arm: page 0x%08lx, ref %d, old %d\n",
--			   f->page, f->count, !!f->old_presence);
-+		pr_warning("double-arm: addr 0x%08lx, ref %d, old %d\n",
-+			   f->addr, f->count, !!f->old_presence);
- 	}
- 	ret = clear_page_presence(f, true);
--	WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming 0x%08lx failed.\n"),
--		  f->page);
-+	WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming at 0x%08lx failed.\n"),
-+		  f->addr);
- 	f->armed = true;
- 	return ret;
- }
-@@ -191,7 +202,7 @@ static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
- {
- 	int ret = clear_page_presence(f, false);
- 	WARN_ONCE(ret < 0,
--			KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
-+			KERN_ERR "kmmio disarming at 0x%08lx failed.\n", f->addr);
- 	f->armed = false;
- }
+ 	input_set_drvdata(beeper->input, beeper);
  
-@@ -215,6 +226,12 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
- 	struct kmmio_context *ctx;
- 	struct kmmio_fault_page *faultpage;
- 	int ret = 0; /* default to fault not handled */
-+	unsigned long page_base = addr;
-+	unsigned int l;
-+	pte_t *pte = lookup_address(addr, &l);
-+	if (!pte)
-+		return -EINVAL;
-+	page_base &= page_level_mask(l);
+@@ -135,7 +166,6 @@ static int pwm_beeper_remove(struct platform_device *pdev)
  
- 	/*
- 	 * Preemption is now disabled to prevent process switch during
-@@ -227,7 +244,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
- 	preempt_disable();
- 	rcu_read_lock();
+ 	input_unregister_device(beeper->input);
  
--	faultpage = get_kmmio_fault_page(addr);
-+	faultpage = get_kmmio_fault_page(page_base);
- 	if (!faultpage) {
- 		/*
- 		 * Either this page fault is not caused by kmmio, or
-@@ -239,7 +256,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
- 
- 	ctx = &get_cpu_var(kmmio_ctx);
- 	if (ctx->active) {
--		if (addr == ctx->addr) {
-+		if (page_base == ctx->addr) {
- 			/*
- 			 * A second fault on the same page means some other
- 			 * condition needs handling by do_page_fault(), the
-@@ -267,9 +284,9 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
- 	ctx->active++;
- 
- 	ctx->fpage = faultpage;
--	ctx->probe = get_kmmio_probe(addr);
-+	ctx->probe = get_kmmio_probe(page_base);
- 	ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
--	ctx->addr = addr;
-+	ctx->addr = page_base;
- 
- 	if (ctx->probe && ctx->probe->pre_handler)
- 		ctx->probe->pre_handler(ctx->probe, regs, addr);
-@@ -354,12 +371,11 @@ out:
- }
+-	pwm_disable(beeper->pwm);
+ 	pwm_free(beeper->pwm);
  
- /* You must be holding kmmio_lock. */
--static int add_kmmio_fault_page(unsigned long page)
-+static int add_kmmio_fault_page(unsigned long addr)
+ 	kfree(beeper);
+@@ -147,8 +177,7 @@ static int __maybe_unused pwm_beeper_suspend(struct device *dev)
  {
- 	struct kmmio_fault_page *f;
- 
--	page &= PAGE_MASK;
--	f = get_kmmio_fault_page(page);
-+	f = get_kmmio_fault_page(addr);
- 	if (f) {
- 		if (!f->count)
- 			arm_kmmio_fault_page(f);
-@@ -372,26 +388,25 @@ static int add_kmmio_fault_page(unsigned long page)
- 		return -1;
- 
- 	f->count = 1;
--	f->page = page;
-+	f->addr = addr;
- 
- 	if (arm_kmmio_fault_page(f)) {
- 		kfree(f);
- 		return -1;
- 	}
+ 	struct pwm_beeper *beeper = dev_get_drvdata(dev);
  
--	list_add_rcu(&f->list, kmmio_page_list(f->page));
-+	list_add_rcu(&f->list, kmmio_page_list(f->addr));
+-	if (beeper->period)
+-		pwm_disable(beeper->pwm);
++	pwm_beeper_stop(beeper);
  
  	return 0;
  }
- 
- /* You must be holding kmmio_lock. */
--static void release_kmmio_fault_page(unsigned long page,
-+static void release_kmmio_fault_page(unsigned long addr,
- 				struct kmmio_fault_page **release_list)
+@@ -157,10 +186,8 @@ static int __maybe_unused pwm_beeper_resume(struct device *dev)
  {
- 	struct kmmio_fault_page *f;
- 
--	page &= PAGE_MASK;
--	f = get_kmmio_fault_page(page);
-+	f = get_kmmio_fault_page(addr);
- 	if (!f)
- 		return;
- 
-@@ -420,18 +435,27 @@ int register_kmmio_probe(struct kmmio_probe *p)
- 	int ret = 0;
- 	unsigned long size = 0;
- 	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
-+	unsigned int l;
-+	pte_t *pte;
- 
- 	spin_lock_irqsave(&kmmio_lock, flags);
- 	if (get_kmmio_probe(p->addr)) {
- 		ret = -EEXIST;
- 		goto out;
- 	}
-+
-+	pte = lookup_address(p->addr, &l);
-+	if (!pte) {
-+		ret = -EINVAL;
-+		goto out;
-+	}
-+
- 	kmmio_count++;
- 	list_add_rcu(&p->list, &kmmio_probes);
- 	while (size < size_lim) {
- 		if (add_kmmio_fault_page(p->addr + size))
- 			pr_err("Unable to set page fault.\n");
--		size += PAGE_SIZE;
-+		size += page_level_size(l);
- 	}
- out:
- 	spin_unlock_irqrestore(&kmmio_lock, flags);
-@@ -506,11 +530,17 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
- 	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
- 	struct kmmio_fault_page *release_list = NULL;
- 	struct kmmio_delayed_release *drelease;
-+	unsigned int l;
-+	pte_t *pte;
-+
-+	pte = lookup_address(p->addr, &l);
-+	if (!pte)
-+		return;
+ 	struct pwm_beeper *beeper = dev_get_drvdata(dev);
  
- 	spin_lock_irqsave(&kmmio_lock, flags);
- 	while (size < size_lim) {
- 		release_kmmio_fault_page(p->addr + size, &release_list);
--		size += PAGE_SIZE;
-+		size += page_level_size(l);
- 	}
- 	list_del_rcu(&p->list);
- 	kmmio_count--;
-diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
-index 8f4cc3dfac32..5fb6adaaa796 100644
---- a/arch/x86/mm/tlb.c
-+++ b/arch/x86/mm/tlb.c
-@@ -106,8 +106,6 @@ static void flush_tlb_func(void *info)
- 
- 	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
- 		return;
--	if (!f->flush_end)
--		f->flush_end = f->flush_start + PAGE_SIZE;
+-	if (beeper->period) {
+-		pwm_config(beeper->pwm, beeper->period / 2, beeper->period);
+-		pwm_enable(beeper->pwm);
+-	}
++	if (beeper->period)
++		__pwm_beeper_set(beeper);
  
- 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
- 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
-@@ -135,12 +133,20 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
- 				 unsigned long end)
- {
- 	struct flush_tlb_info info;
-+
-+	if (end == 0)
-+		end = start + PAGE_SIZE;
- 	info.flush_mm = mm;
- 	info.flush_start = start;
- 	info.flush_end = end;
- 
- 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
--	trace_tlb_flush(TLB_REMOTE_SEND_IPI, end - start);
-+	if (end == TLB_FLUSH_ALL)
-+		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
-+	else
-+		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
-+				(end - start) >> PAGE_SHIFT);
-+
- 	if (is_uv_system()) {
- 		unsigned int cpu;
- 
-diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
-index e58565556703..0ae7e9fa348d 100644
---- a/arch/x86/pci/fixup.c
-+++ b/arch/x86/pci/fixup.c
-@@ -540,3 +540,10 @@ static void twinhead_reserve_killing_zone(struct pci_dev *dev)
-         }
+ 	return 0;
  }
- DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
-+
-+static void pci_bdwep_bar(struct pci_dev *dev)
-+{
-+	dev->non_compliant_bars = 1;
-+}
-+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_bdwep_bar);
-+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_bdwep_bar);
-diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
-index beac4dfdade6..349b8ce92bf2 100644
---- a/arch/x86/pci/xen.c
-+++ b/arch/x86/pci/xen.c
-@@ -491,8 +491,11 @@ int __init pci_xen_initial_domain(void)
- #endif
- 	__acpi_register_gsi = acpi_register_gsi_xen;
- 	__acpi_unregister_gsi = NULL;
--	/* Pre-allocate legacy irqs */
--	for (irq = 0; irq < nr_legacy_irqs(); irq++) {
+diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
+index d7be6ddc34f6..2fc499a2207e 100644
+--- a/drivers/irqchip/irq-gic-v3.c
++++ b/drivers/irqchip/irq-gic-v3.c
+@@ -361,6 +361,13 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
+ 			if (static_key_true(&supports_deactivate))
+ 				gic_write_dir(irqnr);
+ #ifdef CONFIG_SMP
++			/*
++			 * Unlike GICv2, we don't need an smp_rmb() here.
++			 * The control dependency from gic_read_iar to
++			 * the ISB in gic_write_eoir is enough to ensure
++			 * that any shared data read by handle_IPI will
++			 * be read after the ACK.
++			 */
+ 			handle_IPI(irqnr, regs);
+ #else
+ 			WARN_ONCE(true, "Unexpected SGI received!\n");
+@@ -380,6 +387,15 @@ static void __init gic_dist_init(void)
+ 	writel_relaxed(0, base + GICD_CTLR);
+ 	gic_dist_wait_for_rwp();
+ 
 +	/*
-+	 * Pre-allocate the legacy IRQs.  Use NR_LEGACY_IRQS here
-+	 * because we don't have a PIC and thus nr_legacy_irqs() is zero.
++	 * Configure SPIs as non-secure Group-1. This will only matter
++	 * if the GIC only has a single security state. This will not
++	 * do the right thing if the kernel is running in secure mode,
++	 * but that's not the intended use case anyway.
 +	 */
-+	for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
- 		int trigger, polarity;
- 
- 		if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
-diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
-index d09e4c9d7cc5..e3679db17545 100644
---- a/arch/x86/xen/enlighten.c
-+++ b/arch/x86/xen/enlighten.c
-@@ -961,7 +961,7 @@ static void xen_load_sp0(struct tss_struct *tss,
- 	tss->x86_tss.sp0 = thread->sp0;
- }
- 
--static void xen_set_iopl_mask(unsigned mask)
-+void xen_set_iopl_mask(unsigned mask)
- {
- 	struct physdev_set_iopl set_iopl;
- 
-diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
-index 9ed55649ac8e..05e1df943856 100644
---- a/arch/xtensa/kernel/head.S
-+++ b/arch/xtensa/kernel/head.S
-@@ -128,7 +128,7 @@ ENTRY(_startup)
- 	wsr	a0, icountlevel
- 
- 	.set	_index, 0
--	.rept	XCHAL_NUM_DBREAK - 1
-+	.rept	XCHAL_NUM_DBREAK
- 	wsr	a0, SREG_DBREAKC + _index
- 	.set	_index, _index + 1
- 	.endr
-diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
-index d75aa1476da7..1a804a2f9a5b 100644
---- a/arch/xtensa/mm/cache.c
-+++ b/arch/xtensa/mm/cache.c
-@@ -97,11 +97,11 @@ void clear_user_highpage(struct page *page, unsigned long vaddr)
- 	unsigned long paddr;
- 	void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);
- 
--	pagefault_disable();
-+	preempt_disable();
- 	kmap_invalidate_coherent(page, vaddr);
- 	set_bit(PG_arch_1, &page->flags);
- 	clear_page_alias(kvaddr, paddr);
--	pagefault_enable();
-+	preempt_enable();
- }
- 
- void copy_user_highpage(struct page *dst, struct page *src,
-@@ -113,11 +113,11 @@ void copy_user_highpage(struct page *dst, struct page *src,
- 	void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
- 					  &src_paddr);
- 
--	pagefault_disable();
-+	preempt_disable();
- 	kmap_invalidate_coherent(dst, vaddr);
- 	set_bit(PG_arch_1, &dst->flags);
- 	copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
--	pagefault_enable();
-+	preempt_enable();
- }
- 
- #endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
-diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
-index 70cb408bc20d..92d785fefb6d 100644
---- a/arch/xtensa/platforms/iss/console.c
-+++ b/arch/xtensa/platforms/iss/console.c
-@@ -100,21 +100,23 @@ static void rs_poll(unsigned long priv)
- {
- 	struct tty_port *port = (struct tty_port *)priv;
- 	int i = 0;
-+	int rd = 1;
- 	unsigned char c;
- 
- 	spin_lock(&timer_lock);
- 
- 	while (simc_poll(0)) {
--		simc_read(0, &c, 1);
-+		rd = simc_read(0, &c, 1);
-+		if (rd <= 0)
-+			break;
- 		tty_insert_flip_char(port, c, TTY_NORMAL);
- 		i++;
- 	}
- 
- 	if (i)
- 		tty_flip_buffer_push(port);
--
--
--	mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
-+	if (rd)
-+		mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
- 	spin_unlock(&timer_lock);
- }
- 
-diff --git a/block/blk-core.c b/block/blk-core.c
-index b83d29755b5a..45f4d7efbf34 100644
---- a/block/blk-core.c
-+++ b/block/blk-core.c
-@@ -2198,7 +2198,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
- 	if (q->mq_ops) {
- 		if (blk_queue_io_stat(q))
- 			blk_account_io_start(rq, true);
--		blk_mq_insert_request(rq, false, true, true);
-+		blk_mq_insert_request(rq, false, true, false);
- 		return 0;
- 	}
- 
-diff --git a/block/partition-generic.c b/block/partition-generic.c
-index fefd01b496a0..cfcfe1b0ecbc 100644
---- a/block/partition-generic.c
-+++ b/block/partition-generic.c
-@@ -350,15 +350,20 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
- 			goto out_del;
- 	}
- 
-+	err = hd_ref_init(p);
-+	if (err) {
-+		if (flags & ADDPART_FLAG_WHOLEDISK)
-+			goto out_remove_file;
-+		goto out_del;
-+	}
++	for (i = 32; i < gic_data.irq_nr; i += 32)
++		writel_relaxed(~0, base + GICD_IGROUPR + i / 8);
 +
- 	/* everything is up and running, commence */
- 	rcu_assign_pointer(ptbl->part[partno], p);
+ 	gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);
  
- 	/* suppress uevent if the disk suppresses it */
- 	if (!dev_get_uevent_suppress(ddev))
- 		kobject_uevent(&pdev->kobj, KOBJ_ADD);
--
--	if (!hd_ref_init(p))
--		return p;
-+	return p;
- 
- out_free_info:
- 	free_part_info(p);
-@@ -367,6 +372,8 @@ out_free_stats:
- out_free:
- 	kfree(p);
- 	return ERR_PTR(err);
-+out_remove_file:
-+	device_remove_file(pdev, &dev_attr_whole_disk);
- out_del:
- 	kobject_put(p->holder_dir);
- 	device_del(pdev);
-diff --git a/crypto/ahash.c b/crypto/ahash.c
-index d19b52324cf5..dac1c24e9c3e 100644
---- a/crypto/ahash.c
-+++ b/crypto/ahash.c
-@@ -69,8 +69,9 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
- 	struct scatterlist *sg;
- 
- 	sg = walk->sg;
--	walk->pg = sg_page(sg);
- 	walk->offset = sg->offset;
-+	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
-+	walk->offset = offset_in_page(walk->offset);
- 	walk->entrylen = sg->length;
- 
- 	if (walk->entrylen > walk->total)
-diff --git a/crypto/asymmetric_keys/pkcs7_trust.c b/crypto/asymmetric_keys/pkcs7_trust.c
-index 90d6d47965b0..ecdb5a2ce085 100644
---- a/crypto/asymmetric_keys/pkcs7_trust.c
-+++ b/crypto/asymmetric_keys/pkcs7_trust.c
-@@ -178,6 +178,8 @@ int pkcs7_validate_trust(struct pkcs7_message *pkcs7,
- 	int cached_ret = -ENOKEY;
- 	int ret;
+ 	/* Enable distributor with ARE, Group1 */
+@@ -494,6 +510,9 @@ static void gic_cpu_init(void)
  
-+	*_trusted = false;
-+
- 	for (p = pkcs7->certs; p; p = p->next)
- 		p->seen = false;
- 
-diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
-index 021d39c0ba75..13c4e5a5fe8c 100644
---- a/crypto/asymmetric_keys/x509_cert_parser.c
-+++ b/crypto/asymmetric_keys/x509_cert_parser.c
-@@ -494,7 +494,7 @@ int x509_decode_time(time64_t *_t,  size_t hdrlen,
- 		     unsigned char tag,
- 		     const unsigned char *value, size_t vlen)
- {
--	static const unsigned char month_lengths[] = { 31, 29, 31, 30, 31, 30,
-+	static const unsigned char month_lengths[] = { 31, 28, 31, 30, 31, 30,
- 						       31, 31, 30, 31, 30, 31 };
- 	const unsigned char *p = value;
- 	unsigned year, mon, day, hour, min, sec, mon_len;
-@@ -540,9 +540,9 @@ int x509_decode_time(time64_t *_t,  size_t hdrlen,
- 		if (year % 4 == 0) {
- 			mon_len = 29;
- 			if (year % 100 == 0) {
--				year /= 100;
--				if (year % 4 != 0)
--					mon_len = 28;
-+				mon_len = 28;
-+				if (year % 400 == 0)
-+					mon_len = 29;
- 			}
- 		}
- 	}
-diff --git a/crypto/keywrap.c b/crypto/keywrap.c
-index b1d106ce55f3..72014f963ba7 100644
---- a/crypto/keywrap.c
-+++ b/crypto/keywrap.c
-@@ -212,7 +212,7 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
- 			  SEMIBSIZE))
- 		ret = -EBADMSG;
+ 	rbase = gic_data_rdist_sgi_base();
  
--	memzero_explicit(&block, sizeof(struct crypto_kw_block));
-+	memzero_explicit(block, sizeof(struct crypto_kw_block));
++	/* Configure SGIs/PPIs as non-secure Group-1 */
++	writel_relaxed(~0, rbase + GICR_IGROUPR0);
++
+ 	gic_cpu_config(rbase, gic_redist_wait_for_rwp);
  
- 	return ret;
- }
-@@ -297,7 +297,7 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
- 	/* establish the IV for the caller to pick up */
- 	memcpy(desc->info, block->A, SEMIBSIZE);
- 
--	memzero_explicit(&block, sizeof(struct crypto_kw_block));
-+	memzero_explicit(block, sizeof(struct crypto_kw_block));
- 
- 	return 0;
- }
-diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
-index 50f5c97e1087..0cbc5a5025c2 100644
---- a/crypto/rsa-pkcs1pad.c
-+++ b/crypto/rsa-pkcs1pad.c
-@@ -310,16 +310,16 @@ static int pkcs1pad_decrypt(struct akcipher_request *req)
- 	req_ctx->child_req.src = req->src;
- 	req_ctx->child_req.src_len = req->src_len;
- 	req_ctx->child_req.dst = req_ctx->out_sg;
--	req_ctx->child_req.dst_len = ctx->key_size - 1;
-+	req_ctx->child_req.dst_len = ctx->key_size ;
- 
--	req_ctx->out_buf = kmalloc(ctx->key_size - 1,
-+	req_ctx->out_buf = kmalloc(ctx->key_size,
- 			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- 			GFP_KERNEL : GFP_ATOMIC);
- 	if (!req_ctx->out_buf)
- 		return -ENOMEM;
- 
- 	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
--			ctx->key_size - 1, NULL);
-+			    ctx->key_size, NULL);
- 
- 	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
- 	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
-@@ -491,16 +491,16 @@ static int pkcs1pad_verify(struct akcipher_request *req)
- 	req_ctx->child_req.src = req->src;
- 	req_ctx->child_req.src_len = req->src_len;
- 	req_ctx->child_req.dst = req_ctx->out_sg;
--	req_ctx->child_req.dst_len = ctx->key_size - 1;
-+	req_ctx->child_req.dst_len = ctx->key_size;
- 
--	req_ctx->out_buf = kmalloc(ctx->key_size - 1,
-+	req_ctx->out_buf = kmalloc(ctx->key_size,
- 			(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- 			GFP_KERNEL : GFP_ATOMIC);
- 	if (!req_ctx->out_buf)
- 		return -ENOMEM;
- 
- 	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
--			ctx->key_size - 1, NULL);
-+			    ctx->key_size, NULL);
- 
- 	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
- 	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
-diff --git a/crypto/testmgr.c b/crypto/testmgr.c
-index ae8c57fd8bc7..d4944318ca1f 100644
---- a/crypto/testmgr.c
-+++ b/crypto/testmgr.c
-@@ -1849,6 +1849,7 @@ static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,
- static int do_test_rsa(struct crypto_akcipher *tfm,
- 		       struct akcipher_testvec *vecs)
- {
-+	char *xbuf[XBUFSIZE];
- 	struct akcipher_request *req;
- 	void *outbuf_enc = NULL;
- 	void *outbuf_dec = NULL;
-@@ -1857,9 +1858,12 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
- 	int err = -ENOMEM;
- 	struct scatterlist src, dst, src_tab[2];
- 
-+	if (testmgr_alloc_buf(xbuf))
-+		return err;
-+
- 	req = akcipher_request_alloc(tfm, GFP_KERNEL);
- 	if (!req)
--		return err;
-+		goto free_xbuf;
- 
- 	init_completion(&result.completion);
+ 	/* Give LPIs a spin */
+diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
+index 8f9ebf714e2b..eef950046ac0 100644
+--- a/drivers/irqchip/irq-gic.c
++++ b/drivers/irqchip/irq-gic.c
+@@ -344,6 +344,14 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+ 			if (static_key_true(&supports_deactivate))
+ 				writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE);
+ #ifdef CONFIG_SMP
++			/*
++			 * Ensure any shared data written by the CPU sending
++			 * the IPI is read after we've read the ACK register
++			 * on the GIC.
++			 *
++			 * Pairs with the write barrier in gic_raise_softirq
++			 */
++			smp_rmb();
+ 			handle_IPI(irqnr, regs);
+ #endif
+ 			continue;
+diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
+index 004926955263..b0155b05cddb 100644
+--- a/drivers/mcb/mcb-parse.c
++++ b/drivers/mcb/mcb-parse.c
+@@ -57,7 +57,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus,
+ 	mdev->id = GDD_DEV(reg1);
+ 	mdev->rev = GDD_REV(reg1);
+ 	mdev->var = GDD_VAR(reg1);
+-	mdev->bar = GDD_BAR(reg1);
++	mdev->bar = GDD_BAR(reg2);
+ 	mdev->group = GDD_GRP(reg2);
+ 	mdev->inst = GDD_INS(reg2);
  
-@@ -1877,9 +1881,14 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
- 	if (!outbuf_enc)
- 		goto free_req;
+diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
+index b7b3e8ee64f2..c30290f33430 100644
+--- a/drivers/mfd/omap-usb-tll.c
++++ b/drivers/mfd/omap-usb-tll.c
+@@ -269,6 +269,8 @@ static int usbtll_omap_probe(struct platform_device *pdev)
  
-+	if (WARN_ON(vecs->m_size > PAGE_SIZE))
-+		goto free_all;
-+
-+	memcpy(xbuf[0], vecs->m, vecs->m_size);
-+
- 	sg_init_table(src_tab, 2);
--	sg_set_buf(&src_tab[0], vecs->m, 8);
--	sg_set_buf(&src_tab[1], vecs->m + 8, vecs->m_size - 8);
-+	sg_set_buf(&src_tab[0], xbuf[0], 8);
-+	sg_set_buf(&src_tab[1], xbuf[0] + 8, vecs->m_size - 8);
- 	sg_init_one(&dst, outbuf_enc, out_len_max);
- 	akcipher_request_set_crypt(req, src_tab, &dst, vecs->m_size,
- 				   out_len_max);
-@@ -1898,7 +1907,7 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
- 		goto free_all;
- 	}
- 	/* verify that encrypted message is equal to expected */
--	if (memcmp(vecs->c, sg_virt(req->dst), vecs->c_size)) {
-+	if (memcmp(vecs->c, outbuf_enc, vecs->c_size)) {
- 		pr_err("alg: rsa: encrypt test failed. Invalid output\n");
- 		err = -EINVAL;
- 		goto free_all;
-@@ -1913,7 +1922,13 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
- 		err = -ENOMEM;
- 		goto free_all;
+ 		if (IS_ERR(tll->ch_clk[i]))
+ 			dev_dbg(dev, "can't get clock : %s\n", clkname);
++		else
++			clk_prepare(tll->ch_clk[i]);
  	}
--	sg_init_one(&src, vecs->c, vecs->c_size);
-+
-+	if (WARN_ON(vecs->c_size > PAGE_SIZE))
-+		goto free_all;
-+
-+	memcpy(xbuf[0], vecs->c, vecs->c_size);
-+
-+	sg_init_one(&src, xbuf[0], vecs->c_size);
- 	sg_init_one(&dst, outbuf_dec, out_len_max);
- 	init_completion(&result.completion);
- 	akcipher_request_set_crypt(req, &src, &dst, vecs->c_size, out_len_max);
-@@ -1940,6 +1955,8 @@ free_all:
- 	kfree(outbuf_enc);
- free_req:
- 	akcipher_request_free(req);
-+free_xbuf:
-+	testmgr_free_buf(xbuf);
- 	return err;
- }
- 
-diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
-index 6979186dbd4b..9f77943653fb 100644
---- a/drivers/acpi/acpi_processor.c
-+++ b/drivers/acpi/acpi_processor.c
-@@ -491,6 +491,58 @@ static void acpi_processor_remove(struct acpi_device *device)
- }
- #endif /* CONFIG_ACPI_HOTPLUG_CPU */
- 
-+#ifdef CONFIG_X86
-+static bool acpi_hwp_native_thermal_lvt_set;
-+static acpi_status __init acpi_hwp_native_thermal_lvt_osc(acpi_handle handle,
-+							  u32 lvl,
-+							  void *context,
-+							  void **rv)
-+{
-+	u8 sb_uuid_str[] = "4077A616-290C-47BE-9EBD-D87058713953";
-+	u32 capbuf[2];
-+	struct acpi_osc_context osc_context = {
-+		.uuid_str = sb_uuid_str,
-+		.rev = 1,
-+		.cap.length = 8,
-+		.cap.pointer = capbuf,
-+	};
-+
-+	if (acpi_hwp_native_thermal_lvt_set)
-+		return AE_CTRL_TERMINATE;
-+
-+	capbuf[0] = 0x0000;
-+	capbuf[1] = 0x1000; /* set bit 12 */
-+
-+	if (ACPI_SUCCESS(acpi_run_osc(handle, &osc_context))) {
-+		if (osc_context.ret.pointer && osc_context.ret.length > 1) {
-+			u32 *capbuf_ret = osc_context.ret.pointer;
-+
-+			if (capbuf_ret[1] & 0x1000) {
-+				acpi_handle_info(handle,
-+					"_OSC native thermal LVT Acked\n");
-+				acpi_hwp_native_thermal_lvt_set = true;
-+			}
-+		}
-+		kfree(osc_context.ret.pointer);
-+	}
-+
-+	return AE_OK;
-+}
-+
-+void __init acpi_early_processor_osc(void)
-+{
-+	if (boot_cpu_has(X86_FEATURE_HWP)) {
-+		acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
-+				    ACPI_UINT32_MAX,
-+				    acpi_hwp_native_thermal_lvt_osc,
-+				    NULL, NULL, NULL);
-+		acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID,
-+				 acpi_hwp_native_thermal_lvt_osc,
-+				 NULL, NULL);
-+	}
-+}
-+#endif
-+
- /*
-  * The following ACPI IDs are known to be suitable for representing as
-  * processor devices.
-diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
-index 6a72047aae1c..c3a052d43317 100644
---- a/drivers/acpi/acpica/dsmethod.c
-+++ b/drivers/acpi/acpica/dsmethod.c
-@@ -428,6 +428,9 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
- 				obj_desc->method.mutex->mutex.
- 				    original_sync_level =
- 				    obj_desc->method.mutex->mutex.sync_level;
-+
-+				obj_desc->method.mutex->mutex.thread_id =
-+				    acpi_os_get_thread_id();
- 			}
- 		}
  
-diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
-index bd75d46234a4..ddb436f86415 100644
---- a/drivers/acpi/acpica/nsinit.c
-+++ b/drivers/acpi/acpica/nsinit.c
-@@ -83,6 +83,8 @@ acpi_status acpi_ns_initialize_objects(void)
- 
- 	ACPI_FUNCTION_TRACE(ns_initialize_objects);
- 
-+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-+			  "[Init] Completing Initialization of ACPI Objects\n"));
- 	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- 			  "**** Starting initialization of namespace objects ****\n"));
- 	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
-diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
-index 278666e39563..c37d47982fbe 100644
---- a/drivers/acpi/acpica/tbxfload.c
-+++ b/drivers/acpi/acpica/tbxfload.c
-@@ -83,6 +83,20 @@ acpi_status __init acpi_load_tables(void)
- 				"While loading namespace from ACPI tables"));
- 	}
+ 	pm_runtime_put_sync(dev);
+@@ -301,9 +303,12 @@ static int usbtll_omap_remove(struct platform_device *pdev)
+ 	tll_dev = NULL;
+ 	spin_unlock(&tll_lock);
  
-+	if (!acpi_gbl_group_module_level_code) {
-+		/*
-+		 * Initialize the objects that remain uninitialized. This
-+		 * runs the executable AML that may be part of the
-+		 * declaration of these objects:
-+		 * operation_regions, buffer_fields, Buffers, and Packages.
-+		 */
-+		status = acpi_ns_initialize_objects();
-+		if (ACPI_FAILURE(status)) {
-+			return_ACPI_STATUS(status);
+-	for (i = 0; i < tll->nch; i++)
+-		if (!IS_ERR(tll->ch_clk[i]))
++	for (i = 0; i < tll->nch; i++) {
++		if (!IS_ERR(tll->ch_clk[i])) {
++			clk_unprepare(tll->ch_clk[i]);
+ 			clk_put(tll->ch_clk[i]);
 +		}
 +	}
-+
-+	acpi_gbl_reg_methods_enabled = TRUE;
- 	return_ACPI_STATUS(status);
- }
  
-diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
-index 721b87cce908..638fbd4ad72b 100644
---- a/drivers/acpi/acpica/utxfinit.c
-+++ b/drivers/acpi/acpica/utxfinit.c
-@@ -267,7 +267,6 @@ acpi_status __init acpi_initialize_objects(u32 flags)
- 	 * initialized, even if they contain executable AML (see the call to
- 	 * acpi_ns_initialize_objects below).
- 	 */
--	acpi_gbl_reg_methods_enabled = TRUE;
- 	if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
- 		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- 				  "[Init] Executing _REG OpRegion methods\n"));
-@@ -299,20 +298,18 @@ acpi_status __init acpi_initialize_objects(u32 flags)
- 	 */
- 	if (acpi_gbl_group_module_level_code) {
- 		acpi_ns_exec_module_code_list();
--	}
+ 	pm_runtime_disable(&pdev->dev);
+ 	return 0;
+@@ -420,7 +425,7 @@ int omap_tll_enable(struct usbhs_omap_platform_data *pdata)
+ 			if (IS_ERR(tll->ch_clk[i]))
+ 				continue;
  
--	/*
--	 * Initialize the objects that remain uninitialized. This runs the
--	 * executable AML that may be part of the declaration of these objects:
--	 * operation_regions, buffer_fields, Buffers, and Packages.
--	 */
--	if (!(flags & ACPI_NO_OBJECT_INIT)) {
--		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
--				  "[Init] Completing Initialization of ACPI Objects\n"));
--
--		status = acpi_ns_initialize_objects();
--		if (ACPI_FAILURE(status)) {
--			return_ACPI_STATUS(status);
-+		/*
-+		 * Initialize the objects that remain uninitialized. This
-+		 * runs the executable AML that may be part of the
-+		 * declaration of these objects:
-+		 * operation_regions, buffer_fields, Buffers, and Packages.
-+		 */
-+		if (!(flags & ACPI_NO_OBJECT_INIT)) {
-+			status = acpi_ns_initialize_objects();
-+			if (ACPI_FAILURE(status)) {
-+				return_ACPI_STATUS(status);
-+			}
+-			r = clk_prepare_enable(tll->ch_clk[i]);
++			r = clk_enable(tll->ch_clk[i]);
+ 			if (r) {
+ 				dev_err(tll_dev,
+ 				 "Error enabling ch %d clock: %d\n", i, r);
+@@ -448,7 +453,7 @@ int omap_tll_disable(struct usbhs_omap_platform_data *pdata)
+ 	for (i = 0; i < tll->nch; i++) {
+ 		if (omap_usb_mode_needs_tll(pdata->port_mode[i])) {
+ 			if (!IS_ERR(tll->ch_clk[i]))
+-				clk_disable_unprepare(tll->ch_clk[i]);
++				clk_disable(tll->ch_clk[i]);
  		}
  	}
  
-diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
-index 891c42d1cd65..f9081b791b81 100644
---- a/drivers/acpi/bus.c
-+++ b/drivers/acpi/bus.c
-@@ -1005,6 +1005,9 @@ static int __init acpi_bus_init(void)
- 		goto error1;
- 	}
+diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
+index cd0403f09267..e79c0371ee6f 100644
+--- a/drivers/misc/mei/amthif.c
++++ b/drivers/misc/mei/amthif.c
+@@ -417,8 +417,10 @@ int mei_amthif_irq_read_msg(struct mei_cl *cl,
  
-+	/* Set capability bits for _OSC under processor scope */
-+	acpi_early_processor_osc();
-+
- 	/*
- 	 * _OSC method may exist in module level code,
- 	 * so it must be run after ACPI_FULL_INITIALIZATION
-diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
-index cd2c3d6d40e0..993fd31394c8 100644
---- a/drivers/acpi/device_pm.c
-+++ b/drivers/acpi/device_pm.c
-@@ -319,6 +319,7 @@ int acpi_device_fix_up_power(struct acpi_device *device)
+ 	dev = cl->dev;
  
- 	return ret;
- }
-+EXPORT_SYMBOL_GPL(acpi_device_fix_up_power);
+-	if (dev->iamthif_state != MEI_IAMTHIF_READING)
++	if (dev->iamthif_state != MEI_IAMTHIF_READING) {
++		mei_irq_discard_msg(dev, mei_hdr);
+ 		return 0;
++	}
  
- int acpi_device_update_power(struct acpi_device *device, int *state_p)
+ 	ret = mei_cl_irq_read_msg(cl, mei_hdr, cmpl_list);
+ 	if (ret)
+diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
+index 1a173d0af694..a77643954523 100644
+--- a/drivers/misc/mei/bus.c
++++ b/drivers/misc/mei/bus.c
+@@ -222,17 +222,23 @@ EXPORT_SYMBOL_GPL(mei_cldev_recv);
+ static void mei_cl_bus_event_work(struct work_struct *work)
  {
-diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
-index 1e6833a5cd44..6f41c73f82bb 100644
---- a/drivers/acpi/internal.h
-+++ b/drivers/acpi/internal.h
-@@ -138,6 +138,12 @@ void acpi_early_processor_set_pdc(void);
- static inline void acpi_early_processor_set_pdc(void) {}
- #endif
- 
-+#ifdef CONFIG_X86
-+void acpi_early_processor_osc(void);
-+#else
-+static inline void acpi_early_processor_osc(void) {}
-+#endif
-+
- /* --------------------------------------------------------------------------
-                                   Embedded Controller
-    -------------------------------------------------------------------------- */
-diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
-index 67da6fb72274..c45fdc49ff28 100644
---- a/drivers/acpi/osl.c
-+++ b/drivers/acpi/osl.c
-@@ -135,7 +135,7 @@ static struct osi_linux {
- 	unsigned int	enable:1;
- 	unsigned int	dmi:1;
- 	unsigned int	cmdline:1;
--	unsigned int	default_disabling:1;
-+	u8		default_disabling;
- } osi_linux = {0, 0, 0, 0};
- 
- static u32 acpi_osi_handler(acpi_string interface, u32 supported)
-@@ -1713,10 +1713,13 @@ void __init acpi_osi_setup(char *str)
- 	if (*str == '!') {
- 		str++;
- 		if (*str == '\0') {
--			osi_linux.default_disabling = 1;
-+			/* Do not override acpi_osi=!* */
-+			if (!osi_linux.default_disabling)
-+				osi_linux.default_disabling =
-+					ACPI_DISABLE_ALL_VENDOR_STRINGS;
- 			return;
- 		} else if (*str == '*') {
--			acpi_update_interfaces(ACPI_DISABLE_ALL_STRINGS);
-+			osi_linux.default_disabling = ACPI_DISABLE_ALL_STRINGS;
- 			for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
- 				osi = &osi_setup_entries[i];
- 				osi->enable = false;
-@@ -1789,10 +1792,13 @@ static void __init acpi_osi_setup_late(void)
- 	acpi_status status;
- 
- 	if (osi_linux.default_disabling) {
--		status = acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
-+		status = acpi_update_interfaces(osi_linux.default_disabling);
- 
- 		if (ACPI_SUCCESS(status))
--			printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors\n");
-+			printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors%s\n",
-+				osi_linux.default_disabling ==
-+				ACPI_DISABLE_ALL_STRINGS ?
-+				" and feature groups" : "");
- 	}
- 
- 	for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
-diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
-index d02fd53042a5..56241eb341f4 100644
---- a/drivers/acpi/resource.c
-+++ b/drivers/acpi/resource.c
-@@ -27,8 +27,20 @@
- 
- #ifdef CONFIG_X86
- #define valid_IRQ(i) (((i) != 0) && ((i) != 2))
-+static inline bool acpi_iospace_resource_valid(struct resource *res)
-+{
-+	/* On X86 IO space is limited to the [0 - 64K] IO port range */
-+	return res->end < 0x10003;
-+}
- #else
- #define valid_IRQ(i) (true)
-+/*
-+ * ACPI IO descriptors on arches other than X86 contain MMIO CPU physical
-+ * addresses mapping IO space in CPU physical address space, IO space
-+ * resources can be placed anywhere in the 64-bit physical address space.
-+ */
-+static inline bool
-+acpi_iospace_resource_valid(struct resource *res) { return true; }
- #endif
- 
- static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io)
-@@ -127,7 +139,7 @@ static void acpi_dev_ioresource_flags(struct resource *res, u64 len,
- 	if (!acpi_dev_resource_len_valid(res->start, res->end, len, true))
- 		res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
- 
--	if (res->end >= 0x10003)
-+	if (!acpi_iospace_resource_valid(res))
- 		res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
+ 	struct mei_cl_device *cldev;
++	struct mei_device *bus;
  
- 	if (io_decode == ACPI_DECODE_16)
-diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
-index 9cb975200cac..f054cadf30d8 100644
---- a/drivers/acpi/sleep.c
-+++ b/drivers/acpi/sleep.c
-@@ -714,6 +714,7 @@ static int acpi_hibernation_enter(void)
+ 	cldev = container_of(work, struct mei_cl_device, event_work);
  
- static void acpi_hibernation_leave(void)
- {
-+	pm_set_resume_via_firmware();
- 	/*
- 	 * If ACPI is not enabled by the BIOS and the boot kernel, we need to
- 	 * enable it here.
-diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
-index 04975b851c23..639adb1f8abd 100644
---- a/drivers/ata/ahci_platform.c
-+++ b/drivers/ata/ahci_platform.c
-@@ -51,6 +51,9 @@ static int ahci_probe(struct platform_device *pdev)
- 	if (rc)
- 		return rc;
- 
-+	of_property_read_u32(dev->of_node,
-+			     "ports-implemented", &hpriv->force_port_map);
++	bus = cldev->bus;
 +
- 	if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci"))
- 		hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
- 
-diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
-index 8e3f7faf00d3..73b19b277138 100644
---- a/drivers/ata/ahci_xgene.c
-+++ b/drivers/ata/ahci_xgene.c
-@@ -821,9 +821,9 @@ static int xgene_ahci_probe(struct platform_device *pdev)
- 				dev_warn(&pdev->dev, "%s: Error reading device info. Assume version1\n",
- 					__func__);
- 				version = XGENE_AHCI_V1;
--			}
--			if (info->valid & ACPI_VALID_CID)
-+			} else if (info->valid & ACPI_VALID_CID) {
- 				version = XGENE_AHCI_V2;
-+			}
- 		}
- 	}
- #endif
-diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
-index 85ea5142a095..bb050ea26101 100644
---- a/drivers/ata/libahci.c
-+++ b/drivers/ata/libahci.c
-@@ -469,6 +469,7 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
- 		dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
- 			 port_map, hpriv->force_port_map);
- 		port_map = hpriv->force_port_map;
-+		hpriv->saved_port_map = port_map;
- 	}
+ 	if (cldev->event_cb)
+ 		cldev->event_cb(cldev, cldev->events, cldev->event_context);
  
- 	if (hpriv->mask_port_map) {
-diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
-index 301b785f9f56..0caf92ae25f3 100644
---- a/drivers/base/power/domain.c
-+++ b/drivers/base/power/domain.c
-@@ -1378,7 +1378,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
- 	mutex_lock(&subdomain->lock);
- 	mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
- 
--	if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
-+	if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
- 		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
- 			subdomain->name);
- 		ret = -EBUSY;
-diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
-index cf351d3dab1c..0708f301ad97 100644
---- a/drivers/base/power/opp/core.c
-+++ b/drivers/base/power/opp/core.c
-@@ -844,8 +844,14 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
- 	}
+ 	cldev->events = 0;
  
- 	opp->u_volt = microvolt[0];
--	opp->u_volt_min = microvolt[1];
--	opp->u_volt_max = microvolt[2];
-+
-+	if (count == 1) {
-+		opp->u_volt_min = opp->u_volt;
-+		opp->u_volt_max = opp->u_volt;
-+	} else {
-+		opp->u_volt_min = microvolt[1];
-+		opp->u_volt_max = microvolt[2];
+ 	/* Prepare for the next read */
+-	if (cldev->events_mask & BIT(MEI_CL_EVENT_RX))
++	if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) {
++		mutex_lock(&bus->device_lock);
+ 		mei_cl_read_start(cldev->cl, 0, NULL);
++		mutex_unlock(&bus->device_lock);
 +	}
+ }
  
- 	/* Search for "opp-microamp-<name>" */
- 	prop = NULL;
-diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
-index 7e58f6560399..4a36e415e938 100644
---- a/drivers/base/regmap/regmap-spmi.c
-+++ b/drivers/base/regmap/regmap-spmi.c
-@@ -142,7 +142,7 @@ static int regmap_spmi_ext_read(void *context,
- 	while (val_size) {
- 		len = min_t(size_t, val_size, 8);
- 
--		err = spmi_ext_register_readl(context, addr, val, val_size);
-+		err = spmi_ext_register_readl(context, addr, val, len);
- 		if (err)
- 			goto err_out;
- 
-diff --git a/drivers/block/brd.c b/drivers/block/brd.c
-index cb27190e9f39..f7ecc287d733 100644
---- a/drivers/block/brd.c
-+++ b/drivers/block/brd.c
-@@ -341,7 +341,7 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
- 
- 	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
- 		if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) ||
--		    bio->bi_iter.bi_size & PAGE_MASK)
-+		    bio->bi_iter.bi_size & ~PAGE_MASK)
- 			goto io_error;
- 		discard_from_brd(brd, sector, bio->bi_iter.bi_size);
- 		goto out;
-diff --git a/drivers/block/loop.c b/drivers/block/loop.c
-index 423f4ca7d712..80cf8add46ff 100644
---- a/drivers/block/loop.c
-+++ b/drivers/block/loop.c
-@@ -488,6 +488,12 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
- 	bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
- 	iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
- 		      bio_segments(bio), blk_rq_bytes(cmd->rq));
-+	/*
-+	 * This bio may be started from the middle of the 'bvec'
-+	 * because of bio splitting, so offset from the bvec must
-+	 * be passed to iov iterator
-+	 */
-+	iter.iov_offset = bio->bi_iter.bi_bvec_done;
- 
- 	cmd->iocb.ki_pos = pos;
- 	cmd->iocb.ki_filp = file;
-diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
-index 9b180dbbd03c..1c330b61f05d 100644
---- a/drivers/block/mtip32xx/mtip32xx.c
-+++ b/drivers/block/mtip32xx/mtip32xx.c
-@@ -173,7 +173,13 @@ static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
+ /**
+@@ -296,6 +302,7 @@ int mei_cldev_register_event_cb(struct mei_cl_device *cldev,
+ 				unsigned long events_mask,
+ 				mei_cldev_event_cb_t event_cb, void *context)
  {
- 	struct request *rq;
++	struct mei_device *bus = cldev->bus;
+ 	int ret;
  
-+	if (mtip_check_surprise_removal(dd->pdev))
-+		return NULL;
-+
- 	rq = blk_mq_alloc_request(dd->queue, 0, BLK_MQ_REQ_RESERVED);
-+	if (IS_ERR(rq))
-+		return NULL;
-+
- 	return blk_mq_rq_to_pdu(rq);
- }
+ 	if (cldev->event_cb)
+@@ -308,15 +315,17 @@ int mei_cldev_register_event_cb(struct mei_cl_device *cldev,
+ 	INIT_WORK(&cldev->event_work, mei_cl_bus_event_work);
  
-@@ -233,15 +239,9 @@ static void mtip_async_complete(struct mtip_port *port,
- 			"Command tag %d failed due to TFE\n", tag);
+ 	if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) {
++		mutex_lock(&bus->device_lock);
+ 		ret = mei_cl_read_start(cldev->cl, 0, NULL);
++		mutex_unlock(&bus->device_lock);
+ 		if (ret && ret != -EBUSY)
+ 			return ret;
  	}
  
--	/* Unmap the DMA scatter list entries */
--	dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents, cmd->direction);
--
- 	rq = mtip_rq_from_tag(dd, tag);
- 
--	if (unlikely(cmd->unaligned))
--		up(&port->cmd_slot_unal);
--
--	blk_mq_end_request(rq, status ? -EIO : 0);
-+	blk_mq_complete_request(rq, status);
- }
- 
- /*
-@@ -581,6 +581,8 @@ static void mtip_completion(struct mtip_port *port,
- 		dev_warn(&port->dd->pdev->dev,
- 			"Internal command %d completed with TFE\n", tag);
- 
-+	command->comp_func = NULL;
-+	command->comp_data = NULL;
- 	complete(waiting);
- }
- 
-@@ -618,8 +620,6 @@ static void mtip_handle_tfe(struct driver_data *dd)
- 
- 	port = dd->port;
- 
--	set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
--
- 	if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
- 		cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
- 		dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
-@@ -628,7 +628,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
- 			cmd->comp_func(port, MTIP_TAG_INTERNAL,
- 					cmd, PORT_IRQ_TF_ERR);
- 		}
--		goto handle_tfe_exit;
-+		return;
+ 	if (cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF)) {
+-		mutex_lock(&cldev->cl->dev->device_lock);
++		mutex_lock(&bus->device_lock);
+ 		ret = mei_cl_notify_request(cldev->cl, NULL, event_cb ? 1 : 0);
+-		mutex_unlock(&cldev->cl->dev->device_lock);
++		mutex_unlock(&bus->device_lock);
+ 		if (ret)
+ 			return ret;
  	}
+diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
+index a6c87c713193..958af84884b5 100644
+--- a/drivers/misc/mei/client.c
++++ b/drivers/misc/mei/client.c
+@@ -1735,6 +1735,10 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
+ 			wake_up(&cl->wait);
  
- 	/* clear the tag accumulator */
-@@ -701,7 +701,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
- 			fail_reason = "thermal shutdown";
- 		}
- 		if (buf[288] == 0xBF) {
--			set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
-+			set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
- 			dev_info(&dd->pdev->dev,
- 				"Drive indicates rebuild has failed. Secure erase required.\n");
- 			fail_all_ncq_cmds = 1;
-@@ -771,11 +771,6 @@ static void mtip_handle_tfe(struct driver_data *dd)
- 		}
+ 		break;
++	case MEI_FOP_DISCONNECT_RSP:
++		mei_io_cb_free(cb);
++		mei_cl_set_disconnected(cl);
++		break;
+ 	default:
+ 		BUG_ON(0);
  	}
- 	print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
--
--handle_tfe_exit:
--	/* clear eh_active */
--	clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
--	wake_up_interruptible(&port->svc_wait);
+diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
+index e7b7aad0999b..fd8a9f057ea6 100644
+--- a/drivers/misc/mei/hbm.c
++++ b/drivers/misc/mei/hbm.c
+@@ -873,8 +873,7 @@ static int mei_hbm_fw_disconnect_req(struct mei_device *dev,
+ 		cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT_RSP, NULL);
+ 		if (!cb)
+ 			return -ENOMEM;
+-		cl_dbg(dev, cl, "add disconnect response as first\n");
+-		list_add(&cb->list, &dev->ctrl_wr_list.list);
++		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+ 	}
+ 	return 0;
  }
- 
- /*
-@@ -1007,6 +1002,7 @@ static bool mtip_pause_ncq(struct mtip_port *port,
- 			(fis->features == 0x27 || fis->features == 0x72 ||
- 			 fis->features == 0x62 || fis->features == 0x26))) {
- 		clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
-+		clear_bit(MTIP_DDF_REBUILD_FAILED_BIT, &port->dd->dd_flag);
- 		/* Com reset after secure erase or lowlevel format */
- 		mtip_restart_port(port);
- 		clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
-@@ -1021,12 +1017,14 @@ static bool mtip_pause_ncq(struct mtip_port *port,
-  *
-  * @port    Pointer to port data structure
-  * @timeout Max duration to wait (ms)
-+ * @atomic  gfp_t flag to indicate blockable context or not
-  *
-  * return value
-  *	0	Success
-  *	-EBUSY  Commands still active
+diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
+index 64b568a0268d..d1df797c7568 100644
+--- a/drivers/misc/mei/interrupt.c
++++ b/drivers/misc/mei/interrupt.c
+@@ -76,7 +76,6 @@ static inline int mei_cl_hbm_equal(struct mei_cl *cl,
+  * @dev: mei device
+  * @hdr: message header
   */
--static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
-+static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout,
-+								gfp_t atomic)
+-static inline
+ void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr)
  {
- 	unsigned long to;
- 	unsigned int n;
-@@ -1037,16 +1035,21 @@ static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
- 	to = jiffies + msecs_to_jiffies(timeout);
- 	do {
- 		if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) &&
--			test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
-+			test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags) &&
-+			atomic == GFP_KERNEL) {
- 			msleep(20);
- 			continue; /* svc thd is actively issuing commands */
- 		}
- 
--		msleep(100);
-+		if (atomic == GFP_KERNEL)
-+			msleep(100);
-+		else {
-+			cpu_relax();
-+			udelay(100);
-+		}
-+
- 		if (mtip_check_surprise_removal(port->dd->pdev))
- 			goto err_fault;
--		if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
--			goto err_fault;
- 
- 		/*
- 		 * Ignore s_active bit 0 of array element 0.
-@@ -1099,6 +1102,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
- 	struct mtip_cmd *int_cmd;
- 	struct driver_data *dd = port->dd;
- 	int rv = 0;
-+	unsigned long start;
- 
- 	/* Make sure the buffer is 8 byte aligned. This is asic specific. */
- 	if (buffer & 0x00000007) {
-@@ -1107,6 +1111,10 @@ static int mtip_exec_internal_command(struct mtip_port *port,
- 	}
- 
- 	int_cmd = mtip_get_int_command(dd);
-+	if (!int_cmd) {
-+		dbg_printk(MTIP_DRV_NAME "Unable to allocate tag for PIO cmd\n");
-+		return -EFAULT;
-+	}
+ 	/*
+@@ -184,10 +183,7 @@ static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
+ 		return -EMSGSIZE;
  
- 	set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
- 
-@@ -1119,7 +1127,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
- 		if (fis->command != ATA_CMD_STANDBYNOW1) {
- 			/* wait for io to complete if non atomic */
- 			if (mtip_quiesce_io(port,
--					MTIP_QUIESCE_IO_TIMEOUT_MS) < 0) {
-+				MTIP_QUIESCE_IO_TIMEOUT_MS, atomic) < 0) {
- 				dev_warn(&dd->pdev->dev,
- 					"Failed to quiesce IO\n");
- 				mtip_put_int_command(dd, int_cmd);
-@@ -1162,6 +1170,8 @@ static int mtip_exec_internal_command(struct mtip_port *port,
- 	/* Populate the command header */
- 	int_cmd->command_header->byte_count = 0;
- 
-+	start = jiffies;
-+
- 	/* Issue the command to the hardware */
- 	mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);
+ 	ret = mei_hbm_cl_disconnect_rsp(dev, cl);
+-	mei_cl_set_disconnected(cl);
+-	mei_io_cb_free(cb);
+-	mei_me_cl_put(cl->me_cl);
+-	cl->me_cl = NULL;
++	list_move_tail(&cb->list, &cmpl_list->list);
  
-@@ -1170,10 +1180,12 @@ static int mtip_exec_internal_command(struct mtip_port *port,
- 		if ((rv = wait_for_completion_interruptible_timeout(
- 				&wait,
- 				msecs_to_jiffies(timeout))) <= 0) {
-+
- 			if (rv == -ERESTARTSYS) { /* interrupted */
- 				dev_err(&dd->pdev->dev,
--					"Internal command [%02X] was interrupted after %lu ms\n",
--					fis->command, timeout);
-+					"Internal command [%02X] was interrupted after %u ms\n",
-+					fis->command,
-+					jiffies_to_msecs(jiffies - start));
- 				rv = -EINTR;
- 				goto exec_ic_exit;
- 			} else if (rv == 0) /* timeout */
-@@ -2890,6 +2902,42 @@ static int mtip_ftl_rebuild_poll(struct driver_data *dd)
- 	return -EFAULT;
+ 	return ret;
  }
+diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
+index 4250555d5e72..1b06e2fd6858 100644
+--- a/drivers/misc/mei/mei_dev.h
++++ b/drivers/misc/mei/mei_dev.h
+@@ -782,6 +782,8 @@ bool mei_hbuf_acquire(struct mei_device *dev);
  
-+static void mtip_softirq_done_fn(struct request *rq)
-+{
-+	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
-+	struct driver_data *dd = rq->q->queuedata;
-+
-+	/* Unmap the DMA scatter list entries */
-+	dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents,
-+							cmd->direction);
-+
-+	if (unlikely(cmd->unaligned))
-+		up(&dd->port->cmd_slot_unal);
-+
-+	blk_mq_end_request(rq, rq->errors);
-+}
-+
-+static void mtip_abort_cmd(struct request *req, void *data,
-+							bool reserved)
-+{
-+	struct driver_data *dd = data;
-+
-+	dbg_printk(MTIP_DRV_NAME " Aborting request, tag = %d\n", req->tag);
-+
-+	clear_bit(req->tag, dd->port->cmds_to_issue);
-+	req->errors = -EIO;
-+	mtip_softirq_done_fn(req);
-+}
-+
-+static void mtip_queue_cmd(struct request *req, void *data,
-+							bool reserved)
-+{
-+	struct driver_data *dd = data;
-+
-+	set_bit(req->tag, dd->port->cmds_to_issue);
-+	blk_abort_request(req);
-+}
-+
- /*
-  * service thread to issue queued commands
-  *
-@@ -2902,7 +2950,7 @@ static int mtip_ftl_rebuild_poll(struct driver_data *dd)
- static int mtip_service_thread(void *data)
- {
- 	struct driver_data *dd = (struct driver_data *)data;
--	unsigned long slot, slot_start, slot_wrap;
-+	unsigned long slot, slot_start, slot_wrap, to;
- 	unsigned int num_cmd_slots = dd->slot_groups * 32;
- 	struct mtip_port *port = dd->port;
- 
-@@ -2917,9 +2965,7 @@ static int mtip_service_thread(void *data)
- 		 * is in progress nor error handling is active
- 		 */
- 		wait_event_interruptible(port->svc_wait, (port->flags) &&
--			!(port->flags & MTIP_PF_PAUSE_IO));
--
--		set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
-+			(port->flags & MTIP_PF_SVC_THD_WORK));
- 
- 		if (kthread_should_stop() ||
- 			test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
-@@ -2929,6 +2975,8 @@ static int mtip_service_thread(void *data)
- 				&dd->dd_flag)))
- 			goto st_out;
+ bool mei_write_is_idle(struct mei_device *dev);
  
-+		set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
-+
- restart_eh:
- 		/* Demux bits: start with error handling */
- 		if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags)) {
-@@ -2939,6 +2987,32 @@ restart_eh:
- 		if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags))
- 			goto restart_eh;
- 
-+		if (test_bit(MTIP_PF_TO_ACTIVE_BIT, &port->flags)) {
-+			to = jiffies + msecs_to_jiffies(5000);
-+
-+			do {
-+				mdelay(100);
-+			} while (atomic_read(&dd->irq_workers_active) != 0 &&
-+				time_before(jiffies, to));
-+
-+			if (atomic_read(&dd->irq_workers_active) != 0)
-+				dev_warn(&dd->pdev->dev,
-+					"Completion workers still active!");
-+
-+			spin_lock(dd->queue->queue_lock);
-+			blk_mq_all_tag_busy_iter(*dd->tags.tags,
-+							mtip_queue_cmd, dd);
-+			spin_unlock(dd->queue->queue_lock);
-+
-+			set_bit(MTIP_PF_ISSUE_CMDS_BIT, &dd->port->flags);
-+
-+			if (mtip_device_reset(dd))
-+				blk_mq_all_tag_busy_iter(*dd->tags.tags,
-+							mtip_abort_cmd, dd);
-+
-+			clear_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags);
-+		}
++void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr);
 +
- 		if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
- 			slot = 1;
- 			/* used to restrict the loop to one iteration */
-@@ -2971,10 +3045,8 @@ restart_eh:
- 		}
- 
- 		if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) {
--			if (mtip_ftl_rebuild_poll(dd) < 0)
--				set_bit(MTIP_DDF_REBUILD_FAILED_BIT,
--							&dd->dd_flag);
--			clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
-+			if (mtip_ftl_rebuild_poll(dd) == 0)
-+				clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
- 		}
- 	}
- 
-@@ -3089,7 +3161,7 @@ static int mtip_hw_get_identify(struct driver_data *dd)
- 		if (buf[288] == 0xBF) {
- 			dev_info(&dd->pdev->dev,
- 				"Drive indicates rebuild has failed.\n");
--			/* TODO */
-+			set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
- 		}
- 	}
- 
-@@ -3263,20 +3335,25 @@ out1:
- 	return rv;
- }
- 
--static void mtip_standby_drive(struct driver_data *dd)
-+static int mtip_standby_drive(struct driver_data *dd)
- {
--	if (dd->sr)
--		return;
-+	int rv = 0;
+ #if IS_ENABLED(CONFIG_DEBUG_FS)
+ int mei_dbgfs_register(struct mei_device *dev, const char *name);
+ void mei_dbgfs_deregister(struct mei_device *dev);
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index c6f36f3ca5d2..7630b6b7eb75 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -2494,11 +2494,12 @@ static const struct mmc_fixup blk_fixups[] =
+ 		  MMC_QUIRK_BLK_NO_CMD23),
  
-+	if (dd->sr || !dd->port)
-+		return -ENODEV;
  	/*
- 	 * Send standby immediate (E0h) to the drive so that it
- 	 * saves its state.
+-	 * Some Micron MMC cards needs longer data read timeout than
+-	 * indicated in CSD.
++	 * Some MMC cards need longer data read timeout than indicated in CSD.
  	 */
- 	if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) &&
--	    !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))
--		if (mtip_standby_immediate(dd->port))
-+	    !test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag) &&
-+	    !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)) {
-+		rv = mtip_standby_immediate(dd->port);
-+		if (rv)
- 			dev_warn(&dd->pdev->dev,
- 				"STANDBY IMMEDIATE failed\n");
-+	}
-+	return rv;
- }
+ 	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
+ 		  MMC_QUIRK_LONG_READ_TIME),
++	MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
++		  MMC_QUIRK_LONG_READ_TIME),
  
- /*
-@@ -3289,10 +3366,6 @@ static void mtip_standby_drive(struct driver_data *dd)
-  */
- static int mtip_hw_exit(struct driver_data *dd)
- {
--	/*
--	 * Send standby immediate (E0h) to the drive so that it
--	 * saves its state.
--	 */
- 	if (!dd->sr) {
- 		/* de-initialize the port. */
- 		mtip_deinit_port(dd->port);
-@@ -3334,8 +3407,7 @@ static int mtip_hw_shutdown(struct driver_data *dd)
- 	 * Send standby immediate (E0h) to the drive so that it
- 	 * saves its state.
+ 	/*
+ 	 * On these Samsung MoviNAND parts, performing secure erase or
+diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
+index f95d41ffc766..228a81bcea49 100644
+--- a/drivers/mmc/core/core.c
++++ b/drivers/mmc/core/core.c
+@@ -868,11 +868,11 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
+ 	/*
+ 	 * Some cards require longer data read timeout than indicated in CSD.
+ 	 * Address this by setting the read timeout to a "reasonably high"
+-	 * value. For the cards tested, 300ms has proven enough. If necessary,
++	 * value. For the cards tested, 600ms has proven enough. If necessary,
+ 	 * this value can be increased if other problematic cards require this.
  	 */
--	if (!dd->sr && dd->port)
--		mtip_standby_immediate(dd->port);
-+	mtip_standby_drive(dd);
+ 	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
+-		data->timeout_ns = 300000000;
++		data->timeout_ns = 600000000;
+ 		data->timeout_clks = 0;
+ 	}
  
- 	return 0;
- }
-@@ -3358,7 +3430,7 @@ static int mtip_hw_suspend(struct driver_data *dd)
- 	 * Send standby immediate (E0h) to the drive
- 	 * so that it saves its state.
- 	 */
--	if (mtip_standby_immediate(dd->port) != 0) {
-+	if (mtip_standby_drive(dd) != 0) {
- 		dev_err(&dd->pdev->dev,
- 			"Failed standby-immediate command\n");
- 		return -EFAULT;
-@@ -3596,6 +3668,28 @@ static int mtip_block_getgeo(struct block_device *dev,
- 	return 0;
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index bf49e44571f2..07a419fedd43 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -333,6 +333,9 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
+ 	}
  }
  
-+static int mtip_block_open(struct block_device *dev, fmode_t mode)
-+{
-+	struct driver_data *dd;
-+
-+	if (dev && dev->bd_disk) {
-+		dd = (struct driver_data *) dev->bd_disk->private_data;
-+
-+		if (dd) {
-+			if (test_bit(MTIP_DDF_REMOVAL_BIT,
-+							&dd->dd_flag)) {
-+				return -ENODEV;
-+			}
-+			return 0;
-+		}
-+	}
-+	return -ENODEV;
-+}
-+
-+void mtip_block_release(struct gendisk *disk, fmode_t mode)
-+{
-+}
++/* Minimum partition switch timeout in milliseconds */
++#define MMC_MIN_PART_SWITCH_TIME	300
 +
  /*
-  * Block device operation function.
-  *
-@@ -3603,6 +3697,8 @@ static int mtip_block_getgeo(struct block_device *dev,
-  * layer.
+  * Decode extended CSD.
   */
- static const struct block_device_operations mtip_block_ops = {
-+	.open		= mtip_block_open,
-+	.release	= mtip_block_release,
- 	.ioctl		= mtip_block_ioctl,
- #ifdef CONFIG_COMPAT
- 	.compat_ioctl	= mtip_block_compat_ioctl,
-@@ -3664,10 +3760,9 @@ static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
- 				rq_data_dir(rq))) {
- 			return -ENODATA;
- 		}
--		if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)))
-+		if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag) ||
-+			test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)))
- 			return -ENODATA;
--		if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
--			return -ENXIO;
- 	}
+@@ -397,6 +400,10 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
  
- 	if (rq->cmd_flags & REQ_DISCARD) {
-@@ -3779,11 +3874,33 @@ static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx,
- 	return 0;
- }
+ 		/* EXT_CSD value is in units of 10ms, but we store in ms */
+ 		card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
++		/* Some eMMC set the value too low so set a minimum */
++		if (card->ext_csd.part_time &&
++		    card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME)
++			card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME;
  
-+static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req,
-+								bool reserved)
-+{
-+	struct driver_data *dd = req->q->queuedata;
-+	int ret = BLK_EH_RESET_TIMER;
-+
-+	if (reserved)
-+		goto exit_handler;
-+
-+	if (test_bit(req->tag, dd->port->cmds_to_issue))
-+		goto exit_handler;
-+
-+	if (test_and_set_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags))
-+		goto exit_handler;
-+
-+	wake_up_interruptible(&dd->port->svc_wait);
-+exit_handler:
-+	return ret;
-+}
-+
- static struct blk_mq_ops mtip_mq_ops = {
- 	.queue_rq	= mtip_queue_rq,
- 	.map_queue	= blk_mq_map_queue,
- 	.init_request	= mtip_init_cmd,
- 	.exit_request	= mtip_free_cmd,
-+	.complete	= mtip_softirq_done_fn,
-+	.timeout        = mtip_cmd_timeout,
+ 		/* Sleep / awake timeout in 100ns units */
+ 		if (sa_shift > 0 && sa_shift <= 0x17)
+diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
+index 975139f97498..e517be7f03bf 100644
+--- a/drivers/mmc/host/sdhci-acpi.c
++++ b/drivers/mmc/host/sdhci-acpi.c
+@@ -307,7 +307,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
+ 	.chip    = &sdhci_acpi_chip_int,
+ 	.caps    = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
+ 		   MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
+-		   MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
++		   MMC_CAP_WAIT_WHILE_BUSY,
+ 	.caps2   = MMC_CAP2_HC_ERASE_SZ,
+ 	.flags   = SDHCI_ACPI_RUNTIME_PM,
+ 	.quirks  = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+@@ -322,7 +322,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
+ 		   SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ 	.quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
+ 	.caps    = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD |
+-		   MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
++		   MMC_CAP_WAIT_WHILE_BUSY,
+ 	.flags   = SDHCI_ACPI_RUNTIME_PM,
+ 	.pm_caps = MMC_PM_KEEP_POWER,
+ 	.probe_slot	= sdhci_acpi_sdio_probe_slot,
+@@ -334,7 +334,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
+ 	.quirks  = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ 	.quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
+ 		   SDHCI_QUIRK2_STOP_WITH_TC,
+-	.caps    = MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
++	.caps    = MMC_CAP_WAIT_WHILE_BUSY,
+ 	.probe_slot	= sdhci_acpi_sd_probe_slot,
  };
  
- /*
-@@ -3850,7 +3967,6 @@ static int mtip_block_initialize(struct driver_data *dd)
- 
- 	mtip_hw_debugfs_init(dd);
- 
--skip_create_disk:
- 	memset(&dd->tags, 0, sizeof(dd->tags));
- 	dd->tags.ops = &mtip_mq_ops;
- 	dd->tags.nr_hw_queues = 1;
-@@ -3860,12 +3976,13 @@ skip_create_disk:
- 	dd->tags.numa_node = dd->numa_node;
- 	dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
- 	dd->tags.driver_data = dd;
-+	dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;
- 
- 	rv = blk_mq_alloc_tag_set(&dd->tags);
- 	if (rv) {
- 		dev_err(&dd->pdev->dev,
- 			"Unable to allocate request queue\n");
--		goto block_queue_alloc_init_error;
-+		goto block_queue_alloc_tag_error;
- 	}
- 
- 	/* Allocate the request queue. */
-@@ -3880,6 +3997,7 @@ skip_create_disk:
- 	dd->disk->queue		= dd->queue;
- 	dd->queue->queuedata	= dd;
- 
-+skip_create_disk:
- 	/* Initialize the protocol layer. */
- 	wait_for_rebuild = mtip_hw_get_identify(dd);
- 	if (wait_for_rebuild < 0) {
-@@ -3976,8 +4094,9 @@ kthread_run_error:
- read_capacity_error:
- init_hw_cmds_error:
- 	blk_cleanup_queue(dd->queue);
--	blk_mq_free_tag_set(&dd->tags);
- block_queue_alloc_init_error:
-+	blk_mq_free_tag_set(&dd->tags);
-+block_queue_alloc_tag_error:
- 	mtip_hw_debugfs_exit(dd);
- disk_index_error:
- 	spin_lock(&rssd_index_lock);
-@@ -3994,6 +4113,22 @@ protocol_init_error:
- 	return rv;
- }
+@@ -396,7 +396,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+ 	acpi_handle handle = ACPI_HANDLE(dev);
+-	struct acpi_device *device;
++	struct acpi_device *device, *child;
+ 	struct sdhci_acpi_host *c;
+ 	struct sdhci_host *host;
+ 	struct resource *iomem;
+@@ -408,6 +408,11 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
+ 	if (acpi_bus_get_device(handle, &device))
+ 		return -ENODEV;
  
-+static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
-+{
-+	struct driver_data *dd = (struct driver_data *)data;
-+	struct mtip_cmd *cmd;
-+
-+	if (likely(!reserv))
-+		blk_mq_complete_request(rq, -ENODEV);
-+	else if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &dd->port->flags)) {
-+
-+		cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
-+		if (cmd->comp_func)
-+			cmd->comp_func(dd->port, MTIP_TAG_INTERNAL,
-+					cmd, -ENODEV);
-+	}
-+}
-+
- /*
-  * Block layer deinitialization function.
-  *
-@@ -4025,12 +4160,23 @@ static int mtip_block_remove(struct driver_data *dd)
- 		}
- 	}
- 
--	if (!dd->sr)
--		mtip_standby_drive(dd);
-+	if (!dd->sr) {
-+		/*
-+		 * Explicitly wait here for IOs to quiesce,
-+		 * as mtip_standby_drive usually won't wait for IOs.
-+		 */
-+		if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS,
-+								GFP_KERNEL))
-+			mtip_standby_drive(dd);
-+	}
- 	else
- 		dev_info(&dd->pdev->dev, "device %s surprise removal\n",
- 						dd->disk->disk_name);
- 
-+	blk_mq_freeze_queue_start(dd->queue);
-+	blk_mq_stop_hw_queues(dd->queue);
-+	blk_mq_all_tag_busy_iter(dd->tags.tags[0], mtip_no_dev_cleanup, dd);
-+
- 	/*
- 	 * Delete our gendisk structure. This also removes the device
- 	 * from /dev
-@@ -4040,7 +4186,8 @@ static int mtip_block_remove(struct driver_data *dd)
- 		dd->bdev = NULL;
- 	}
- 	if (dd->disk) {
--		del_gendisk(dd->disk);
-+		if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
-+			del_gendisk(dd->disk);
- 		if (dd->disk->queue) {
- 			blk_cleanup_queue(dd->queue);
- 			blk_mq_free_tag_set(&dd->tags);
-@@ -4081,7 +4228,8 @@ static int mtip_block_shutdown(struct driver_data *dd)
- 		dev_info(&dd->pdev->dev,
- 			"Shutting down %s ...\n", dd->disk->disk_name);
- 
--		del_gendisk(dd->disk);
-+		if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
-+			del_gendisk(dd->disk);
- 		if (dd->disk->queue) {
- 			blk_cleanup_queue(dd->queue);
- 			blk_mq_free_tag_set(&dd->tags);
-@@ -4426,7 +4574,7 @@ static void mtip_pci_remove(struct pci_dev *pdev)
- 	struct driver_data *dd = pci_get_drvdata(pdev);
- 	unsigned long flags, to;
- 
--	set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
-+	set_bit(MTIP_DDF_REMOVAL_BIT, &dd->dd_flag);
- 
- 	spin_lock_irqsave(&dev_lock, flags);
- 	list_del_init(&dd->online_list);
-@@ -4443,12 +4591,17 @@ static void mtip_pci_remove(struct pci_dev *pdev)
- 	} while (atomic_read(&dd->irq_workers_active) != 0 &&
- 		time_before(jiffies, to));
- 
-+	if (!dd->sr)
-+		fsync_bdev(dd->bdev);
-+
- 	if (atomic_read(&dd->irq_workers_active) != 0) {
- 		dev_warn(&dd->pdev->dev,
- 			"Completion workers still active!\n");
- 	}
- 
--	blk_mq_stop_hw_queues(dd->queue);
-+	blk_set_queue_dying(dd->queue);
-+	set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
-+
- 	/* Clean up the block layer. */
- 	mtip_block_remove(dd);
- 
-diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
-index 3274784008eb..7617888f7944 100644
---- a/drivers/block/mtip32xx/mtip32xx.h
-+++ b/drivers/block/mtip32xx/mtip32xx.h
-@@ -134,16 +134,24 @@ enum {
- 	MTIP_PF_EH_ACTIVE_BIT       = 1, /* error handling */
- 	MTIP_PF_SE_ACTIVE_BIT       = 2, /* secure erase */
- 	MTIP_PF_DM_ACTIVE_BIT       = 3, /* download microcde */
-+	MTIP_PF_TO_ACTIVE_BIT       = 9, /* timeout handling */
- 	MTIP_PF_PAUSE_IO      =	((1 << MTIP_PF_IC_ACTIVE_BIT) |
- 				(1 << MTIP_PF_EH_ACTIVE_BIT) |
- 				(1 << MTIP_PF_SE_ACTIVE_BIT) |
--				(1 << MTIP_PF_DM_ACTIVE_BIT)),
-+				(1 << MTIP_PF_DM_ACTIVE_BIT) |
-+				(1 << MTIP_PF_TO_ACTIVE_BIT)),
- 
- 	MTIP_PF_SVC_THD_ACTIVE_BIT  = 4,
- 	MTIP_PF_ISSUE_CMDS_BIT      = 5,
- 	MTIP_PF_REBUILD_BIT         = 6,
- 	MTIP_PF_SVC_THD_STOP_BIT    = 8,
- 
-+	MTIP_PF_SVC_THD_WORK	= ((1 << MTIP_PF_EH_ACTIVE_BIT) |
-+				  (1 << MTIP_PF_ISSUE_CMDS_BIT) |
-+				  (1 << MTIP_PF_REBUILD_BIT) |
-+				  (1 << MTIP_PF_SVC_THD_STOP_BIT) |
-+				  (1 << MTIP_PF_TO_ACTIVE_BIT)),
-+
- 	/* below are bit numbers in 'dd_flag' defined in driver_data */
- 	MTIP_DDF_SEC_LOCK_BIT	    = 0,
- 	MTIP_DDF_REMOVE_PENDING_BIT = 1,
-@@ -153,6 +161,7 @@ enum {
- 	MTIP_DDF_RESUME_BIT         = 6,
- 	MTIP_DDF_INIT_DONE_BIT      = 7,
- 	MTIP_DDF_REBUILD_FAILED_BIT = 8,
-+	MTIP_DDF_REMOVAL_BIT	    = 9,
- 
- 	MTIP_DDF_STOP_IO      = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) |
- 				(1 << MTIP_DDF_SEC_LOCK_BIT) |
-diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
-index e4c5cc107934..c65d41f4007a 100644
---- a/drivers/block/nbd.c
-+++ b/drivers/block/nbd.c
-@@ -618,8 +618,8 @@ static void nbd_request_handler(struct request_queue *q)
- 			req, req->cmd_type);
- 
- 		if (unlikely(!nbd->sock)) {
--			dev_err(disk_to_dev(nbd->disk),
--				"Attempted send on closed socket\n");
-+			dev_err_ratelimited(disk_to_dev(nbd->disk),
-+					    "Attempted send on closed socket\n");
- 			req->errors++;
- 			nbd_end_request(nbd, req);
- 			spin_lock_irq(q->queue_lock);
-diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
-index 64a7b5971b57..cab97593ba54 100644
---- a/drivers/block/null_blk.c
-+++ b/drivers/block/null_blk.c
-@@ -742,10 +742,11 @@ static int null_add_dev(void)
- 
- 	add_disk(disk);
- 
-+done:
- 	mutex_lock(&lock);
- 	list_add_tail(&nullb->list, &nullb_list);
- 	mutex_unlock(&lock);
--done:
++	/* Power on the SDHCI controller and its children */
++	acpi_device_fix_up_power(device);
++	list_for_each_entry(child, &device->children, node)
++		acpi_device_fix_up_power(child);
 +
- 	return 0;
- 
- out_cleanup_lightnvm:
-diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
-index 562b5a4ca7b7..78a39f736c64 100644
---- a/drivers/block/paride/pd.c
-+++ b/drivers/block/paride/pd.c
-@@ -126,7 +126,7 @@
- */
- #include <linux/types.h>
- 
--static bool verbose = 0;
-+static int verbose = 0;
- static int major = PD_MAJOR;
- static char *name = PD_NAME;
- static int cluster = 64;
-@@ -161,7 +161,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};
- static DEFINE_MUTEX(pd_mutex);
- static DEFINE_SPINLOCK(pd_lock);
- 
--module_param(verbose, bool, 0);
-+module_param(verbose, int, 0);
- module_param(major, int, 0);
- module_param(name, charp, 0);
- module_param(cluster, int, 0);
-diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
-index 1740d75e8a32..216a94fed5b4 100644
---- a/drivers/block/paride/pt.c
-+++ b/drivers/block/paride/pt.c
-@@ -117,7 +117,7 @@
- 
- */
- 
--static bool verbose = 0;
-+static int verbose = 0;
- static int major = PT_MAJOR;
- static char *name = PT_NAME;
- static int disable = 0;
-@@ -152,7 +152,7 @@ static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
- 
- #include <asm/uaccess.h>
- 
--module_param(verbose, bool, 0);
-+module_param(verbose, int, 0);
- module_param(major, int, 0);
- module_param(name, charp, 0);
- module_param_array(drive0, int, NULL, 0);
-diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
-index 4a876785b68c..9745cf9fcd3c 100644
---- a/drivers/block/rbd.c
-+++ b/drivers/block/rbd.c
-@@ -1955,7 +1955,7 @@ static struct ceph_osd_request *rbd_osd_req_create(
- 
- 	osdc = &rbd_dev->rbd_client->client->osdc;
- 	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
--					  GFP_ATOMIC);
-+					  GFP_NOIO);
- 	if (!osd_req)
- 		return NULL;	/* ENOMEM */
- 
-@@ -2004,7 +2004,7 @@ rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
- 	rbd_dev = img_request->rbd_dev;
- 	osdc = &rbd_dev->rbd_client->client->osdc;
- 	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
--						false, GFP_ATOMIC);
-+						false, GFP_NOIO);
- 	if (!osd_req)
- 		return NULL;	/* ENOMEM */
- 
-@@ -2506,7 +2506,7 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
- 					bio_chain_clone_range(&bio_list,
- 								&bio_offset,
- 								clone_size,
--								GFP_ATOMIC);
-+								GFP_NOIO);
- 			if (!obj_request->bio_list)
- 				goto out_unwind;
- 		} else if (type == OBJ_REQUEST_PAGES) {
-diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
-index fa893c3ec408..0beaa52df66b 100644
---- a/drivers/bluetooth/ath3k.c
-+++ b/drivers/bluetooth/ath3k.c
-@@ -82,6 +82,7 @@ static const struct usb_device_id ath3k_table[] = {
- 	{ USB_DEVICE(0x0489, 0xe05f) },
- 	{ USB_DEVICE(0x0489, 0xe076) },
- 	{ USB_DEVICE(0x0489, 0xe078) },
-+	{ USB_DEVICE(0x0489, 0xe095) },
- 	{ USB_DEVICE(0x04c5, 0x1330) },
- 	{ USB_DEVICE(0x04CA, 0x3004) },
- 	{ USB_DEVICE(0x04CA, 0x3005) },
-@@ -92,6 +93,7 @@ static const struct usb_device_id ath3k_table[] = {
- 	{ USB_DEVICE(0x04CA, 0x300d) },
- 	{ USB_DEVICE(0x04CA, 0x300f) },
- 	{ USB_DEVICE(0x04CA, 0x3010) },
-+	{ USB_DEVICE(0x04CA, 0x3014) },
- 	{ USB_DEVICE(0x0930, 0x0219) },
- 	{ USB_DEVICE(0x0930, 0x021c) },
- 	{ USB_DEVICE(0x0930, 0x0220) },
-@@ -113,10 +115,12 @@ static const struct usb_device_id ath3k_table[] = {
- 	{ USB_DEVICE(0x13d3, 0x3362) },
- 	{ USB_DEVICE(0x13d3, 0x3375) },
- 	{ USB_DEVICE(0x13d3, 0x3393) },
-+	{ USB_DEVICE(0x13d3, 0x3395) },
- 	{ USB_DEVICE(0x13d3, 0x3402) },
- 	{ USB_DEVICE(0x13d3, 0x3408) },
- 	{ USB_DEVICE(0x13d3, 0x3423) },
- 	{ USB_DEVICE(0x13d3, 0x3432) },
-+	{ USB_DEVICE(0x13d3, 0x3472) },
- 	{ USB_DEVICE(0x13d3, 0x3474) },
- 
- 	/* Atheros AR5BBU12 with sflash firmware */
-@@ -144,6 +148,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
- 	{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
-+	{ USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
-@@ -154,6 +159,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
- 	{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
-+	{ USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
-@@ -175,10 +181,12 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
- 	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
-+	{ USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
-+	{ USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
- 
- 	/* Atheros AR5BBU22 with sflash firmware */
-diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
-index a191e318fab8..0d4e372e426d 100644
---- a/drivers/bluetooth/btusb.c
-+++ b/drivers/bluetooth/btusb.c
-@@ -196,6 +196,7 @@ static const struct usb_device_id blacklist_table[] = {
- 	{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
-+	{ USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
-@@ -206,6 +207,7 @@ static const struct usb_device_id blacklist_table[] = {
- 	{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
-+	{ USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
-@@ -227,10 +229,12 @@ static const struct usb_device_id blacklist_table[] = {
- 	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
-+	{ USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
-+	{ USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
- 
- 	/* Atheros AR5BBU12 with sflash firmware */
-diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
-index 80783dcb7f57..aba31210c802 100644
---- a/drivers/bluetooth/hci_vhci.c
-+++ b/drivers/bluetooth/hci_vhci.c
-@@ -50,6 +50,7 @@ struct vhci_data {
- 	wait_queue_head_t read_wait;
- 	struct sk_buff_head readq;
- 
-+	struct mutex open_mutex;
- 	struct delayed_work open_timeout;
- };
+ 	if (acpi_bus_get_status(device) || !device->status.present)
+ 		return -ENODEV;
  
-@@ -87,12 +88,15 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
+diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
+index a04d0f7ee00e..807f930a7c14 100644
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -361,7 +361,6 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
+ {
+ 	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
+ 				 MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
+-				 MMC_CAP_BUS_WIDTH_TEST |
+ 				 MMC_CAP_WAIT_WHILE_BUSY;
+ 	slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ;
+ 	slot->hw_reset = sdhci_pci_int_hw_reset;
+@@ -377,15 +376,13 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
+ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
+ {
+ 	slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
+-				 MMC_CAP_BUS_WIDTH_TEST |
+ 				 MMC_CAP_WAIT_WHILE_BUSY;
  	return 0;
  }
  
--static int vhci_create_device(struct vhci_data *data, __u8 opcode)
-+static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
+ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
  {
- 	struct hci_dev *hdev;
- 	struct sk_buff *skb;
- 	__u8 dev_type;
+-	slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST |
+-				 MMC_CAP_WAIT_WHILE_BUSY;
++	slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
+ 	slot->cd_con_id = NULL;
+ 	slot->cd_idx = 0;
+ 	slot->cd_override_level = true;
+diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
+index 5b9834cf2820..96fddb016bf1 100644
+--- a/drivers/mtd/ubi/eba.c
++++ b/drivers/mtd/ubi/eba.c
+@@ -426,8 +426,25 @@ retry:
+ 						 pnum, vol_id, lnum);
+ 					err = -EBADMSG;
+ 				} else {
+-					err = -EINVAL;
+-					ubi_ro_mode(ubi);
++					/*
++					 * Ending up here in the non-Fastmap case
++					 * is a clear bug as the VID header had to
++					 * be present at scan time to have it referenced.
++					 * With fastmap the story is more complicated.
++					 * Fastmap has the mapping info without the need
++					 * of a full scan. So the LEB could have been
++					 * unmapped, Fastmap cannot know this and keeps
++					 * the LEB referenced.
++					 * This is valid and works as the layer above UBI
++					 * has to do bookkeeping about used/referenced
++					 * LEBs in any case.
++					 */
++					if (ubi->fast_attach) {
++						err = -EBADMSG;
++					} else {
++						err = -EINVAL;
++						ubi_ro_mode(ubi);
++					}
+ 				}
+ 			}
+ 			goto out_free;
+diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
+index 263b439e21a8..990898b9dc72 100644
+--- a/drivers/mtd/ubi/fastmap.c
++++ b/drivers/mtd/ubi/fastmap.c
+@@ -1058,6 +1058,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ 	ubi_msg(ubi, "fastmap WL pool size: %d",
+ 		ubi->fm_wl_pool.max_size);
+ 	ubi->fm_disabled = 0;
++	ubi->fast_attach = 1;
  
-+	if (data->hdev)
-+		return -EBADFD;
+ 	ubi_free_vid_hdr(ubi, vh);
+ 	kfree(ech);
+diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
+index 2974b67f6c6c..de1ea2e4c37d 100644
+--- a/drivers/mtd/ubi/ubi.h
++++ b/drivers/mtd/ubi/ubi.h
+@@ -462,6 +462,7 @@ struct ubi_debug_info {
+  * @fm_eba_sem: allows ubi_update_fastmap() to block EBA table changes
+  * @fm_work: fastmap work queue
+  * @fm_work_scheduled: non-zero if fastmap work was scheduled
++ * @fast_attach: non-zero if UBI was attached by fastmap
+  *
+  * @used: RB-tree of used physical eraseblocks
+  * @erroneous: RB-tree of erroneous used physical eraseblocks
+@@ -570,6 +571,7 @@ struct ubi_device {
+ 	size_t fm_size;
+ 	struct work_struct fm_work;
+ 	int fm_work_scheduled;
++	int fast_attach;
+ 
+ 	/* Wear-leveling sub-system's stuff */
+ 	struct rb_root used;
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index 141c2a42d7ed..910c12e2638e 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -696,11 +696,17 @@ int can_change_mtu(struct net_device *dev, int new_mtu)
+ 	/* allow change of MTU according to the CANFD ability of the device */
+ 	switch (new_mtu) {
+ 	case CAN_MTU:
++		/* 'CANFD-only' controllers can not switch to CAN_MTU */
++		if (priv->ctrlmode_static & CAN_CTRLMODE_FD)
++			return -EINVAL;
 +
- 	/* bits 0-1 are dev_type (BR/EDR or AMP) */
- 	dev_type = opcode & 0x03;
+ 		priv->ctrlmode &= ~CAN_CTRLMODE_FD;
+ 		break;
  
-@@ -151,6 +155,17 @@ static int vhci_create_device(struct vhci_data *data, __u8 opcode)
- 	return 0;
- }
+ 	case CANFD_MTU:
+-		if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD))
++		/* check for potential CANFD ability */
++		if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) &&
++		    !(priv->ctrlmode_static & CAN_CTRLMODE_FD))
+ 			return -EINVAL;
  
-+static int vhci_create_device(struct vhci_data *data, __u8 opcode)
+ 		priv->ctrlmode |= CAN_CTRLMODE_FD;
+@@ -782,6 +788,35 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
+ 				= { .len = sizeof(struct can_bittiming_const) },
+ };
+ 
++static int can_validate(struct nlattr *tb[], struct nlattr *data[])
 +{
-+	int err;
++	bool is_can_fd = false;
 +
-+	mutex_lock(&data->open_mutex);
-+	err = __vhci_create_device(data, opcode);
-+	mutex_unlock(&data->open_mutex);
++	/* Make sure that valid CAN FD configurations always consist of
++	 * - nominal/arbitration bittiming
++	 * - data bittiming
++	 * - control mode with CAN_CTRLMODE_FD set
++	 */
 +
-+	return err;
++	if (data[IFLA_CAN_CTRLMODE]) {
++		struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
++
++		is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
++	}
++
++	if (is_can_fd) {
++		if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
++			return -EOPNOTSUPP;
++	}
++
++	if (data[IFLA_CAN_DATA_BITTIMING]) {
++		if (!is_can_fd || !data[IFLA_CAN_BITTIMING])
++			return -EOPNOTSUPP;
++	}
++
++	return 0;
 +}
 +
- static inline ssize_t vhci_get_user(struct vhci_data *data,
- 				    struct iov_iter *from)
- {
-@@ -189,11 +204,6 @@ static inline ssize_t vhci_get_user(struct vhci_data *data,
- 		break;
- 
- 	case HCI_VENDOR_PKT:
--		if (data->hdev) {
--			kfree_skb(skb);
--			return -EBADFD;
--		}
--
- 		cancel_delayed_work_sync(&data->open_timeout);
- 
- 		opcode = *((__u8 *) skb->data);
-@@ -320,6 +330,7 @@ static int vhci_open(struct inode *inode, struct file *file)
- 	skb_queue_head_init(&data->readq);
- 	init_waitqueue_head(&data->read_wait);
- 
-+	mutex_init(&data->open_mutex);
- 	INIT_DELAYED_WORK(&data->open_timeout, vhci_open_timeout);
- 
- 	file->private_data = data;
-@@ -333,15 +344,18 @@ static int vhci_open(struct inode *inode, struct file *file)
- static int vhci_release(struct inode *inode, struct file *file)
+ static int can_changelink(struct net_device *dev,
+ 			  struct nlattr *tb[], struct nlattr *data[])
  {
- 	struct vhci_data *data = file->private_data;
--	struct hci_dev *hdev = data->hdev;
-+	struct hci_dev *hdev;
+@@ -813,19 +848,31 @@ static int can_changelink(struct net_device *dev,
  
- 	cancel_delayed_work_sync(&data->open_timeout);
+ 	if (data[IFLA_CAN_CTRLMODE]) {
+ 		struct can_ctrlmode *cm;
++		u32 ctrlstatic;
++		u32 maskedflags;
  
-+	hdev = data->hdev;
+ 		/* Do not allow changing controller mode while running */
+ 		if (dev->flags & IFF_UP)
+ 			return -EBUSY;
+ 		cm = nla_data(data[IFLA_CAN_CTRLMODE]);
++		ctrlstatic = priv->ctrlmode_static;
++		maskedflags = cm->flags & cm->mask;
 +
- 	if (hdev) {
- 		hci_unregister_dev(hdev);
- 		hci_free_dev(hdev);
- 	}
- 
-+	skb_queue_purge(&data->readq);
- 	file->private_data = NULL;
- 	kfree(data);
- 
-diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
-index e98d15eaa799..1827fc4d15c1 100644
---- a/drivers/bus/imx-weim.c
-+++ b/drivers/bus/imx-weim.c
-@@ -150,7 +150,7 @@ static int __init weim_parse_dt(struct platform_device *pdev,
- 			return ret;
- 	}
- 
--	for_each_child_of_node(pdev->dev.of_node, child) {
-+	for_each_available_child_of_node(pdev->dev.of_node, child) {
- 		if (!child->name)
- 			continue;
- 
-diff --git a/drivers/bus/uniphier-system-bus.c b/drivers/bus/uniphier-system-bus.c
-index 834a2aeaf27a..350b7309c26d 100644
---- a/drivers/bus/uniphier-system-bus.c
-+++ b/drivers/bus/uniphier-system-bus.c
-@@ -108,7 +108,7 @@ static int uniphier_system_bus_check_overlap(
- 
- 	for (i = 0; i < ARRAY_SIZE(priv->bank); i++) {
- 		for (j = i + 1; j < ARRAY_SIZE(priv->bank); j++) {
--			if (priv->bank[i].end > priv->bank[j].base ||
-+			if (priv->bank[i].end > priv->bank[j].base &&
- 			    priv->bank[i].base < priv->bank[j].end) {
- 				dev_err(priv->dev,
- 					"region overlap between bank%d and bank%d\n",
-diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
-index 45cc39aabeee..252142524ff2 100644
---- a/drivers/char/tpm/tpm-chip.c
-+++ b/drivers/char/tpm/tpm-chip.c
-@@ -136,11 +136,13 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
- 	chip->cdev.owner = chip->pdev->driver->owner;
- 	chip->cdev.kobj.parent = &chip->dev.kobj;
- 
-+	devm_add_action(dev, (void (*)(void *)) put_device, &chip->dev);
++		/* check whether provided bits are allowed to be passed */
++		if (cm->mask & ~(priv->ctrlmode_supported | ctrlstatic))
++			return -EOPNOTSUPP;
 +
- 	return chip;
- }
- EXPORT_SYMBOL_GPL(tpmm_chip_alloc);
++		/* do not check for static fd-non-iso if 'fd' is disabled */
++		if (!(maskedflags & CAN_CTRLMODE_FD))
++			ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
  
--static int tpm_dev_add_device(struct tpm_chip *chip)
-+static int tpm_add_char_device(struct tpm_chip *chip)
- {
- 	int rc;
- 
-@@ -151,7 +153,6 @@ static int tpm_dev_add_device(struct tpm_chip *chip)
- 			chip->devname, MAJOR(chip->dev.devt),
- 			MINOR(chip->dev.devt), rc);
- 
--		device_unregister(&chip->dev);
- 		return rc;
- 	}
- 
-@@ -162,16 +163,17 @@ static int tpm_dev_add_device(struct tpm_chip *chip)
- 			chip->devname, MAJOR(chip->dev.devt),
- 			MINOR(chip->dev.devt), rc);
- 
-+		cdev_del(&chip->cdev);
- 		return rc;
- 	}
- 
- 	return rc;
- }
- 
--static void tpm_dev_del_device(struct tpm_chip *chip)
-+static void tpm_del_char_device(struct tpm_chip *chip)
- {
- 	cdev_del(&chip->cdev);
--	device_unregister(&chip->dev);
-+	device_del(&chip->dev);
- }
- 
- static int tpm1_chip_register(struct tpm_chip *chip)
-@@ -222,7 +224,7 @@ int tpm_chip_register(struct tpm_chip *chip)
- 
- 	tpm_add_ppi(chip);
- 
--	rc = tpm_dev_add_device(chip);
-+	rc = tpm_add_char_device(chip);
- 	if (rc)
- 		goto out_err;
- 
-@@ -274,6 +276,6 @@ void tpm_chip_unregister(struct tpm_chip *chip)
- 		sysfs_remove_link(&chip->pdev->kobj, "ppi");
- 
- 	tpm1_chip_unregister(chip);
--	tpm_dev_del_device(chip);
-+	tpm_del_char_device(chip);
- }
- EXPORT_SYMBOL_GPL(tpm_chip_unregister);
-diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
-index 45a634016f95..b28e4da3d2cf 100644
---- a/drivers/char/tpm/tpm2-cmd.c
-+++ b/drivers/char/tpm/tpm2-cmd.c
-@@ -20,7 +20,11 @@
- #include <keys/trusted-type.h>
- 
- enum tpm2_object_attributes {
--	TPM2_ATTR_USER_WITH_AUTH	= BIT(6),
-+	TPM2_OA_USER_WITH_AUTH		= BIT(6),
-+};
-+
-+enum tpm2_session_attributes {
-+	TPM2_SA_CONTINUE_SESSION	= BIT(0),
- };
- 
- struct tpm2_startup_in {
-@@ -478,22 +482,18 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
- 	tpm_buf_append_u8(&buf, payload->migratable);
- 
- 	/* public */
--	if (options->policydigest)
--		tpm_buf_append_u16(&buf, 14 + options->digest_len);
--	else
--		tpm_buf_append_u16(&buf, 14);
--
-+	tpm_buf_append_u16(&buf, 14 + options->policydigest_len);
- 	tpm_buf_append_u16(&buf, TPM2_ALG_KEYEDHASH);
- 	tpm_buf_append_u16(&buf, hash);
- 
- 	/* policy */
--	if (options->policydigest) {
-+	if (options->policydigest_len) {
- 		tpm_buf_append_u32(&buf, 0);
--		tpm_buf_append_u16(&buf, options->digest_len);
-+		tpm_buf_append_u16(&buf, options->policydigest_len);
- 		tpm_buf_append(&buf, options->policydigest,
--			       options->digest_len);
-+			       options->policydigest_len);
- 	} else {
--		tpm_buf_append_u32(&buf, TPM2_ATTR_USER_WITH_AUTH);
-+		tpm_buf_append_u32(&buf, TPM2_OA_USER_WITH_AUTH);
- 		tpm_buf_append_u16(&buf, 0);
- 	}
- 
-@@ -631,7 +631,7 @@ static int tpm2_unseal(struct tpm_chip *chip,
- 			     options->policyhandle ?
- 			     options->policyhandle : TPM2_RS_PW,
- 			     NULL /* nonce */, 0,
--			     0 /* session_attributes */,
-+			     TPM2_SA_CONTINUE_SESSION,
- 			     options->blobauth /* hmac */,
- 			     TPM_DIGEST_SIZE);
- 
-diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
-index 8342cf51ffdc..26bab5a2959f 100644
---- a/drivers/char/tpm/tpm_crb.c
-+++ b/drivers/char/tpm/tpm_crb.c
-@@ -302,11 +302,11 @@ static int crb_acpi_remove(struct acpi_device *device)
- 	struct device *dev = &device->dev;
- 	struct tpm_chip *chip = dev_get_drvdata(dev);
- 
--	tpm_chip_unregister(chip);
--
- 	if (chip->flags & TPM_CHIP_FLAG_TPM2)
- 		tpm2_shutdown(chip, TPM2_SU_CLEAR);
- 
-+	tpm_chip_unregister(chip);
-+
- 	return 0;
- }
- 
-diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
-index bd72fb04225e..4e6940acf639 100644
---- a/drivers/char/tpm/tpm_eventlog.c
-+++ b/drivers/char/tpm/tpm_eventlog.c
-@@ -232,7 +232,7 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
- {
- 	struct tcpa_event *event = v;
- 	struct tcpa_event temp_event;
--	char *tempPtr;
-+	char *temp_ptr;
- 	int i;
- 
- 	memcpy(&temp_event, event, sizeof(struct tcpa_event));
-@@ -242,10 +242,16 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
- 	temp_event.event_type = do_endian_conversion(event->event_type);
- 	temp_event.event_size = do_endian_conversion(event->event_size);
- 
--	tempPtr = (char *)&temp_event;
-+	temp_ptr = (char *) &temp_event;
- 
--	for (i = 0; i < sizeof(struct tcpa_event) + temp_event.event_size; i++)
--		seq_putc(m, tempPtr[i]);
-+	for (i = 0; i < (sizeof(struct tcpa_event) - 1) ; i++)
-+		seq_putc(m, temp_ptr[i]);
-+
-+	temp_ptr = (char *) v;
-+
-+	for (i = (sizeof(struct tcpa_event) - 1);
-+	     i < (sizeof(struct tcpa_event) + temp_event.event_size); i++)
-+		seq_putc(m, temp_ptr[i]);
- 
- 	return 0;
- 
-diff --git a/drivers/clk/bcm/clk-bcm2835-aux.c b/drivers/clk/bcm/clk-bcm2835-aux.c
-index e4f89e28b5ec..3a177ade6e6c 100644
---- a/drivers/clk/bcm/clk-bcm2835-aux.c
-+++ b/drivers/clk/bcm/clk-bcm2835-aux.c
-@@ -38,8 +38,8 @@ static int bcm2835_aux_clk_probe(struct platform_device *pdev)
- 
- 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- 	reg = devm_ioremap_resource(dev, res);
--	if (!reg)
--		return -ENODEV;
-+	if (IS_ERR(reg))
-+		return PTR_ERR(reg);
- 
- 	onecell = devm_kmalloc(dev, sizeof(*onecell), GFP_KERNEL);
- 	if (!onecell)
-diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
-index 015e687ffabe..dd2856b5633c 100644
---- a/drivers/clk/bcm/clk-bcm2835.c
-+++ b/drivers/clk/bcm/clk-bcm2835.c
-@@ -1078,10 +1078,12 @@ static void bcm2835_pll_divider_off(struct clk_hw *hw)
- 	struct bcm2835_cprman *cprman = divider->cprman;
- 	const struct bcm2835_pll_divider_data *data = divider->data;
- 
-+	spin_lock(&cprman->regs_lock);
- 	cprman_write(cprman, data->cm_reg,
- 		     (cprman_read(cprman, data->cm_reg) &
- 		      ~data->load_mask) | data->hold_mask);
- 	cprman_write(cprman, data->a2w_reg, A2W_PLL_CHANNEL_DISABLE);
-+	spin_unlock(&cprman->regs_lock);
- }
- 
- static int bcm2835_pll_divider_on(struct clk_hw *hw)
-@@ -1090,12 +1092,14 @@ static int bcm2835_pll_divider_on(struct clk_hw *hw)
- 	struct bcm2835_cprman *cprman = divider->cprman;
- 	const struct bcm2835_pll_divider_data *data = divider->data;
- 
-+	spin_lock(&cprman->regs_lock);
- 	cprman_write(cprman, data->a2w_reg,
- 		     cprman_read(cprman, data->a2w_reg) &
- 		     ~A2W_PLL_CHANNEL_DISABLE);
- 
- 	cprman_write(cprman, data->cm_reg,
- 		     cprman_read(cprman, data->cm_reg) & ~data->hold_mask);
-+	spin_unlock(&cprman->regs_lock);
- 
- 	return 0;
- }
-@@ -1107,13 +1111,15 @@ static int bcm2835_pll_divider_set_rate(struct clk_hw *hw,
- 	struct bcm2835_pll_divider *divider = bcm2835_pll_divider_from_hw(hw);
- 	struct bcm2835_cprman *cprman = divider->cprman;
- 	const struct bcm2835_pll_divider_data *data = divider->data;
--	u32 cm;
--	int ret;
-+	u32 cm, div, max_div = 1 << A2W_PLL_DIV_BITS;
- 
--	ret = clk_divider_ops.set_rate(hw, rate, parent_rate);
--	if (ret)
--		return ret;
-+	div = DIV_ROUND_UP_ULL(parent_rate, rate);
-+
-+	div = min(div, max_div);
-+	if (div == max_div)
-+		div = 0;
- 
-+	cprman_write(cprman, data->a2w_reg, div);
- 	cm = cprman_read(cprman, data->cm_reg);
- 	cprman_write(cprman, data->cm_reg, cm | data->load_mask);
- 	cprman_write(cprman, data->cm_reg, cm & ~data->load_mask);
-diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
-index ded3ff4b91b9..aa1dacdaa39d 100644
---- a/drivers/clk/clk-divider.c
-+++ b/drivers/clk/clk-divider.c
-@@ -423,6 +423,12 @@ const struct clk_ops clk_divider_ops = {
- };
- EXPORT_SYMBOL_GPL(clk_divider_ops);
- 
-+const struct clk_ops clk_divider_ro_ops = {
-+	.recalc_rate = clk_divider_recalc_rate,
-+	.round_rate = clk_divider_round_rate,
-+};
-+EXPORT_SYMBOL_GPL(clk_divider_ro_ops);
-+
- static struct clk *_register_divider(struct device *dev, const char *name,
- 		const char *parent_name, unsigned long flags,
- 		void __iomem *reg, u8 shift, u8 width,
-@@ -446,7 +452,10 @@ static struct clk *_register_divider(struct device *dev, const char *name,
- 		return ERR_PTR(-ENOMEM);
- 
- 	init.name = name;
--	init.ops = &clk_divider_ops;
-+	if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
-+		init.ops = &clk_divider_ro_ops;
-+	else
-+		init.ops = &clk_divider_ops;
- 	init.flags = flags | CLK_IS_BASIC;
- 	init.parent_names = (parent_name ? &parent_name: NULL);
- 	init.num_parents = (parent_name ? 1 : 0);
-diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
-index 10224b01b97c..b134a8b15e2c 100644
---- a/drivers/clk/clk-xgene.c
-+++ b/drivers/clk/clk-xgene.c
-@@ -351,8 +351,8 @@ static int xgene_clk_set_rate(struct clk_hw *hw, unsigned long rate,
- 		/* Set new divider */
- 		data = xgene_clk_read(pclk->param.divider_reg +
- 				pclk->param.reg_divider_offset);
--		data &= ~((1 << pclk->param.reg_divider_width) - 1)
--				<< pclk->param.reg_divider_shift;
-+		data &= ~(((1 << pclk->param.reg_divider_width) - 1)
-+				<< pclk->param.reg_divider_shift);
- 		data |= divider;
- 		xgene_clk_write(data, pclk->param.divider_reg +
- 					pclk->param.reg_divider_offset);
-diff --git a/drivers/clk/meson/clkc.c b/drivers/clk/meson/clkc.c
-index c83ae1367abc..d920d410b51d 100644
---- a/drivers/clk/meson/clkc.c
-+++ b/drivers/clk/meson/clkc.c
-@@ -198,7 +198,7 @@ meson_clk_register_fixed_rate(const struct clk_conf *clk_conf,
- }
- 
- void __init meson_clk_register_clks(const struct clk_conf *clk_confs,
--				    size_t nr_confs,
-+				    unsigned int nr_confs,
- 				    void __iomem *clk_base)
- {
- 	unsigned int i;
-diff --git a/drivers/clk/nxp/clk-lpc18xx-ccu.c b/drivers/clk/nxp/clk-lpc18xx-ccu.c
-index 13aabbb3acbe..558da89555af 100644
---- a/drivers/clk/nxp/clk-lpc18xx-ccu.c
-+++ b/drivers/clk/nxp/clk-lpc18xx-ccu.c
-@@ -222,7 +222,7 @@ static void lpc18xx_ccu_register_branch_gate_div(struct lpc18xx_clk_branch *bran
- 		div->width = 1;
- 
- 		div_hw = &div->hw;
--		div_ops = &clk_divider_ops;
-+		div_ops = &clk_divider_ro_ops;
- 	}
- 
- 	branch->gate.reg = branch->offset + reg_base;
-diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
-index 8cc9b2868b41..5f56d6aae31d 100644
---- a/drivers/clk/qcom/gcc-msm8916.c
-+++ b/drivers/clk/qcom/gcc-msm8916.c
-@@ -2346,6 +2346,7 @@ static struct clk_branch gcc_crypto_ahb_clk = {
- 				"pcnoc_bfdcd_clk_src",
- 			},
- 			.num_parents = 1,
-+			.flags = CLK_SET_RATE_PARENT,
- 			.ops = &clk_branch2_ops,
- 		},
- 	},
-@@ -2381,6 +2382,7 @@ static struct clk_branch gcc_crypto_clk = {
- 				"crypto_clk_src",
- 			},
- 			.num_parents = 1,
-+			.flags = CLK_SET_RATE_PARENT,
- 			.ops = &clk_branch2_ops,
- 		},
- 	},
-diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
-index 983dd7dc89a7..0a0c1f533249 100644
---- a/drivers/clk/qcom/gcc-msm8960.c
-+++ b/drivers/clk/qcom/gcc-msm8960.c
-@@ -2753,7 +2753,7 @@ static struct clk_rcg ce3_src = {
- 	},
- 	.freq_tbl = clk_tbl_ce3,
- 	.clkr = {
--		.enable_reg = 0x2c08,
-+		.enable_reg = 0x36c0,
- 		.enable_mask = BIT(7),
- 		.hw.init = &(struct clk_init_data){
- 			.name = "ce3_src",
-@@ -2769,7 +2769,7 @@ static struct clk_branch ce3_core_clk = {
- 	.halt_reg = 0x2fdc,
- 	.halt_bit = 5,
- 	.clkr = {
--		.enable_reg = 0x36c4,
-+		.enable_reg = 0x36cc,
- 		.enable_mask = BIT(4),
- 		.hw.init = &(struct clk_init_data){
- 			.name = "ce3_core_clk",
-diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
-index 7f7444cbf6fc..05263571c223 100644
---- a/drivers/clk/rockchip/clk-rk3188.c
-+++ b/drivers/clk/rockchip/clk-rk3188.c
-@@ -748,6 +748,7 @@ static const char *const rk3188_critical_clocks[] __initconst = {
- 	"hclk_peri",
- 	"pclk_cpu",
- 	"pclk_peri",
-+	"hclk_cpubus"
- };
- 
- static void __init rk3188_common_clk_init(struct device_node *np)
-diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
-index 981a50205339..97f49aab8d42 100644
---- a/drivers/clk/rockchip/clk-rk3228.c
-+++ b/drivers/clk/rockchip/clk-rk3228.c
-@@ -605,13 +605,13 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
- 
- 	/* PD_MMC */
- 	MMC(SCLK_SDMMC_DRV,    "sdmmc_drv",    "sclk_sdmmc", RK3228_SDMMC_CON0, 1),
--	MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK3228_SDMMC_CON1, 1),
-+	MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK3228_SDMMC_CON1, 0),
- 
- 	MMC(SCLK_SDIO_DRV,     "sdio_drv",     "sclk_sdio",  RK3228_SDIO_CON0,  1),
--	MMC(SCLK_SDIO_SAMPLE,  "sdio_sample",  "sclk_sdio",  RK3228_SDIO_CON1,  1),
-+	MMC(SCLK_SDIO_SAMPLE,  "sdio_sample",  "sclk_sdio",  RK3228_SDIO_CON1,  0),
- 
- 	MMC(SCLK_EMMC_DRV,     "emmc_drv",     "sclk_emmc",  RK3228_EMMC_CON0,  1),
--	MMC(SCLK_EMMC_SAMPLE,  "emmc_sample",  "sclk_emmc",  RK3228_EMMC_CON1,  1),
-+	MMC(SCLK_EMMC_SAMPLE,  "emmc_sample",  "sclk_emmc",  RK3228_EMMC_CON1,  0),
- };
- 
- static const char *const rk3228_critical_clocks[] __initconst = {
-diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
-index 21f3ea909fab..57acb625c8ff 100644
---- a/drivers/clk/rockchip/clk-rk3368.c
-+++ b/drivers/clk/rockchip/clk-rk3368.c
-@@ -165,7 +165,7 @@ static const struct rockchip_cpuclk_reg_data rk3368_cpuclkb_data = {
- 	.core_reg = RK3368_CLKSEL_CON(0),
- 	.div_core_shift = 0,
- 	.div_core_mask = 0x1f,
--	.mux_core_shift = 15,
-+	.mux_core_shift = 7,
- };
+-		/* check whether changed bits are allowed to be modified */
+-		if (cm->mask & ~priv->ctrlmode_supported)
++		/* make sure static options are provided by configuration */
++		if ((maskedflags & ctrlstatic) != ctrlstatic)
+ 			return -EOPNOTSUPP;
  
- static const struct rockchip_cpuclk_reg_data rk3368_cpuclkl_data = {
-@@ -218,29 +218,29 @@ static const struct rockchip_cpuclk_reg_data rk3368_cpuclkl_data = {
- 	}
+ 		/* clear bits to be modified and copy the flag values */
+ 		priv->ctrlmode &= ~cm->mask;
+-		priv->ctrlmode |= (cm->flags & cm->mask);
++		priv->ctrlmode |= maskedflags;
  
- static struct rockchip_cpuclk_rate_table rk3368_cpuclkb_rates[] __initdata = {
--	RK3368_CPUCLKB_RATE(1512000000, 2, 6, 6),
--	RK3368_CPUCLKB_RATE(1488000000, 2, 5, 5),
--	RK3368_CPUCLKB_RATE(1416000000, 2, 5, 5),
--	RK3368_CPUCLKB_RATE(1200000000, 2, 4, 4),
--	RK3368_CPUCLKB_RATE(1008000000, 2, 4, 4),
--	RK3368_CPUCLKB_RATE( 816000000, 2, 3, 3),
--	RK3368_CPUCLKB_RATE( 696000000, 2, 3, 3),
--	RK3368_CPUCLKB_RATE( 600000000, 2, 2, 2),
--	RK3368_CPUCLKB_RATE( 408000000, 2, 2, 2),
--	RK3368_CPUCLKB_RATE( 312000000, 2, 2, 2),
-+	RK3368_CPUCLKB_RATE(1512000000, 1, 5, 5),
-+	RK3368_CPUCLKB_RATE(1488000000, 1, 4, 4),
-+	RK3368_CPUCLKB_RATE(1416000000, 1, 4, 4),
-+	RK3368_CPUCLKB_RATE(1200000000, 1, 3, 3),
-+	RK3368_CPUCLKB_RATE(1008000000, 1, 3, 3),
-+	RK3368_CPUCLKB_RATE( 816000000, 1, 2, 2),
-+	RK3368_CPUCLKB_RATE( 696000000, 1, 2, 2),
-+	RK3368_CPUCLKB_RATE( 600000000, 1, 1, 1),
-+	RK3368_CPUCLKB_RATE( 408000000, 1, 1, 1),
-+	RK3368_CPUCLKB_RATE( 312000000, 1, 1, 1),
- };
+ 		/* CAN_CTRLMODE_FD can only be set when driver supports FD */
+ 		if (priv->ctrlmode & CAN_CTRLMODE_FD)
+@@ -966,6 +1013,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
+ 	.maxtype	= IFLA_CAN_MAX,
+ 	.policy		= can_policy,
+ 	.setup		= can_setup,
++	.validate	= can_validate,
+ 	.newlink	= can_newlink,
+ 	.changelink	= can_changelink,
+ 	.get_size	= can_get_size,
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index 39cf911f7a1e..195f15edb32e 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -955,7 +955,7 @@ static struct net_device *alloc_m_can_dev(void)
+ 	priv->can.do_get_berr_counter = m_can_get_berr_counter;
  
- static struct rockchip_cpuclk_rate_table rk3368_cpuclkl_rates[] __initdata = {
--	RK3368_CPUCLKL_RATE(1512000000, 2, 7, 7),
--	RK3368_CPUCLKL_RATE(1488000000, 2, 6, 6),
--	RK3368_CPUCLKL_RATE(1416000000, 2, 6, 6),
--	RK3368_CPUCLKL_RATE(1200000000, 2, 5, 5),
--	RK3368_CPUCLKL_RATE(1008000000, 2, 5, 5),
--	RK3368_CPUCLKL_RATE( 816000000, 2, 4, 4),
--	RK3368_CPUCLKL_RATE( 696000000, 2, 3, 3),
--	RK3368_CPUCLKL_RATE( 600000000, 2, 3, 3),
--	RK3368_CPUCLKL_RATE( 408000000, 2, 2, 2),
--	RK3368_CPUCLKL_RATE( 312000000, 2, 2, 2),
-+	RK3368_CPUCLKL_RATE(1512000000, 1, 6, 6),
-+	RK3368_CPUCLKL_RATE(1488000000, 1, 5, 5),
-+	RK3368_CPUCLKL_RATE(1416000000, 1, 5, 5),
-+	RK3368_CPUCLKL_RATE(1200000000, 1, 4, 4),
-+	RK3368_CPUCLKL_RATE(1008000000, 1, 4, 4),
-+	RK3368_CPUCLKL_RATE( 816000000, 1, 3, 3),
-+	RK3368_CPUCLKL_RATE( 696000000, 1, 2, 2),
-+	RK3368_CPUCLKL_RATE( 600000000, 1, 2, 2),
-+	RK3368_CPUCLKL_RATE( 408000000, 1, 1, 1),
-+	RK3368_CPUCLKL_RATE( 312000000, 1, 1, 1),
- };
+ 	/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.1 */
+-	priv->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO;
++	can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
  
- static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
-@@ -384,10 +384,10 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
- 	 * Clock-Architecture Diagram 3
+ 	/* CAN_CTRLMODE_FD_NON_ISO can not be changed with M_CAN IP v3.0.1 */
+ 	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
+index 0c5c22b84da8..7de5ab589e4e 100644
+--- a/drivers/net/usb/asix_common.c
++++ b/drivers/net/usb/asix_common.c
+@@ -66,7 +66,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
+ 	 * buffer.
  	 */
+ 	if (rx->remaining && (rx->remaining + sizeof(u32) <= skb->len)) {
+-		offset = ((rx->remaining + 1) & 0xfffe) + sizeof(u32);
++		offset = ((rx->remaining + 1) & 0xfffe);
+ 		rx->header = get_unaligned_le32(skb->data + offset);
+ 		offset = 0;
  
--	COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_usb_p, 0,
-+	COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_npll_usb_p, 0,
- 			RK3368_CLKSEL_CON(15), 6, 2, MFLAGS, 0, 5, DFLAGS,
- 			RK3368_CLKGATE_CON(4), 6, GFLAGS),
--	COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_usb_p, 0,
-+	COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_npll_usb_p, 0,
- 			RK3368_CLKSEL_CON(15), 14, 2, MFLAGS, 8, 5, DFLAGS,
- 			RK3368_CLKGATE_CON(4), 7, GFLAGS),
- 
-@@ -442,7 +442,7 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
- 	GATE(SCLK_HDMI_HDCP, "sclk_hdmi_hdcp", "xin24m", 0,
- 			RK3368_CLKGATE_CON(4), 13, GFLAGS),
- 	GATE(SCLK_HDMI_CEC, "sclk_hdmi_cec", "xin32k", 0,
--			RK3368_CLKGATE_CON(5), 12, GFLAGS),
-+			RK3368_CLKGATE_CON(4), 12, GFLAGS),
- 
- 	COMPOSITE_NODIV(0, "vip_src", mux_pll_src_cpll_gpll_p, 0,
- 			RK3368_CLKSEL_CON(21), 15, 1, MFLAGS,
-diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
-index d9a0b5d4d47f..226af5720c9e 100644
---- a/drivers/clk/rockchip/clk.c
-+++ b/drivers/clk/rockchip/clk.c
-@@ -70,7 +70,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
- 	if (gate_offset >= 0) {
- 		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
- 		if (!gate)
--			return ERR_PTR(-ENOMEM);
-+			goto err_gate;
- 
- 		gate->flags = gate_flags;
- 		gate->reg = base + gate_offset;
-@@ -82,7 +82,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
- 	if (div_width > 0) {
- 		div = kzalloc(sizeof(*div), GFP_KERNEL);
- 		if (!div)
--			return ERR_PTR(-ENOMEM);
-+			goto err_div;
- 
- 		div->flags = div_flags;
- 		div->reg = base + muxdiv_offset;
-@@ -90,7 +90,9 @@ static struct clk *rockchip_clk_register_branch(const char *name,
- 		div->width = div_width;
- 		div->lock = lock;
- 		div->table = div_table;
--		div_ops = &clk_divider_ops;
-+		div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
-+						? &clk_divider_ro_ops
-+						: &clk_divider_ops;
- 	}
- 
- 	clk = clk_register_composite(NULL, name, parent_names, num_parents,
-@@ -100,6 +102,11 @@ static struct clk *rockchip_clk_register_branch(const char *name,
- 				     flags);
- 
- 	return clk;
-+err_div:
-+	kfree(gate);
-+err_gate:
-+	kfree(mux);
-+	return ERR_PTR(-ENOMEM);
- }
- 
- struct rockchip_clk_frac {
-diff --git a/drivers/clk/sunxi/clk-sun8i-apb0.c b/drivers/clk/sunxi/clk-sun8i-apb0.c
-index 7ba61103a6f5..2ea61debffc1 100644
---- a/drivers/clk/sunxi/clk-sun8i-apb0.c
-+++ b/drivers/clk/sunxi/clk-sun8i-apb0.c
-@@ -36,7 +36,7 @@ static struct clk *sun8i_a23_apb0_register(struct device_node *node,
- 
- 	/* The A23 APB0 clock is a standard 2 bit wide divider clock */
- 	clk = clk_register_divider(NULL, clk_name, clk_parent, 0, reg,
--				   0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
-+				   0, 2, 0, NULL);
- 	if (IS_ERR(clk))
- 		return clk;
- 
-diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c
-index e78755e0ef78..1fe1e8d970cf 100644
---- a/drivers/clk/versatile/clk-sp810.c
-+++ b/drivers/clk/versatile/clk-sp810.c
-@@ -92,6 +92,7 @@ static void __init clk_sp810_of_setup(struct device_node *node)
- 	int num = ARRAY_SIZE(parent_names);
- 	char name[12];
- 	struct clk_init_data init;
-+	static int instance;
- 	int i;
- 	bool deprecated;
- 
-@@ -117,7 +118,7 @@ static void __init clk_sp810_of_setup(struct device_node *node)
- 	deprecated = !of_find_property(node, "assigned-clock-parents", NULL);
- 
- 	for (i = 0; i < ARRAY_SIZE(sp810->timerclken); i++) {
--		snprintf(name, ARRAY_SIZE(name), "timerclken%d", i);
-+		snprintf(name, sizeof(name), "sp810_%d_%d", instance, i);
- 
- 		sp810->timerclken[i].sp810 = sp810;
- 		sp810->timerclken[i].channel = i;
-@@ -138,5 +139,6 @@ static void __init clk_sp810_of_setup(struct device_node *node)
- 	}
- 
- 	of_clk_add_provider(node, clk_sp810_timerclken_of_get, sp810);
-+	instance++;
- }
- CLK_OF_DECLARE(sp810, "arm,sp810", clk_sp810_of_setup);
-diff --git a/drivers/clocksource/tango_xtal.c b/drivers/clocksource/tango_xtal.c
-index 2bcecafdeaea..c407c47a3232 100644
---- a/drivers/clocksource/tango_xtal.c
-+++ b/drivers/clocksource/tango_xtal.c
-@@ -42,7 +42,7 @@ static void __init tango_clocksource_init(struct device_node *np)
- 
- 	ret = clocksource_mmio_init(xtal_in_cnt, "tango-xtal", xtal_freq, 350,
- 				    32, clocksource_mmio_readl_up);
--	if (!ret) {
-+	if (ret) {
- 		pr_err("%s: registration failed\n", np->full_name);
- 		return;
- 	}
-diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
-index cd83d477e32d..e89512383c3c 100644
---- a/drivers/cpufreq/intel_pstate.c
-+++ b/drivers/cpufreq/intel_pstate.c
-@@ -673,6 +673,11 @@ static int core_get_max_pstate(void)
- 			if (err)
- 				goto skip_tar;
- 
-+			/* For level 1 and 2, bits[23:16] contain the ratio */
-+			if (tdp_ctrl)
-+				tdp_ratio >>= 16;
-+
-+			tdp_ratio &= 0xff; /* ratios are only 8 bits long */
- 			if (tdp_ratio - 1 == tar) {
- 				max_pstate = tar;
- 				pr_debug("max_pstate=TAC %x\n", max_pstate);
-diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
-index a9c659f58974..04042038ec4b 100644
---- a/drivers/cpufreq/sti-cpufreq.c
-+++ b/drivers/cpufreq/sti-cpufreq.c
-@@ -259,6 +259,10 @@ static int sti_cpufreq_init(void)
- {
- 	int ret;
- 
-+	if ((!of_machine_is_compatible("st,stih407")) &&
-+		(!of_machine_is_compatible("st,stih410")))
+diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
+index 1f44a151d206..d5a099b022e4 100644
+--- a/drivers/nfc/st21nfca/i2c.c
++++ b/drivers/nfc/st21nfca/i2c.c
+@@ -524,8 +524,10 @@ static int st21nfca_hci_i2c_acpi_request_resources(struct i2c_client *client)
+ 	/* Get EN GPIO from ACPI */
+ 	gpiod_ena = devm_gpiod_get_index(dev, ST21NFCA_GPIO_NAME_EN, 1,
+ 					 GPIOD_OUT_LOW);
+-	if (!IS_ERR(gpiod_ena))
+-		phy->gpio_ena = desc_to_gpio(gpiod_ena);
++	if (!IS_ERR(gpiod_ena)) {
++		nfc_err(dev, "Unable to get ENABLE GPIO\n");
 +		return -ENODEV;
-+
- 	ddata.cpu = get_cpu_device(0);
- 	if (!ddata.cpu) {
- 		dev_err(ddata.cpu, "Failed to get device for CPU0\n");
-diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
-index 545069d5fdfb..e342565e8715 100644
---- a/drivers/cpuidle/cpuidle-arm.c
-+++ b/drivers/cpuidle/cpuidle-arm.c
-@@ -50,7 +50,7 @@ static int arm_enter_idle_state(struct cpuidle_device *dev,
- 		 * call the CPU ops suspend protocol with idle index as a
- 		 * parameter.
- 		 */
--		arm_cpuidle_suspend(idx);
-+		ret = arm_cpuidle_suspend(idx);
- 
- 		cpu_pm_exit();
- 	}
-diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
-index 3eb3f1279fb7..7de007abe46e 100644
---- a/drivers/crypto/atmel-aes.c
-+++ b/drivers/crypto/atmel-aes.c
-@@ -2085,9 +2085,9 @@ static int atmel_aes_probe(struct platform_device *pdev)
- 	}
- 
- 	aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
--	if (!aes_dd->io_base) {
-+	if (IS_ERR(aes_dd->io_base)) {
- 		dev_err(dev, "can't ioremap\n");
--		err = -ENOMEM;
-+		err = PTR_ERR(aes_dd->io_base);
- 		goto res_err;
- 	}
- 
-diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
-index 8bf9914d4d15..68d47a2da4a1 100644
---- a/drivers/crypto/atmel-sha.c
-+++ b/drivers/crypto/atmel-sha.c
-@@ -1404,9 +1404,9 @@ static int atmel_sha_probe(struct platform_device *pdev)
- 	}
- 
- 	sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
--	if (!sha_dd->io_base) {
-+	if (IS_ERR(sha_dd->io_base)) {
- 		dev_err(dev, "can't ioremap\n");
--		err = -ENOMEM;
-+		err = PTR_ERR(sha_dd->io_base);
- 		goto res_err;
- 	}
++	}
  
-diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
-index 2c7a628d0375..bf467d7be35c 100644
---- a/drivers/crypto/atmel-tdes.c
-+++ b/drivers/crypto/atmel-tdes.c
-@@ -1417,9 +1417,9 @@ static int atmel_tdes_probe(struct platform_device *pdev)
- 	}
+ 	phy->gpio_ena = desc_to_gpio(gpiod_ena);
  
- 	tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
--	if (!tdes_dd->io_base) {
-+	if (IS_ERR(tdes_dd->io_base)) {
- 		dev_err(dev, "can't ioremap\n");
--		err = -ENOMEM;
-+		err = PTR_ERR(tdes_dd->io_base);
- 		goto res_err;
- 	}
+diff --git a/drivers/platform/x86/dell-rbtn.c b/drivers/platform/x86/dell-rbtn.c
+index cd410e392550..d33e9ad3218f 100644
+--- a/drivers/platform/x86/dell-rbtn.c
++++ b/drivers/platform/x86/dell-rbtn.c
+@@ -28,6 +28,7 @@ struct rbtn_data {
+ 	enum rbtn_type type;
+ 	struct rfkill *rfkill;
+ 	struct input_dev *input_dev;
++	bool suspended;
+ };
  
-diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
-index f7e0d8d4c3da..8f50a02ff68d 100644
---- a/drivers/crypto/caam/jr.c
-+++ b/drivers/crypto/caam/jr.c
-@@ -248,7 +248,7 @@ static void caam_jr_dequeue(unsigned long devarg)
- struct device *caam_jr_alloc(void)
- {
- 	struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL;
--	struct device *dev = NULL;
-+	struct device *dev = ERR_PTR(-ENODEV);
- 	int min_tfm_cnt	= INT_MAX;
- 	int tfm_cnt;
  
-diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
-index d89f20c04266..60fc0fa26fd3 100644
---- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
-+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
-@@ -220,6 +220,42 @@ static int ccp_aes_cmac_digest(struct ahash_request *req)
- 	return ccp_aes_cmac_finup(req);
- }
+@@ -220,9 +221,55 @@ static const struct acpi_device_id rbtn_ids[] = {
+ 	{ "", 0 },
+ };
  
-+static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
++#ifdef CONFIG_PM_SLEEP
++static void ACPI_SYSTEM_XFACE rbtn_clear_suspended_flag(void *context)
 +{
-+	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
-+	struct ccp_aes_cmac_exp_ctx state;
-+
-+	/* Don't let anything leak to 'out' */
-+	memset(&state, 0, sizeof(state));
-+
-+	state.null_msg = rctx->null_msg;
-+	memcpy(state.iv, rctx->iv, sizeof(state.iv));
-+	state.buf_count = rctx->buf_count;
-+	memcpy(state.buf, rctx->buf, sizeof(state.buf));
-+
-+	/* 'out' may not be aligned so memcpy from local variable */
-+	memcpy(out, &state, sizeof(state));
++	struct rbtn_data *rbtn_data = context;
 +
-+	return 0;
++	rbtn_data->suspended = false;
 +}
 +
-+static int ccp_aes_cmac_import(struct ahash_request *req, const void *in)
++static int rbtn_suspend(struct device *dev)
 +{
-+	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
-+	struct ccp_aes_cmac_exp_ctx state;
-+
-+	/* 'in' may not be aligned so memcpy to local variable */
-+	memcpy(&state, in, sizeof(state));
++	struct acpi_device *device = to_acpi_device(dev);
++	struct rbtn_data *rbtn_data = acpi_driver_data(device);
 +
-+	memset(rctx, 0, sizeof(*rctx));
-+	rctx->null_msg = state.null_msg;
-+	memcpy(rctx->iv, state.iv, sizeof(rctx->iv));
-+	rctx->buf_count = state.buf_count;
-+	memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
++	rbtn_data->suspended = true;
 +
 +	return 0;
 +}
 +
- static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
- 			       unsigned int key_len)
- {
-@@ -352,10 +388,13 @@ int ccp_register_aes_cmac_algs(struct list_head *head)
- 	alg->final = ccp_aes_cmac_final;
- 	alg->finup = ccp_aes_cmac_finup;
- 	alg->digest = ccp_aes_cmac_digest;
-+	alg->export = ccp_aes_cmac_export;
-+	alg->import = ccp_aes_cmac_import;
- 	alg->setkey = ccp_aes_cmac_setkey;
- 
- 	halg = &alg->halg;
- 	halg->digestsize = AES_BLOCK_SIZE;
-+	halg->statesize = sizeof(struct ccp_aes_cmac_exp_ctx);
- 
- 	base = &halg->base;
- 	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
-diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
-index d14b3f28e010..ab9945f2cb7a 100644
---- a/drivers/crypto/ccp/ccp-crypto-sha.c
-+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
-@@ -207,6 +207,46 @@ static int ccp_sha_digest(struct ahash_request *req)
- 	return ccp_sha_finup(req);
- }
- 
-+static int ccp_sha_export(struct ahash_request *req, void *out)
++static int rbtn_resume(struct device *dev)
 +{
-+	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
-+	struct ccp_sha_exp_ctx state;
-+
-+	/* Don't let anything leak to 'out' */
-+	memset(&state, 0, sizeof(state));
-+
-+	state.type = rctx->type;
-+	state.msg_bits = rctx->msg_bits;
-+	state.first = rctx->first;
-+	memcpy(state.ctx, rctx->ctx, sizeof(state.ctx));
-+	state.buf_count = rctx->buf_count;
-+	memcpy(state.buf, rctx->buf, sizeof(state.buf));
-+
-+	/* 'out' may not be aligned so memcpy from local variable */
-+	memcpy(out, &state, sizeof(state));
++	struct acpi_device *device = to_acpi_device(dev);
++	struct rbtn_data *rbtn_data = acpi_driver_data(device);
++	acpi_status status;
 +
-+	return 0;
-+}
-+
-+static int ccp_sha_import(struct ahash_request *req, const void *in)
-+{
-+	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
-+	struct ccp_sha_exp_ctx state;
-+
-+	/* 'in' may not be aligned so memcpy to local variable */
-+	memcpy(&state, in, sizeof(state));
-+
-+	memset(rctx, 0, sizeof(*rctx));
-+	rctx->type = state.type;
-+	rctx->msg_bits = state.msg_bits;
-+	rctx->first = state.first;
-+	memcpy(rctx->ctx, state.ctx, sizeof(rctx->ctx));
-+	rctx->buf_count = state.buf_count;
-+	memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
++	/*
++	 * Upon resume, some BIOSes send an ACPI notification thet triggers
++	 * an unwanted input event. In order to ignore it, we use a flag
++	 * that we set at suspend and clear once we have received the extra
++	 * ACPI notification. Since ACPI notifications are delivered
++	 * asynchronously to drivers, we clear the flag from the workqueue
++	 * used to deliver the notifications. This should be enough
++	 * to have the flag cleared only after we received the extra
++	 * notification, if any.
++	 */
++	status = acpi_os_execute(OSL_NOTIFY_HANDLER,
++			 rbtn_clear_suspended_flag, rbtn_data);
++	if (ACPI_FAILURE(status))
++		rbtn_clear_suspended_flag(rbtn_data);
 +
 +	return 0;
 +}
++#endif
 +
- static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
- 			  unsigned int key_len)
- {
-@@ -403,9 +443,12 @@ static int ccp_register_sha_alg(struct list_head *head,
- 	alg->final = ccp_sha_final;
- 	alg->finup = ccp_sha_finup;
- 	alg->digest = ccp_sha_digest;
-+	alg->export = ccp_sha_export;
-+	alg->import = ccp_sha_import;
- 
- 	halg = &alg->halg;
- 	halg->digestsize = def->digest_size;
-+	halg->statesize = sizeof(struct ccp_sha_exp_ctx);
- 
- 	base = &halg->base;
- 	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
-diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
-index 76a96f0f44c6..a326ec20bfa8 100644
---- a/drivers/crypto/ccp/ccp-crypto.h
-+++ b/drivers/crypto/ccp/ccp-crypto.h
-@@ -129,6 +129,15 @@ struct ccp_aes_cmac_req_ctx {
- 	struct ccp_cmd cmd;
- };
- 
-+struct ccp_aes_cmac_exp_ctx {
-+	unsigned int null_msg;
-+
-+	u8 iv[AES_BLOCK_SIZE];
-+
-+	unsigned int buf_count;
-+	u8 buf[AES_BLOCK_SIZE];
-+};
++static SIMPLE_DEV_PM_OPS(rbtn_pm_ops, rbtn_suspend, rbtn_resume);
 +
- /***** SHA related defines *****/
- #define MAX_SHA_CONTEXT_SIZE	SHA256_DIGEST_SIZE
- #define MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE
-@@ -171,6 +180,19 @@ struct ccp_sha_req_ctx {
- 	struct ccp_cmd cmd;
- };
+ static struct acpi_driver rbtn_driver = {
+ 	.name = "dell-rbtn",
+ 	.ids = rbtn_ids,
++	.drv.pm = &rbtn_pm_ops,
+ 	.ops = {
+ 		.add = rbtn_add,
+ 		.remove = rbtn_remove,
+@@ -384,6 +431,15 @@ static void rbtn_notify(struct acpi_device *device, u32 event)
+ {
+ 	struct rbtn_data *rbtn_data = device->driver_data;
  
-+struct ccp_sha_exp_ctx {
-+	enum ccp_sha_type type;
-+
-+	u64 msg_bits;
-+
-+	unsigned int first;
-+
-+	u8 ctx[MAX_SHA_CONTEXT_SIZE];
-+
-+	unsigned int buf_count;
-+	u8 buf[MAX_SHA_BLOCK_SIZE];
-+};
++	/*
++	 * Some BIOSes send a notification at resume.
++	 * Ignore it to prevent unwanted input events.
++	 */
++	if (rbtn_data->suspended) {
++		dev_dbg(&device->dev, "ACPI notification ignored\n");
++		return;
++	}
 +
- /***** Common Context Structure *****/
- struct ccp_ctx {
- 	int (*complete)(struct crypto_async_request *req, int ret);
-diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
-index c0656e7f37b5..80239ae69527 100644
---- a/drivers/crypto/marvell/cesa.c
-+++ b/drivers/crypto/marvell/cesa.c
-@@ -420,7 +420,7 @@ static int mv_cesa_probe(struct platform_device *pdev)
- 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
- 	cesa->regs = devm_ioremap_resource(dev, res);
- 	if (IS_ERR(cesa->regs))
--		return -ENOMEM;
-+		return PTR_ERR(cesa->regs);
+ 	if (event != 0x80) {
+ 		dev_info(&device->dev, "Received unknown event (0x%x)\n",
+ 			 event);
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index 6a820668d442..b7cc6027cb7b 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -315,6 +315,7 @@ static void scsi_target_destroy(struct scsi_target *starget)
+ 	struct Scsi_Host *shost = dev_to_shost(dev->parent);
+ 	unsigned long flags;
  
- 	ret = mv_cesa_dev_dma_init(cesa);
- 	if (ret)
-diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
-index 0e82ce3c383e..976b01e58afb 100644
---- a/drivers/crypto/qat/qat_common/adf_common_drv.h
-+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
-@@ -236,6 +236,8 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
- 				 uint32_t vf_mask);
- void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
- void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
-+int adf_init_pf_wq(void);
-+void adf_exit_pf_wq(void);
- #else
- static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
- {
-@@ -253,5 +255,14 @@ static inline void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
- static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
++	BUG_ON(starget->state == STARGET_DEL);
+ 	starget->state = STARGET_DEL;
+ 	transport_destroy_device(dev);
+ 	spin_lock_irqsave(shost->host_lock, flags);
+diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
+index 00bc7218a7f8..9e5f893aa3ad 100644
+--- a/drivers/scsi/scsi_sysfs.c
++++ b/drivers/scsi/scsi_sysfs.c
+@@ -1272,18 +1272,18 @@ static void __scsi_remove_target(struct scsi_target *starget)
+ void scsi_remove_target(struct device *dev)
  {
- }
-+
-+static inline int adf_init_pf_wq(void)
-+{
-+	return 0;
-+}
-+
-+static inline void adf_exit_pf_wq(void)
-+{
-+}
- #endif
- #endif
-diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
-index 5c897e6e7994..3c3f948290ca 100644
---- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
-+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
-@@ -462,12 +462,17 @@ static int __init adf_register_ctl_device_driver(void)
- 	if (adf_init_aer())
- 		goto err_aer;
- 
-+	if (adf_init_pf_wq())
-+		goto err_pf_wq;
-+
- 	if (qat_crypto_register())
- 		goto err_crypto_register;
- 
- 	return 0;
+ 	struct Scsi_Host *shost = dev_to_shost(dev->parent);
+-	struct scsi_target *starget, *last_target = NULL;
++	struct scsi_target *starget;
+ 	unsigned long flags;
  
- err_crypto_register:
-+	adf_exit_pf_wq();
-+err_pf_wq:
- 	adf_exit_aer();
- err_aer:
- 	adf_chr_drv_destroy();
-@@ -480,6 +485,7 @@ static void __exit adf_unregister_ctl_device_driver(void)
- {
- 	adf_chr_drv_destroy();
- 	adf_exit_aer();
-+	adf_exit_pf_wq();
- 	qat_crypto_unregister();
- 	adf_clean_vf_map(false);
- 	mutex_destroy(&adf_ctl_lock);
-diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
-index 1117a8b58280..38a0415e767d 100644
---- a/drivers/crypto/qat/qat_common/adf_sriov.c
-+++ b/drivers/crypto/qat/qat_common/adf_sriov.c
-@@ -119,11 +119,6 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
+ restart:
+ 	spin_lock_irqsave(shost->host_lock, flags);
+ 	list_for_each_entry(starget, &shost->__targets, siblings) {
+ 		if (starget->state == STARGET_DEL ||
+-		    starget == last_target)
++		    starget->state == STARGET_REMOVE)
+ 			continue;
+ 		if (starget->dev.parent == dev || &starget->dev == dev) {
+ 			kref_get(&starget->reap_ref);
+-			last_target = starget;
++			starget->state = STARGET_REMOVE;
+ 			spin_unlock_irqrestore(shost->host_lock, flags);
+ 			__scsi_remove_target(starget);
+ 			scsi_target_reap(starget);
+diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
+index 940781183fac..3be10963f98b 100644
+--- a/drivers/staging/comedi/drivers/das1800.c
++++ b/drivers/staging/comedi/drivers/das1800.c
+@@ -567,14 +567,17 @@ static int das1800_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
+ 	struct comedi_isadma_desc *desc;
  	int i;
- 	u32 reg;
  
--	/* Workqueue for PF2VF responses */
--	pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
--	if (!pf2vf_resp_wq)
--		return -ENOMEM;
--
- 	for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
- 	     i++, vf_info++) {
- 		/* This ptr will be populated when VFs will be created */
-@@ -216,11 +211,6 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
- 
- 	kfree(accel_dev->pf.vf_info);
- 	accel_dev->pf.vf_info = NULL;
+-	outb(0x0, dev->iobase + DAS1800_STATUS);	/* disable conversions */
+-	outb(0x0, dev->iobase + DAS1800_CONTROL_B);	/* disable interrupts and dma */
+-	outb(0x0, dev->iobase + DAS1800_CONTROL_A);	/* disable and clear fifo and stop triggering */
 -
--	if (pf2vf_resp_wq) {
--		destroy_workqueue(pf2vf_resp_wq);
--		pf2vf_resp_wq = NULL;
--	}
- }
- EXPORT_SYMBOL_GPL(adf_disable_sriov);
- 
-@@ -304,3 +294,19 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
- 	return numvfs;
- }
- EXPORT_SYMBOL_GPL(adf_sriov_configure);
-+
-+int __init adf_init_pf_wq(void)
-+{
-+	/* Workqueue for PF2VF responses */
-+	pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
-+
-+	return !pf2vf_resp_wq ? -ENOMEM : 0;
-+}
+-	for (i = 0; i < 2; i++) {
+-		desc = &dma->desc[i];
+-		if (desc->chan)
+-			comedi_isadma_disable(desc->chan);
++	/* disable and stop conversions */
++	outb(0x0, dev->iobase + DAS1800_STATUS);
++	outb(0x0, dev->iobase + DAS1800_CONTROL_B);
++	outb(0x0, dev->iobase + DAS1800_CONTROL_A);
 +
-+void adf_exit_pf_wq(void)
-+{
-+	if (pf2vf_resp_wq) {
-+		destroy_workqueue(pf2vf_resp_wq);
-+		pf2vf_resp_wq = NULL;
-+	}
-+}
-diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
-index a19ee127edca..e72fea737a0d 100644
---- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
-+++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
-@@ -35,6 +35,7 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
- 	unsigned int todo;
- 	struct sg_mapping_iter mi, mo;
- 	unsigned int oi, oo; /* offset for in and out */
-+	unsigned long flags;
- 
- 	if (areq->nbytes == 0)
- 		return 0;
-@@ -49,7 +50,7 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
- 		return -EINVAL;
++	if (dma) {
++		for (i = 0; i < 2; i++) {
++			desc = &dma->desc[i];
++			if (desc->chan)
++				comedi_isadma_disable(desc->chan);
++		}
  	}
  
--	spin_lock_bh(&ss->slock);
-+	spin_lock_irqsave(&ss->slock, flags);
- 
- 	for (i = 0; i < op->keylen; i += 4)
- 		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
-@@ -117,7 +118,7 @@ release_ss:
- 	sg_miter_stop(&mi);
- 	sg_miter_stop(&mo);
- 	writel(0, ss->base + SS_CTL);
--	spin_unlock_bh(&ss->slock);
-+	spin_unlock_irqrestore(&ss->slock, flags);
- 	return err;
- }
- 
-@@ -149,6 +150,7 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
- 	unsigned int ob = 0;	/* offset in buf */
- 	unsigned int obo = 0;	/* offset in bufo*/
- 	unsigned int obl = 0;	/* length of data in bufo */
-+	unsigned long flags;
+ 	return 0;
+@@ -934,13 +937,14 @@ static void das1800_ai_setup_dma(struct comedi_device *dev,
+ {
+ 	struct das1800_private *devpriv = dev->private;
+ 	struct comedi_isadma *dma = devpriv->dma;
+-	struct comedi_isadma_desc *desc = &dma->desc[0];
++	struct comedi_isadma_desc *desc;
+ 	unsigned int bytes;
  
- 	if (areq->nbytes == 0)
- 		return 0;
-@@ -181,7 +183,7 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
- 	if (no_chunk == 1)
- 		return sun4i_ss_opti_poll(areq);
+ 	if ((devpriv->irq_dma_bits & DMA_ENABLED) == 0)
+ 		return;
  
--	spin_lock_bh(&ss->slock);
-+	spin_lock_irqsave(&ss->slock, flags);
+ 	dma->cur_dma = 0;
++	desc = &dma->desc[0];
  
- 	for (i = 0; i < op->keylen; i += 4)
- 		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
-@@ -308,7 +310,7 @@ release_ss:
- 	sg_miter_stop(&mi);
- 	sg_miter_stop(&mo);
- 	writel(0, ss->base + SS_CTL);
--	spin_unlock_bh(&ss->slock);
-+	spin_unlock_irqrestore(&ss->slock, flags);
+ 	/* determine a dma transfer size to fill buffer in 0.3 sec */
+ 	bytes = das1800_ai_transfer_size(dev, s, desc->maxsize, 300000000);
+diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
+index 0dde34e3a7c5..545c60c826a1 100644
+--- a/drivers/thunderbolt/eeprom.c
++++ b/drivers/thunderbolt/eeprom.c
+@@ -444,6 +444,7 @@ int tb_drom_read(struct tb_switch *sw)
+ 	return tb_drom_parse_entries(sw);
+ err:
+ 	kfree(sw->drom);
++	sw->drom = NULL;
+ 	return -EIO;
  
- 	return err;
- }
-diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
-index a0d4a08313ae..b7ee8d30147d 100644
---- a/drivers/crypto/talitos.c
-+++ b/drivers/crypto/talitos.c
-@@ -63,6 +63,14 @@ static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
- 		ptr->eptr = upper_32_bits(dma_addr);
  }
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index c3fe026d3168..9aff37186246 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -2045,7 +2045,9 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm)
+ 		}
+ 	}
+ 	spin_unlock(&gsm_mux_lock);
+-	WARN_ON(i == MAX_MUX);
++	/* open failed before registering => nothing to do */
++	if (i == MAX_MUX)
++		return;
  
-+static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
-+			     struct talitos_ptr *src_ptr, bool is_sec1)
-+{
-+	dst_ptr->ptr = src_ptr->ptr;
-+	if (!is_sec1)
-+		dst_ptr->eptr = src_ptr->eptr;
-+}
-+
- static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
- 			       bool is_sec1)
- {
-@@ -827,6 +835,16 @@ struct talitos_ahash_req_ctx {
- 	struct scatterlist *psrc;
- };
+ 	/* In theory disconnecting DLCI 0 is sufficient but for some
+ 	   modems this is apparently not the case. */
+diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
+index bbc4ce66c2c1..644ddb841d9f 100644
+--- a/drivers/tty/n_hdlc.c
++++ b/drivers/tty/n_hdlc.c
+@@ -600,7 +600,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
+ 	add_wait_queue(&tty->read_wait, &wait);
  
-+struct talitos_export_state {
-+	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
-+	u8 buf[HASH_MAX_BLOCK_SIZE];
-+	unsigned int swinit;
-+	unsigned int first;
-+	unsigned int last;
-+	unsigned int to_hash_later;
-+	unsigned int nbuf;
-+};
-+
- static int aead_setkey(struct crypto_aead *authenc,
- 		       const u8 *key, unsigned int keylen)
- {
-@@ -1083,21 +1101,20 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
- 	sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
- 			      (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
- 							   : DMA_TO_DEVICE);
--
- 	/* hmac data */
- 	desc->ptr[1].len = cpu_to_be16(areq->assoclen);
- 	if (sg_count > 1 &&
- 	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
- 					 areq->assoclen,
- 					 &edesc->link_tbl[tbl_off])) > 1) {
--		tbl_off += ret;
--
- 		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
- 			       sizeof(struct talitos_ptr), 0);
- 		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
- 
- 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
- 					   edesc->dma_len, DMA_BIDIRECTIONAL);
-+
-+		tbl_off += ret;
- 	} else {
- 		to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
- 		desc->ptr[1].j_extent = 0;
-@@ -1126,11 +1143,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
- 	if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
- 		sg_link_tbl_len += authsize;
- 
--	if (sg_count > 1 &&
--	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
--					 sg_link_tbl_len,
--					 &edesc->link_tbl[tbl_off])) > 1) {
--		tbl_off += ret;
-+	if (sg_count == 1) {
-+		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
-+			       areq->assoclen, 0);
-+	} else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
-+						areq->assoclen, sg_link_tbl_len,
-+						&edesc->link_tbl[tbl_off])) >
-+		   1) {
- 		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
- 		to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
- 					      tbl_off *
-@@ -1138,8 +1157,10 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
- 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
- 					   edesc->dma_len,
- 					   DMA_BIDIRECTIONAL);
--	} else
--		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
-+		tbl_off += ret;
-+	} else {
-+		copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
-+	}
- 
- 	/* cipher out */
- 	desc->ptr[5].len = cpu_to_be16(cryptlen);
-@@ -1151,11 +1172,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
- 
- 	edesc->icv_ool = false;
- 
--	if (sg_count > 1 &&
--	    (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
-+	if (sg_count == 1) {
-+		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
-+			       areq->assoclen, 0);
-+	} else if ((sg_count =
-+			sg_to_link_tbl_offset(areq->dst, sg_count,
- 					      areq->assoclen, cryptlen,
--					      &edesc->link_tbl[tbl_off])) >
--	    1) {
-+					      &edesc->link_tbl[tbl_off])) > 1) {
- 		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
- 
- 		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
-@@ -1178,8 +1201,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
- 					   edesc->dma_len, DMA_BIDIRECTIONAL);
- 
- 		edesc->icv_ool = true;
--	} else
--		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
-+	} else {
-+		copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
-+	}
- 
- 	/* iv out */
- 	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
-@@ -1967,6 +1991,46 @@ static int ahash_digest(struct ahash_request *areq)
- 	return ahash_process_req(areq, areq->nbytes);
- }
- 
-+static int ahash_export(struct ahash_request *areq, void *out)
-+{
-+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
-+	struct talitos_export_state *export = out;
-+
-+	memcpy(export->hw_context, req_ctx->hw_context,
-+	       req_ctx->hw_context_size);
-+	memcpy(export->buf, req_ctx->buf, req_ctx->nbuf);
-+	export->swinit = req_ctx->swinit;
-+	export->first = req_ctx->first;
-+	export->last = req_ctx->last;
-+	export->to_hash_later = req_ctx->to_hash_later;
-+	export->nbuf = req_ctx->nbuf;
-+
-+	return 0;
-+}
-+
-+static int ahash_import(struct ahash_request *areq, const void *in)
-+{
-+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
-+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
-+	const struct talitos_export_state *export = in;
-+
-+	memset(req_ctx, 0, sizeof(*req_ctx));
-+	req_ctx->hw_context_size =
-+		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
-+			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
-+			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
-+	memcpy(req_ctx->hw_context, export->hw_context,
-+	       req_ctx->hw_context_size);
-+	memcpy(req_ctx->buf, export->buf, export->nbuf);
-+	req_ctx->swinit = export->swinit;
-+	req_ctx->first = export->first;
-+	req_ctx->last = export->last;
-+	req_ctx->to_hash_later = export->to_hash_later;
-+	req_ctx->nbuf = export->nbuf;
-+
-+	return 0;
-+}
-+
- struct keyhash_result {
- 	struct completion completion;
- 	int err;
-@@ -2444,6 +2508,7 @@ static struct talitos_alg_template driver_algs[] = {
- 	{	.type = CRYPTO_ALG_TYPE_AHASH,
- 		.alg.hash = {
- 			.halg.digestsize = MD5_DIGEST_SIZE,
-+			.halg.statesize = sizeof(struct talitos_export_state),
- 			.halg.base = {
- 				.cra_name = "md5",
- 				.cra_driver_name = "md5-talitos",
-@@ -2459,6 +2524,7 @@ static struct talitos_alg_template driver_algs[] = {
- 	{	.type = CRYPTO_ALG_TYPE_AHASH,
- 		.alg.hash = {
- 			.halg.digestsize = SHA1_DIGEST_SIZE,
-+			.halg.statesize = sizeof(struct talitos_export_state),
- 			.halg.base = {
- 				.cra_name = "sha1",
- 				.cra_driver_name = "sha1-talitos",
-@@ -2474,6 +2540,7 @@ static struct talitos_alg_template driver_algs[] = {
- 	{	.type = CRYPTO_ALG_TYPE_AHASH,
- 		.alg.hash = {
- 			.halg.digestsize = SHA224_DIGEST_SIZE,
-+			.halg.statesize = sizeof(struct talitos_export_state),
- 			.halg.base = {
- 				.cra_name = "sha224",
- 				.cra_driver_name = "sha224-talitos",
-@@ -2489,6 +2556,7 @@ static struct talitos_alg_template driver_algs[] = {
- 	{	.type = CRYPTO_ALG_TYPE_AHASH,
- 		.alg.hash = {
- 			.halg.digestsize = SHA256_DIGEST_SIZE,
-+			.halg.statesize = sizeof(struct talitos_export_state),
- 			.halg.base = {
- 				.cra_name = "sha256",
- 				.cra_driver_name = "sha256-talitos",
-@@ -2504,6 +2572,7 @@ static struct talitos_alg_template driver_algs[] = {
- 	{	.type = CRYPTO_ALG_TYPE_AHASH,
- 		.alg.hash = {
- 			.halg.digestsize = SHA384_DIGEST_SIZE,
-+			.halg.statesize = sizeof(struct talitos_export_state),
- 			.halg.base = {
- 				.cra_name = "sha384",
- 				.cra_driver_name = "sha384-talitos",
-@@ -2519,6 +2588,7 @@ static struct talitos_alg_template driver_algs[] = {
- 	{	.type = CRYPTO_ALG_TYPE_AHASH,
- 		.alg.hash = {
- 			.halg.digestsize = SHA512_DIGEST_SIZE,
-+			.halg.statesize = sizeof(struct talitos_export_state),
- 			.halg.base = {
- 				.cra_name = "sha512",
- 				.cra_driver_name = "sha512-talitos",
-@@ -2534,6 +2604,7 @@ static struct talitos_alg_template driver_algs[] = {
- 	{	.type = CRYPTO_ALG_TYPE_AHASH,
- 		.alg.hash = {
- 			.halg.digestsize = MD5_DIGEST_SIZE,
-+			.halg.statesize = sizeof(struct talitos_export_state),
- 			.halg.base = {
- 				.cra_name = "hmac(md5)",
- 				.cra_driver_name = "hmac-md5-talitos",
-@@ -2549,6 +2620,7 @@ static struct talitos_alg_template driver_algs[] = {
- 	{	.type = CRYPTO_ALG_TYPE_AHASH,
- 		.alg.hash = {
- 			.halg.digestsize = SHA1_DIGEST_SIZE,
-+			.halg.statesize = sizeof(struct talitos_export_state),
- 			.halg.base = {
- 				.cra_name = "hmac(sha1)",
- 				.cra_driver_name = "hmac-sha1-talitos",
-@@ -2564,6 +2636,7 @@ static struct talitos_alg_template driver_algs[] = {
- 	{	.type = CRYPTO_ALG_TYPE_AHASH,
- 		.alg.hash = {
- 			.halg.digestsize = SHA224_DIGEST_SIZE,
-+			.halg.statesize = sizeof(struct talitos_export_state),
- 			.halg.base = {
- 				.cra_name = "hmac(sha224)",
- 				.cra_driver_name = "hmac-sha224-talitos",
-@@ -2579,6 +2652,7 @@ static struct talitos_alg_template driver_algs[] = {
- 	{	.type = CRYPTO_ALG_TYPE_AHASH,
- 		.alg.hash = {
- 			.halg.digestsize = SHA256_DIGEST_SIZE,
-+			.halg.statesize = sizeof(struct talitos_export_state),
- 			.halg.base = {
- 				.cra_name = "hmac(sha256)",
- 				.cra_driver_name = "hmac-sha256-talitos",
-@@ -2594,6 +2668,7 @@ static struct talitos_alg_template driver_algs[] = {
- 	{	.type = CRYPTO_ALG_TYPE_AHASH,
- 		.alg.hash = {
- 			.halg.digestsize = SHA384_DIGEST_SIZE,
-+			.halg.statesize = sizeof(struct talitos_export_state),
- 			.halg.base = {
- 				.cra_name = "hmac(sha384)",
- 				.cra_driver_name = "hmac-sha384-talitos",
-@@ -2609,6 +2684,7 @@ static struct talitos_alg_template driver_algs[] = {
- 	{	.type = CRYPTO_ALG_TYPE_AHASH,
- 		.alg.hash = {
- 			.halg.digestsize = SHA512_DIGEST_SIZE,
-+			.halg.statesize = sizeof(struct talitos_export_state),
- 			.halg.base = {
- 				.cra_name = "hmac(sha512)",
- 				.cra_driver_name = "hmac-sha512-talitos",
-@@ -2629,21 +2705,11 @@ struct talitos_crypto_alg {
- 	struct talitos_alg_template algt;
- };
- 
--static int talitos_cra_init(struct crypto_tfm *tfm)
-+static int talitos_init_common(struct talitos_ctx *ctx,
-+			       struct talitos_crypto_alg *talitos_alg)
- {
--	struct crypto_alg *alg = tfm->__crt_alg;
--	struct talitos_crypto_alg *talitos_alg;
--	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
- 	struct talitos_private *priv;
- 
--	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
--		talitos_alg = container_of(__crypto_ahash_alg(alg),
--					   struct talitos_crypto_alg,
--					   algt.alg.hash);
--	else
--		talitos_alg = container_of(alg, struct talitos_crypto_alg,
--					   algt.alg.crypto);
--
- 	/* update context with ptr to dev */
- 	ctx->dev = talitos_alg->dev;
- 
-@@ -2661,10 +2727,33 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
- 	return 0;
- }
- 
-+static int talitos_cra_init(struct crypto_tfm *tfm)
-+{
-+	struct crypto_alg *alg = tfm->__crt_alg;
-+	struct talitos_crypto_alg *talitos_alg;
-+	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
-+
-+	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
-+		talitos_alg = container_of(__crypto_ahash_alg(alg),
-+					   struct talitos_crypto_alg,
-+					   algt.alg.hash);
-+	else
-+		talitos_alg = container_of(alg, struct talitos_crypto_alg,
-+					   algt.alg.crypto);
-+
-+	return talitos_init_common(ctx, talitos_alg);
-+}
-+
- static int talitos_cra_init_aead(struct crypto_aead *tfm)
- {
--	talitos_cra_init(crypto_aead_tfm(tfm));
--	return 0;
-+	struct aead_alg *alg = crypto_aead_alg(tfm);
-+	struct talitos_crypto_alg *talitos_alg;
-+	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
-+
-+	talitos_alg = container_of(alg, struct talitos_crypto_alg,
-+				   algt.alg.aead);
-+
-+	return talitos_init_common(ctx, talitos_alg);
- }
- 
- static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
-@@ -2787,6 +2876,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
- 		t_alg->algt.alg.hash.finup = ahash_finup;
- 		t_alg->algt.alg.hash.digest = ahash_digest;
- 		t_alg->algt.alg.hash.setkey = ahash_setkey;
-+		t_alg->algt.alg.hash.import = ahash_import;
-+		t_alg->algt.alg.hash.export = ahash_export;
- 
- 		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
- 		    !strncmp(alg->cra_name, "hmac", 4)) {
-diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
-index 4c243c1ffc7f..790f7cadc1ed 100644
---- a/drivers/crypto/ux500/cryp/cryp_core.c
-+++ b/drivers/crypto/ux500/cryp/cryp_core.c
-@@ -1440,9 +1440,9 @@ static int ux500_cryp_probe(struct platform_device *pdev)
- 
- 	device_data->phybase = res->start;
- 	device_data->base = devm_ioremap_resource(dev, res);
--	if (!device_data->base) {
-+	if (IS_ERR(device_data->base)) {
- 		dev_err(dev, "[%s]: ioremap failed!", __func__);
--		ret = -ENOMEM;
-+		ret = PTR_ERR(device_data->base);
- 		goto out;
- 	}
- 
-diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
-index d6fdc583ce5d..574e87c7f2b8 100644
---- a/drivers/crypto/ux500/hash/hash_core.c
-+++ b/drivers/crypto/ux500/hash/hash_core.c
-@@ -1659,9 +1659,9 @@ static int ux500_hash_probe(struct platform_device *pdev)
- 
- 	device_data->phybase = res->start;
- 	device_data->base = devm_ioremap_resource(dev, res);
--	if (!device_data->base) {
-+	if (IS_ERR(device_data->base)) {
- 		dev_err(dev, "%s: ioremap() failed!\n", __func__);
--		ret = -ENOMEM;
-+		ret = PTR_ERR(device_data->base);
- 		goto out;
- 	}
- 	spin_lock_init(&device_data->ctx_lock);
-diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
-index 5ad0ec1f0e29..97199b3c25a2 100644
---- a/drivers/dma/dw/core.c
-+++ b/drivers/dma/dw/core.c
-@@ -130,26 +130,14 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
- static void dwc_initialize(struct dw_dma_chan *dwc)
- {
- 	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
--	struct dw_dma_slave *dws = dwc->chan.private;
- 	u32 cfghi = DWC_CFGH_FIFO_MODE;
- 	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
- 
- 	if (dwc->initialized == true)
- 		return;
- 
--	if (dws) {
--		/*
--		 * We need controller-specific data to set up slave
--		 * transfers.
--		 */
--		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
--
--		cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
--		cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
--	} else {
--		cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
--		cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
--	}
-+	cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
-+	cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
- 
- 	channel_writel(dwc, CFG_LO, cfglo);
- 	channel_writel(dwc, CFG_HI, cfghi);
-@@ -941,7 +929,7 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
- 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- 	struct dw_dma_slave *dws = param;
- 
--	if (!dws || dws->dma_dev != chan->device->dev)
-+	if (dws->dma_dev != chan->device->dev)
- 		return false;
- 
- 	/* We have to copy data since dws can be temporary storage */
-@@ -1165,6 +1153,14 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
- 	 * doesn't mean what you think it means), and status writeback.
- 	 */
- 
-+	/*
-+	 * We need controller-specific data to set up slave transfers.
-+	 */
-+	if (chan->private && !dw_dma_filter(chan, chan->private)) {
-+		dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
-+		return -EINVAL;
-+	}
-+
- 	/* Enable controller here if needed */
- 	if (!dw->in_use)
- 		dw_dma_on(dw);
-@@ -1226,6 +1222,14 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
- 	spin_lock_irqsave(&dwc->lock, flags);
- 	list_splice_init(&dwc->free_list, &list);
- 	dwc->descs_allocated = 0;
-+
-+	/* Clear custom channel configuration */
-+	dwc->src_id = 0;
-+	dwc->dst_id = 0;
-+
-+	dwc->src_master = 0;
-+	dwc->dst_master = 0;
-+
- 	dwc->initialized = false;
- 
- 	/* Disable interrupts */
-diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
-index e3d7fcb69b4c..2dac314a2d7a 100644
---- a/drivers/dma/edma.c
-+++ b/drivers/dma/edma.c
-@@ -1563,32 +1563,6 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
- 	return IRQ_HANDLED;
- }
- 
--static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable)
--{
--	struct platform_device *tc_pdev;
--	int ret;
--
--	if (!IS_ENABLED(CONFIG_OF) || !tc)
--		return;
--
--	tc_pdev = of_find_device_by_node(tc->node);
--	if (!tc_pdev) {
--		pr_err("%s: TPTC device is not found\n", __func__);
--		return;
--	}
--	if (!pm_runtime_enabled(&tc_pdev->dev))
--		pm_runtime_enable(&tc_pdev->dev);
--
--	if (enable)
--		ret = pm_runtime_get_sync(&tc_pdev->dev);
--	else
--		ret = pm_runtime_put_sync(&tc_pdev->dev);
--
--	if (ret < 0)
--		pr_err("%s: pm_runtime_%s_sync() failed for %s\n", __func__,
--		       enable ? "get" : "put", dev_name(&tc_pdev->dev));
--}
--
- /* Alloc channel resources */
- static int edma_alloc_chan_resources(struct dma_chan *chan)
- {
-@@ -1625,8 +1599,6 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
- 		EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
- 		echan->hw_triggered ? "HW" : "SW");
- 
--	edma_tc_set_pm_state(echan->tc, true);
--
- 	return 0;
- 
- err_slot:
-@@ -1663,7 +1635,6 @@ static void edma_free_chan_resources(struct dma_chan *chan)
- 		echan->alloced = false;
- 	}
- 
--	edma_tc_set_pm_state(echan->tc, false);
- 	echan->tc = NULL;
- 	echan->hw_triggered = false;
- 
-@@ -2408,10 +2379,8 @@ static int edma_pm_suspend(struct device *dev)
- 	int i;
- 
- 	for (i = 0; i < ecc->num_channels; i++) {
--		if (echan[i].alloced) {
-+		if (echan[i].alloced)
- 			edma_setup_interrupt(&echan[i], false);
--			edma_tc_set_pm_state(echan[i].tc, false);
--		}
- 	}
- 
- 	return 0;
-@@ -2441,8 +2410,6 @@ static int edma_pm_resume(struct device *dev)
- 
- 			/* Set up channel -> slot mapping for the entry slot */
- 			edma_set_chmap(&echan[i], echan[i].slot[0]);
--
--			edma_tc_set_pm_state(echan[i].tc, true);
- 		}
- 	}
- 
-@@ -2466,7 +2433,8 @@ static struct platform_driver edma_driver = {
- 
- static int edma_tptc_probe(struct platform_device *pdev)
- {
--	return 0;
-+	pm_runtime_enable(&pdev->dev);
-+	return pm_runtime_get_sync(&pdev->dev);
- }
- 
- static struct platform_driver edma_tptc_driver = {
-diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
-index eef145edb936..025d375fc3d7 100644
---- a/drivers/dma/hsu/hsu.c
-+++ b/drivers/dma/hsu/hsu.c
-@@ -135,7 +135,7 @@ static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
- 	sr = hsu_chan_readl(hsuc, HSU_CH_SR);
- 	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
- 
--	return sr;
-+	return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
- }
- 
- irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
-@@ -254,10 +254,13 @@ static void hsu_dma_issue_pending(struct dma_chan *chan)
- static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
- {
- 	struct hsu_dma_desc *desc = hsuc->desc;
--	size_t bytes = desc->length;
-+	size_t bytes = 0;
- 	int i;
- 
--	i = desc->active % HSU_DMA_CHAN_NR_DESC;
-+	for (i = desc->active; i < desc->nents; i++)
-+		bytes += desc->sg[i].len;
-+
-+	i = HSU_DMA_CHAN_NR_DESC - 1;
- 	do {
- 		bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
- 	} while (--i >= 0);
-diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
-index 578a8ee8cd05..6b070c22b1df 100644
---- a/drivers/dma/hsu/hsu.h
-+++ b/drivers/dma/hsu/hsu.h
-@@ -41,6 +41,9 @@
- #define HSU_CH_SR_DESCTO(x)	BIT(8 + (x))
- #define HSU_CH_SR_DESCTO_ANY	(BIT(11) | BIT(10) | BIT(9) | BIT(8))
- #define HSU_CH_SR_CHE		BIT(15)
-+#define HSU_CH_SR_DESCE(x)	BIT(16 + (x))
-+#define HSU_CH_SR_DESCE_ANY	(BIT(19) | BIT(18) | BIT(17) | BIT(16))
-+#define HSU_CH_SR_CDESC_ANY	(BIT(31) | BIT(30))
- 
- /* Bits in HSU_CH_CR */
- #define HSU_CH_CR_CHA		BIT(0)
-diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
-index 9794b073d7d7..a5ed9407c51b 100644
---- a/drivers/dma/omap-dma.c
-+++ b/drivers/dma/omap-dma.c
-@@ -48,6 +48,7 @@ struct omap_chan {
- 	unsigned dma_sig;
- 	bool cyclic;
- 	bool paused;
-+	bool running;
- 
- 	int dma_ch;
- 	struct omap_desc *desc;
-@@ -294,6 +295,8 @@ static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
- 
- 	/* Enable channel */
- 	omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
-+
-+	c->running = true;
- }
- 
- static void omap_dma_stop(struct omap_chan *c)
-@@ -355,6 +358,8 @@ static void omap_dma_stop(struct omap_chan *c)
- 
- 		omap_dma_chan_write(c, CLNK_CTRL, val);
- 	}
-+
-+	c->running = false;
- }
- 
- static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
-@@ -673,15 +678,20 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
- 	struct omap_chan *c = to_omap_dma_chan(chan);
- 	struct virt_dma_desc *vd;
- 	enum dma_status ret;
--	uint32_t ccr;
- 	unsigned long flags;
- 
--	ccr = omap_dma_chan_read(c, CCR);
--	/* The channel is no longer active, handle the completion right away */
--	if (!(ccr & CCR_ENABLE))
--		omap_dma_callback(c->dma_ch, 0, c);
--
- 	ret = dma_cookie_status(chan, cookie, txstate);
-+
-+	if (!c->paused && c->running) {
-+		uint32_t ccr = omap_dma_chan_read(c, CCR);
-+		/*
-+		 * The channel is no longer active, set the return value
-+		 * accordingly
-+		 */
-+		if (!(ccr & CCR_ENABLE))
-+			ret = DMA_COMPLETE;
-+	}
-+
- 	if (ret == DMA_COMPLETE || !txstate)
- 		return ret;
- 
-diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
-index debca824bed6..77c1c44009d8 100644
---- a/drivers/dma/pxa_dma.c
-+++ b/drivers/dma/pxa_dma.c
-@@ -122,6 +122,7 @@ struct pxad_chan {
- struct pxad_device {
- 	struct dma_device		slave;
- 	int				nr_chans;
-+	int				nr_requestors;
- 	void __iomem			*base;
- 	struct pxad_phy			*phys;
- 	spinlock_t			phy_lock;	/* Phy association */
-@@ -473,7 +474,7 @@ static void pxad_free_phy(struct pxad_chan *chan)
- 		return;
- 
- 	/* clear the channel mapping in DRCMR */
--	if (chan->drcmr <= DRCMR_CHLNUM) {
-+	if (chan->drcmr <= pdev->nr_requestors) {
- 		reg = pxad_drcmr(chan->drcmr);
- 		writel_relaxed(0, chan->phy->base + reg);
- 	}
-@@ -509,6 +510,7 @@ static bool is_running_chan_misaligned(struct pxad_chan *chan)
- 
- static void phy_enable(struct pxad_phy *phy, bool misaligned)
- {
-+	struct pxad_device *pdev;
- 	u32 reg, dalgn;
- 
- 	if (!phy->vchan)
-@@ -518,7 +520,8 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned)
- 		"%s(); phy=%p(%d) misaligned=%d\n", __func__,
- 		phy, phy->idx, misaligned);
- 
--	if (phy->vchan->drcmr <= DRCMR_CHLNUM) {
-+	pdev = to_pxad_dev(phy->vchan->vc.chan.device);
-+	if (phy->vchan->drcmr <= pdev->nr_requestors) {
- 		reg = pxad_drcmr(phy->vchan->drcmr);
- 		writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
- 	}
-@@ -914,6 +917,7 @@ static void pxad_get_config(struct pxad_chan *chan,
- {
- 	u32 maxburst = 0, dev_addr = 0;
- 	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
-+	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
- 
- 	*dcmd = 0;
- 	if (dir == DMA_DEV_TO_MEM) {
-@@ -922,7 +926,7 @@ static void pxad_get_config(struct pxad_chan *chan,
- 		dev_addr = chan->cfg.src_addr;
- 		*dev_src = dev_addr;
- 		*dcmd |= PXA_DCMD_INCTRGADDR;
--		if (chan->drcmr <= DRCMR_CHLNUM)
-+		if (chan->drcmr <= pdev->nr_requestors)
- 			*dcmd |= PXA_DCMD_FLOWSRC;
- 	}
- 	if (dir == DMA_MEM_TO_DEV) {
-@@ -931,7 +935,7 @@ static void pxad_get_config(struct pxad_chan *chan,
- 		dev_addr = chan->cfg.dst_addr;
- 		*dev_dst = dev_addr;
- 		*dcmd |= PXA_DCMD_INCSRCADDR;
--		if (chan->drcmr <= DRCMR_CHLNUM)
-+		if (chan->drcmr <= pdev->nr_requestors)
- 			*dcmd |= PXA_DCMD_FLOWTRG;
- 	}
- 	if (dir == DMA_MEM_TO_MEM)
-@@ -1341,13 +1345,15 @@ static struct dma_chan *pxad_dma_xlate(struct of_phandle_args *dma_spec,
- 
- static int pxad_init_dmadev(struct platform_device *op,
- 			    struct pxad_device *pdev,
--			    unsigned int nr_phy_chans)
-+			    unsigned int nr_phy_chans,
-+			    unsigned int nr_requestors)
- {
- 	int ret;
- 	unsigned int i;
- 	struct pxad_chan *c;
- 
- 	pdev->nr_chans = nr_phy_chans;
-+	pdev->nr_requestors = nr_requestors;
- 	INIT_LIST_HEAD(&pdev->slave.channels);
- 	pdev->slave.device_alloc_chan_resources = pxad_alloc_chan_resources;
- 	pdev->slave.device_free_chan_resources = pxad_free_chan_resources;
-@@ -1382,7 +1388,7 @@ static int pxad_probe(struct platform_device *op)
- 	const struct of_device_id *of_id;
- 	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
- 	struct resource *iores;
--	int ret, dma_channels = 0;
-+	int ret, dma_channels = 0, nb_requestors = 0;
- 	const enum dma_slave_buswidth widths =
- 		DMA_SLAVE_BUSWIDTH_1_BYTE   | DMA_SLAVE_BUSWIDTH_2_BYTES |
- 		DMA_SLAVE_BUSWIDTH_4_BYTES;
-@@ -1399,13 +1405,23 @@ static int pxad_probe(struct platform_device *op)
- 		return PTR_ERR(pdev->base);
- 
- 	of_id = of_match_device(pxad_dt_ids, &op->dev);
--	if (of_id)
-+	if (of_id) {
- 		of_property_read_u32(op->dev.of_node, "#dma-channels",
- 				     &dma_channels);
--	else if (pdata && pdata->dma_channels)
-+		ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
-+					   &nb_requestors);
-+		if (ret) {
-+			dev_warn(pdev->slave.dev,
-+				 "#dma-requests set to default 32 as missing in OF: %d",
-+				 ret);
-+			nb_requestors = 32;
-+		};
-+	} else if (pdata && pdata->dma_channels) {
- 		dma_channels = pdata->dma_channels;
--	else
-+		nb_requestors = pdata->nb_requestors;
-+	} else {
- 		dma_channels = 32;	/* default 32 channel */
-+	}
- 
- 	dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
- 	dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
-@@ -1423,7 +1439,7 @@ static int pxad_probe(struct platform_device *op)
- 	pdev->slave.descriptor_reuse = true;
- 
- 	pdev->slave.dev = &op->dev;
--	ret = pxad_init_dmadev(op, pdev, dma_channels);
-+	ret = pxad_init_dmadev(op, pdev, dma_channels, nb_requestors);
- 	if (ret) {
- 		dev_err(pdev->slave.dev, "unable to register\n");
- 		return ret;
-@@ -1442,7 +1458,8 @@ static int pxad_probe(struct platform_device *op)
- 
- 	platform_set_drvdata(op, pdev);
- 	pxad_init_debugfs(pdev);
--	dev_info(pdev->slave.dev, "initialized %d channels\n", dma_channels);
-+	dev_info(pdev->slave.dev, "initialized %d channels on %d requestors\n",
-+		 dma_channels, nb_requestors);
- 	return 0;
- }
- 
-diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
-index 9eee13ef83a5..d87a47547ba5 100644
---- a/drivers/edac/amd64_edac.c
-+++ b/drivers/edac/amd64_edac.c
-@@ -1452,7 +1452,7 @@ static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
- 	u64 chan_off;
- 	u64 dram_base		= get_dram_base(pvt, range);
- 	u64 hole_off		= f10_dhar_offset(pvt);
--	u64 dct_sel_base_off	= (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
-+	u64 dct_sel_base_off	= (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
- 
- 	if (hi_rng) {
- 		/*
-diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
-index 01087a38da22..792bdae2b91d 100644
---- a/drivers/edac/i7core_edac.c
-+++ b/drivers/edac/i7core_edac.c
-@@ -1866,7 +1866,7 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
- 
- 	i7_dev = get_i7core_dev(mce->socketid);
- 	if (!i7_dev)
--		return NOTIFY_BAD;
-+		return NOTIFY_DONE;
- 
- 	mci = i7_dev->mci;
- 	pvt = mci->pvt_info;
-diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
-index f5c6b97c8958..8bf745d2da7e 100644
---- a/drivers/edac/sb_edac.c
-+++ b/drivers/edac/sb_edac.c
-@@ -362,6 +362,7 @@ struct sbridge_pvt {
- 
- 	/* Memory type detection */
- 	bool			is_mirrored, is_lockstep, is_close_pg;
-+	bool			is_chan_hash;
- 
- 	/* Fifo double buffers */
- 	struct mce		mce_entry[MCE_LOG_LEN];
-@@ -1060,6 +1061,20 @@ static inline u8 sad_pkg_ha(u8 pkg)
- 	return (pkg >> 2) & 0x1;
- }
- 
-+static int haswell_chan_hash(int idx, u64 addr)
-+{
-+	int i;
-+
-+	/*
-+	 * XOR even bits from 12:26 to bit0 of idx,
-+	 *     odd bits from 13:27 to bit1
-+	 */
-+	for (i = 12; i < 28; i += 2)
-+		idx ^= (addr >> i) & 3;
-+
-+	return idx;
-+}
-+
- /****************************************************************************
- 			Memory check routines
-  ****************************************************************************/
-@@ -1616,6 +1631,10 @@ static int get_dimm_config(struct mem_ctl_info *mci)
- 		KNL_MAX_CHANNELS : NUM_CHANNELS;
- 	u64 knl_mc_sizes[KNL_MAX_CHANNELS];
- 
-+	if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
-+		pci_read_config_dword(pvt->pci_ha0, HASWELL_HASYSDEFEATURE2, &reg);
-+		pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
-+	}
- 	if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
- 			pvt->info.type == KNIGHTS_LANDING)
- 		pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
-@@ -1839,8 +1858,8 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
- 		edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
- 			 n_tads, gb, (mb*1000)/1024,
- 			 ((u64)tmp_mb) << 20L,
--			 (u32)TAD_SOCK(reg),
--			 (u32)TAD_CH(reg),
-+			 (u32)(1 << TAD_SOCK(reg)),
-+			 (u32)TAD_CH(reg) + 1,
- 			 (u32)TAD_TGT0(reg),
- 			 (u32)TAD_TGT1(reg),
- 			 (u32)TAD_TGT2(reg),
-@@ -2118,12 +2137,15 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
- 	}
- 
- 	ch_way = TAD_CH(reg) + 1;
--	sck_way = TAD_SOCK(reg) + 1;
-+	sck_way = TAD_SOCK(reg);
- 
- 	if (ch_way == 3)
- 		idx = addr >> 6;
--	else
-+	else {
- 		idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
-+		if (pvt->is_chan_hash)
-+			idx = haswell_chan_hash(idx, addr);
-+	}
- 	idx = idx % ch_way;
- 
- 	/*
-@@ -2157,7 +2179,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
- 		switch(ch_way) {
- 		case 2:
- 		case 4:
--			sck_xch = 1 << sck_way * (ch_way >> 1);
-+			sck_xch = (1 << sck_way) * (ch_way >> 1);
- 			break;
- 		default:
- 			sprintf(msg, "Invalid mirror set. Can't decode addr");
-@@ -2175,7 +2197,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
- 		 n_tads,
- 		 addr,
- 		 limit,
--		 (u32)TAD_SOCK(reg),
-+		 sck_way,
- 		 ch_way,
- 		 offset,
- 		 idx,
-@@ -2190,18 +2212,12 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
- 			offset, addr);
- 		return -EINVAL;
- 	}
--	addr -= offset;
--	/* Store the low bits [0:6] of the addr */
--	ch_addr = addr & 0x7f;
--	/* Remove socket wayness and remove 6 bits */
--	addr >>= 6;
--	addr = div_u64(addr, sck_xch);
--#if 0
--	/* Divide by channel way */
--	addr = addr / ch_way;
--#endif
--	/* Recover the last 6 bits */
--	ch_addr |= addr << 6;
-+
-+	ch_addr = addr - offset;
-+	ch_addr >>= (6 + shiftup);
-+	ch_addr /= sck_xch;
-+	ch_addr <<= (6 + shiftup);
-+	ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
- 
- 	/*
- 	 * Step 3) Decode rank
-@@ -3152,7 +3168,7 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
- 
- 	mci = get_mci_for_node_id(mce->socketid);
- 	if (!mci)
--		return NOTIFY_BAD;
-+		return NOTIFY_DONE;
- 	pvt = mci->pvt_info;
- 
- 	/*
-diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
-index 74dfb7f4f277..d8cac4661cfe 100644
---- a/drivers/extcon/extcon-max77843.c
-+++ b/drivers/extcon/extcon-max77843.c
-@@ -803,7 +803,7 @@ static int max77843_muic_probe(struct platform_device *pdev)
- 	/* Clear IRQ bits before request IRQs */
- 	ret = regmap_bulk_read(max77843->regmap_muic,
- 			MAX77843_MUIC_REG_INT1, info->status,
--			MAX77843_MUIC_IRQ_NUM);
-+			MAX77843_MUIC_STATUS_NUM);
- 	if (ret) {
- 		dev_err(&pdev->dev, "Failed to Clear IRQ bits\n");
- 		goto err_muic_irq;
-diff --git a/drivers/firmware/broadcom/bcm47xx_nvram.c b/drivers/firmware/broadcom/bcm47xx_nvram.c
-index 0c2f0a61b0ea..0b631e5b5b84 100644
---- a/drivers/firmware/broadcom/bcm47xx_nvram.c
-+++ b/drivers/firmware/broadcom/bcm47xx_nvram.c
-@@ -94,15 +94,14 @@ static int nvram_find_and_copy(void __iomem *iobase, u32 lim)
- 
- found:
- 	__ioread32_copy(nvram_buf, header, sizeof(*header) / 4);
--	header = (struct nvram_header *)nvram_buf;
--	nvram_len = header->len;
-+	nvram_len = ((struct nvram_header *)(nvram_buf))->len;
- 	if (nvram_len > size) {
- 		pr_err("The nvram size according to the header seems to be bigger than the partition on flash\n");
- 		nvram_len = size;
- 	}
- 	if (nvram_len >= NVRAM_SPACE) {
- 		pr_err("nvram on flash (%i bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n",
--		       header->len, NVRAM_SPACE - 1);
-+		       nvram_len, NVRAM_SPACE - 1);
- 		nvram_len = NVRAM_SPACE - 1;
- 	}
- 	/* proceed reading data after header */
-diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
-index 9e15d571b53c..a76c35fc0b92 100644
---- a/drivers/firmware/efi/arm-init.c
-+++ b/drivers/firmware/efi/arm-init.c
-@@ -203,7 +203,19 @@ void __init efi_init(void)
- 
- 	reserve_regions();
- 	early_memunmap(memmap.map, params.mmap_size);
--	memblock_mark_nomap(params.mmap & PAGE_MASK,
--			    PAGE_ALIGN(params.mmap_size +
--				       (params.mmap & ~PAGE_MASK)));
-+
-+	if (IS_ENABLED(CONFIG_ARM)) {
-+		/*
-+		 * ARM currently does not allow ioremap_cache() to be called on
-+		 * memory regions that are covered by struct page. So remove the
-+		 * UEFI memory map from the linear mapping.
-+		 */
-+		memblock_mark_nomap(params.mmap & PAGE_MASK,
-+				    PAGE_ALIGN(params.mmap_size +
-+					       (params.mmap & ~PAGE_MASK)));
-+	} else {
-+		memblock_reserve(params.mmap & PAGE_MASK,
-+				 PAGE_ALIGN(params.mmap_size +
-+					    (params.mmap & ~PAGE_MASK)));
-+	}
- }
-diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
-index 2cd37dad67a6..c51f3b2fe3c0 100644
---- a/drivers/firmware/efi/efi.c
-+++ b/drivers/firmware/efi/efi.c
-@@ -182,6 +182,7 @@ static int generic_ops_register(void)
- {
- 	generic_ops.get_variable = efi.get_variable;
- 	generic_ops.set_variable = efi.set_variable;
-+	generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
- 	generic_ops.get_next_variable = efi.get_next_variable;
- 	generic_ops.query_variable_store = efi_query_variable_store;
- 
-diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
-index 7f2ea21c730d..6f182fd91a6d 100644
---- a/drivers/firmware/efi/vars.c
-+++ b/drivers/firmware/efi/vars.c
-@@ -202,29 +202,44 @@ static const struct variable_validate variable_validate[] = {
- 	{ NULL_GUID, "", NULL },
- };
- 
-+/*
-+ * Check if @var_name matches the pattern given in @match_name.
-+ *
-+ * @var_name: an array of @len non-NUL characters.
-+ * @match_name: a NUL-terminated pattern string, optionally ending in "*". A
-+ *              final "*" character matches any trailing characters @var_name,
-+ *              including the case when there are none left in @var_name.
-+ * @match: on output, the number of non-wildcard characters in @match_name
-+ *         that @var_name matches, regardless of the return value.
-+ * @return: whether @var_name fully matches @match_name.
-+ */
- static bool
- variable_matches(const char *var_name, size_t len, const char *match_name,
- 		 int *match)
- {
- 	for (*match = 0; ; (*match)++) {
- 		char c = match_name[*match];
--		char u = var_name[*match];
- 
--		/* Wildcard in the matching name means we've matched */
--		if (c == '*')
-+		switch (c) {
-+		case '*':
-+			/* Wildcard in @match_name means we've matched. */
- 			return true;
- 
--		/* Case sensitive match */
--		if (!c && *match == len)
--			return true;
-+		case '\0':
-+			/* @match_name has ended. Has @var_name too? */
-+			return (*match == len);
- 
--		if (c != u)
-+		default:
-+			/*
-+			 * We've reached a non-wildcard char in @match_name.
-+			 * Continue only if there's an identical character in
-+			 * @var_name.
-+			 */
-+			if (*match < len && c == var_name[*match])
-+				continue;
- 			return false;
--
--		if (!c)
--			return true;
-+		}
- 	}
--	return true;
- }
- 
- bool
-diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
-index 23196c5fc17c..90c1511d731f 100644
---- a/drivers/gpio/gpio-pca953x.c
-+++ b/drivers/gpio/gpio-pca953x.c
-@@ -18,6 +18,7 @@
- #include <linux/i2c.h>
- #include <linux/platform_data/pca953x.h>
- #include <linux/slab.h>
-+#include <asm/unaligned.h>
- #include <linux/of_platform.h>
- #include <linux/acpi.h>
- 
-@@ -159,7 +160,7 @@ static int pca953x_write_regs(struct pca953x_chip *chip, int reg, u8 *val)
- 		switch (chip->chip_type) {
- 		case PCA953X_TYPE:
- 			ret = i2c_smbus_write_word_data(chip->client,
--							reg << 1, (u16) *val);
-+			    reg << 1, cpu_to_le16(get_unaligned((u16 *)val)));
- 			break;
- 		case PCA957X_TYPE:
- 			ret = i2c_smbus_write_byte_data(chip->client, reg << 1,
-@@ -367,9 +368,11 @@ static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
- 	memcpy(reg_val, chip->reg_output, NBANK(chip));
- 	mutex_lock(&chip->i2c_lock);
- 	for(bank=0; bank<NBANK(chip); bank++) {
--		unsigned bankmask = mask[bank/4] >> ((bank % 4) * 8);
-+		unsigned bankmask = mask[bank / sizeof(*mask)] >>
-+				    ((bank % sizeof(*mask)) * 8);
- 		if(bankmask) {
--			unsigned bankval  = bits[bank/4] >> ((bank % 4) * 8);
-+			unsigned bankval  = bits[bank / sizeof(*bits)] >>
-+					    ((bank % sizeof(*bits)) * 8);
- 			reg_val[bank] = (reg_val[bank] & ~bankmask) | bankval;
- 		}
- 	}
-diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
-index b2b7b78664b8..76ac906b4d78 100644
---- a/drivers/gpio/gpio-pxa.c
-+++ b/drivers/gpio/gpio-pxa.c
-@@ -283,8 +283,8 @@ static int pxa_gpio_direction_output(struct gpio_chip *chip,
- 	writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));
- 
- 	ret = pinctrl_gpio_direction_output(chip->base + offset);
--	if (!ret)
--		return 0;
-+	if (ret)
-+		return ret;
- 
- 	spin_lock_irqsave(&gpio_lock, flags);
- 
-diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
-index 540cbc88c7a2..cc4d9bd0839e 100644
---- a/drivers/gpio/gpiolib-acpi.c
-+++ b/drivers/gpio/gpiolib-acpi.c
-@@ -977,7 +977,7 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
- 		lookup = kmalloc(sizeof(*lookup), GFP_KERNEL);
- 		if (lookup) {
- 			lookup->adev = adev;
--			lookup->con_id = con_id;
-+			lookup->con_id = kstrdup(con_id, GFP_KERNEL);
- 			list_add_tail(&lookup->node, &acpi_crs_lookup_list);
- 		}
- 	}
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
-index 5e7770f9a415..ff299752d5e6 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
-@@ -1619,6 +1619,7 @@ struct amdgpu_uvd {
- 	struct amdgpu_bo	*vcpu_bo;
- 	void			*cpu_addr;
- 	uint64_t		gpu_addr;
-+	unsigned		fw_version;
- 	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
- 	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
- 	struct delayed_work	idle_work;
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
-index 7a4b101e10c6..75cb5b9b88f8 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
-@@ -816,10 +816,13 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
- 	struct drm_device *ddev = adev->ddev;
- 	struct drm_crtc *crtc;
- 	uint32_t line_time_us, vblank_lines;
-+	struct cgs_mode_info *mode_info;
- 
- 	if (info == NULL)
- 		return -EINVAL;
- 
-+	mode_info = info->mode_info;
-+
- 	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- 		list_for_each_entry(crtc,
- 				&ddev->mode_config.crtc_list, head) {
-@@ -828,7 +831,7 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
- 				info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
- 				info->display_count++;
- 			}
--			if (info->mode_info != NULL &&
-+			if (mode_info != NULL &&
- 				crtc->enabled && amdgpu_crtc->enabled &&
- 				amdgpu_crtc->hw_mode.clock) {
- 				line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
-@@ -836,10 +839,10 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
- 				vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
- 							amdgpu_crtc->hw_mode.crtc_vdisplay +
- 							(amdgpu_crtc->v_border * 2);
--				info->mode_info->vblank_time_us = vblank_lines * line_time_us;
--				info->mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
--				info->mode_info->ref_clock = adev->clock.spll.reference_freq;
--				info->mode_info++;
-+				mode_info->vblank_time_us = vblank_lines * line_time_us;
-+				mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
-+				mode_info->ref_clock = adev->clock.spll.reference_freq;
-+				mode_info = NULL;
- 			}
- 		}
- 	}
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
-index e23843f4d877..4488e82f87b0 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
-@@ -303,7 +303,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
- 			fw_info.feature = adev->vce.fb_version;
- 			break;
- 		case AMDGPU_INFO_FW_UVD:
--			fw_info.ver = 0;
-+			fw_info.ver = adev->uvd.fw_version;
- 			fw_info.feature = 0;
- 			break;
- 		case AMDGPU_INFO_FW_GMC:
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
-index fdc1be8550da..3b2d75d96ea0 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
-@@ -53,7 +53,7 @@ struct amdgpu_hpd;
- 
- #define AMDGPU_MAX_HPD_PINS 6
- #define AMDGPU_MAX_CRTCS 6
--#define AMDGPU_MAX_AFMT_BLOCKS 7
-+#define AMDGPU_MAX_AFMT_BLOCKS 9
- 
- enum amdgpu_rmx_type {
- 	RMX_OFF,
-@@ -309,8 +309,8 @@ struct amdgpu_mode_info {
- 	struct atom_context *atom_context;
- 	struct card_info *atom_card_info;
- 	bool mode_config_initialized;
--	struct amdgpu_crtc *crtcs[6];
--	struct amdgpu_afmt *afmt[7];
-+	struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
-+	struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
- 	/* DVI-I properties */
- 	struct drm_property *coherent_mode_property;
- 	/* DAC enable load detect */
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
-index b8fbbd7699e4..73628c7599e7 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
-@@ -540,6 +540,7 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
- 	if (!metadata_size) {
- 		if (bo->metadata_size) {
- 			kfree(bo->metadata);
-+			bo->metadata = NULL;
- 			bo->metadata_size = 0;
- 		}
- 		return 0;
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
-index 53f987aeeacf..3b35ad83867c 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
-@@ -156,6 +156,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
- 	DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
- 		version_major, version_minor, family_id);
- 
-+	adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
-+				(family_id << 8));
-+
- 	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
- 		 +  AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
- 	r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
-@@ -273,6 +276,8 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
- 	memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
- 		(adev->uvd.fw->size) - offset);
- 
-+	cancel_delayed_work_sync(&adev->uvd.idle_work);
-+
- 	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
- 	size -= le32_to_cpu(hdr->ucode_size_bytes);
- 	ptr = adev->uvd.cpu_addr;
-diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
-index a745eeeb5d82..bb0da76051a1 100644
---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
-+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
-@@ -220,6 +220,7 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
- 	if (i == AMDGPU_MAX_VCE_HANDLES)
- 		return 0;
- 
-+	cancel_delayed_work_sync(&adev->vce.idle_work);
- 	/* TODO: suspending running encoding sessions isn't supported */
- 	return -EINVAL;
- }
-diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
-index bf731e9f643e..7f85c2c1d681 100644
---- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
-+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
-@@ -276,8 +276,8 @@ static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector
- 			}
- 		}
- 	} else {
--		for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
--			for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
-+		for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
-+			for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
- 				max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
- 				if (max_pix_clock >= pix_clock) {
- 					*dp_lanes = lane_num;
-diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
-index 1e0bba29e167..1cd6de575305 100644
---- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
-+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
-@@ -298,6 +298,10 @@ bool amdgpu_atombios_encoder_mode_fixup(struct drm_encoder *encoder,
- 	    && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
- 		adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
- 
-+	/* vertical FP must be at least 1 */
-+	if (mode->crtc_vsync_start == mode->crtc_vdisplay)
-+		adjusted_mode->crtc_vsync_start++;
-+
- 	/* get the native mode for scaling */
- 	if (amdgpu_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
- 		amdgpu_panel_mode_fixup(encoder, adjusted_mode);
-diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
-index 06602df707f8..9b1c43005c80 100644
---- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
-+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
-@@ -3628,7 +3628,7 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
- 					unsigned vm_id, uint64_t pd_addr)
- {
- 	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
--	uint32_t seq = ring->fence_drv.sync_seq;
-+	uint32_t seq = ring->fence_drv.sync_seq[ring->idx];
- 	uint64_t addr = ring->fence_drv.gpu_addr;
- 
- 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
-diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
-index b8060795b27b..53964b14ff48 100644
---- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
-+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
-@@ -902,14 +902,6 @@ static int gmc_v7_0_early_init(void *handle)
- 	gmc_v7_0_set_gart_funcs(adev);
- 	gmc_v7_0_set_irq_funcs(adev);
- 
--	if (adev->flags & AMD_IS_APU) {
--		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
--	} else {
--		u32 tmp = RREG32(mmMC_SEQ_MISC0);
--		tmp &= MC_SEQ_MISC0__MT__MASK;
--		adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
--	}
--
- 	return 0;
- }
- 
-@@ -930,6 +922,14 @@ static int gmc_v7_0_sw_init(void *handle)
- 	if (r)
- 		return r;
- 
-+	if (adev->flags & AMD_IS_APU) {
-+		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
-+	} else {
-+		u32 tmp = RREG32(mmMC_SEQ_MISC0);
-+		tmp &= MC_SEQ_MISC0__MT__MASK;
-+		adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
-+	}
-+
- 	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
- 	if (r)
- 		return r;
-diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
-index 3efd45546241..e59251f4a85d 100644
---- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
-+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
-@@ -856,14 +856,6 @@ static int gmc_v8_0_early_init(void *handle)
- 	gmc_v8_0_set_gart_funcs(adev);
- 	gmc_v8_0_set_irq_funcs(adev);
- 
--	if (adev->flags & AMD_IS_APU) {
--		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
--	} else {
--		u32 tmp = RREG32(mmMC_SEQ_MISC0);
--		tmp &= MC_SEQ_MISC0__MT__MASK;
--		adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
--	}
--
- 	return 0;
- }
- 
-@@ -874,6 +866,8 @@ static int gmc_v8_0_late_init(void *handle)
- 	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
- }
- 
-+#define mmMC_SEQ_MISC0_FIJI 0xA71
-+
- static int gmc_v8_0_sw_init(void *handle)
- {
- 	int r;
-@@ -884,6 +878,19 @@ static int gmc_v8_0_sw_init(void *handle)
- 	if (r)
- 		return r;
- 
-+	if (adev->flags & AMD_IS_APU) {
-+		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
-+	} else {
-+		u32 tmp;
-+
-+		if (adev->asic_type == CHIP_FIJI)
-+			tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
-+		else
-+			tmp = RREG32(mmMC_SEQ_MISC0);
-+		tmp &= MC_SEQ_MISC0__MT__MASK;
-+		adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
-+	}
-+
- 	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
- 	if (r)
- 		return r;
-diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
-index 2cf50180cc51..b1c7a9b3631b 100644
---- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
-+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
-@@ -32,8 +32,8 @@
- #include "oss/oss_2_4_d.h"
- #include "oss/oss_2_4_sh_mask.h"
- 
--#include "gmc/gmc_8_1_d.h"
--#include "gmc/gmc_8_1_sh_mask.h"
-+#include "gmc/gmc_7_1_d.h"
-+#include "gmc/gmc_7_1_sh_mask.h"
- 
- #include "gca/gfx_8_0_d.h"
- #include "gca/gfx_8_0_enum.h"
-diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
-index c34c393e9aea..d5e19b5fbbfb 100644
---- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
-+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
-@@ -513,7 +513,7 @@ static int dbgdev_wave_control_set_registers(
- 				union SQ_CMD_BITS *in_reg_sq_cmd,
- 				union GRBM_GFX_INDEX_BITS *in_reg_gfx_index)
- {
--	int status;
-+	int status = 0;
- 	union SQ_CMD_BITS reg_sq_cmd;
- 	union GRBM_GFX_INDEX_BITS reg_gfx_index;
- 	struct HsaDbgWaveMsgAMDGen2 *pMsg;
-diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
-index cf01177ca3b5..2ea012e88991 100644
---- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
-+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
-@@ -241,6 +241,11 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
- 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- 					PHM_PlatformCaps_DynamicUVDState);
- 
-+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-+			PHM_PlatformCaps_UVDDPM);
-+	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-+			PHM_PlatformCaps_VCEDPM);
-+
- 	cz_hwmgr->cc6_settings.cpu_cc6_disable = false;
- 	cz_hwmgr->cc6_settings.cpu_pstate_disable = false;
- 	cz_hwmgr->cc6_settings.nb_pstate_switch_disable = false;
-diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
-index 9535c5b60387..7e5a97204051 100644
---- a/drivers/gpu/drm/drm_dp_helper.c
-+++ b/drivers/gpu/drm/drm_dp_helper.c
-@@ -178,7 +178,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
- {
- 	struct drm_dp_aux_msg msg;
- 	unsigned int retry;
--	int err;
-+	int err = 0;
- 
- 	memset(&msg, 0, sizeof(msg));
- 	msg.address = offset;
-@@ -186,6 +186,8 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
- 	msg.buffer = buffer;
- 	msg.size = size;
- 
-+	mutex_lock(&aux->hw_mutex);
-+
- 	/*
- 	 * The specification doesn't give any recommendation on how often to
- 	 * retry native transactions. We used to retry 7 times like for
-@@ -194,25 +196,24 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
- 	 */
- 	for (retry = 0; retry < 32; retry++) {
- 
--		mutex_lock(&aux->hw_mutex);
- 		err = aux->transfer(aux, &msg);
--		mutex_unlock(&aux->hw_mutex);
- 		if (err < 0) {
- 			if (err == -EBUSY)
- 				continue;
- 
--			return err;
-+			goto unlock;
- 		}
- 
- 
- 		switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) {
- 		case DP_AUX_NATIVE_REPLY_ACK:
- 			if (err < size)
--				return -EPROTO;
--			return err;
-+				err = -EPROTO;
-+			goto unlock;
- 
- 		case DP_AUX_NATIVE_REPLY_NACK:
--			return -EIO;
-+			err = -EIO;
-+			goto unlock;
- 
- 		case DP_AUX_NATIVE_REPLY_DEFER:
- 			usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
-@@ -221,7 +222,11 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
- 	}
- 
- 	DRM_DEBUG_KMS("too many retries, giving up\n");
--	return -EIO;
-+	err = -EIO;
-+
-+unlock:
-+	mutex_unlock(&aux->hw_mutex);
-+	return err;
- }
- 
- /**
-@@ -543,9 +548,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
- 	int max_retries = max(7, drm_dp_i2c_retry_count(msg, dp_aux_i2c_speed_khz));
- 
- 	for (retry = 0, defer_i2c = 0; retry < (max_retries + defer_i2c); retry++) {
--		mutex_lock(&aux->hw_mutex);
- 		ret = aux->transfer(aux, msg);
--		mutex_unlock(&aux->hw_mutex);
- 		if (ret < 0) {
- 			if (ret == -EBUSY)
- 				continue;
-@@ -684,6 +687,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
- 
- 	memset(&msg, 0, sizeof(msg));
- 
-+	mutex_lock(&aux->hw_mutex);
-+
- 	for (i = 0; i < num; i++) {
- 		msg.address = msgs[i].addr;
- 		drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
-@@ -738,6 +743,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
- 	msg.size = 0;
- 	(void)drm_dp_i2c_do_msg(aux, &msg);
- 
-+	mutex_unlock(&aux->hw_mutex);
-+
- 	return err;
- }
- 
-diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
-index 27fbd79d0daf..71ea0521ea96 100644
---- a/drivers/gpu/drm/drm_dp_mst_topology.c
-+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
-@@ -1672,13 +1672,19 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
- 	u8 sinks[DRM_DP_MAX_SDP_STREAMS];
- 	int i;
- 
-+	port = drm_dp_get_validated_port_ref(mgr, port);
-+	if (!port)
-+		return -EINVAL;
-+
- 	port_num = port->port_num;
- 	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
- 	if (!mstb) {
- 		mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
- 
--		if (!mstb)
-+		if (!mstb) {
-+			drm_dp_put_port(port);
- 			return -EINVAL;
-+		}
- 	}
- 
- 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
-@@ -1707,6 +1713,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
- 	kfree(txmsg);
- fail_put:
- 	drm_dp_put_mst_branch_device(mstb);
-+	drm_dp_put_port(port);
- 	return ret;
- }
- 
-@@ -1789,6 +1796,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
- 		req_payload.start_slot = cur_slots;
- 		if (mgr->proposed_vcpis[i]) {
- 			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
-+			port = drm_dp_get_validated_port_ref(mgr, port);
-+			if (!port) {
-+				mutex_unlock(&mgr->payload_lock);
-+				return -EINVAL;
-+			}
- 			req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
- 			req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
- 		} else {
-@@ -1816,6 +1828,9 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
- 			mgr->payloads[i].payload_state = req_payload.payload_state;
- 		}
- 		cur_slots += req_payload.num_slots;
-+
-+		if (port)
-+			drm_dp_put_port(port);
- 	}
- 
- 	for (i = 0; i < mgr->max_payloads; i++) {
-@@ -2121,6 +2136,8 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
- 
- 	if (mgr->mst_primary) {
- 		int sret;
-+		u8 guid[16];
-+
- 		sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
- 		if (sret != DP_RECEIVER_CAP_SIZE) {
- 			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
-@@ -2135,6 +2152,16 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
- 			ret = -1;
- 			goto out_unlock;
- 		}
-+
-+		/* Some hubs forget their guids after they resume */
-+		sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
-+		if (sret != 16) {
-+			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
-+			ret = -1;
-+			goto out_unlock;
-+		}
-+		drm_dp_check_mstb_guid(mgr->mst_primary, guid);
-+
- 		ret = 0;
- 	} else
- 		ret = -1;
-diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
-index cf39ed3133d6..4d0bc2a8843c 100644
---- a/drivers/gpu/drm/i915/i915_debugfs.c
-+++ b/drivers/gpu/drm/i915/i915_debugfs.c
-@@ -2860,20 +2860,6 @@ static void intel_dp_info(struct seq_file *m,
- 		intel_panel_info(m, &intel_connector->panel);
- }
- 
--static void intel_dp_mst_info(struct seq_file *m,
--			  struct intel_connector *intel_connector)
--{
--	struct intel_encoder *intel_encoder = intel_connector->encoder;
--	struct intel_dp_mst_encoder *intel_mst =
--		enc_to_mst(&intel_encoder->base);
--	struct intel_digital_port *intel_dig_port = intel_mst->primary;
--	struct intel_dp *intel_dp = &intel_dig_port->dp;
--	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
--					intel_connector->port);
--
--	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
--}
--
- static void intel_hdmi_info(struct seq_file *m,
- 			    struct intel_connector *intel_connector)
- {
-@@ -2917,8 +2903,6 @@ static void intel_connector_info(struct seq_file *m,
- 			intel_hdmi_info(m, intel_connector);
- 		else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
- 			intel_lvds_info(m, intel_connector);
--		else if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
--			intel_dp_mst_info(m, intel_connector);
- 	}
- 
- 	seq_printf(m, "\tmodes:\n");
-diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
-index f357058c74d9..2e832fa07e09 100644
---- a/drivers/gpu/drm/i915/i915_drv.c
-+++ b/drivers/gpu/drm/i915/i915_drv.c
-@@ -797,7 +797,7 @@ static int i915_drm_resume(struct drm_device *dev)
- static int i915_drm_resume_early(struct drm_device *dev)
- {
- 	struct drm_i915_private *dev_priv = dev->dev_private;
--	int ret = 0;
-+	int ret;
- 
- 	/*
- 	 * We have a resume ordering issue with the snd-hda driver also
-@@ -808,6 +808,36 @@ static int i915_drm_resume_early(struct drm_device *dev)
- 	 * FIXME: This should be solved with a special hdmi sink device or
- 	 * similar so that power domains can be employed.
- 	 */
-+
-+	/*
-+	 * Note that we need to set the power state explicitly, since we
-+	 * powered off the device during freeze and the PCI core won't power
-+	 * it back up for us during thaw. Powering off the device during
-+	 * freeze is not a hard requirement though, and during the
-+	 * suspend/resume phases the PCI core makes sure we get here with the
-+	 * device powered on. So in case we change our freeze logic and keep
-+	 * the device powered we can also remove the following set power state
-+	 * call.
-+	 */
-+	ret = pci_set_power_state(dev->pdev, PCI_D0);
-+	if (ret) {
-+		DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
-+		goto out;
-+	}
-+
-+	/*
-+	 * Note that pci_enable_device() first enables any parent bridge
-+	 * device and only then sets the power state for this device. The
-+	 * bridge enabling is a nop though, since bridge devices are resumed
-+	 * first. The order of enabling power and enabling the device is
-+	 * imposed by the PCI core as described above, so here we preserve the
-+	 * same order for the freeze/thaw phases.
-+	 *
-+	 * TODO: eventually we should remove pci_disable_device() /
-+	 * pci_enable_enable_device() from suspend/resume. Due to how they
-+	 * depend on the device enable refcount we can't anyway depend on them
-+	 * disabling/enabling the device.
-+	 */
- 	if (pci_enable_device(dev->pdev)) {
- 		ret = -EIO;
- 		goto out;
-diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
-index 4897728713f6..5672b57e65d5 100644
---- a/drivers/gpu/drm/i915/i915_reg.h
-+++ b/drivers/gpu/drm/i915/i915_reg.h
-@@ -2898,7 +2898,14 @@ enum skl_disp_power_wells {
- #define GEN6_RP_STATE_CAP	_MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998)
- #define BXT_RP_STATE_CAP        _MMIO(0x138170)
- 
--#define INTERVAL_1_28_US(us)	(((us) * 100) >> 7)
-+/*
-+ * Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS
-+ * 8300) freezing up around GPU hangs. Looks as if even
-+ * scheduling/timer interrupts start misbehaving if the RPS
-+ * EI/thresholds are "bad", leading to a very sluggish or even
-+ * frozen machine.
-+ */
-+#define INTERVAL_1_28_US(us)	roundup(((us) * 100) >> 7, 25)
- #define INTERVAL_1_33_US(us)	(((us) * 3)   >> 2)
- #define INTERVAL_0_833_US(us)	(((us) * 6) / 5)
- #define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
-@@ -7405,6 +7412,8 @@ enum skl_disp_power_wells {
- #define  TRANS_CLK_SEL_DISABLED		(0x0<<29)
- #define  TRANS_CLK_SEL_PORT(x)		(((x)+1)<<29)
- 
-+#define CDCLK_FREQ			_MMIO(0x46200)
-+
- #define _TRANSA_MSA_MISC		0x60410
- #define _TRANSB_MSA_MISC		0x61410
- #define _TRANSC_MSA_MISC		0x62410
-diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
-index 30f921421b0c..7d281b40064a 100644
---- a/drivers/gpu/drm/i915/intel_audio.c
-+++ b/drivers/gpu/drm/i915/intel_audio.c
-@@ -262,8 +262,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
- 	tmp |= AUD_CONFIG_N_PROG_ENABLE;
- 	tmp &= ~AUD_CONFIG_UPPER_N_MASK;
- 	tmp &= ~AUD_CONFIG_LOWER_N_MASK;
--	if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT) ||
--	    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DP_MST))
-+	if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
- 		tmp |= AUD_CONFIG_N_VALUE_INDEX;
- 	I915_WRITE(HSW_AUD_CFG(pipe), tmp);
- 
-@@ -476,8 +475,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
- 	tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
- 	tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
- 	tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
--	if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT) ||
--	    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DP_MST))
-+	if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
- 		tmp |= AUD_CONFIG_N_VALUE_INDEX;
- 	else
- 		tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
-@@ -515,8 +513,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
- 
- 	/* ELD Conn_Type */
- 	connector->eld[5] &= ~(3 << 2);
--	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
--	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DP_MST))
-+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
- 		connector->eld[5] |= (1 << 2);
- 
- 	connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
-diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
-index a7b4a524fadd..dbacc2901d47 100644
---- a/drivers/gpu/drm/i915/intel_crt.c
-+++ b/drivers/gpu/drm/i915/intel_crt.c
-@@ -255,8 +255,14 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
- 		pipe_config->has_pch_encoder = true;
- 
- 	/* LPT FDI RX only supports 8bpc. */
--	if (HAS_PCH_LPT(dev))
-+	if (HAS_PCH_LPT(dev)) {
-+		if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
-+			DRM_DEBUG_KMS("LPT only supports 24bpp\n");
-+			return false;
-+		}
-+
- 		pipe_config->pipe_bpp = 24;
-+	}
- 
- 	/* FDI must always be 2.7 GHz */
- 	if (HAS_DDI(dev)) {
-diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
-index 647d85e77c2f..597cfb5ca847 100644
---- a/drivers/gpu/drm/i915/intel_csr.c
-+++ b/drivers/gpu/drm/i915/intel_csr.c
-@@ -177,7 +177,8 @@ static const struct stepping_info kbl_stepping_info[] = {
- static const struct stepping_info skl_stepping_info[] = {
- 	{'A', '0'}, {'B', '0'}, {'C', '0'},
- 	{'D', '0'}, {'E', '0'}, {'F', '0'},
--	{'G', '0'}, {'H', '0'}, {'I', '0'}
-+	{'G', '0'}, {'H', '0'}, {'I', '0'},
-+	{'J', '0'}, {'K', '0'}
- };
- 
- static const struct stepping_info bxt_stepping_info[] = {
-diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
-index 084d5586585d..40511493914c 100644
---- a/drivers/gpu/drm/i915/intel_ddi.c
-+++ b/drivers/gpu/drm/i915/intel_ddi.c
-@@ -464,9 +464,17 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
- 	} else if (IS_BROADWELL(dev)) {
- 		ddi_translations_fdi = bdw_ddi_translations_fdi;
- 		ddi_translations_dp = bdw_ddi_translations_dp;
--		ddi_translations_edp = bdw_ddi_translations_edp;
-+
-+		if (dev_priv->edp_low_vswing) {
-+			ddi_translations_edp = bdw_ddi_translations_edp;
-+			n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
-+		} else {
-+			ddi_translations_edp = bdw_ddi_translations_dp;
-+			n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
-+		}
-+
- 		ddi_translations_hdmi = bdw_ddi_translations_hdmi;
--		n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
-+
- 		n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
- 		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
- 		hdmi_default_entry = 7;
-@@ -3157,23 +3165,6 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc)
- 	I915_WRITE(FDI_RX_CTL(PIPE_A), val);
- }
- 
--bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
--				 struct intel_crtc *intel_crtc)
--{
--	u32 temp;
--
--	if (intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
--		temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
--
--		intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
--
--		if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
--			return true;
--	}
--
--	return false;
--}
--
- void intel_ddi_get_config(struct intel_encoder *encoder,
- 			  struct intel_crtc_state *pipe_config)
- {
-@@ -3234,8 +3225,11 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
- 		break;
- 	}
- 
--	pipe_config->has_audio =
--		intel_ddi_is_audio_enabled(dev_priv, intel_crtc);
-+	if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
-+		temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
-+		if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
-+			pipe_config->has_audio = true;
-+	}
- 
- 	if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp &&
- 	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
-@@ -3260,12 +3254,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
- 	intel_ddi_clock_get(encoder, pipe_config);
- }
- 
--static void intel_ddi_destroy(struct drm_encoder *encoder)
--{
--	/* HDMI has nothing special to destroy, so we can go with this. */
--	intel_dp_encoder_destroy(encoder);
--}
--
- static bool intel_ddi_compute_config(struct intel_encoder *encoder,
- 				     struct intel_crtc_state *pipe_config)
- {
-@@ -3284,7 +3272,8 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
- }
- 
- static const struct drm_encoder_funcs intel_ddi_funcs = {
--	.destroy = intel_ddi_destroy,
-+	.reset = intel_dp_encoder_reset,
-+	.destroy = intel_dp_encoder_destroy,
- };
- 
- static struct intel_connector *
-@@ -3356,6 +3345,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
- 	intel_encoder->post_disable = intel_ddi_post_disable;
- 	intel_encoder->get_hw_state = intel_ddi_get_hw_state;
- 	intel_encoder->get_config = intel_ddi_get_config;
-+	intel_encoder->suspend = intel_dp_encoder_suspend;
- 
- 	intel_dig_port->port = port;
- 	intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
-diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
-index 46947fffd599..39b00b9daf2d 100644
---- a/drivers/gpu/drm/i915/intel_display.c
-+++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -4455,7 +4455,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
- 		      intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
- 
- 	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
--		&state->scaler_state.scaler_id, DRM_ROTATE_0,
-+		&state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
- 		state->pipe_src_w, state->pipe_src_h,
- 		adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
- }
-@@ -9793,6 +9793,8 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
- 	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
- 	mutex_unlock(&dev_priv->rps.hw_lock);
- 
-+	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
-+
- 	intel_update_cdclk(dev);
- 
- 	WARN(cdclk != dev_priv->cdclk_freq,
-@@ -13429,6 +13431,9 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
- 	}
- 
- 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-+		if (state->legacy_cursor_update)
-+			continue;
-+
- 		ret = intel_crtc_wait_for_pending_flips(crtc);
- 		if (ret)
- 			return ret;
-diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
-index cdc2c15873dc..3cd4996c791c 100644
---- a/drivers/gpu/drm/i915/intel_dp.c
-+++ b/drivers/gpu/drm/i915/intel_dp.c
-@@ -4905,7 +4905,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
- 	kfree(intel_dig_port);
- }
- 
--static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
-+void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
- {
- 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
- 
-@@ -4947,7 +4947,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
- 	edp_panel_vdd_schedule_off(intel_dp);
- }
- 
--static void intel_dp_encoder_reset(struct drm_encoder *encoder)
-+void intel_dp_encoder_reset(struct drm_encoder *encoder)
- {
- 	struct intel_dp *intel_dp;
- 
-diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
-index fa0dabf578dc..e8e6984b8053 100644
---- a/drivers/gpu/drm/i915/intel_dp_mst.c
-+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
-@@ -78,8 +78,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
- 		return false;
- 	}
- 
--	if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, found->port))
--		pipe_config->has_audio = true;
- 	mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
- 
- 	pipe_config->pbn = mst_pbn;
-@@ -104,11 +102,6 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder)
- 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
- 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
- 	struct intel_dp *intel_dp = &intel_dig_port->dp;
--	struct drm_device *dev = encoder->base.dev;
--	struct drm_i915_private *dev_priv = dev->dev_private;
--	struct drm_crtc *crtc = encoder->base.crtc;
--	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
--
- 	int ret;
- 
- 	DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
-@@ -119,10 +112,6 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder)
- 	if (ret) {
- 		DRM_ERROR("failed to update payload %d\n", ret);
- 	}
--	if (intel_crtc->config->has_audio) {
--		intel_audio_codec_disable(encoder);
--		intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
--	}
- }
- 
- static void intel_mst_post_disable_dp(struct intel_encoder *encoder)
-@@ -184,7 +173,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
- 	intel_mst->port = found->port;
- 
- 	if (intel_dp->active_mst_links == 0) {
--		intel_ddi_clk_select(encoder, intel_crtc->config);
-+		intel_ddi_clk_select(&intel_dig_port->base, intel_crtc->config);
- 
- 		intel_dp_set_link_params(intel_dp, intel_crtc->config);
- 
-@@ -219,7 +208,6 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder)
- 	struct intel_dp *intel_dp = &intel_dig_port->dp;
- 	struct drm_device *dev = intel_dig_port->base.base.dev;
- 	struct drm_i915_private *dev_priv = dev->dev_private;
--	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- 	enum port port = intel_dig_port->port;
- 	int ret;
- 
-@@ -232,13 +220,6 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder)
- 	ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
- 
- 	ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr);
--
--	if (crtc->config->has_audio) {
--		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
--				 pipe_name(crtc->pipe));
--		intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
--		intel_audio_codec_enable(encoder);
--	}
- }
- 
- static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
-@@ -264,9 +245,6 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
- 
- 	pipe_config->has_dp_encoder = true;
- 
--	pipe_config->has_audio =
--		intel_ddi_is_audio_enabled(dev_priv, crtc);
--
- 	temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
- 	if (temp & TRANS_DDI_PHSYNC)
- 		flags |= DRM_MODE_FLAG_PHSYNC;
-@@ -499,6 +477,8 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
- 	struct intel_connector *intel_connector = to_intel_connector(connector);
- 	struct drm_device *dev = connector->dev;
- 
-+	intel_connector->unregister(intel_connector);
-+
- 	/* need to nuke the connector */
- 	drm_modeset_lock_all(dev);
- 	if (connector->state->crtc) {
-@@ -512,11 +492,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
- 
- 		WARN(ret, "Disabling mst crtc failed with %i\n", ret);
- 	}
--	drm_modeset_unlock_all(dev);
--
--	intel_connector->unregister(intel_connector);
- 
--	drm_modeset_lock_all(dev);
- 	intel_connector_remove_from_fbdev(intel_connector);
- 	drm_connector_cleanup(connector);
- 	drm_modeset_unlock_all(dev);
-diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
-index df7f3cb66056..1ae61f488987 100644
---- a/drivers/gpu/drm/i915/intel_drv.h
-+++ b/drivers/gpu/drm/i915/intel_drv.h
-@@ -1013,8 +1013,6 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
- void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
- bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
- void intel_ddi_fdi_disable(struct drm_crtc *crtc);
--bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
--				 struct intel_crtc *intel_crtc);
- void intel_ddi_get_config(struct intel_encoder *encoder,
- 			  struct intel_crtc_state *pipe_config);
- struct intel_encoder *
-@@ -1234,6 +1232,8 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
- void intel_dp_start_link_train(struct intel_dp *intel_dp);
- void intel_dp_stop_link_train(struct intel_dp *intel_dp);
- void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
-+void intel_dp_encoder_reset(struct drm_encoder *encoder);
-+void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
- void intel_dp_encoder_destroy(struct drm_encoder *encoder);
- int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
- bool intel_dp_compute_config(struct intel_encoder *encoder,
-diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
-index 616108c4bc3e..43fdae8ff3c3 100644
---- a/drivers/gpu/drm/i915/intel_hdmi.c
-+++ b/drivers/gpu/drm/i915/intel_hdmi.c
-@@ -1407,8 +1407,16 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
- 				hdmi_to_dig_port(intel_hdmi));
- 	}
- 
--	if (!live_status)
--		DRM_DEBUG_KMS("Live status not up!");
-+	if (!live_status) {
-+		DRM_DEBUG_KMS("HDMI live status down\n");
-+		/*
-+		 * Live status register is not reliable on all intel platforms.
-+		 * So consider live_status only for certain platforms, for
-+		 * others, read EDID to determine presence of sink.
-+		 */
-+		if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv))
-+			live_status = true;
-+	}
- 
- 	intel_hdmi_unset_edid(connector);
- 
-diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
-index f1fa756c5d5d..cfd5f9fff2f4 100644
---- a/drivers/gpu/drm/i915/intel_lrc.c
-+++ b/drivers/gpu/drm/i915/intel_lrc.c
-@@ -781,11 +781,11 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
- 		if (unlikely(total_bytes > remain_usable)) {
- 			/*
- 			 * The base request will fit but the reserved space
--			 * falls off the end. So only need to to wait for the
--			 * reserved size after flushing out the remainder.
-+			 * falls off the end. So don't need an immediate wrap
-+			 * and only need to effectively wait for the reserved
-+			 * size space from the start of ringbuffer.
- 			 */
- 			wait_bytes = remain_actual + ringbuf->reserved_size;
--			need_wrap = true;
- 		} else if (total_bytes > ringbuf->space) {
- 			/* No wrapping required, just waiting. */
- 			wait_bytes = total_bytes;
-diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
-index b28c29f20e75..0eae3994e5e3 100644
---- a/drivers/gpu/drm/i915/intel_pm.c
-+++ b/drivers/gpu/drm/i915/intel_pm.c
-@@ -2281,6 +2281,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc *intel_crtc,
- 		return PTR_ERR(cstate);
- 
- 	pipe_wm = &cstate->wm.optimal.ilk;
-+	memset(pipe_wm, 0, sizeof(*pipe_wm));
- 
- 	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
- 		ps = drm_atomic_get_plane_state(state,
-@@ -3606,23 +3607,43 @@ static void skl_update_wm(struct drm_crtc *crtc)
- 	dev_priv->wm.skl_hw = *results;
- }
- 
-+static void ilk_compute_wm_config(struct drm_device *dev,
-+				  struct intel_wm_config *config)
-+{
-+	struct intel_crtc *crtc;
-+
-+	/* Compute the currently _active_ config */
-+	for_each_intel_crtc(dev, crtc) {
-+		const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
-+
-+		if (!wm->pipe_enabled)
-+			continue;
-+
-+		config->sprites_enabled |= wm->sprites_enabled;
-+		config->sprites_scaled |= wm->sprites_scaled;
-+		config->num_pipes_active++;
-+	}
-+}
-+
- static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
- {
- 	struct drm_device *dev = dev_priv->dev;
- 	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
- 	struct ilk_wm_maximums max;
--	struct intel_wm_config *config = &dev_priv->wm.config;
-+	struct intel_wm_config config = {};
- 	struct ilk_wm_values results = {};
- 	enum intel_ddb_partitioning partitioning;
- 
--	ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_1_2, &max);
--	ilk_wm_merge(dev, config, &max, &lp_wm_1_2);
-+	ilk_compute_wm_config(dev, &config);
-+
-+	ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
-+	ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
- 
- 	/* 5/6 split only in single pipe config on IVB+ */
- 	if (INTEL_INFO(dev)->gen >= 7 &&
--	    config->num_pipes_active == 1 && config->sprites_enabled) {
--		ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_5_6, &max);
--		ilk_wm_merge(dev, config, &max, &lp_wm_5_6);
-+	    config.num_pipes_active == 1 && config.sprites_enabled) {
-+		ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
-+		ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
- 
- 		best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
- 	} else {
-@@ -6544,6 +6565,12 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
- 	misccpctl = I915_READ(GEN7_MISCCPCTL);
- 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
- 	I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
-+	/*
-+	 * Wait at least 100 clocks before re-enabling clock gating. See
-+	 * the definition of L3SQCREG1 in BSpec.
-+	 */
-+	POSTING_READ(GEN8_L3SQCREG1);
-+	udelay(1);
- 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
- 
- 	/*
-diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
-index 40c6aff57256..549afa7bc75f 100644
---- a/drivers/gpu/drm/i915/intel_ringbuffer.c
-+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
-@@ -951,7 +951,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
- 
- 	/* WaForceContextSaveRestoreNonCoherent:skl,bxt */
- 	tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
--	if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) ||
-+	if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
- 	    IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
- 		tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
- 	WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
-@@ -1044,7 +1044,8 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
- 		WA_SET_BIT_MASKED(HIZ_CHICKEN,
- 				  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
- 
--	if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) {
-+	/* This is tied to WaForceContextSaveRestoreNonCoherent */
-+	if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
- 		/*
- 		 *Use Force Non-Coherent whenever executing a 3D context. This
- 		 * is a workaround for a possible hang in the unlikely event
-@@ -1901,6 +1902,17 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
- 	return 0;
- }
- 
-+static void cleanup_phys_status_page(struct intel_engine_cs *ring)
-+{
-+	struct drm_i915_private *dev_priv = to_i915(ring->dev);
-+
-+	if (!dev_priv->status_page_dmah)
-+		return;
-+
-+	drm_pci_free(ring->dev, dev_priv->status_page_dmah);
-+	ring->status_page.page_addr = NULL;
-+}
-+
- static void cleanup_status_page(struct intel_engine_cs *ring)
- {
- 	struct drm_i915_gem_object *obj;
-@@ -1917,9 +1929,9 @@ static void cleanup_status_page(struct intel_engine_cs *ring)
- 
- static int init_status_page(struct intel_engine_cs *ring)
- {
--	struct drm_i915_gem_object *obj;
-+	struct drm_i915_gem_object *obj = ring->status_page.obj;
- 
--	if ((obj = ring->status_page.obj) == NULL) {
-+	if (obj == NULL) {
- 		unsigned flags;
- 		int ret;
- 
-@@ -2019,10 +2031,12 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
- {
- 	struct drm_i915_private *dev_priv = to_i915(dev);
- 	struct drm_i915_gem_object *obj = ringbuf->obj;
-+	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
-+	unsigned flags = PIN_OFFSET_BIAS | 4096;
- 	int ret;
- 
- 	if (HAS_LLC(dev_priv) && !obj->stolen) {
--		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0);
-+		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
- 		if (ret)
- 			return ret;
- 
-@@ -2038,7 +2052,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
- 			return -ENOMEM;
- 		}
- 	} else {
--		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
-+		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
-+					    flags | PIN_MAPPABLE);
- 		if (ret)
- 			return ret;
- 
-@@ -2164,7 +2179,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
- 		if (ret)
- 			goto error;
- 	} else {
--		BUG_ON(ring->id != RCS);
-+		WARN_ON(ring->id != RCS);
- 		ret = init_phys_status_page(ring);
- 		if (ret)
- 			goto error;
-@@ -2210,7 +2225,12 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
- 	if (ring->cleanup)
- 		ring->cleanup(ring);
- 
--	cleanup_status_page(ring);
-+	if (I915_NEED_GFX_HWS(ring->dev)) {
-+		cleanup_status_page(ring);
-+	} else {
-+		WARN_ON(ring->id != RCS);
-+		cleanup_phys_status_page(ring);
-+	}
- 
- 	i915_cmd_parser_fini_ring(ring);
- 	i915_gem_batch_pool_fini(&ring->batch_pool);
-@@ -2373,11 +2393,11 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
- 		if (unlikely(total_bytes > remain_usable)) {
- 			/*
- 			 * The base request will fit but the reserved space
--			 * falls off the end. So only need to to wait for the
--			 * reserved size after flushing out the remainder.
-+			 * falls off the end. So don't need an immediate wrap
-+			 * and only need to effectively wait for the reserved
-+			 * size space from the start of ringbuffer.
- 			 */
- 			wait_bytes = remain_actual + ringbuf->reserved_size;
--			need_wrap = true;
- 		} else if (total_bytes > ringbuf->space) {
- 			/* No wrapping required, just waiting. */
- 			wait_bytes = total_bytes;
-diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
-index 277e60ae0e47..08961f7d151c 100644
---- a/drivers/gpu/drm/i915/intel_uncore.c
-+++ b/drivers/gpu/drm/i915/intel_uncore.c
-@@ -1155,7 +1155,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
- 	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
- 		dev_priv->uncore.funcs.force_wake_get =
- 			fw_domains_get_with_thread_status;
--		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
-+		if (IS_HASWELL(dev))
-+			dev_priv->uncore.funcs.force_wake_put =
-+				fw_domains_put_with_fifo;
-+		else
-+			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
- 		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
- 			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
- 	} else if (IS_IVYBRIDGE(dev)) {
-diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
-index 3216e157a8a0..89da47234016 100644
---- a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
-+++ b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
-@@ -131,7 +131,7 @@ nvkm_ramht_del(struct nvkm_ramht **pramht)
- 	struct nvkm_ramht *ramht = *pramht;
- 	if (ramht) {
- 		nvkm_gpuobj_del(&ramht->gpuobj);
--		kfree(*pramht);
-+		vfree(*pramht);
- 		*pramht = NULL;
- 	}
- }
-@@ -143,8 +143,8 @@ nvkm_ramht_new(struct nvkm_device *device, u32 size, u32 align,
- 	struct nvkm_ramht *ramht;
- 	int ret, i;
- 
--	if (!(ramht = *pramht = kzalloc(sizeof(*ramht) + (size >> 3) *
--					sizeof(*ramht->data), GFP_KERNEL)))
-+	if (!(ramht = *pramht = vzalloc(sizeof(*ramht) +
-+					(size >> 3) * sizeof(*ramht->data))))
- 		return -ENOMEM;
- 
- 	ramht->device = device;
-diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
-index 1f81069edc58..332b5fe687fe 100644
---- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
-+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
-@@ -1807,6 +1807,8 @@ gf100_gr_init(struct gf100_gr *gr)
- 
- 	gf100_gr_mmio(gr, gr->func->mmio);
- 
-+	nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
-+
- 	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
- 	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
- 		do {
-diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
-index 86276519b2ef..47e52647c9e5 100644
---- a/drivers/gpu/drm/qxl/qxl_display.c
-+++ b/drivers/gpu/drm/qxl/qxl_display.c
-@@ -375,10 +375,15 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
- 
- 	qxl_bo_kunmap(user_bo);
- 
-+	qcrtc->cur_x += qcrtc->hot_spot_x - hot_x;
-+	qcrtc->cur_y += qcrtc->hot_spot_y - hot_y;
-+	qcrtc->hot_spot_x = hot_x;
-+	qcrtc->hot_spot_y = hot_y;
-+
- 	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
- 	cmd->type = QXL_CURSOR_SET;
--	cmd->u.set.position.x = qcrtc->cur_x;
--	cmd->u.set.position.y = qcrtc->cur_y;
-+	cmd->u.set.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
-+	cmd->u.set.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
- 
- 	cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
- 
-@@ -441,8 +446,8 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
- 
- 	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
- 	cmd->type = QXL_CURSOR_MOVE;
--	cmd->u.position.x = qcrtc->cur_x;
--	cmd->u.position.y = qcrtc->cur_y;
-+	cmd->u.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
-+	cmd->u.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
- 	qxl_release_unmap(qdev, release, &cmd->release_info);
- 
- 	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
-diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
-index 6e6b9b1519b8..3f3897eb458c 100644
---- a/drivers/gpu/drm/qxl/qxl_drv.h
-+++ b/drivers/gpu/drm/qxl/qxl_drv.h
-@@ -135,6 +135,8 @@ struct qxl_crtc {
- 	int index;
- 	int cur_x;
- 	int cur_y;
-+	int hot_spot_x;
-+	int hot_spot_y;
- };
- 
- struct qxl_output {
-diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
-index 801dd60ac192..7f52142d37d5 100644
---- a/drivers/gpu/drm/radeon/atombios_crtc.c
-+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
-@@ -1740,6 +1740,7 @@ static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
- static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
- {
- 	struct drm_device *dev = crtc->dev;
-+	struct radeon_device *rdev = dev->dev_private;
- 	struct drm_crtc *test_crtc;
- 	struct radeon_crtc *test_radeon_crtc;
- 
-@@ -1749,6 +1750,10 @@ static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
- 		test_radeon_crtc = to_radeon_crtc(test_crtc);
- 		if (test_radeon_crtc->encoder &&
- 		    ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
-+			/* PPLL2 is exclusive to UNIPHYA on DCE61 */
-+			if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
-+			    test_radeon_crtc->pll_id == ATOM_PPLL2)
-+				continue;
- 			/* for DP use the same PLL for all */
- 			if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
- 				return test_radeon_crtc->pll_id;
-@@ -1770,6 +1775,7 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
- {
- 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- 	struct drm_device *dev = crtc->dev;
-+	struct radeon_device *rdev = dev->dev_private;
- 	struct drm_crtc *test_crtc;
- 	struct radeon_crtc *test_radeon_crtc;
- 	u32 adjusted_clock, test_adjusted_clock;
-@@ -1785,6 +1791,10 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
- 		test_radeon_crtc = to_radeon_crtc(test_crtc);
- 		if (test_radeon_crtc->encoder &&
- 		    !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
-+			/* PPLL2 is exclusive to UNIPHYA on DCE61 */
-+			if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
-+			    test_radeon_crtc->pll_id == ATOM_PPLL2)
-+				continue;
- 			/* check if we are already driving this connector with another crtc */
- 			if (test_radeon_crtc->connector == radeon_crtc->connector) {
- 				/* if we are, return that pll */
-diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
-index 6af832545bc5..b5760851195c 100644
---- a/drivers/gpu/drm/radeon/atombios_dp.c
-+++ b/drivers/gpu/drm/radeon/atombios_dp.c
-@@ -326,8 +326,8 @@ int radeon_dp_get_dp_link_config(struct drm_connector *connector,
- 			}
- 		}
- 	} else {
--		for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
--			for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
-+		for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
-+			for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
- 				max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
- 				if (max_pix_clock >= pix_clock) {
- 					*dp_lanes = lane_num;
-diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
-index 01b20e14a247..9de6503b10d8 100644
---- a/drivers/gpu/drm/radeon/atombios_encoders.c
-+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
-@@ -310,6 +310,10 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
- 	    && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
- 		adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
- 
-+	/* vertical FP must be at least 1 */
-+	if (mode->crtc_vsync_start == mode->crtc_vdisplay)
-+		adjusted_mode->crtc_vsync_start++;
-+
- 	/* get the native mode for scaling */
- 	if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
- 		radeon_panel_mode_fixup(encoder, adjusted_mode);
-@@ -892,8 +896,6 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
- 			else
- 				args.v1.ucLaneNum = 4;
- 
--			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
--				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
- 			switch (radeon_encoder->encoder_id) {
- 			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- 				args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
-@@ -910,6 +912,10 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
- 				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
- 			else
- 				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
-+
-+			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
-+				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
-+
- 			break;
- 		case 2:
- 		case 3:
-diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
-index 2ad462896896..32491355a1d4 100644
---- a/drivers/gpu/drm/radeon/evergreen.c
-+++ b/drivers/gpu/drm/radeon/evergreen.c
-@@ -2608,10 +2608,152 @@ static void evergreen_agp_enable(struct radeon_device *rdev)
- 	WREG32(VM_CONTEXT1_CNTL, 0);
- }
- 
-+static const unsigned ni_dig_offsets[] =
-+{
-+	NI_DIG0_REGISTER_OFFSET,
-+	NI_DIG1_REGISTER_OFFSET,
-+	NI_DIG2_REGISTER_OFFSET,
-+	NI_DIG3_REGISTER_OFFSET,
-+	NI_DIG4_REGISTER_OFFSET,
-+	NI_DIG5_REGISTER_OFFSET
-+};
-+
-+static const unsigned ni_tx_offsets[] =
-+{
-+	NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
-+	NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
-+	NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
-+	NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1,
-+	NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1,
-+	NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
-+};
-+
-+static const unsigned evergreen_dp_offsets[] =
-+{
-+	EVERGREEN_DP0_REGISTER_OFFSET,
-+	EVERGREEN_DP1_REGISTER_OFFSET,
-+	EVERGREEN_DP2_REGISTER_OFFSET,
-+	EVERGREEN_DP3_REGISTER_OFFSET,
-+	EVERGREEN_DP4_REGISTER_OFFSET,
-+	EVERGREEN_DP5_REGISTER_OFFSET
-+};
-+
-+
-+/*
-+ * Assumption is that EVERGREEN_CRTC_MASTER_EN enable for requested crtc
-+ * We go from crtc to connector and it is not relible  since it
-+ * should be an opposite direction .If crtc is enable then
-+ * find the dig_fe which selects this crtc and insure that it enable.
-+ * if such dig_fe is found then find dig_be which selects found dig_be and
-+ * insure that it enable and in DP_SST mode.
-+ * if UNIPHY_PLL_CONTROL1.enable then we should disconnect timing
-+ * from dp symbols clocks .
-+ */
-+static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev,
-+					       unsigned crtc_id, unsigned *ret_dig_fe)
-+{
-+	unsigned i;
-+	unsigned dig_fe;
-+	unsigned dig_be;
-+	unsigned dig_en_be;
-+	unsigned uniphy_pll;
-+	unsigned digs_fe_selected;
-+	unsigned dig_be_mode;
-+	unsigned dig_fe_mask;
-+	bool is_enabled = false;
-+	bool found_crtc = false;
-+
-+	/* loop through all running dig_fe to find selected crtc */
-+	for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
-+		dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]);
-+		if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON &&
-+		    crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) {
-+			/* found running pipe */
-+			found_crtc = true;
-+			dig_fe_mask = 1 << i;
-+			dig_fe = i;
-+			break;
-+		}
-+	}
-+
-+	if (found_crtc) {
-+		/* loop through all running dig_be to find selected dig_fe */
-+		for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
-+			dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]);
-+			/* if dig_fe_selected by dig_be? */
-+			digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be);
-+			dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be);
-+			if (dig_fe_mask &  digs_fe_selected &&
-+			    /* if dig_be in sst mode? */
-+			    dig_be_mode == NI_DIG_BE_DPSST) {
-+				dig_en_be = RREG32(NI_DIG_BE_EN_CNTL +
-+						   ni_dig_offsets[i]);
-+				uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 +
-+						    ni_tx_offsets[i]);
-+				/* dig_be enable and tx is running */
-+				if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE &&
-+				    dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON &&
-+				    uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) {
-+					is_enabled = true;
-+					*ret_dig_fe = dig_fe;
-+					break;
-+				}
-+			}
-+		}
-+	}
-+
-+	return is_enabled;
-+}
-+
-+/*
-+ * Blank dig when in dp sst mode
-+ * Dig ignores crtc timing
-+ */
-+static void evergreen_blank_dp_output(struct radeon_device *rdev,
-+				      unsigned dig_fe)
-+{
-+	unsigned stream_ctrl;
-+	unsigned fifo_ctrl;
-+	unsigned counter = 0;
-+
-+	if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
-+		DRM_ERROR("invalid dig_fe %d\n", dig_fe);
-+		return;
-+	}
-+
-+	stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
-+			     evergreen_dp_offsets[dig_fe]);
-+	if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
-+		DRM_ERROR("dig %d , should be enable\n", dig_fe);
-+		return;
-+	}
-+
-+	stream_ctrl &=~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
-+	WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
-+	       evergreen_dp_offsets[dig_fe], stream_ctrl);
-+
-+	stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
-+			     evergreen_dp_offsets[dig_fe]);
-+	while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) {
-+		msleep(1);
-+		counter++;
-+		stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
-+				     evergreen_dp_offsets[dig_fe]);
-+	}
-+	if (counter >= 32 )
-+		DRM_ERROR("counter exceeds %d\n", counter);
-+
-+	fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
-+	fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
-+	WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl);
-+
-+}
-+
- void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
- {
- 	u32 crtc_enabled, tmp, frame_count, blackout;
- 	int i, j;
-+	unsigned dig_fe;
- 
- 	if (!ASIC_IS_NODCE(rdev)) {
- 		save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
-@@ -2651,7 +2793,17 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
- 					break;
- 				udelay(1);
- 			}
--
-+			/*we should disable dig if it drives dp sst*/
-+			/*but we are in radeon_device_init and the topology is unknown*/
-+			/*and it is available after radeon_modeset_init*/
-+			/*the following method radeon_atom_encoder_dpms_dig*/
-+			/*does the job if we initialize it properly*/
-+			/*for now we do it this manually*/
-+			/**/
-+			if (ASIC_IS_DCE5(rdev) &&
-+			    evergreen_is_dp_sst_stream_enabled(rdev, i ,&dig_fe))
-+				evergreen_blank_dp_output(rdev, dig_fe);
-+			/*we could remove 6 lines below*/
- 			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
- 			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
- 			tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
-diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
-index aa939dfed3a3..b436badf9efa 100644
---- a/drivers/gpu/drm/radeon/evergreen_reg.h
-+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
-@@ -250,8 +250,43 @@
- 
- /* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
- #define EVERGREEN_HDMI_BASE				0x7030
-+/*DIG block*/
-+#define NI_DIG0_REGISTER_OFFSET                 (0x7000  - 0x7000)
-+#define NI_DIG1_REGISTER_OFFSET                 (0x7C00  - 0x7000)
-+#define NI_DIG2_REGISTER_OFFSET                 (0x10800 - 0x7000)
-+#define NI_DIG3_REGISTER_OFFSET                 (0x11400 - 0x7000)
-+#define NI_DIG4_REGISTER_OFFSET                 (0x12000 - 0x7000)
-+#define NI_DIG5_REGISTER_OFFSET                 (0x12C00 - 0x7000)
-+
-+
-+#define NI_DIG_FE_CNTL                               0x7000
-+#       define NI_DIG_FE_CNTL_SOURCE_SELECT(x)        ((x) & 0x3)
-+#       define NI_DIG_FE_CNTL_SYMCLK_FE_ON            (1<<24)
-+
-+
-+#define NI_DIG_BE_CNTL                    0x7140
-+#       define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x)     (((x) >> 8 ) & 0x3F)
-+#       define NI_DIG_FE_CNTL_MODE(x)                 (((x) >> 16) & 0x7 )
-+
-+#define NI_DIG_BE_EN_CNTL                              0x7144
-+#       define NI_DIG_BE_EN_CNTL_ENABLE               (1 << 0)
-+#       define NI_DIG_BE_EN_CNTL_SYMBCLK_ON           (1 << 8)
-+#       define NI_DIG_BE_DPSST 0
- 
- /* Display Port block */
-+#define EVERGREEN_DP0_REGISTER_OFFSET                 (0x730C  - 0x730C)
-+#define EVERGREEN_DP1_REGISTER_OFFSET                 (0x7F0C  - 0x730C)
-+#define EVERGREEN_DP2_REGISTER_OFFSET                 (0x10B0C - 0x730C)
-+#define EVERGREEN_DP3_REGISTER_OFFSET                 (0x1170C - 0x730C)
-+#define EVERGREEN_DP4_REGISTER_OFFSET                 (0x1230C - 0x730C)
-+#define EVERGREEN_DP5_REGISTER_OFFSET                 (0x12F0C - 0x730C)
-+
-+
-+#define EVERGREEN_DP_VID_STREAM_CNTL                    0x730C
-+#       define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE     (1 << 0)
-+#       define EVERGREEN_DP_VID_STREAM_STATUS          (1 <<16)
-+#define EVERGREEN_DP_STEER_FIFO                         0x7310
-+#       define EVERGREEN_DP_STEER_FIFO_RESET           (1 << 0)
- #define EVERGREEN_DP_SEC_CNTL                           0x7280
- #       define EVERGREEN_DP_SEC_STREAM_ENABLE           (1 << 0)
- #       define EVERGREEN_DP_SEC_ASP_ENABLE              (1 << 4)
-@@ -266,4 +301,15 @@
- #       define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x)      (((x) & 0xf) << 24)
- #       define EVERGREEN_DP_SEC_SS_EN                   (1 << 28)
- 
-+/*DCIO_UNIPHY block*/
-+#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1            (0x6600  -0x6600)
-+#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1            (0x6640  -0x6600)
-+#define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1            (0x6680 - 0x6600)
-+#define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1            (0x66C0 - 0x6600)
-+#define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1            (0x6700 - 0x6600)
-+#define NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1            (0x6740 - 0x6600)
-+
-+#define NI_DCIO_UNIPHY0_PLL_CONTROL1                   0x6618
-+#       define NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE     (1 << 0)
-+
- #endif
-diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
-index 340f3f549f29..9cfc1c3e1965 100644
---- a/drivers/gpu/drm/radeon/radeon_connectors.c
-+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
-@@ -1996,10 +1996,12 @@ radeon_add_atom_connector(struct drm_device *dev,
- 						   rdev->mode_info.dither_property,
- 						   RADEON_FMT_DITHER_DISABLE);
- 
--			if (radeon_audio != 0)
-+			if (radeon_audio != 0) {
- 				drm_object_attach_property(&radeon_connector->base.base,
- 							   rdev->mode_info.audio_property,
- 							   RADEON_AUDIO_AUTO);
-+				radeon_connector->audio = RADEON_AUDIO_AUTO;
-+			}
- 			if (ASIC_IS_DCE5(rdev))
- 				drm_object_attach_property(&radeon_connector->base.base,
- 							   rdev->mode_info.output_csc_property,
-@@ -2124,6 +2126,7 @@ radeon_add_atom_connector(struct drm_device *dev,
- 				drm_object_attach_property(&radeon_connector->base.base,
- 							   rdev->mode_info.audio_property,
- 							   RADEON_AUDIO_AUTO);
-+				radeon_connector->audio = RADEON_AUDIO_AUTO;
- 			}
- 			if (connector_type == DRM_MODE_CONNECTOR_DVII) {
- 				radeon_connector->dac_load_detect = true;
-@@ -2179,6 +2182,7 @@ radeon_add_atom_connector(struct drm_device *dev,
- 				drm_object_attach_property(&radeon_connector->base.base,
- 							   rdev->mode_info.audio_property,
- 							   RADEON_AUDIO_AUTO);
-+				radeon_connector->audio = RADEON_AUDIO_AUTO;
- 			}
- 			if (ASIC_IS_DCE5(rdev))
- 				drm_object_attach_property(&radeon_connector->base.base,
-@@ -2231,6 +2235,7 @@ radeon_add_atom_connector(struct drm_device *dev,
- 				drm_object_attach_property(&radeon_connector->base.base,
- 							   rdev->mode_info.audio_property,
- 							   RADEON_AUDIO_AUTO);
-+				radeon_connector->audio = RADEON_AUDIO_AUTO;
- 			}
- 			if (ASIC_IS_DCE5(rdev))
- 				drm_object_attach_property(&radeon_connector->base.base,
-diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
-index 2d9196a447fd..bfcef4db8138 100644
---- a/drivers/gpu/drm/radeon/radeon_display.c
-+++ b/drivers/gpu/drm/radeon/radeon_display.c
-@@ -1683,10 +1683,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
- 	/* setup afmt */
- 	radeon_afmt_init(rdev);
- 
--	if (!list_empty(&rdev->ddev->mode_config.connector_list)) {
--		radeon_fbdev_init(rdev);
--		drm_kms_helper_poll_init(rdev->ddev);
--	}
-+	radeon_fbdev_init(rdev);
-+	drm_kms_helper_poll_init(rdev->ddev);
- 
- 	/* do pm late init */
- 	ret = radeon_pm_late_init(rdev);
-diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
-index 3b0c229d7dcd..db64e0062689 100644
---- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
-+++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
-@@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
- 
- 	tmp &= AUX_HPD_SEL(0x7);
- 	tmp |= AUX_HPD_SEL(chan->rec.hpd);
--	tmp |= AUX_EN | AUX_LS_READ_EN;
-+	tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1);
- 
- 	WREG32(AUX_CONTROL + aux_offset[instance], tmp);
- 
-diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
-index df7a1719c841..9d210bbcab50 100644
---- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
-+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
-@@ -525,17 +525,9 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
- 	drm_mode_set_crtcinfo(adjusted_mode, 0);
- 	{
- 	  struct radeon_connector_atom_dig *dig_connector;
--	  int ret;
--
- 	  dig_connector = mst_enc->connector->con_priv;
--	  ret = radeon_dp_get_dp_link_config(&mst_enc->connector->base,
--					     dig_connector->dpcd, adjusted_mode->clock,
--					     &dig_connector->dp_lane_count,
--					     &dig_connector->dp_clock);
--	  if (ret) {
--		  dig_connector->dp_lane_count = 0;
--		  dig_connector->dp_clock = 0;
--	  }
-+	  dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd);
-+	  dig_connector->dp_clock = drm_dp_max_link_rate(dig_connector->dpcd);
- 	  DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
- 			dig_connector->dp_lane_count, dig_connector->dp_clock);
- 	}
-diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
-index d2e628eea53d..d179596334a7 100644
---- a/drivers/gpu/drm/radeon/radeon_fb.c
-+++ b/drivers/gpu/drm/radeon/radeon_fb.c
-@@ -292,7 +292,8 @@ out_unref:
- 
- void radeon_fb_output_poll_changed(struct radeon_device *rdev)
- {
--	drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
-+	if (rdev->mode_info.rfbdev)
-+		drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
- }
- 
- static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
-@@ -325,6 +326,10 @@ int radeon_fbdev_init(struct radeon_device *rdev)
- 	int bpp_sel = 32;
- 	int ret;
- 
-+	/* don't enable fbdev if no connectors */
-+	if (list_empty(&rdev->ddev->mode_config.connector_list))
-+		return 0;
-+
- 	/* select 8 bpp console on RN50 or 16MB cards */
- 	if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
- 		bpp_sel = 8;
-@@ -377,11 +382,15 @@ void radeon_fbdev_fini(struct radeon_device *rdev)
- 
- void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
- {
--	fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
-+	if (rdev->mode_info.rfbdev)
-+		fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
- }
- 
- bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
- {
-+	if (!rdev->mode_info.rfbdev)
-+		return false;
-+
- 	if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj))
- 		return true;
- 	return false;
-@@ -389,12 +398,14 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
- 
- void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector)
- {
--	drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
-+	if (rdev->mode_info.rfbdev)
-+		drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
- }
- 
- void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector)
- {
--	drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
-+	if (rdev->mode_info.rfbdev)
-+		drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
- }
- 
- void radeon_fbdev_restore_mode(struct radeon_device *rdev)
-diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
-index e06ac546a90f..f342aad79cc6 100644
---- a/drivers/gpu/drm/radeon/radeon_ttm.c
-+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
-@@ -235,6 +235,8 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
- {
- 	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
- 
-+	if (radeon_ttm_tt_has_userptr(bo->ttm))
-+		return -EPERM;
- 	return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
- }
- 
-diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
-index a82b891ae1fe..caa73de584a5 100644
---- a/drivers/gpu/drm/radeon/si_dpm.c
-+++ b/drivers/gpu/drm/radeon/si_dpm.c
-@@ -2926,9 +2926,12 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
- 	/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
- 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
- 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
-+	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 },
- 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
- 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
- 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
-+	{ PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
-+	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
- 	{ 0, 0, 0, 0 },
- };
- 
-@@ -3008,6 +3011,10 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
- 		}
- 		++p;
- 	}
-+	/* limit mclk on all R7 370 parts for stability */
-+	if (rdev->pdev->device == 0x6811 &&
-+	    rdev->pdev->revision == 0x81)
-+		max_mclk = 120000;
- 
- 	if (rps->vce_active) {
- 		rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
-diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
-index 4cbf26555093..e3daafa1be13 100644
---- a/drivers/gpu/drm/ttm/ttm_bo.c
-+++ b/drivers/gpu/drm/ttm/ttm_bo.c
-@@ -230,22 +230,13 @@ EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
- 
- void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
- {
--	struct ttm_bo_device *bdev = bo->bdev;
--	struct ttm_mem_type_manager *man;
-+	int put_count = 0;
- 
- 	lockdep_assert_held(&bo->resv->lock.base);
- 
--	if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
--		list_del_init(&bo->swap);
--		list_del_init(&bo->lru);
--
--	} else {
--		if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
--			list_move_tail(&bo->swap, &bo->glob->swap_lru);
--
--		man = &bdev->man[bo->mem.mem_type];
--		list_move_tail(&bo->lru, &man->lru);
--	}
-+	put_count = ttm_bo_del_from_lru(bo);
-+	ttm_bo_list_ref_sub(bo, put_count, true);
-+	ttm_bo_add_to_lru(bo);
- }
- EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
- 
-diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
-index 200419d4d43c..18a2acbccb7d 100644
---- a/drivers/gpu/drm/udl/udl_fb.c
-+++ b/drivers/gpu/drm/udl/udl_fb.c
-@@ -538,7 +538,7 @@ static int udlfb_create(struct drm_fb_helper *helper,
- out_destroy_fbi:
- 	drm_fb_helper_release_fbi(helper);
- out_gfree:
--	drm_gem_object_unreference(&ufbdev->ufb.obj->base);
-+	drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
- out:
- 	return ret;
- }
-diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
-index 2a0a784ab6ee..d7528e0d8442 100644
---- a/drivers/gpu/drm/udl/udl_gem.c
-+++ b/drivers/gpu/drm/udl/udl_gem.c
-@@ -52,7 +52,7 @@ udl_gem_create(struct drm_file *file,
- 		return ret;
- 	}
- 
--	drm_gem_object_unreference(&obj->base);
-+	drm_gem_object_unreference_unlocked(&obj->base);
- 	*handle_p = handle;
- 	return 0;
- }
-diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
-index 22278bcfc60e..ac8eafea6361 100644
---- a/drivers/gpu/drm/vc4/vc4_bo.c
-+++ b/drivers/gpu/drm/vc4/vc4_bo.c
-@@ -499,11 +499,12 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
- 	if (IS_ERR(bo))
- 		return PTR_ERR(bo);
- 
--	ret = copy_from_user(bo->base.vaddr,
-+	if (copy_from_user(bo->base.vaddr,
- 			     (void __user *)(uintptr_t)args->data,
--			     args->size);
--	if (ret != 0)
-+			     args->size)) {
-+		ret = -EFAULT;
- 		goto fail;
-+	}
- 	/* Clear the rest of the memory from allocating from the BO
- 	 * cache.
- 	 */
-diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
-index e00db3f510dd..abb98c77bad2 100644
---- a/drivers/gpu/ipu-v3/ipu-common.c
-+++ b/drivers/gpu/ipu-v3/ipu-common.c
-@@ -1068,7 +1068,6 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
- 			goto err_register;
- 		}
- 
--		pdev->dev.of_node = of_node;
- 		pdev->dev.parent = dev;
- 
- 		ret = platform_device_add_data(pdev, &reg->pdata,
-@@ -1079,6 +1078,12 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
- 			platform_device_put(pdev);
- 			goto err_register;
- 		}
-+
-+		/*
-+		 * Set of_node only after calling platform_device_add. Otherwise
-+		 * the platform:imx-ipuv3-crtc modalias won't be used.
-+		 */
-+		pdev->dev.of_node = of_node;
- 	}
- 
- 	return 0;
-diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
-index 7e89288b1537..99446ffd71fb 100644
---- a/drivers/hid/hid-core.c
-+++ b/drivers/hid/hid-core.c
-@@ -1891,6 +1891,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
- 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD) },
- 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500) },
- 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D) },
-+	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DUAL_ACTION) },
- 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL) },
- 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD) },
- 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
-@@ -2615,9 +2616,10 @@ int hid_add_device(struct hid_device *hdev)
- 	/*
- 	 * Scan generic devices for group information
- 	 */
--	if (hid_ignore_special_drivers ||
--	    (!hdev->group &&
--	     !hid_match_id(hdev, hid_have_special_driver))) {
-+	if (hid_ignore_special_drivers) {
-+		hdev->group = HID_GROUP_GENERIC;
-+	} else if (!hdev->group &&
-+		   !hid_match_id(hdev, hid_have_special_driver)) {
- 		ret = hid_scan_report(hdev);
- 		if (ret)
- 			hid_warn(hdev, "bad device descriptor (%d)\n", ret);
-diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
-index b6ff6e78ac54..14c14c82795c 100644
---- a/drivers/hid/hid-ids.h
-+++ b/drivers/hid/hid-ids.h
-@@ -255,6 +255,7 @@
- #define USB_DEVICE_ID_CORSAIR_K90	0x1b02
- 
- #define USB_VENDOR_ID_CREATIVELABS	0x041e
-+#define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51	0x322c
- #define USB_DEVICE_ID_PRODIKEYS_PCMIDI	0x2801
- 
- #define USB_VENDOR_ID_CVTOUCH		0x1ff7
-diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
-index 296d4991560e..a20fc604ffd8 100644
---- a/drivers/hid/hid-multitouch.c
-+++ b/drivers/hid/hid-multitouch.c
-@@ -396,6 +396,11 @@ static void mt_feature_mapping(struct hid_device *hdev,
- 			td->is_buttonpad = true;
- 
- 		break;
-+	case 0xff0000c5:
-+		/* Retrieve the Win8 blob once to enable some devices */
-+		if (usage->usage_index == 0)
-+			mt_get_feature(hdev, field->report);
-+		break;
- 	}
- }
- 
-diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
-index b9216938a718..bb897497f008 100644
---- a/drivers/hid/i2c-hid/i2c-hid.c
-+++ b/drivers/hid/i2c-hid/i2c-hid.c
-@@ -283,17 +283,21 @@ static int i2c_hid_set_or_send_report(struct i2c_client *client, u8 reportType,
- 	u16 dataRegister = le16_to_cpu(ihid->hdesc.wDataRegister);
- 	u16 outputRegister = le16_to_cpu(ihid->hdesc.wOutputRegister);
- 	u16 maxOutputLength = le16_to_cpu(ihid->hdesc.wMaxOutputLength);
-+	u16 size;
-+	int args_len;
-+	int index = 0;
-+
-+	i2c_hid_dbg(ihid, "%s\n", __func__);
-+
-+	if (data_len > ihid->bufsize)
-+		return -EINVAL;
- 
--	/* hid_hw_* already checked that data_len < HID_MAX_BUFFER_SIZE */
--	u16 size =	2			/* size */ +
-+	size =		2			/* size */ +
- 			(reportID ? 1 : 0)	/* reportID */ +
- 			data_len		/* buf */;
--	int args_len =	(reportID >= 0x0F ? 1 : 0) /* optional third byte */ +
-+	args_len =	(reportID >= 0x0F ? 1 : 0) /* optional third byte */ +
- 			2			/* dataRegister */ +
- 			size			/* args */;
--	int index = 0;
--
--	i2c_hid_dbg(ihid, "%s\n", __func__);
- 
- 	if (!use_data && maxOutputLength == 0)
- 		return -ENOSYS;
-diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
-index ad71160b9ea4..ae83af649a60 100644
---- a/drivers/hid/usbhid/hid-core.c
-+++ b/drivers/hid/usbhid/hid-core.c
-@@ -951,14 +951,6 @@ static int usbhid_output_report(struct hid_device *hid, __u8 *buf, size_t count)
- 	return ret;
- }
- 
--static void usbhid_restart_queues(struct usbhid_device *usbhid)
--{
--	if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
--		usbhid_restart_out_queue(usbhid);
--	if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
--		usbhid_restart_ctrl_queue(usbhid);
--}
--
- static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid)
- {
- 	struct usbhid_device *usbhid = hid->driver_data;
-@@ -1404,6 +1396,37 @@ static void hid_cease_io(struct usbhid_device *usbhid)
- 	usb_kill_urb(usbhid->urbout);
- }
- 
-+static void hid_restart_io(struct hid_device *hid)
-+{
-+	struct usbhid_device *usbhid = hid->driver_data;
-+	int clear_halt = test_bit(HID_CLEAR_HALT, &usbhid->iofl);
-+	int reset_pending = test_bit(HID_RESET_PENDING, &usbhid->iofl);
-+
-+	spin_lock_irq(&usbhid->lock);
-+	clear_bit(HID_SUSPENDED, &usbhid->iofl);
-+	usbhid_mark_busy(usbhid);
-+
-+	if (clear_halt || reset_pending)
-+		schedule_work(&usbhid->reset_work);
-+	usbhid->retry_delay = 0;
-+	spin_unlock_irq(&usbhid->lock);
-+
-+	if (reset_pending || !test_bit(HID_STARTED, &usbhid->iofl))
-+		return;
-+
-+	if (!clear_halt) {
-+		if (hid_start_in(hid) < 0)
-+			hid_io_error(hid);
-+	}
-+
-+	spin_lock_irq(&usbhid->lock);
-+	if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
-+		usbhid_restart_out_queue(usbhid);
-+	if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
-+		usbhid_restart_ctrl_queue(usbhid);
-+	spin_unlock_irq(&usbhid->lock);
-+}
-+
- /* Treat USB reset pretty much the same as suspend/resume */
- static int hid_pre_reset(struct usb_interface *intf)
- {
-@@ -1453,14 +1476,14 @@ static int hid_post_reset(struct usb_interface *intf)
- 		return 1;
- 	}
- 
-+	/* No need to do another reset or clear a halted endpoint */
- 	spin_lock_irq(&usbhid->lock);
- 	clear_bit(HID_RESET_PENDING, &usbhid->iofl);
-+	clear_bit(HID_CLEAR_HALT, &usbhid->iofl);
- 	spin_unlock_irq(&usbhid->lock);
- 	hid_set_idle(dev, intf->cur_altsetting->desc.bInterfaceNumber, 0, 0);
--	status = hid_start_in(hid);
--	if (status < 0)
--		hid_io_error(hid);
--	usbhid_restart_queues(usbhid);
-+
-+	hid_restart_io(hid);
- 
- 	return 0;
- }
-@@ -1483,25 +1506,9 @@ void usbhid_put_power(struct hid_device *hid)
- #ifdef CONFIG_PM
- static int hid_resume_common(struct hid_device *hid, bool driver_suspended)
- {
--	struct usbhid_device *usbhid = hid->driver_data;
--	int status;
--
--	spin_lock_irq(&usbhid->lock);
--	clear_bit(HID_SUSPENDED, &usbhid->iofl);
--	usbhid_mark_busy(usbhid);
--
--	if (test_bit(HID_CLEAR_HALT, &usbhid->iofl) ||
--			test_bit(HID_RESET_PENDING, &usbhid->iofl))
--		schedule_work(&usbhid->reset_work);
--	usbhid->retry_delay = 0;
--
--	usbhid_restart_queues(usbhid);
--	spin_unlock_irq(&usbhid->lock);
--
--	status = hid_start_in(hid);
--	if (status < 0)
--		hid_io_error(hid);
-+	int status = 0;
- 
-+	hid_restart_io(hid);
- 	if (driver_suspended && hid->driver && hid->driver->resume)
- 		status = hid->driver->resume(hid);
- 	return status;
-@@ -1570,12 +1577,8 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
- static int hid_resume(struct usb_interface *intf)
- {
- 	struct hid_device *hid = usb_get_intfdata (intf);
--	struct usbhid_device *usbhid = hid->driver_data;
- 	int status;
- 
--	if (!test_bit(HID_STARTED, &usbhid->iofl))
--		return 0;
--
- 	status = hid_resume_common(hid, true);
- 	dev_dbg(&intf->dev, "resume status %d\n", status);
- 	return 0;
-@@ -1584,10 +1587,8 @@ static int hid_resume(struct usb_interface *intf)
- static int hid_reset_resume(struct usb_interface *intf)
- {
- 	struct hid_device *hid = usb_get_intfdata(intf);
--	struct usbhid_device *usbhid = hid->driver_data;
- 	int status;
- 
--	clear_bit(HID_SUSPENDED, &usbhid->iofl);
- 	status = hid_post_reset(intf);
- 	if (status >= 0 && hid->driver && hid->driver->reset_resume) {
- 		int ret = hid->driver->reset_resume(hid);
-diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
-index 7dd0953cd70f..dc8e6adf95a4 100644
---- a/drivers/hid/usbhid/hid-quirks.c
-+++ b/drivers/hid/usbhid/hid-quirks.c
-@@ -70,6 +70,7 @@ static const struct hid_blacklist {
- 	{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
- 	{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
- 	{ USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
-+	{ USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
- 	{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
- 	{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
- 	{ USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
-diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
-index 99ef77fcfb80..f71187aad0d0 100644
---- a/drivers/hid/wacom_wac.c
-+++ b/drivers/hid/wacom_wac.c
-@@ -2409,6 +2409,17 @@ void wacom_setup_device_quirks(struct wacom *wacom)
- 	}
- 
- 	/*
-+	 * Hack for the Bamboo One:
-+	 * the device presents a PAD/Touch interface as most Bamboos and even
-+	 * sends ghosts PAD data on it. However, later, we must disable this
-+	 * ghost interface, and we can not detect it unless we set it here
-+	 * to WACOM_DEVICETYPE_PAD or WACOM_DEVICETYPE_TOUCH.
-+	 */
-+	if (features->type == BAMBOO_PEN &&
-+	    features->pktlen == WACOM_PKGLEN_BBTOUCH3)
-+		features->device_type |= WACOM_DEVICETYPE_PAD;
-+
-+	/*
- 	 * Raw Wacom-mode pen and touch events both come from interface
- 	 * 0, whose HID descriptor has an application usage of 0xFF0D
- 	 * (i.e., WACOM_VENDORDEFINED_PEN). We route pen packets back
-@@ -3367,6 +3378,10 @@ static const struct wacom_features wacom_features_0x33E =
- 	{ "Wacom Intuos PT M 2", 21600, 13500, 2047, 63,
- 	  INTUOSHT2, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16,
- 	  .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
-+static const struct wacom_features wacom_features_0x343 =
-+	{ "Wacom DTK1651", 34616, 19559, 1023, 0,
-+	  DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
-+	  WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
- 
- static const struct wacom_features wacom_features_HID_ANY_ID =
- 	{ "Wacom HID", .type = HID_GENERIC };
-@@ -3532,6 +3547,7 @@ const struct hid_device_id wacom_ids[] = {
- 	{ USB_DEVICE_WACOM(0x33C) },
- 	{ USB_DEVICE_WACOM(0x33D) },
- 	{ USB_DEVICE_WACOM(0x33E) },
-+	{ USB_DEVICE_WACOM(0x343) },
- 	{ USB_DEVICE_WACOM(0x4001) },
- 	{ USB_DEVICE_WACOM(0x4004) },
- 	{ USB_DEVICE_WACOM(0x5000) },
-diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
-index b53702ce692f..e35560b955b1 100644
---- a/drivers/hv/ring_buffer.c
-+++ b/drivers/hv/ring_buffer.c
-@@ -103,15 +103,29 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
-  *    there is room for the producer to send the pending packet.
-  */
- 
--static bool hv_need_to_signal_on_read(u32 prev_write_sz,
--				      struct hv_ring_buffer_info *rbi)
-+static bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
- {
- 	u32 cur_write_sz;
- 	u32 r_size;
--	u32 write_loc = rbi->ring_buffer->write_index;
-+	u32 write_loc;
- 	u32 read_loc = rbi->ring_buffer->read_index;
--	u32 pending_sz = rbi->ring_buffer->pending_send_sz;
-+	u32 pending_sz;
- 
-+	/*
-+	 * Issue a full memory barrier before making the signaling decision.
-+	 * Here is the reason for having this barrier:
-+	 * If the reading of the pend_sz (in this function)
-+	 * were to be reordered and read before we commit the new read
-+	 * index (in the calling function)  we could
-+	 * have a problem. If the host were to set the pending_sz after we
-+	 * have sampled pending_sz and go to sleep before we commit the
-+	 * read index, we could miss sending the interrupt. Issue a full
-+	 * memory barrier to address this.
-+	 */
-+	mb();
-+
-+	pending_sz = rbi->ring_buffer->pending_send_sz;
-+	write_loc = rbi->ring_buffer->write_index;
- 	/* If the other end is not blocked on write don't bother. */
- 	if (pending_sz == 0)
- 		return false;
-@@ -120,7 +134,7 @@ static bool hv_need_to_signal_on_read(u32 prev_write_sz,
- 	cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
- 			read_loc - write_loc;
- 
--	if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
-+	if (cur_write_sz >= pending_sz)
- 		return true;
- 
- 	return false;
-@@ -458,7 +472,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
- 	/* Update the read index */
- 	hv_set_next_read_location(inring_info, next_read_location);
- 
--	*signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info);
-+	*signal = hv_need_to_signal_on_read(inring_info);
- 
- out_unlock:
- 	spin_unlock_irqrestore(&inring_info->ring_lock, flags);
-diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c
-index 36544c4f653c..303d0c9df907 100644
---- a/drivers/hwmon/max1111.c
-+++ b/drivers/hwmon/max1111.c
-@@ -85,6 +85,9 @@ static struct max1111_data *the_max1111;
- 
- int max1111_read_channel(int channel)
- {
-+	if (!the_max1111 || !the_max1111->spi)
-+		return -ENODEV;
-+
- 	return max1111_read(&the_max1111->spi->dev, channel);
- }
- EXPORT_SYMBOL(max1111_read_channel);
-@@ -258,6 +261,9 @@ static int max1111_remove(struct spi_device *spi)
- {
- 	struct max1111_data *data = spi_get_drvdata(spi);
- 
-+#ifdef CONFIG_SHARPSL_PM
-+	the_max1111 = NULL;
-+#endif
- 	hwmon_device_unregister(data->hwmon_dev);
- 	sysfs_remove_group(&spi->dev.kobj, &max1110_attr_group);
- 	sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
-diff --git a/drivers/hwtracing/stm/Kconfig b/drivers/hwtracing/stm/Kconfig
-index 83e9f591a54b..e7a348807f0c 100644
---- a/drivers/hwtracing/stm/Kconfig
-+++ b/drivers/hwtracing/stm/Kconfig
-@@ -1,6 +1,7 @@
- config STM
- 	tristate "System Trace Module devices"
- 	select CONFIGFS_FS
-+	select SRCU
- 	help
- 	  A System Trace Module (STM) is a device exporting data in System
- 	  Trace Protocol (STP) format as defined by MIPI STP standards.
-diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
-index 714bdc837769..b167ab25310a 100644
---- a/drivers/i2c/busses/i2c-cpm.c
-+++ b/drivers/i2c/busses/i2c-cpm.c
-@@ -116,8 +116,8 @@ struct cpm_i2c {
- 	cbd_t __iomem *rbase;
- 	u_char *txbuf[CPM_MAXBD];
- 	u_char *rxbuf[CPM_MAXBD];
--	u32 txdma[CPM_MAXBD];
--	u32 rxdma[CPM_MAXBD];
-+	dma_addr_t txdma[CPM_MAXBD];
-+	dma_addr_t rxdma[CPM_MAXBD];
- };
- 
- static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id)
-diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
-index b29c7500461a..f54ece8fce78 100644
---- a/drivers/i2c/busses/i2c-exynos5.c
-+++ b/drivers/i2c/busses/i2c-exynos5.c
-@@ -671,7 +671,9 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
- 		return -EIO;
- 	}
- 
--	clk_prepare_enable(i2c->clk);
-+	ret = clk_enable(i2c->clk);
-+	if (ret)
-+		return ret;
- 
- 	for (i = 0; i < num; i++, msgs++) {
- 		stop = (i == num - 1);
-@@ -695,7 +697,7 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
- 	}
- 
-  out:
--	clk_disable_unprepare(i2c->clk);
-+	clk_disable(i2c->clk);
- 	return ret;
- }
- 
-@@ -747,7 +749,9 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
- 		return -ENOENT;
- 	}
- 
--	clk_prepare_enable(i2c->clk);
-+	ret = clk_prepare_enable(i2c->clk);
-+	if (ret)
-+		return ret;
- 
- 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- 	i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
-@@ -799,6 +803,10 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
- 
- 	platform_set_drvdata(pdev, i2c);
- 
-+	clk_disable(i2c->clk);
-+
-+	return 0;
-+
-  err_clk:
- 	clk_disable_unprepare(i2c->clk);
- 	return ret;
-@@ -810,6 +818,8 @@ static int exynos5_i2c_remove(struct platform_device *pdev)
- 
- 	i2c_del_adapter(&i2c->adap);
- 
-+	clk_unprepare(i2c->clk);
-+
- 	return 0;
- }
- 
-@@ -821,6 +831,8 @@ static int exynos5_i2c_suspend_noirq(struct device *dev)
- 
- 	i2c->suspended = 1;
- 
-+	clk_unprepare(i2c->clk);
-+
- 	return 0;
- }
- 
-@@ -830,7 +842,9 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
- 	struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
- 	int ret = 0;
- 
--	clk_prepare_enable(i2c->clk);
-+	ret = clk_prepare_enable(i2c->clk);
-+	if (ret)
-+		return ret;
- 
- 	ret = exynos5_hsi2c_clock_setup(i2c);
- 	if (ret) {
-@@ -839,7 +853,7 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
- 	}
- 
- 	exynos5_i2c_init(i2c);
--	clk_disable_unprepare(i2c->clk);
-+	clk_disable(i2c->clk);
- 	i2c->suspended = 0;
- 
- 	return 0;
-diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
-index cd4510a63375..146eed70bdf4 100644
---- a/drivers/idle/intel_idle.c
-+++ b/drivers/idle/intel_idle.c
-@@ -65,7 +65,7 @@
- #include <asm/mwait.h>
- #include <asm/msr.h>
- 
--#define INTEL_IDLE_VERSION "0.4"
-+#define INTEL_IDLE_VERSION "0.4.1"
- #define PREFIX "intel_idle: "
- 
- static struct cpuidle_driver intel_idle_driver = {
-@@ -994,36 +994,92 @@ static void intel_idle_cpuidle_devices_uninit(void)
- }
- 
- /*
-- * intel_idle_state_table_update()
-- *
-- * Update the default state_table for this CPU-id
-+ * ivt_idle_state_table_update(void)
-  *
-- * Currently used to access tuned IVT multi-socket targets
-+ * Tune IVT multi-socket targets
-  * Assumption: num_sockets == (max_package_num + 1)
-  */
--void intel_idle_state_table_update(void)
-+static void ivt_idle_state_table_update(void)
- {
- 	/* IVT uses a different table for 1-2, 3-4, and > 4 sockets */
--	if (boot_cpu_data.x86_model == 0x3e) { /* IVT */
--		int cpu, package_num, num_sockets = 1;
--
--		for_each_online_cpu(cpu) {
--			package_num = topology_physical_package_id(cpu);
--			if (package_num + 1 > num_sockets) {
--				num_sockets = package_num + 1;
--
--				if (num_sockets > 4) {
--					cpuidle_state_table = ivt_cstates_8s;
--					return;
--				}
-+	int cpu, package_num, num_sockets = 1;
-+
-+	for_each_online_cpu(cpu) {
-+		package_num = topology_physical_package_id(cpu);
-+		if (package_num + 1 > num_sockets) {
-+			num_sockets = package_num + 1;
-+
-+			if (num_sockets > 4) {
-+				cpuidle_state_table = ivt_cstates_8s;
-+				return;
- 			}
- 		}
-+	}
-+
-+	if (num_sockets > 2)
-+		cpuidle_state_table = ivt_cstates_4s;
-+
-+	/* else, 1 and 2 socket systems use default ivt_cstates */
-+}
-+/*
-+ * sklh_idle_state_table_update(void)
-+ *
-+ * On SKL-H (model 0x5e) disable C8 and C9 if:
-+ * C10 is enabled and SGX disabled
-+ */
-+static void sklh_idle_state_table_update(void)
-+{
-+	unsigned long long msr;
-+	unsigned int eax, ebx, ecx, edx;
-+
-+
-+	/* if PC10 disabled via cmdline intel_idle.max_cstate=7 or shallower */
-+	if (max_cstate <= 7)
-+		return;
-+
-+	/* if PC10 not present in CPUID.MWAIT.EDX */
-+	if ((mwait_substates & (0xF << 28)) == 0)
-+		return;
-+
-+	rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr);
-+
-+	/* PC10 is not enabled in PKG C-state limit */
-+	if ((msr & 0xF) != 8)
-+		return;
-+
-+	ecx = 0;
-+	cpuid(7, &eax, &ebx, &ecx, &edx);
-+
-+	/* if SGX is present */
-+	if (ebx & (1 << 2)) {
- 
--		if (num_sockets > 2)
--			cpuidle_state_table = ivt_cstates_4s;
--		/* else, 1 and 2 socket systems use default ivt_cstates */
-+		rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
-+
-+		/* if SGX is enabled */
-+		if (msr & (1 << 18))
-+			return;
-+	}
-+
-+	skl_cstates[5].disabled = 1;	/* C8-SKL */
-+	skl_cstates[6].disabled = 1;	/* C9-SKL */
-+}
-+/*
-+ * intel_idle_state_table_update()
-+ *
-+ * Update the default state_table for this CPU-id
-+ */
-+
-+static void intel_idle_state_table_update(void)
-+{
-+	switch (boot_cpu_data.x86_model) {
-+
-+	case 0x3e: /* IVT */
-+		ivt_idle_state_table_update();
-+		break;
-+	case 0x5e: /* SKL-H */
-+		sklh_idle_state_table_update();
-+		break;
- 	}
--	return;
- }
- 
- /*
-@@ -1063,6 +1119,14 @@ static int __init intel_idle_cpuidle_driver_init(void)
- 		if (num_substates == 0)
- 			continue;
- 
-+		/* if state marked as disabled, skip it */
-+		if (cpuidle_state_table[cstate].disabled != 0) {
-+			pr_debug(PREFIX "state %s is disabled",
-+				cpuidle_state_table[cstate].name);
-+			continue;
-+		}
-+
-+
- 		if (((mwait_cstate + 1) > 2) &&
- 			!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
- 			mark_tsc_unstable("TSC halts in idle"
-diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
-index c73331f7782b..2072a31e813b 100644
---- a/drivers/iio/accel/bmc150-accel-core.c
-+++ b/drivers/iio/accel/bmc150-accel-core.c
-@@ -547,7 +547,7 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
- {
- 	int ret;
- 	int axis = chan->scan_index;
--	unsigned int raw_val;
-+	__le16 raw_val;
- 
- 	mutex_lock(&data->mutex);
- 	ret = bmc150_accel_set_power_state(data, true);
-@@ -557,14 +557,14 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
- 	}
- 
- 	ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_AXIS_TO_REG(axis),
--			       &raw_val, 2);
-+			       &raw_val, sizeof(raw_val));
- 	if (ret < 0) {
- 		dev_err(data->dev, "Error reading axis %d\n", axis);
- 		bmc150_accel_set_power_state(data, false);
- 		mutex_unlock(&data->mutex);
- 		return ret;
- 	}
--	*val = sign_extend32(raw_val >> chan->scan_type.shift,
-+	*val = sign_extend32(le16_to_cpu(raw_val) >> chan->scan_type.shift,
- 			     chan->scan_type.realbits - 1);
- 	ret = bmc150_accel_set_power_state(data, false);
- 	mutex_unlock(&data->mutex);
-@@ -988,6 +988,7 @@ static const struct iio_event_spec bmc150_accel_event = {
- 		.realbits = (bits),					\
- 		.storagebits = 16,					\
- 		.shift = 16 - (bits),					\
-+		.endianness = IIO_LE,					\
- 	},								\
- 	.event_spec = &bmc150_accel_event,				\
- 	.num_event_specs = 1						\
-diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
-index bbce3b09ac45..4dac567e75b4 100644
---- a/drivers/iio/gyro/bmg160_core.c
-+++ b/drivers/iio/gyro/bmg160_core.c
-@@ -452,7 +452,7 @@ static int bmg160_get_temp(struct bmg160_data *data, int *val)
- static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
- {
- 	int ret;
--	unsigned int raw_val;
-+	__le16 raw_val;
- 
- 	mutex_lock(&data->mutex);
- 	ret = bmg160_set_power_state(data, true);
-@@ -462,7 +462,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
- 	}
- 
- 	ret = regmap_bulk_read(data->regmap, BMG160_AXIS_TO_REG(axis), &raw_val,
--			       2);
-+			       sizeof(raw_val));
- 	if (ret < 0) {
- 		dev_err(data->dev, "Error reading axis %d\n", axis);
- 		bmg160_set_power_state(data, false);
-@@ -470,7 +470,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
- 		return ret;
- 	}
- 
--	*val = sign_extend32(raw_val, 15);
-+	*val = sign_extend32(le16_to_cpu(raw_val), 15);
- 	ret = bmg160_set_power_state(data, false);
- 	mutex_unlock(&data->mutex);
- 	if (ret < 0)
-@@ -733,6 +733,7 @@ static const struct iio_event_spec bmg160_event = {
- 		.sign = 's',						\
- 		.realbits = 16,					\
- 		.storagebits = 16,					\
-+		.endianness = IIO_LE,					\
- 	},								\
- 	.event_spec = &bmg160_event,					\
- 	.num_event_specs = 1						\
-@@ -780,7 +781,7 @@ static irqreturn_t bmg160_trigger_handler(int irq, void *p)
- 			mutex_unlock(&data->mutex);
- 			goto err;
- 		}
--		data->buffer[i++] = ret;
-+		data->buffer[i++] = val;
- 	}
- 	mutex_unlock(&data->mutex);
- 
-diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
-index 139ae916225f..5b6abc541e8c 100644
---- a/drivers/iio/industrialio-buffer.c
-+++ b/drivers/iio/industrialio-buffer.c
-@@ -645,6 +645,7 @@ static int iio_verify_update(struct iio_dev *indio_dev,
- 	unsigned int modes;
- 
- 	memset(config, 0, sizeof(*config));
-+	config->watermark = ~0;
- 
- 	/*
- 	 * If there is just one buffer and we are removing it there is nothing
-diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
-index b13936dacc78..f2a7f72f7aa6 100644
---- a/drivers/iio/magnetometer/ak8975.c
-+++ b/drivers/iio/magnetometer/ak8975.c
-@@ -462,6 +462,8 @@ static int ak8975_setup_irq(struct ak8975_data *data)
- 	int rc;
- 	int irq;
- 
-+	init_waitqueue_head(&data->data_ready_queue);
-+	clear_bit(0, &data->flags);
- 	if (client->irq)
- 		irq = client->irq;
- 	else
-@@ -477,8 +479,6 @@ static int ak8975_setup_irq(struct ak8975_data *data)
- 		return rc;
- 	}
- 
--	init_waitqueue_head(&data->data_ready_queue);
--	clear_bit(0, &data->flags);
- 	data->eoc_irq = irq;
- 
- 	return rc;
-@@ -732,7 +732,7 @@ static int ak8975_probe(struct i2c_client *client,
- 	int eoc_gpio;
- 	int err;
- 	const char *name = NULL;
--	enum asahi_compass_chipset chipset;
-+	enum asahi_compass_chipset chipset = AK_MAX_TYPE;
- 
- 	/* Grab and set up the supplied GPIO. */
- 	if (client->dev.platform_data)
-diff --git a/drivers/iio/magnetometer/st_magn.h b/drivers/iio/magnetometer/st_magn.h
-index 06a4d9c35581..9daca4681922 100644
---- a/drivers/iio/magnetometer/st_magn.h
-+++ b/drivers/iio/magnetometer/st_magn.h
-@@ -44,6 +44,7 @@ static inline int st_magn_allocate_ring(struct iio_dev *indio_dev)
- static inline void st_magn_deallocate_ring(struct iio_dev *indio_dev)
- {
- }
-+#define ST_MAGN_TRIGGER_SET_STATE NULL
- #endif /* CONFIG_IIO_BUFFER */
- 
- #endif /* ST_MAGN_H */
-diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
-index 53343ffbff7a..1b109b2a235e 100644
---- a/drivers/infiniband/core/cache.c
-+++ b/drivers/infiniband/core/cache.c
-@@ -691,7 +691,8 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
- 			      NULL);
- 
- 		/* Coudn't find default GID location */
--		WARN_ON(ix < 0);
-+		if (WARN_ON(ix < 0))
-+			goto release;
- 
- 		zattr_type.gid_type = gid_type;
- 
-diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
-index 6b4e8a008bc0..564adf3116e8 100644
---- a/drivers/infiniband/core/ucm.c
-+++ b/drivers/infiniband/core/ucm.c
-@@ -48,6 +48,7 @@
- 
- #include <asm/uaccess.h>
- 
-+#include <rdma/ib.h>
- #include <rdma/ib_cm.h>
- #include <rdma/ib_user_cm.h>
- #include <rdma/ib_marshall.h>
-@@ -1103,6 +1104,9 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
- 	struct ib_ucm_cmd_hdr hdr;
- 	ssize_t result;
- 
-+	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
-+		return -EACCES;
-+
- 	if (len < sizeof(hdr))
- 		return -EINVAL;
- 
-diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
-index 8b5a934e1133..886f61ea6cc7 100644
---- a/drivers/infiniband/core/ucma.c
-+++ b/drivers/infiniband/core/ucma.c
-@@ -1574,6 +1574,9 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
- 	struct rdma_ucm_cmd_hdr hdr;
- 	ssize_t ret;
- 
-+	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
-+		return -EACCES;
-+
- 	if (len < sizeof(hdr))
- 		return -EINVAL;
- 
-diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
-index 39680aed99dd..d3fb8aa46c59 100644
---- a/drivers/infiniband/core/uverbs_main.c
-+++ b/drivers/infiniband/core/uverbs_main.c
-@@ -48,6 +48,8 @@
- 
- #include <asm/uaccess.h>
- 
-+#include <rdma/ib.h>
-+
- #include "uverbs.h"
- 
- MODULE_AUTHOR("Roland Dreier");
-@@ -693,6 +695,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
- 	int srcu_key;
- 	ssize_t ret;
- 
-+	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
-+		return -EACCES;
-+
- 	if (count < sizeof hdr)
- 		return -EINVAL;
- 
-diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
-index cf21df4a8bf5..4e94cff5ba71 100644
---- a/drivers/infiniband/hw/cxgb4/cq.c
-+++ b/drivers/infiniband/hw/cxgb4/cq.c
-@@ -162,7 +162,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
- 	cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
- 				      &cq->bar2_qid,
- 				      user ? &cq->bar2_pa : NULL);
--	if (user && !cq->bar2_va) {
-+	if (user && !cq->bar2_pa) {
- 		pr_warn(MOD "%s: cqid %u not in BAR2 range.\n",
- 			pci_name(rdev->lldi.pdev), cq->cqid);
- 		ret = -EINVAL;
-diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
-index e99345eb875a..8ff690bf09d9 100644
---- a/drivers/infiniband/hw/cxgb4/qp.c
-+++ b/drivers/infiniband/hw/cxgb4/qp.c
-@@ -185,6 +185,10 @@ void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
- 
- 	if (pbar2_pa)
- 		*pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;
-+
-+	if (is_t4(rdev->lldi.adapter_type))
-+		return NULL;
-+
- 	return rdev->bar2_kva + bar2_qoffset;
- }
- 
-@@ -270,7 +274,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
- 	/*
- 	 * User mode must have bar2 access.
- 	 */
--	if (user && (!wq->sq.bar2_va || !wq->rq.bar2_va)) {
-+	if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) {
- 		pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n",
- 			pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
- 		goto free_dma;
-diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
-index 03c418ccbc98..eaed31d04468 100644
---- a/drivers/infiniband/hw/mlx5/main.c
-+++ b/drivers/infiniband/hw/mlx5/main.c
-@@ -517,7 +517,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
- 		     sizeof(struct mlx5_wqe_ctrl_seg)) /
- 		     sizeof(struct mlx5_wqe_data_seg);
- 	props->max_sge = min(max_rq_sg, max_sq_sg);
--	props->max_sge_rd = props->max_sge;
-+	props->max_sge_rd	   = MLX5_MAX_SGE_RD;
- 	props->max_cq		   = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
- 	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
- 	props->max_mr		   = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
-@@ -654,8 +654,8 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
- 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
- 	struct mlx5_core_dev *mdev = dev->mdev;
- 	struct mlx5_hca_vport_context *rep;
--	int max_mtu;
--	int oper_mtu;
-+	u16 max_mtu;
-+	u16 oper_mtu;
- 	int err;
- 	u8 ib_link_width_oper;
- 	u8 vl_hw_cap;
-diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
-index e449e394963f..24f4a782e0f4 100644
---- a/drivers/infiniband/hw/qib/qib_file_ops.c
-+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
-@@ -45,6 +45,8 @@
- #include <linux/export.h>
- #include <linux/uio.h>
- 
-+#include <rdma/ib.h>
-+
- #include "qib.h"
- #include "qib_common.h"
- #include "qib_user_sdma.h"
-@@ -2067,6 +2069,9 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
- 	ssize_t ret = 0;
- 	void *dest;
- 
-+	if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
-+		return -EACCES;
-+
- 	if (count < sizeof(cmd.type)) {
- 		ret = -EINVAL;
- 		goto bail;
-diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
-index f121e6129339..0e1a802c3618 100644
---- a/drivers/infiniband/ulp/isert/ib_isert.c
-+++ b/drivers/infiniband/ulp/isert/ib_isert.c
-@@ -65,6 +65,7 @@ isert_rdma_accept(struct isert_conn *isert_conn);
- struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
- 
- static void isert_release_work(struct work_struct *work);
-+static void isert_wait4flush(struct isert_conn *isert_conn);
- 
- static inline bool
- isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
-@@ -820,12 +821,31 @@ isert_put_conn(struct isert_conn *isert_conn)
- 	kref_put(&isert_conn->kref, isert_release_kref);
- }
- 
-+static void
-+isert_handle_unbound_conn(struct isert_conn *isert_conn)
-+{
-+	struct isert_np *isert_np = isert_conn->cm_id->context;
-+
-+	mutex_lock(&isert_np->mutex);
-+	if (!list_empty(&isert_conn->node)) {
-+		/*
-+		 * This means iscsi doesn't know this connection
-+		 * so schedule a cleanup ourselves
-+		 */
-+		list_del_init(&isert_conn->node);
-+		isert_put_conn(isert_conn);
-+		complete(&isert_conn->wait);
-+		queue_work(isert_release_wq, &isert_conn->release_work);
-+	}
-+	mutex_unlock(&isert_np->mutex);
-+}
-+
- /**
-  * isert_conn_terminate() - Initiate connection termination
-  * @isert_conn: isert connection struct
-  *
-  * Notes:
-- * In case the connection state is FULL_FEATURE, move state
-+ * In case the connection state is BOUND, move state
-  * to TEMINATING and start teardown sequence (rdma_disconnect).
-  * In case the connection state is UP, complete flush as well.
-  *
-@@ -837,23 +857,19 @@ isert_conn_terminate(struct isert_conn *isert_conn)
- {
- 	int err;
- 
--	switch (isert_conn->state) {
--	case ISER_CONN_TERMINATING:
--		break;
--	case ISER_CONN_UP:
--	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
--		isert_info("Terminating conn %p state %d\n",
--			   isert_conn, isert_conn->state);
--		isert_conn->state = ISER_CONN_TERMINATING;
--		err = rdma_disconnect(isert_conn->cm_id);
--		if (err)
--			isert_warn("Failed rdma_disconnect isert_conn %p\n",
--				   isert_conn);
--		break;
--	default:
--		isert_warn("conn %p teminating in state %d\n",
--			   isert_conn, isert_conn->state);
--	}
-+	if (isert_conn->state >= ISER_CONN_TERMINATING)
-+		return;
-+
-+	isert_info("Terminating conn %p state %d\n",
-+		   isert_conn, isert_conn->state);
-+	isert_conn->state = ISER_CONN_TERMINATING;
-+	err = rdma_disconnect(isert_conn->cm_id);
-+	if (err)
-+		isert_warn("Failed rdma_disconnect isert_conn %p\n",
-+			   isert_conn);
-+
-+	isert_info("conn %p completing wait\n", isert_conn);
-+	complete(&isert_conn->wait);
- }
- 
- static int
-@@ -887,35 +903,27 @@ static int
- isert_disconnected_handler(struct rdma_cm_id *cma_id,
- 			   enum rdma_cm_event_type event)
- {
--	struct isert_np *isert_np = cma_id->context;
--	struct isert_conn *isert_conn;
--	bool terminating = false;
--
--	if (isert_np->cm_id == cma_id)
--		return isert_np_cma_handler(cma_id->context, event);
--
--	isert_conn = cma_id->qp->qp_context;
-+	struct isert_conn *isert_conn = cma_id->qp->qp_context;
- 
- 	mutex_lock(&isert_conn->mutex);
--	terminating = (isert_conn->state == ISER_CONN_TERMINATING);
--	isert_conn_terminate(isert_conn);
--	mutex_unlock(&isert_conn->mutex);
--
--	isert_info("conn %p completing wait\n", isert_conn);
--	complete(&isert_conn->wait);
--
--	if (terminating)
--		goto out;
--
--	mutex_lock(&isert_np->mutex);
--	if (!list_empty(&isert_conn->node)) {
--		list_del_init(&isert_conn->node);
--		isert_put_conn(isert_conn);
--		queue_work(isert_release_wq, &isert_conn->release_work);
-+	switch (isert_conn->state) {
-+	case ISER_CONN_TERMINATING:
-+		break;
-+	case ISER_CONN_UP:
-+		isert_conn_terminate(isert_conn);
-+		isert_wait4flush(isert_conn);
-+		isert_handle_unbound_conn(isert_conn);
-+		break;
-+	case ISER_CONN_BOUND:
-+	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
-+		iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
-+		break;
-+	default:
-+		isert_warn("conn %p teminating in state %d\n",
-+			   isert_conn, isert_conn->state);
- 	}
--	mutex_unlock(&isert_np->mutex);
-+	mutex_unlock(&isert_conn->mutex);
- 
--out:
- 	return 0;
- }
- 
-@@ -934,12 +942,16 @@ isert_connect_error(struct rdma_cm_id *cma_id)
- static int
- isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
- {
-+	struct isert_np *isert_np = cma_id->context;
- 	int ret = 0;
- 
- 	isert_info("%s (%d): status %d id %p np %p\n",
- 		   rdma_event_msg(event->event), event->event,
- 		   event->status, cma_id, cma_id->context);
- 
-+	if (isert_np->cm_id == cma_id)
-+		return isert_np_cma_handler(cma_id->context, event->event);
-+
- 	switch (event->event) {
- 	case RDMA_CM_EVENT_CONNECT_REQUEST:
- 		ret = isert_connect_request(cma_id, event);
-@@ -985,13 +997,10 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
- 	rx_wr--;
- 	rx_wr->next = NULL; /* mark end of work requests list */
- 
--	isert_conn->post_recv_buf_count += count;
- 	ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
- 			   &rx_wr_failed);
--	if (ret) {
-+	if (ret)
- 		isert_err("ib_post_recv() failed with ret: %d\n", ret);
--		isert_conn->post_recv_buf_count -= count;
--	}
- 
- 	return ret;
- }
-@@ -1007,12 +1016,9 @@ isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
- 	rx_wr.num_sge = 1;
- 	rx_wr.next = NULL;
- 
--	isert_conn->post_recv_buf_count++;
- 	ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed);
--	if (ret) {
-+	if (ret)
- 		isert_err("ib_post_recv() failed with ret: %d\n", ret);
--		isert_conn->post_recv_buf_count--;
--	}
- 
- 	return ret;
- }
-@@ -1132,12 +1138,9 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn)
- 	rx_wr.sg_list = &sge;
- 	rx_wr.num_sge = 1;
- 
--	isert_conn->post_recv_buf_count++;
- 	ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
--	if (ret) {
-+	if (ret)
- 		isert_err("ib_post_recv() failed: %d\n", ret);
--		isert_conn->post_recv_buf_count--;
--	}
- 
- 	return ret;
- }
-@@ -1633,7 +1636,6 @@ isert_rcv_completion(struct iser_rx_desc *desc,
- 	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
- 				      DMA_FROM_DEVICE);
- 
--	isert_conn->post_recv_buf_count--;
- }
- 
- static int
-@@ -2048,7 +2050,8 @@ is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
- 	void *start = isert_conn->rx_descs;
- 	int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->rx_descs);
- 
--	if (wr_id >= start && wr_id < start + len)
-+	if ((wr_id >= start && wr_id < start + len) ||
-+	    (wr_id == isert_conn->login_req_buf))
- 		return false;
- 
- 	return true;
-@@ -2072,10 +2075,6 @@ isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
- 			isert_unmap_tx_desc(desc, ib_dev);
- 		else
- 			isert_completion_put(desc, isert_cmd, ib_dev, true);
--	} else {
--		isert_conn->post_recv_buf_count--;
--		if (!isert_conn->post_recv_buf_count)
--			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
- 	}
- }
- 
-@@ -3214,6 +3213,7 @@ accept_wait:
- 
- 	conn->context = isert_conn;
- 	isert_conn->conn = conn;
-+	isert_conn->state = ISER_CONN_BOUND;
- 
- 	isert_set_conn_info(np, conn, isert_conn);
- 
-diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
-index 8d50453eef66..1aa019ab9d78 100644
---- a/drivers/infiniband/ulp/isert/ib_isert.h
-+++ b/drivers/infiniband/ulp/isert/ib_isert.h
-@@ -84,6 +84,7 @@ enum iser_ib_op_code {
- enum iser_conn_state {
- 	ISER_CONN_INIT,
- 	ISER_CONN_UP,
-+	ISER_CONN_BOUND,
- 	ISER_CONN_FULL_FEATURE,
- 	ISER_CONN_TERMINATING,
- 	ISER_CONN_DOWN,
-@@ -179,7 +180,6 @@ struct isert_device;
- 
- struct isert_conn {
- 	enum iser_conn_state	state;
--	int			post_recv_buf_count;
- 	u32			responder_resources;
- 	u32			initiator_depth;
- 	bool			pi_support;
-diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
-index 03022f6420d7..a09841abae50 100644
---- a/drivers/infiniband/ulp/srp/ib_srp.c
-+++ b/drivers/infiniband/ulp/srp/ib_srp.c
-@@ -1541,7 +1541,7 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
- 
- 	if (dev->use_fast_reg) {
- 		state.sg = idb_sg;
--		sg_set_buf(idb_sg, req->indirect_desc, idb_len);
-+		sg_init_one(idb_sg, req->indirect_desc, idb_len);
- 		idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
- #ifdef CONFIG_NEED_SG_DMA_LENGTH
- 		idb_sg->dma_length = idb_sg->length;	      /* hack^2 */
-diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
-index 0c37fee363b1..4328679a67a7 100644
---- a/drivers/infiniband/ulp/srpt/ib_srpt.c
-+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
-@@ -1670,47 +1670,6 @@ send_sense:
- 	return -1;
- }
- 
--/**
-- * srpt_rx_mgmt_fn_tag() - Process a task management function by tag.
-- * @ch: RDMA channel of the task management request.
-- * @fn: Task management function to perform.
-- * @req_tag: Tag of the SRP task management request.
-- * @mgmt_ioctx: I/O context of the task management request.
-- *
-- * Returns zero if the target core will process the task management
-- * request asynchronously.
-- *
-- * Note: It is assumed that the initiator serializes tag-based task management
-- * requests.
-- */
--static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
--{
--	struct srpt_device *sdev;
--	struct srpt_rdma_ch *ch;
--	struct srpt_send_ioctx *target;
--	int ret, i;
--
--	ret = -EINVAL;
--	ch = ioctx->ch;
--	BUG_ON(!ch);
--	BUG_ON(!ch->sport);
--	sdev = ch->sport->sdev;
--	BUG_ON(!sdev);
--	spin_lock_irq(&sdev->spinlock);
--	for (i = 0; i < ch->rq_size; ++i) {
--		target = ch->ioctx_ring[i];
--		if (target->cmd.se_lun == ioctx->cmd.se_lun &&
--		    target->cmd.tag == tag &&
--		    srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
--			ret = 0;
--			/* now let the target core abort &target->cmd; */
--			break;
--		}
--	}
--	spin_unlock_irq(&sdev->spinlock);
--	return ret;
--}
--
- static int srp_tmr_to_tcm(int fn)
- {
- 	switch (fn) {
-@@ -1745,7 +1704,6 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
- 	struct se_cmd *cmd;
- 	struct se_session *sess = ch->sess;
- 	uint64_t unpacked_lun;
--	uint32_t tag = 0;
- 	int tcm_tmr;
- 	int rc;
- 
-@@ -1761,25 +1719,10 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
- 	srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
- 	send_ioctx->cmd.tag = srp_tsk->tag;
- 	tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
--	if (tcm_tmr < 0) {
--		send_ioctx->cmd.se_tmr_req->response =
--			TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
--		goto fail;
--	}
- 	unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
- 				       sizeof(srp_tsk->lun));
--
--	if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK) {
--		rc = srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
--		if (rc < 0) {
--			send_ioctx->cmd.se_tmr_req->response =
--					TMR_TASK_DOES_NOT_EXIST;
--			goto fail;
--		}
--		tag = srp_tsk->task_tag;
--	}
- 	rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL, unpacked_lun,
--				srp_tsk, tcm_tmr, GFP_KERNEL, tag,
-+				srp_tsk, tcm_tmr, GFP_KERNEL, srp_tsk->task_tag,
- 				TARGET_SCF_ACK_KREF);
- 	if (rc != 0) {
- 		send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
-diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
-index cfd58e87da26..1c5914cae853 100644
---- a/drivers/input/misc/ati_remote2.c
-+++ b/drivers/input/misc/ati_remote2.c
-@@ -817,26 +817,49 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
- 
- 	ar2->udev = udev;
- 
-+	/* Sanity check, first interface must have an endpoint */
-+	if (alt->desc.bNumEndpoints < 1 || !alt->endpoint) {
-+		dev_err(&interface->dev,
-+			"%s(): interface 0 must have an endpoint\n", __func__);
-+		r = -ENODEV;
-+		goto fail1;
-+	}
- 	ar2->intf[0] = interface;
- 	ar2->ep[0] = &alt->endpoint[0].desc;
- 
-+	/* Sanity check, the device must have two interfaces */
- 	ar2->intf[1] = usb_ifnum_to_if(udev, 1);
-+	if ((udev->actconfig->desc.bNumInterfaces < 2) || !ar2->intf[1]) {
-+		dev_err(&interface->dev, "%s(): need 2 interfaces, found %d\n",
-+			__func__, udev->actconfig->desc.bNumInterfaces);
-+		r = -ENODEV;
-+		goto fail1;
-+	}
-+
- 	r = usb_driver_claim_interface(&ati_remote2_driver, ar2->intf[1], ar2);
- 	if (r)
- 		goto fail1;
-+
-+	/* Sanity check, second interface must have an endpoint */
- 	alt = ar2->intf[1]->cur_altsetting;
-+	if (alt->desc.bNumEndpoints < 1 || !alt->endpoint) {
-+		dev_err(&interface->dev,
-+			"%s(): interface 1 must have an endpoint\n", __func__);
-+		r = -ENODEV;
-+		goto fail2;
-+	}
- 	ar2->ep[1] = &alt->endpoint[0].desc;
- 
- 	r = ati_remote2_urb_init(ar2);
- 	if (r)
--		goto fail2;
-+		goto fail3;
- 
- 	ar2->channel_mask = channel_mask;
- 	ar2->mode_mask = mode_mask;
- 
- 	r = ati_remote2_setup(ar2, ar2->channel_mask);
- 	if (r)
--		goto fail2;
-+		goto fail3;
- 
- 	usb_make_path(udev, ar2->phys, sizeof(ar2->phys));
- 	strlcat(ar2->phys, "/input0", sizeof(ar2->phys));
-@@ -845,11 +868,11 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
- 
- 	r = sysfs_create_group(&udev->dev.kobj, &ati_remote2_attr_group);
- 	if (r)
--		goto fail2;
-+		goto fail3;
- 
- 	r = ati_remote2_input_init(ar2);
- 	if (r)
--		goto fail3;
-+		goto fail4;
- 
- 	usb_set_intfdata(interface, ar2);
- 
-@@ -857,10 +880,11 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
- 
- 	return 0;
- 
-- fail3:
-+ fail4:
- 	sysfs_remove_group(&udev->dev.kobj, &ati_remote2_attr_group);
-- fail2:
-+ fail3:
- 	ati_remote2_urb_cleanup(ar2);
-+ fail2:
- 	usb_driver_release_interface(&ati_remote2_driver, ar2->intf[1]);
-  fail1:
- 	kfree(ar2);
-diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
-index ac1fa5f44580..9c0ea36913b4 100644
---- a/drivers/input/misc/ims-pcu.c
-+++ b/drivers/input/misc/ims-pcu.c
-@@ -1663,6 +1663,8 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
- 
- 	pcu->ctrl_intf = usb_ifnum_to_if(pcu->udev,
- 					 union_desc->bMasterInterface0);
-+	if (!pcu->ctrl_intf)
-+		return -EINVAL;
- 
- 	alt = pcu->ctrl_intf->cur_altsetting;
- 	pcu->ep_ctrl = &alt->endpoint[0].desc;
-@@ -1670,6 +1672,8 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
- 
- 	pcu->data_intf = usb_ifnum_to_if(pcu->udev,
- 					 union_desc->bSlaveInterface0);
-+	if (!pcu->data_intf)
-+		return -EINVAL;
- 
- 	alt = pcu->data_intf->cur_altsetting;
- 	if (alt->desc.bNumEndpoints != 2) {
-diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
-index a806ba3818f7..8d6326d7e7be 100644
---- a/drivers/input/misc/max8997_haptic.c
-+++ b/drivers/input/misc/max8997_haptic.c
-@@ -255,12 +255,14 @@ static int max8997_haptic_probe(struct platform_device *pdev)
- 	struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
- 	const struct max8997_platform_data *pdata =
- 					dev_get_platdata(iodev->dev);
--	const struct max8997_haptic_platform_data *haptic_pdata =
--					pdata->haptic_pdata;
-+	const struct max8997_haptic_platform_data *haptic_pdata = NULL;
- 	struct max8997_haptic *chip;
- 	struct input_dev *input_dev;
- 	int error;
- 
-+	if (pdata)
-+		haptic_pdata = pdata->haptic_pdata;
-+
- 	if (!haptic_pdata) {
- 		dev_err(&pdev->dev, "no haptic platform data\n");
- 		return -EINVAL;
-diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
-index 3f02e0e03d12..67aab86048ad 100644
---- a/drivers/input/misc/pmic8xxx-pwrkey.c
-+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
-@@ -353,7 +353,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
- 	if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay))
- 		kpd_delay = 15625;
- 
--	if (kpd_delay > 62500 || kpd_delay == 0) {
-+	/* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. */
-+	if (kpd_delay > USEC_PER_SEC * 2 || kpd_delay < USEC_PER_SEC / 64) {
- 		dev_err(&pdev->dev, "invalid power key trigger delay\n");
- 		return -EINVAL;
- 	}
-@@ -385,8 +386,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
- 	pwr->name = "pmic8xxx_pwrkey";
- 	pwr->phys = "pmic8xxx_pwrkey/input0";
- 
--	delay = (kpd_delay << 10) / USEC_PER_SEC;
--	delay = 1 + ilog2(delay);
-+	delay = (kpd_delay << 6) / USEC_PER_SEC;
-+	delay = ilog2(delay);
- 
- 	err = regmap_read(regmap, PON_CNTL_1, &pon_cntl);
- 	if (err < 0) {
-diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c
-index 63b539d3daba..84909a12ff36 100644
---- a/drivers/input/misc/powermate.c
-+++ b/drivers/input/misc/powermate.c
-@@ -307,6 +307,9 @@ static int powermate_probe(struct usb_interface *intf, const struct usb_device_i
- 	int error = -ENOMEM;
- 
- 	interface = intf->cur_altsetting;
-+	if (interface->desc.bNumEndpoints < 1)
-+		return -EINVAL;
-+
- 	endpoint = &interface->endpoint[0].desc;
- 	if (!usb_endpoint_is_int_in(endpoint))
- 		return -EIO;
-diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
-index f2261ab54701..18663d4edae5 100644
---- a/drivers/input/misc/pwm-beeper.c
-+++ b/drivers/input/misc/pwm-beeper.c
-@@ -20,21 +20,40 @@
- #include <linux/platform_device.h>
- #include <linux/pwm.h>
- #include <linux/slab.h>
-+#include <linux/workqueue.h>
- 
- struct pwm_beeper {
- 	struct input_dev *input;
- 	struct pwm_device *pwm;
-+	struct work_struct work;
- 	unsigned long period;
- };
- 
- #define HZ_TO_NANOSECONDS(x) (1000000000UL/(x))
- 
-+static void __pwm_beeper_set(struct pwm_beeper *beeper)
-+{
-+	unsigned long period = beeper->period;
-+
-+	if (period) {
-+		pwm_config(beeper->pwm, period / 2, period);
-+		pwm_enable(beeper->pwm);
-+	} else
-+		pwm_disable(beeper->pwm);
-+}
-+
-+static void pwm_beeper_work(struct work_struct *work)
-+{
-+	struct pwm_beeper *beeper =
-+		container_of(work, struct pwm_beeper, work);
-+
-+	__pwm_beeper_set(beeper);
-+}
-+
- static int pwm_beeper_event(struct input_dev *input,
- 			    unsigned int type, unsigned int code, int value)
- {
--	int ret = 0;
- 	struct pwm_beeper *beeper = input_get_drvdata(input);
--	unsigned long period;
- 
- 	if (type != EV_SND || value < 0)
- 		return -EINVAL;
-@@ -49,22 +68,31 @@ static int pwm_beeper_event(struct input_dev *input,
- 		return -EINVAL;
- 	}
- 
--	if (value == 0) {
--		pwm_disable(beeper->pwm);
--	} else {
--		period = HZ_TO_NANOSECONDS(value);
--		ret = pwm_config(beeper->pwm, period / 2, period);
--		if (ret)
--			return ret;
--		ret = pwm_enable(beeper->pwm);
--		if (ret)
--			return ret;
--		beeper->period = period;
--	}
-+	if (value == 0)
-+		beeper->period = 0;
-+	else
-+		beeper->period = HZ_TO_NANOSECONDS(value);
-+
-+	schedule_work(&beeper->work);
- 
- 	return 0;
- }
- 
-+static void pwm_beeper_stop(struct pwm_beeper *beeper)
-+{
-+	cancel_work_sync(&beeper->work);
-+
-+	if (beeper->period)
-+		pwm_disable(beeper->pwm);
-+}
-+
-+static void pwm_beeper_close(struct input_dev *input)
-+{
-+	struct pwm_beeper *beeper = input_get_drvdata(input);
-+
-+	pwm_beeper_stop(beeper);
-+}
-+
- static int pwm_beeper_probe(struct platform_device *pdev)
- {
- 	unsigned long pwm_id = (unsigned long)dev_get_platdata(&pdev->dev);
-@@ -87,6 +115,8 @@ static int pwm_beeper_probe(struct platform_device *pdev)
- 		goto err_free;
- 	}
- 
-+	INIT_WORK(&beeper->work, pwm_beeper_work);
-+
- 	beeper->input = input_allocate_device();
- 	if (!beeper->input) {
- 		dev_err(&pdev->dev, "Failed to allocate input device\n");
-@@ -106,6 +136,7 @@ static int pwm_beeper_probe(struct platform_device *pdev)
- 	beeper->input->sndbit[0] = BIT(SND_TONE) | BIT(SND_BELL);
- 
- 	beeper->input->event = pwm_beeper_event;
-+	beeper->input->close = pwm_beeper_close;
- 
- 	input_set_drvdata(beeper->input, beeper);
- 
-@@ -135,7 +166,6 @@ static int pwm_beeper_remove(struct platform_device *pdev)
- 
- 	input_unregister_device(beeper->input);
- 
--	pwm_disable(beeper->pwm);
- 	pwm_free(beeper->pwm);
- 
- 	kfree(beeper);
-@@ -147,8 +177,7 @@ static int __maybe_unused pwm_beeper_suspend(struct device *dev)
- {
- 	struct pwm_beeper *beeper = dev_get_drvdata(dev);
- 
--	if (beeper->period)
--		pwm_disable(beeper->pwm);
-+	pwm_beeper_stop(beeper);
- 
- 	return 0;
- }
-@@ -157,10 +186,8 @@ static int __maybe_unused pwm_beeper_resume(struct device *dev)
- {
- 	struct pwm_beeper *beeper = dev_get_drvdata(dev);
- 
--	if (beeper->period) {
--		pwm_config(beeper->pwm, beeper->period / 2, beeper->period);
--		pwm_enable(beeper->pwm);
--	}
-+	if (beeper->period)
-+		__pwm_beeper_set(beeper);
- 
- 	return 0;
- }
-diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
-index 6025eb430c0a..a41d8328c064 100644
---- a/drivers/input/mouse/synaptics.c
-+++ b/drivers/input/mouse/synaptics.c
-@@ -862,8 +862,9 @@ static void synaptics_report_ext_buttons(struct psmouse *psmouse,
- 	if (!SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap))
- 		return;
- 
--	/* Bug in FW 8.1, buttons are reported only when ExtBit is 1 */
--	if (SYN_ID_FULL(priv->identity) == 0x801 &&
-+	/* Bug in FW 8.1 & 8.2, buttons are reported only when ExtBit is 1 */
-+	if ((SYN_ID_FULL(priv->identity) == 0x801 ||
-+	     SYN_ID_FULL(priv->identity) == 0x802) &&
- 	    !((psmouse->packet[0] ^ psmouse->packet[3]) & 0x02))
- 		return;
- 
-diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
-index 3a7f3a4a4396..7c18249d6c8e 100644
---- a/drivers/input/tablet/gtco.c
-+++ b/drivers/input/tablet/gtco.c
-@@ -858,6 +858,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
- 		goto err_free_buf;
- 	}
- 
-+	/* Sanity check that a device has an endpoint */
-+	if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
-+		dev_err(&usbinterface->dev,
-+			"Invalid number of endpoints\n");
-+		error = -EINVAL;
-+		goto err_free_urb;
-+	}
-+
- 	/*
- 	 * The endpoint is always altsetting 0, we know this since we know
- 	 * this device only has one interrupt endpoint
-@@ -879,7 +887,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
- 	 * HID report descriptor
- 	 */
- 	if (usb_get_extra_descriptor(usbinterface->cur_altsetting,
--				     HID_DEVICE_TYPE, &hid_desc) != 0){
-+				     HID_DEVICE_TYPE, &hid_desc) != 0) {
- 		dev_err(&usbinterface->dev,
- 			"Can't retrieve exta USB descriptor to get hid report descriptor length\n");
- 		error = -EIO;
-diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
-index 9bbadaaf6bc3..7b3845aa5983 100644
---- a/drivers/input/touchscreen/zforce_ts.c
-+++ b/drivers/input/touchscreen/zforce_ts.c
-@@ -370,8 +370,8 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
- 			point.coord_x = point.coord_y = 0;
- 		}
- 
--		point.state = payload[9 * i + 5] & 0x03;
--		point.id = (payload[9 * i + 5] & 0xfc) >> 2;
-+		point.state = payload[9 * i + 5] & 0x0f;
-+		point.id = (payload[9 * i + 5] & 0xf0) >> 4;
- 
- 		/* determine touch major, minor and orientation */
- 		point.area_major = max(payload[9 * i + 6],
-diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
-index 374c129219ef..5efadad4615b 100644
---- a/drivers/iommu/amd_iommu.c
-+++ b/drivers/iommu/amd_iommu.c
-@@ -92,6 +92,7 @@ struct iommu_dev_data {
- 	struct list_head dev_data_list;	  /* For global dev_data_list */
- 	struct protection_domain *domain; /* Domain the device is bound to */
- 	u16 devid;			  /* PCI Device ID */
-+	u16 alias;			  /* Alias Device ID */
- 	bool iommu_v2;			  /* Device can make use of IOMMUv2 */
- 	bool passthrough;		  /* Device is identity mapped */
- 	struct {
-@@ -166,6 +167,13 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
- 	return container_of(dom, struct protection_domain, domain);
- }
- 
-+static inline u16 get_device_id(struct device *dev)
-+{
-+	struct pci_dev *pdev = to_pci_dev(dev);
-+
-+	return PCI_DEVID(pdev->bus->number, pdev->devfn);
-+}
-+
- static struct iommu_dev_data *alloc_dev_data(u16 devid)
- {
- 	struct iommu_dev_data *dev_data;
-@@ -203,6 +211,68 @@ out_unlock:
- 	return dev_data;
- }
- 
-+static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
-+{
-+	*(u16 *)data = alias;
-+	return 0;
-+}
-+
-+static u16 get_alias(struct device *dev)
-+{
-+	struct pci_dev *pdev = to_pci_dev(dev);
-+	u16 devid, ivrs_alias, pci_alias;
-+
-+	devid = get_device_id(dev);
-+	ivrs_alias = amd_iommu_alias_table[devid];
-+	pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
-+
-+	if (ivrs_alias == pci_alias)
-+		return ivrs_alias;
-+
-+	/*
-+	 * DMA alias showdown
-+	 *
-+	 * The IVRS is fairly reliable in telling us about aliases, but it
-+	 * can't know about every screwy device.  If we don't have an IVRS
-+	 * reported alias, use the PCI reported alias.  In that case we may
-+	 * still need to initialize the rlookup and dev_table entries if the
-+	 * alias is to a non-existent device.
-+	 */
-+	if (ivrs_alias == devid) {
-+		if (!amd_iommu_rlookup_table[pci_alias]) {
-+			amd_iommu_rlookup_table[pci_alias] =
-+				amd_iommu_rlookup_table[devid];
-+			memcpy(amd_iommu_dev_table[pci_alias].data,
-+			       amd_iommu_dev_table[devid].data,
-+			       sizeof(amd_iommu_dev_table[pci_alias].data));
-+		}
-+
-+		return pci_alias;
-+	}
-+
-+	pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
-+		"for device %s[%04x:%04x], kernel reported alias "
-+		"%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
-+		PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
-+		PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
-+		PCI_FUNC(pci_alias));
-+
-+	/*
-+	 * If we don't have a PCI DMA alias and the IVRS alias is on the same
-+	 * bus, then the IVRS table may know about a quirk that we don't.
-+	 */
-+	if (pci_alias == devid &&
-+	    PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
-+		pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
-+		pdev->dma_alias_devfn = ivrs_alias & 0xff;
-+		pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
-+			PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
-+			dev_name(dev));
-+	}
-+
-+	return ivrs_alias;
-+}
-+
- static struct iommu_dev_data *find_dev_data(u16 devid)
- {
- 	struct iommu_dev_data *dev_data;
-@@ -215,13 +285,6 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
- 	return dev_data;
- }
- 
--static inline u16 get_device_id(struct device *dev)
--{
--	struct pci_dev *pdev = to_pci_dev(dev);
--
--	return PCI_DEVID(pdev->bus->number, pdev->devfn);
--}
--
- static struct iommu_dev_data *get_dev_data(struct device *dev)
- {
- 	return dev->archdata.iommu;
-@@ -349,6 +412,8 @@ static int iommu_init_device(struct device *dev)
- 	if (!dev_data)
- 		return -ENOMEM;
- 
-+	dev_data->alias = get_alias(dev);
-+
- 	if (pci_iommuv2_capable(pdev)) {
- 		struct amd_iommu *iommu;
- 
-@@ -369,7 +434,7 @@ static void iommu_ignore_device(struct device *dev)
- 	u16 devid, alias;
- 
- 	devid = get_device_id(dev);
--	alias = amd_iommu_alias_table[devid];
-+	alias = get_alias(dev);
- 
- 	memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
- 	memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
-@@ -1061,7 +1126,7 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
- 	int ret;
- 
- 	iommu = amd_iommu_rlookup_table[dev_data->devid];
--	alias = amd_iommu_alias_table[dev_data->devid];
-+	alias = dev_data->alias;
- 
- 	ret = iommu_flush_dte(iommu, dev_data->devid);
- 	if (!ret && alias != dev_data->devid)
-@@ -2039,7 +2104,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
- 	bool ats;
- 
- 	iommu = amd_iommu_rlookup_table[dev_data->devid];
--	alias = amd_iommu_alias_table[dev_data->devid];
-+	alias = dev_data->alias;
- 	ats   = dev_data->ats.enabled;
- 
- 	/* Update data structures */
-@@ -2073,7 +2138,7 @@ static void do_detach(struct iommu_dev_data *dev_data)
- 		return;
- 
- 	iommu = amd_iommu_rlookup_table[dev_data->devid];
--	alias = amd_iommu_alias_table[dev_data->devid];
-+	alias = dev_data->alias;
- 
- 	/* decrease reference counters */
- 	dev_data->domain->dev_iommu[iommu->index] -= 1;
-diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
-index 72d6182666cb..58f2fe687a24 100644
---- a/drivers/iommu/dma-iommu.c
-+++ b/drivers/iommu/dma-iommu.c
-@@ -403,7 +403,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
- 		unsigned int s_length = sg_dma_len(s);
- 		unsigned int s_dma_len = s->length;
- 
--		s->offset = s_offset;
-+		s->offset += s_offset;
- 		s->length = s_length;
- 		sg_dma_address(s) = dma_addr + s_offset;
- 		dma_addr += s_dma_len;
-@@ -422,7 +422,7 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
- 
- 	for_each_sg(sg, s, nents, i) {
- 		if (sg_dma_address(s) != DMA_ERROR_CODE)
--			s->offset = sg_dma_address(s);
-+			s->offset += sg_dma_address(s);
- 		if (sg_dma_len(s))
- 			s->length = sg_dma_len(s);
- 		sg_dma_address(s) = DMA_ERROR_CODE;
-diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
-index 0e3b0092ec92..515bb8b80952 100644
---- a/drivers/iommu/iommu.c
-+++ b/drivers/iommu/iommu.c
-@@ -848,7 +848,8 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
- 	if (!group->default_domain) {
- 		group->default_domain = __iommu_domain_alloc(dev->bus,
- 							     IOMMU_DOMAIN_DMA);
--		group->domain = group->default_domain;
-+		if (!group->domain)
-+			group->domain = group->default_domain;
- 	}
- 
- 	ret = iommu_group_add_device(group, dev);
-diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
-index d7be6ddc34f6..2fc499a2207e 100644
---- a/drivers/irqchip/irq-gic-v3.c
-+++ b/drivers/irqchip/irq-gic-v3.c
-@@ -361,6 +361,13 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
- 			if (static_key_true(&supports_deactivate))
- 				gic_write_dir(irqnr);
- #ifdef CONFIG_SMP
-+			/*
-+			 * Unlike GICv2, we don't need an smp_rmb() here.
-+			 * The control dependency from gic_read_iar to
-+			 * the ISB in gic_write_eoir is enough to ensure
-+			 * that any shared data read by handle_IPI will
-+			 * be read after the ACK.
-+			 */
- 			handle_IPI(irqnr, regs);
- #else
- 			WARN_ONCE(true, "Unexpected SGI received!\n");
-@@ -380,6 +387,15 @@ static void __init gic_dist_init(void)
- 	writel_relaxed(0, base + GICD_CTLR);
- 	gic_dist_wait_for_rwp();
- 
-+	/*
-+	 * Configure SPIs as non-secure Group-1. This will only matter
-+	 * if the GIC only has a single security state. This will not
-+	 * do the right thing if the kernel is running in secure mode,
-+	 * but that's not the intended use case anyway.
-+	 */
-+	for (i = 32; i < gic_data.irq_nr; i += 32)
-+		writel_relaxed(~0, base + GICD_IGROUPR + i / 8);
-+
- 	gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);
- 
- 	/* Enable distributor with ARE, Group1 */
-@@ -494,6 +510,9 @@ static void gic_cpu_init(void)
- 
- 	rbase = gic_data_rdist_sgi_base();
- 
-+	/* Configure SGIs/PPIs as non-secure Group-1 */
-+	writel_relaxed(~0, rbase + GICR_IGROUPR0);
-+
- 	gic_cpu_config(rbase, gic_redist_wait_for_rwp);
- 
- 	/* Give LPIs a spin */
-diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
-index 8f9ebf714e2b..eef950046ac0 100644
---- a/drivers/irqchip/irq-gic.c
-+++ b/drivers/irqchip/irq-gic.c
-@@ -344,6 +344,14 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
- 			if (static_key_true(&supports_deactivate))
- 				writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE);
- #ifdef CONFIG_SMP
-+			/*
-+			 * Ensure any shared data written by the CPU sending
-+			 * the IPI is read after we've read the ACK register
-+			 * on the GIC.
-+			 *
-+			 * Pairs with the write barrier in gic_raise_softirq
-+			 */
-+			smp_rmb();
- 			handle_IPI(irqnr, regs);
- #endif
- 			continue;
-diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
-index efe50845939d..17304705f2cf 100644
---- a/drivers/irqchip/irq-mxs.c
-+++ b/drivers/irqchip/irq-mxs.c
-@@ -183,7 +183,7 @@ static void __iomem * __init icoll_init_iobase(struct device_node *np)
- 	void __iomem *icoll_base;
- 
- 	icoll_base = of_io_request_and_map(np, 0, np->name);
--	if (!icoll_base)
-+	if (IS_ERR(icoll_base))
- 		panic("%s: unable to map resource", np->full_name);
- 	return icoll_base;
- }
-diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
-index 0820f67cc9a7..668730c5cb66 100644
---- a/drivers/irqchip/irq-sunxi-nmi.c
-+++ b/drivers/irqchip/irq-sunxi-nmi.c
-@@ -160,9 +160,9 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
- 
- 	gc = irq_get_domain_generic_chip(domain, 0);
- 	gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node));
--	if (!gc->reg_base) {
-+	if (IS_ERR(gc->reg_base)) {
- 		pr_err("unable to map resource\n");
--		ret = -ENOMEM;
-+		ret = PTR_ERR(gc->reg_base);
- 		goto fail_irqd_remove;
- 	}
- 
-diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
-index 307db1ea22de..b7ddfb352792 100644
---- a/drivers/lightnvm/rrpc.c
-+++ b/drivers/lightnvm/rrpc.c
-@@ -499,12 +499,21 @@ static void rrpc_gc_queue(struct work_struct *work)
- 	struct rrpc *rrpc = gcb->rrpc;
- 	struct rrpc_block *rblk = gcb->rblk;
- 	struct nvm_lun *lun = rblk->parent->lun;
-+	struct nvm_block *blk = rblk->parent;
- 	struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
- 
- 	spin_lock(&rlun->lock);
- 	list_add_tail(&rblk->prio, &rlun->prio_list);
- 	spin_unlock(&rlun->lock);
- 
-+	spin_lock(&lun->lock);
-+	lun->nr_open_blocks--;
-+	lun->nr_closed_blocks++;
-+	blk->state &= ~NVM_BLK_ST_OPEN;
-+	blk->state |= NVM_BLK_ST_CLOSED;
-+	list_move_tail(&rblk->list, &rlun->closed_list);
-+	spin_unlock(&lun->lock);
-+
- 	mempool_free(gcb, rrpc->gcb_pool);
- 	pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
- 							rblk->parent->id);
-@@ -668,20 +677,8 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
- 		lun = rblk->parent->lun;
- 
- 		cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
--		if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk)) {
--			struct nvm_block *blk = rblk->parent;
--			struct rrpc_lun *rlun = rblk->rlun;
--
--			spin_lock(&lun->lock);
--			lun->nr_open_blocks--;
--			lun->nr_closed_blocks++;
--			blk->state &= ~NVM_BLK_ST_OPEN;
--			blk->state |= NVM_BLK_ST_CLOSED;
--			list_move_tail(&rblk->list, &rlun->closed_list);
--			spin_unlock(&lun->lock);
--
-+		if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk))
- 			rrpc_run_gc(rrpc, rblk);
--		}
- 	}
- }
- 
-diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
-index 004926955263..b0155b05cddb 100644
---- a/drivers/mcb/mcb-parse.c
-+++ b/drivers/mcb/mcb-parse.c
-@@ -57,7 +57,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus,
- 	mdev->id = GDD_DEV(reg1);
- 	mdev->rev = GDD_REV(reg1);
- 	mdev->var = GDD_VAR(reg1);
--	mdev->bar = GDD_BAR(reg1);
-+	mdev->bar = GDD_BAR(reg2);
- 	mdev->group = GDD_GRP(reg2);
- 	mdev->inst = GDD_INS(reg2);
- 
-diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
-index 8d0ead98eb6e..a296425a7270 100644
---- a/drivers/md/bcache/super.c
-+++ b/drivers/md/bcache/super.c
-@@ -1015,8 +1015,12 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
- 	 */
- 	atomic_set(&dc->count, 1);
- 
--	if (bch_cached_dev_writeback_start(dc))
-+	/* Block writeback thread, but spawn it */
-+	down_write(&dc->writeback_lock);
-+	if (bch_cached_dev_writeback_start(dc)) {
-+		up_write(&dc->writeback_lock);
- 		return -ENOMEM;
-+	}
- 
- 	if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
- 		bch_sectors_dirty_init(dc);
-@@ -1028,6 +1032,9 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
- 	bch_cached_dev_run(dc);
- 	bcache_device_link(&dc->disk, c, "bdev");
- 
-+	/* Allow the writeback thread to proceed */
-+	up_write(&dc->writeback_lock);
-+
- 	pr_info("Caching %s as %s on set %pU",
- 		bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
- 		dc->disk.c->sb.set_uuid);
-@@ -1366,6 +1373,9 @@ static void cache_set_flush(struct closure *cl)
- 	struct btree *b;
- 	unsigned i;
- 
-+	if (!c)
-+		closure_return(cl);
-+
- 	bch_cache_accounting_destroy(&c->accounting);
- 
- 	kobject_put(&c->internal);
-@@ -1828,11 +1838,12 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
- 	return 0;
- }
- 
--static void register_cache(struct cache_sb *sb, struct page *sb_page,
-+static int register_cache(struct cache_sb *sb, struct page *sb_page,
- 				struct block_device *bdev, struct cache *ca)
- {
- 	char name[BDEVNAME_SIZE];
--	const char *err = "cannot allocate memory";
-+	const char *err = NULL;
-+	int ret = 0;
- 
- 	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
- 	ca->bdev = bdev;
-@@ -1847,27 +1858,35 @@ static void register_cache(struct cache_sb *sb, struct page *sb_page,
- 	if (blk_queue_discard(bdev_get_queue(ca->bdev)))
- 		ca->discard = CACHE_DISCARD(&ca->sb);
- 
--	if (cache_alloc(sb, ca) != 0)
-+	ret = cache_alloc(sb, ca);
-+	if (ret != 0)
- 		goto err;
- 
--	err = "error creating kobject";
--	if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache"))
--		goto err;
-+	if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) {
-+		err = "error calling kobject_add";
-+		ret = -ENOMEM;
-+		goto out;
-+	}
- 
- 	mutex_lock(&bch_register_lock);
- 	err = register_cache_set(ca);
- 	mutex_unlock(&bch_register_lock);
- 
--	if (err)
--		goto err;
-+	if (err) {
-+		ret = -ENODEV;
-+		goto out;
-+	}
- 
- 	pr_info("registered cache device %s", bdevname(bdev, name));
-+
- out:
- 	kobject_put(&ca->kobj);
--	return;
-+
- err:
--	pr_notice("error opening %s: %s", bdevname(bdev, name), err);
--	goto out;
-+	if (err)
-+		pr_notice("error opening %s: %s", bdevname(bdev, name), err);
-+
-+	return ret;
- }
- 
- /* Global interfaces/init */
-@@ -1965,7 +1984,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
- 		if (!ca)
- 			goto err_close;
- 
--		register_cache(sb, sb_page, bdev, ca);
-+		if (register_cache(sb, sb_page, bdev, ca) != 0)
-+			goto err_close;
- 	}
- out:
- 	if (sb_page)
-diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
-index f6543f3a970f..3970cda10080 100644
---- a/drivers/md/dm-cache-metadata.c
-+++ b/drivers/md/dm-cache-metadata.c
-@@ -867,18 +867,55 @@ static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
- 	return 0;
- }
- 
--#define WRITE_LOCK(cmd) \
--	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) \
--		return -EINVAL; \
--	down_write(&cmd->root_lock)
-+static bool cmd_write_lock(struct dm_cache_metadata *cmd)
-+{
-+	down_write(&cmd->root_lock);
-+	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
-+		up_write(&cmd->root_lock);
-+		return false;
-+	}
-+	return true;
-+}
-+
-+#define WRITE_LOCK(cmd)				\
-+	do {					\
-+		if (!cmd_write_lock((cmd)))	\
-+			return -EINVAL;		\
-+	} while(0)
- 
--#define WRITE_LOCK_VOID(cmd) \
--	if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) \
--		return; \
--	down_write(&cmd->root_lock)
-+#define WRITE_LOCK_VOID(cmd)			\
-+	do {					\
-+		if (!cmd_write_lock((cmd)))	\
-+			return;			\
-+	} while(0)
- 
- #define WRITE_UNLOCK(cmd) \
--	up_write(&cmd->root_lock)
-+	up_write(&(cmd)->root_lock)
-+
-+static bool cmd_read_lock(struct dm_cache_metadata *cmd)
-+{
-+	down_read(&cmd->root_lock);
-+	if (cmd->fail_io) {
-+		up_read(&cmd->root_lock);
-+		return false;
-+	}
-+	return true;
-+}
-+
-+#define READ_LOCK(cmd)				\
-+	do {					\
-+		if (!cmd_read_lock((cmd)))	\
-+			return -EINVAL;		\
-+	} while(0)
-+
-+#define READ_LOCK_VOID(cmd)			\
-+	do {					\
-+		if (!cmd_read_lock((cmd)))	\
-+			return;			\
-+	} while(0)
-+
-+#define READ_UNLOCK(cmd) \
-+	up_read(&(cmd)->root_lock)
- 
- int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
- {
-@@ -1015,22 +1052,20 @@ int dm_cache_load_discards(struct dm_cache_metadata *cmd,
- {
- 	int r;
- 
--	down_read(&cmd->root_lock);
-+	READ_LOCK(cmd);
- 	r = __load_discards(cmd, fn, context);
--	up_read(&cmd->root_lock);
-+	READ_UNLOCK(cmd);
- 
- 	return r;
- }
- 
--dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd)
-+int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result)
- {
--	dm_cblock_t r;
--
--	down_read(&cmd->root_lock);
--	r = cmd->cache_blocks;
--	up_read(&cmd->root_lock);
-+	READ_LOCK(cmd);
-+	*result = cmd->cache_blocks;
-+	READ_UNLOCK(cmd);
- 
--	return r;
-+	return 0;
- }
- 
- static int __remove(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
-@@ -1188,9 +1223,9 @@ int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
- {
- 	int r;
- 
--	down_read(&cmd->root_lock);
-+	READ_LOCK(cmd);
- 	r = __load_mappings(cmd, policy, fn, context);
--	up_read(&cmd->root_lock);
-+	READ_UNLOCK(cmd);
- 
- 	return r;
- }
-@@ -1215,18 +1250,18 @@ static int __dump_mappings(struct dm_cache_metadata *cmd)
- 
- void dm_cache_dump(struct dm_cache_metadata *cmd)
- {
--	down_read(&cmd->root_lock);
-+	READ_LOCK_VOID(cmd);
- 	__dump_mappings(cmd);
--	up_read(&cmd->root_lock);
-+	READ_UNLOCK(cmd);
- }
- 
- int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd)
- {
- 	int r;
- 
--	down_read(&cmd->root_lock);
-+	READ_LOCK(cmd);
- 	r = cmd->changed;
--	up_read(&cmd->root_lock);
-+	READ_UNLOCK(cmd);
- 
- 	return r;
- }
-@@ -1276,9 +1311,9 @@ int dm_cache_set_dirty(struct dm_cache_metadata *cmd,
- void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
- 				 struct dm_cache_statistics *stats)
- {
--	down_read(&cmd->root_lock);
-+	READ_LOCK_VOID(cmd);
- 	*stats = cmd->stats;
--	up_read(&cmd->root_lock);
-+	READ_UNLOCK(cmd);
- }
- 
- void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
-@@ -1312,9 +1347,9 @@ int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
- {
- 	int r = -EINVAL;
- 
--	down_read(&cmd->root_lock);
-+	READ_LOCK(cmd);
- 	r = dm_sm_get_nr_free(cmd->metadata_sm, result);
--	up_read(&cmd->root_lock);
-+	READ_UNLOCK(cmd);
- 
- 	return r;
- }
-@@ -1324,9 +1359,9 @@ int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
- {
- 	int r = -EINVAL;
- 
--	down_read(&cmd->root_lock);
-+	READ_LOCK(cmd);
- 	r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
--	up_read(&cmd->root_lock);
-+	READ_UNLOCK(cmd);
- 
- 	return r;
- }
-@@ -1417,7 +1452,13 @@ int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *
- 
- int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result)
- {
--	return blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
-+	int r;
-+
-+	READ_LOCK(cmd);
-+	r = blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
-+	READ_UNLOCK(cmd);
-+
-+	return r;
- }
- 
- void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd)
-@@ -1440,10 +1481,7 @@ int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
- 	struct dm_block *sblock;
- 	struct cache_disk_superblock *disk_super;
- 
--	/*
--	 * We ignore fail_io for this function.
--	 */
--	down_write(&cmd->root_lock);
-+	WRITE_LOCK(cmd);
- 	set_bit(NEEDS_CHECK, &cmd->flags);
- 
- 	r = superblock_lock(cmd, &sblock);
-@@ -1458,19 +1496,17 @@ int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
- 	dm_bm_unlock(sblock);
- 
- out:
--	up_write(&cmd->root_lock);
-+	WRITE_UNLOCK(cmd);
- 	return r;
- }
- 
--bool dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd)
-+int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result)
- {
--	bool needs_check;
-+	READ_LOCK(cmd);
-+	*result = !!test_bit(NEEDS_CHECK, &cmd->flags);
-+	READ_UNLOCK(cmd);
- 
--	down_read(&cmd->root_lock);
--	needs_check = !!test_bit(NEEDS_CHECK, &cmd->flags);
--	up_read(&cmd->root_lock);
--
--	return needs_check;
-+	return 0;
- }
- 
- int dm_cache_metadata_abort(struct dm_cache_metadata *cmd)
-diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h
-index 2ffee21f318d..8528744195e5 100644
---- a/drivers/md/dm-cache-metadata.h
-+++ b/drivers/md/dm-cache-metadata.h
-@@ -66,7 +66,7 @@ void dm_cache_metadata_close(struct dm_cache_metadata *cmd);
-  * origin blocks to map to.
-  */
- int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size);
--dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd);
-+int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result);
- 
- int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
- 				   sector_t discard_block_size,
-@@ -137,7 +137,7 @@ int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *
-  */
- int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result);
- 
--bool dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd);
-+int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result);
- int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd);
- void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd);
- void dm_cache_metadata_set_read_write(struct dm_cache_metadata *cmd);
-diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
-index 5780accffa30..bb9b92ebbf8e 100644
---- a/drivers/md/dm-cache-target.c
-+++ b/drivers/md/dm-cache-target.c
-@@ -984,9 +984,14 @@ static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mod
- 
- static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mode)
- {
--	bool needs_check = dm_cache_metadata_needs_check(cache->cmd);
-+	bool needs_check;
- 	enum cache_metadata_mode old_mode = get_cache_mode(cache);
- 
-+	if (dm_cache_metadata_needs_check(cache->cmd, &needs_check)) {
-+		DMERR("unable to read needs_check flag, setting failure mode");
-+		new_mode = CM_FAIL;
-+	}
-+
- 	if (new_mode == CM_WRITE && needs_check) {
- 		DMERR("%s: unable to switch cache to write mode until repaired.",
- 		      cache_device_name(cache));
-@@ -3510,6 +3515,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
- 	char buf[BDEVNAME_SIZE];
- 	struct cache *cache = ti->private;
- 	dm_cblock_t residency;
-+	bool needs_check;
- 
- 	switch (type) {
- 	case STATUSTYPE_INFO:
-@@ -3583,7 +3589,9 @@ static void cache_status(struct dm_target *ti, status_type_t type,
- 		else
- 			DMEMIT("rw ");
- 
--		if (dm_cache_metadata_needs_check(cache->cmd))
-+		r = dm_cache_metadata_needs_check(cache->cmd, &needs_check);
-+
-+		if (r || needs_check)
- 			DMEMIT("needs_check ");
- 		else
- 			DMEMIT("- ");
-diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
-index 3766386080a4..e4d1bafe78c1 100644
---- a/drivers/md/dm-snap.c
-+++ b/drivers/md/dm-snap.c
-@@ -1105,6 +1105,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
- 	int i;
- 	int r = -EINVAL;
- 	char *origin_path, *cow_path;
-+	dev_t origin_dev, cow_dev;
- 	unsigned args_used, num_flush_bios = 1;
- 	fmode_t origin_mode = FMODE_READ;
- 
-@@ -1135,11 +1136,19 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
- 		ti->error = "Cannot get origin device";
- 		goto bad_origin;
- 	}
-+	origin_dev = s->origin->bdev->bd_dev;
- 
- 	cow_path = argv[0];
- 	argv++;
- 	argc--;
- 
-+	cow_dev = dm_get_dev_t(cow_path);
-+	if (cow_dev && cow_dev == origin_dev) {
-+		ti->error = "COW device cannot be the same as origin device";
-+		r = -EINVAL;
-+		goto bad_cow;
-+	}
-+
- 	r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
- 	if (r) {
- 		ti->error = "Cannot get COW device";
-diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
-index 061152a43730..cb5d0daf53bb 100644
---- a/drivers/md/dm-table.c
-+++ b/drivers/md/dm-table.c
-@@ -365,6 +365,26 @@ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
- }
- 
- /*
-+ * Convert the path to a device
-+ */
-+dev_t dm_get_dev_t(const char *path)
-+{
-+	dev_t uninitialized_var(dev);
-+	struct block_device *bdev;
-+
-+	bdev = lookup_bdev(path);
-+	if (IS_ERR(bdev))
-+		dev = name_to_dev_t(path);
-+	else {
-+		dev = bdev->bd_dev;
-+		bdput(bdev);
-+	}
-+
-+	return dev;
-+}
-+EXPORT_SYMBOL_GPL(dm_get_dev_t);
-+
-+/*
-  * Add a device to the list, or just increment the usage count if
-  * it's already present.
-  */
-@@ -372,23 +392,15 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
- 		  struct dm_dev **result)
- {
- 	int r;
--	dev_t uninitialized_var(dev);
-+	dev_t dev;
- 	struct dm_dev_internal *dd;
- 	struct dm_table *t = ti->table;
--	struct block_device *bdev;
- 
- 	BUG_ON(!t);
- 
--	/* convert the path to a device */
--	bdev = lookup_bdev(path);
--	if (IS_ERR(bdev)) {
--		dev = name_to_dev_t(path);
--		if (!dev)
--			return -ENODEV;
--	} else {
--		dev = bdev->bd_dev;
--		bdput(bdev);
--	}
-+	dev = dm_get_dev_t(path);
-+	if (!dev)
-+		return -ENODEV;
- 
- 	dd = find_device(&t->devices, dev);
- 	if (!dd) {
-diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
-index f962d6453afd..185010d9cccc 100644
---- a/drivers/md/dm-thin-metadata.c
-+++ b/drivers/md/dm-thin-metadata.c
-@@ -1981,5 +1981,8 @@ bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd)
- 
- void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd)
- {
--	dm_tm_issue_prefetches(pmd->tm);
-+	down_read(&pmd->root_lock);
-+	if (!pmd->fail_io)
-+		dm_tm_issue_prefetches(pmd->tm);
-+	up_read(&pmd->root_lock);
- }
-diff --git a/drivers/md/dm.c b/drivers/md/dm.c
-index dd834927bc66..c338aebb4ccd 100644
---- a/drivers/md/dm.c
-+++ b/drivers/md/dm.c
-@@ -1109,12 +1109,8 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
- 	 * back into ->request_fn() could deadlock attempting to grab the
- 	 * queue lock again.
- 	 */
--	if (run_queue) {
--		if (md->queue->mq_ops)
--			blk_mq_run_hw_queues(md->queue, true);
--		else
--			blk_run_queue_async(md->queue);
--	}
-+	if (!md->queue->mq_ops && run_queue)
-+		blk_run_queue_async(md->queue);
- 
- 	/*
- 	 * dm_put() must be at the end of this function. See the comment above
-@@ -1214,9 +1210,9 @@ static void dm_requeue_original_request(struct mapped_device *md,
- {
- 	int rw = rq_data_dir(rq);
- 
-+	rq_end_stats(md, rq);
- 	dm_unprep_request(rq);
- 
--	rq_end_stats(md, rq);
- 	if (!rq->q->mq_ops)
- 		old_requeue_request(rq);
- 	else {
-@@ -1336,7 +1332,10 @@ static void dm_complete_request(struct request *rq, int error)
- 	struct dm_rq_target_io *tio = tio_from_request(rq);
- 
- 	tio->error = error;
--	blk_complete_request(rq);
-+	if (!rq->q->mq_ops)
-+		blk_complete_request(rq);
-+	else
-+		blk_mq_complete_request(rq, error);
- }
- 
- /*
-diff --git a/drivers/md/md.c b/drivers/md/md.c
-index e55e6cf9ec17..7551278030d8 100644
---- a/drivers/md/md.c
-+++ b/drivers/md/md.c
-@@ -284,6 +284,8 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
- 	 * go away inside make_request
- 	 */
- 	sectors = bio_sectors(bio);
-+	/* bio could be mergeable after passing to underlayer */
-+	bio->bi_rw &= ~REQ_NOMERGE;
- 	mddev->pers->make_request(mddev, bio);
- 
- 	cpu = part_stat_lock();
-diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
-index 0a72ab6e6c20..dd483bb2e111 100644
---- a/drivers/md/multipath.c
-+++ b/drivers/md/multipath.c
-@@ -129,7 +129,9 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
- 	}
- 	multipath = conf->multipaths + mp_bh->path;
- 
--	mp_bh->bio = *bio;
-+	bio_init(&mp_bh->bio);
-+	__bio_clone_fast(&mp_bh->bio, bio);
-+
- 	mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
- 	mp_bh->bio.bi_bdev = multipath->rdev->bdev;
- 	mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
-diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
-index 4e3843f7d245..bb5bce059eb4 100644
---- a/drivers/md/raid1.c
-+++ b/drivers/md/raid1.c
-@@ -2274,6 +2274,7 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
- 	if (fail) {
- 		spin_lock_irq(&conf->device_lock);
- 		list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
-+		conf->nr_queued++;
- 		spin_unlock_irq(&conf->device_lock);
- 		md_wakeup_thread(conf->mddev->thread);
- 	} else {
-@@ -2391,8 +2392,10 @@ static void raid1d(struct md_thread *thread)
- 		LIST_HEAD(tmp);
- 		spin_lock_irqsave(&conf->device_lock, flags);
- 		if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
--			list_add(&tmp, &conf->bio_end_io_list);
--			list_del_init(&conf->bio_end_io_list);
-+			while (!list_empty(&conf->bio_end_io_list)) {
-+				list_move(conf->bio_end_io_list.prev, &tmp);
-+				conf->nr_queued--;
-+			}
- 		}
- 		spin_unlock_irqrestore(&conf->device_lock, flags);
- 		while (!list_empty(&tmp)) {
-diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
-index 1c1447dd3417..e3fd725d5c4d 100644
---- a/drivers/md/raid10.c
-+++ b/drivers/md/raid10.c
-@@ -2664,6 +2664,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
- 		if (fail) {
- 			spin_lock_irq(&conf->device_lock);
- 			list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
-+			conf->nr_queued++;
- 			spin_unlock_irq(&conf->device_lock);
- 			md_wakeup_thread(conf->mddev->thread);
- 		} else {
-@@ -2691,8 +2692,10 @@ static void raid10d(struct md_thread *thread)
- 		LIST_HEAD(tmp);
- 		spin_lock_irqsave(&conf->device_lock, flags);
- 		if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
--			list_add(&tmp, &conf->bio_end_io_list);
--			list_del_init(&conf->bio_end_io_list);
-+			while (!list_empty(&conf->bio_end_io_list)) {
-+				list_move(conf->bio_end_io_list.prev, &tmp);
-+				conf->nr_queued--;
-+			}
- 		}
- 		spin_unlock_irqrestore(&conf->device_lock, flags);
- 		while (!list_empty(&tmp)) {
-diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
-index b4f02c9959f2..32d52878f182 100644
---- a/drivers/md/raid5.c
-+++ b/drivers/md/raid5.c
-@@ -340,8 +340,7 @@ static void release_inactive_stripe_list(struct r5conf *conf,
- 					 int hash)
- {
- 	int size;
--	unsigned long do_wakeup = 0;
--	int i = 0;
-+	bool do_wakeup = false;
- 	unsigned long flags;
- 
- 	if (hash == NR_STRIPE_HASH_LOCKS) {
-@@ -362,19 +361,15 @@ static void release_inactive_stripe_list(struct r5conf *conf,
- 			    !list_empty(list))
- 				atomic_dec(&conf->empty_inactive_list_nr);
- 			list_splice_tail_init(list, conf->inactive_list + hash);
--			do_wakeup |= 1 << hash;
-+			do_wakeup = true;
- 			spin_unlock_irqrestore(conf->hash_locks + hash, flags);
- 		}
- 		size--;
- 		hash--;
- 	}
- 
--	for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
--		if (do_wakeup & (1 << i))
--			wake_up(&conf->wait_for_stripe[i]);
--	}
--
- 	if (do_wakeup) {
-+		wake_up(&conf->wait_for_stripe);
- 		if (atomic_read(&conf->active_stripes) == 0)
- 			wake_up(&conf->wait_for_quiescent);
- 		if (conf->retry_read_aligned)
-@@ -687,15 +682,14 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
- 			if (!sh) {
- 				set_bit(R5_INACTIVE_BLOCKED,
- 					&conf->cache_state);
--				wait_event_exclusive_cmd(
--					conf->wait_for_stripe[hash],
-+				wait_event_lock_irq(
-+					conf->wait_for_stripe,
- 					!list_empty(conf->inactive_list + hash) &&
- 					(atomic_read(&conf->active_stripes)
- 					 < (conf->max_nr_stripes * 3 / 4)
- 					 || !test_bit(R5_INACTIVE_BLOCKED,
- 						      &conf->cache_state)),
--					spin_unlock_irq(conf->hash_locks + hash),
--					spin_lock_irq(conf->hash_locks + hash));
-+					*(conf->hash_locks + hash));
- 				clear_bit(R5_INACTIVE_BLOCKED,
- 					  &conf->cache_state);
- 			} else {
-@@ -720,9 +714,6 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
- 		}
- 	} while (sh == NULL);
- 
--	if (!list_empty(conf->inactive_list + hash))
--		wake_up(&conf->wait_for_stripe[hash]);
--
- 	spin_unlock_irq(conf->hash_locks + hash);
- 	return sh;
- }
-@@ -2089,6 +2080,14 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
- 	unsigned long cpu;
- 	int err = 0;
- 
-+	/*
-+	 * Never shrink. And mddev_suspend() could deadlock if this is called
-+	 * from raid5d. In that case, scribble_disks and scribble_sectors
-+	 * should equal to new_disks and new_sectors
-+	 */
-+	if (conf->scribble_disks >= new_disks &&
-+	    conf->scribble_sectors >= new_sectors)
-+		return 0;
- 	mddev_suspend(conf->mddev);
- 	get_online_cpus();
- 	for_each_present_cpu(cpu) {
-@@ -2110,6 +2109,10 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
- 	}
- 	put_online_cpus();
- 	mddev_resume(conf->mddev);
-+	if (!err) {
-+		conf->scribble_disks = new_disks;
-+		conf->scribble_sectors = new_sectors;
-+	}
- 	return err;
- }
- 
-@@ -2190,7 +2193,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
- 	cnt = 0;
- 	list_for_each_entry(nsh, &newstripes, lru) {
- 		lock_device_hash_lock(conf, hash);
--		wait_event_exclusive_cmd(conf->wait_for_stripe[hash],
-+		wait_event_cmd(conf->wait_for_stripe,
- 				    !list_empty(conf->inactive_list + hash),
- 				    unlock_device_hash_lock(conf, hash),
- 				    lock_device_hash_lock(conf, hash));
-@@ -4236,7 +4239,6 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
- 		WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) |
- 					  (1 << STRIPE_SYNCING) |
- 					  (1 << STRIPE_REPLACED) |
--					  (1 << STRIPE_PREREAD_ACTIVE) |
- 					  (1 << STRIPE_DELAYED) |
- 					  (1 << STRIPE_BIT_DELAY) |
- 					  (1 << STRIPE_FULL_WRITE) |
-@@ -4251,6 +4253,7 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
- 					      (1 << STRIPE_REPLACED)));
- 
- 		set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
-+					    (1 << STRIPE_PREREAD_ACTIVE) |
- 					    (1 << STRIPE_DEGRADED)),
- 			      head_sh->state & (1 << STRIPE_INSYNC));
- 
-@@ -6413,6 +6416,12 @@ static int raid5_alloc_percpu(struct r5conf *conf)
- 	}
- 	put_online_cpus();
- 
-+	if (!err) {
-+		conf->scribble_disks = max(conf->raid_disks,
-+			conf->previous_raid_disks);
-+		conf->scribble_sectors = max(conf->chunk_sectors,
-+			conf->prev_chunk_sectors);
-+	}
- 	return err;
- }
- 
-@@ -6503,9 +6512,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
- 	seqcount_init(&conf->gen_lock);
- 	mutex_init(&conf->cache_size_mutex);
- 	init_waitqueue_head(&conf->wait_for_quiescent);
--	for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
--		init_waitqueue_head(&conf->wait_for_stripe[i]);
--	}
-+	init_waitqueue_head(&conf->wait_for_stripe);
- 	init_waitqueue_head(&conf->wait_for_overlap);
- 	INIT_LIST_HEAD(&conf->handle_list);
- 	INIT_LIST_HEAD(&conf->hold_list);
-@@ -7014,8 +7021,8 @@ static int raid5_run(struct mddev *mddev)
- 		}
- 
- 		if (discard_supported &&
--		   mddev->queue->limits.max_discard_sectors >= stripe &&
--		   mddev->queue->limits.discard_granularity >= stripe)
-+		    mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
-+		    mddev->queue->limits.discard_granularity >= stripe)
- 			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
- 						mddev->queue);
- 		else
-diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
-index a415e1cd39b8..517d4b68a1be 100644
---- a/drivers/md/raid5.h
-+++ b/drivers/md/raid5.h
-@@ -510,6 +510,8 @@ struct r5conf {
- 					      * conversions
- 					      */
- 	} __percpu *percpu;
-+	int scribble_disks;
-+	int scribble_sectors;
- #ifdef CONFIG_HOTPLUG_CPU
- 	struct notifier_block	cpu_notify;
- #endif
-@@ -522,7 +524,7 @@ struct r5conf {
- 	atomic_t		empty_inactive_list_nr;
- 	struct llist_head	released_stripes;
- 	wait_queue_head_t	wait_for_quiescent;
--	wait_queue_head_t	wait_for_stripe[NR_STRIPE_HASH_LOCKS];
-+	wait_queue_head_t	wait_for_stripe;
- 	wait_queue_head_t	wait_for_overlap;
- 	unsigned long		cache_state;
- #define R5_INACTIVE_BLOCKED	1	/* release of inactive stripes blocked,
-diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
-index 471fd23b5c5c..08d2c6bf7341 100644
---- a/drivers/media/i2c/adv7511.c
-+++ b/drivers/media/i2c/adv7511.c
-@@ -1161,12 +1161,23 @@ static void adv7511_dbg_dump_edid(int lvl, int debug, struct v4l2_subdev *sd, in
- 	}
- }
- 
-+static void adv7511_notify_no_edid(struct v4l2_subdev *sd)
-+{
-+	struct adv7511_state *state = get_adv7511_state(sd);
-+	struct adv7511_edid_detect ed;
-+
-+	/* We failed to read the EDID, so send an event for this. */
-+	ed.present = false;
-+	ed.segment = adv7511_rd(sd, 0xc4);
-+	v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
-+	v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x0);
-+}
-+
- static void adv7511_edid_handler(struct work_struct *work)
- {
- 	struct delayed_work *dwork = to_delayed_work(work);
- 	struct adv7511_state *state = container_of(dwork, struct adv7511_state, edid_handler);
- 	struct v4l2_subdev *sd = &state->sd;
--	struct adv7511_edid_detect ed;
- 
- 	v4l2_dbg(1, debug, sd, "%s:\n", __func__);
- 
-@@ -1191,9 +1202,7 @@ static void adv7511_edid_handler(struct work_struct *work)
- 	}
- 
- 	/* We failed to read the EDID, so send an event for this. */
--	ed.present = false;
--	ed.segment = adv7511_rd(sd, 0xc4);
--	v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
-+	adv7511_notify_no_edid(sd);
- 	v4l2_dbg(1, debug, sd, "%s: no edid found\n", __func__);
- }
- 
-@@ -1264,7 +1273,6 @@ static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
- 	/* update read only ctrls */
- 	v4l2_ctrl_s_ctrl(state->hotplug_ctrl, adv7511_have_hotplug(sd) ? 0x1 : 0x0);
- 	v4l2_ctrl_s_ctrl(state->rx_sense_ctrl, adv7511_have_rx_sense(sd) ? 0x1 : 0x0);
--	v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0);
- 
- 	if ((status & MASK_ADV7511_HPD_DETECT) && ((status & MASK_ADV7511_MSEN_DETECT) || state->edid.segments)) {
- 		v4l2_dbg(1, debug, sd, "%s: hotplug and (rx-sense or edid)\n", __func__);
-@@ -1294,6 +1302,7 @@ static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
- 		}
- 		adv7511_s_power(sd, false);
- 		memset(&state->edid, 0, sizeof(struct adv7511_state_edid));
-+		adv7511_notify_no_edid(sd);
- 	}
- }
- 
-@@ -1370,6 +1379,7 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
- 		}
- 		/* one more segment read ok */
- 		state->edid.segments = segment + 1;
-+		v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x1);
- 		if (((state->edid.data[0x7e] >> 1) + 1) > state->edid.segments) {
- 			/* Request next EDID segment */
- 			v4l2_dbg(1, debug, sd, "%s: request segment %d\n", __func__, state->edid.segments);
-@@ -1389,7 +1399,6 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
- 		ed.present = true;
- 		ed.segment = 0;
- 		state->edid_detect_counter++;
--		v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0);
- 		v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
- 		return ed.present;
- 	}
-diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
-index 9400e996087b..bedbd51fb77c 100644
---- a/drivers/media/pci/bt8xx/bttv-driver.c
-+++ b/drivers/media/pci/bt8xx/bttv-driver.c
-@@ -2334,6 +2334,19 @@ static int bttv_g_fmt_vid_overlay(struct file *file, void *priv,
- 	return 0;
- }
- 
-+static void bttv_get_width_mask_vid_cap(const struct bttv_format *fmt,
-+					unsigned int *width_mask,
-+					unsigned int *width_bias)
-+{
-+	if (fmt->flags & FORMAT_FLAGS_PLANAR) {
-+		*width_mask = ~15; /* width must be a multiple of 16 pixels */
-+		*width_bias = 8;   /* nearest */
-+	} else {
-+		*width_mask = ~3; /* width must be a multiple of 4 pixels */
-+		*width_bias = 2;  /* nearest */
-+	}
-+}
-+
- static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
- 						struct v4l2_format *f)
- {
-@@ -2343,6 +2356,7 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
- 	enum v4l2_field field;
- 	__s32 width, height;
- 	__s32 height2;
-+	unsigned int width_mask, width_bias;
- 	int rc;
- 
- 	fmt = format_by_fourcc(f->fmt.pix.pixelformat);
-@@ -2375,9 +2389,9 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
- 	width = f->fmt.pix.width;
- 	height = f->fmt.pix.height;
- 
-+	bttv_get_width_mask_vid_cap(fmt, &width_mask, &width_bias);
- 	rc = limit_scaled_size_lock(fh, &width, &height, field,
--			       /* width_mask: 4 pixels */ ~3,
--			       /* width_bias: nearest */ 2,
-+			       width_mask, width_bias,
- 			       /* adjust_size */ 1,
- 			       /* adjust_crop */ 0);
- 	if (0 != rc)
-@@ -2410,6 +2424,7 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
- 	struct bttv_fh *fh = priv;
- 	struct bttv *btv = fh->btv;
- 	__s32 width, height;
-+	unsigned int width_mask, width_bias;
- 	enum v4l2_field field;
- 
- 	retval = bttv_switch_type(fh, f->type);
-@@ -2424,9 +2439,10 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
- 	height = f->fmt.pix.height;
- 	field = f->fmt.pix.field;
- 
-+	fmt = format_by_fourcc(f->fmt.pix.pixelformat);
-+	bttv_get_width_mask_vid_cap(fmt, &width_mask, &width_bias);
- 	retval = limit_scaled_size_lock(fh, &width, &height, f->fmt.pix.field,
--			       /* width_mask: 4 pixels */ ~3,
--			       /* width_bias: nearest */ 2,
-+			       width_mask, width_bias,
- 			       /* adjust_size */ 1,
- 			       /* adjust_crop */ 1);
- 	if (0 != retval)
-@@ -2434,8 +2450,6 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
- 
- 	f->fmt.pix.field = field;
- 
--	fmt = format_by_fourcc(f->fmt.pix.pixelformat);
--
- 	/* update our state informations */
- 	fh->fmt              = fmt;
- 	fh->cap.field        = f->fmt.pix.field;
-diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
-index a63c1366a64e..1293563b7dce 100644
---- a/drivers/media/pci/saa7134/saa7134-video.c
-+++ b/drivers/media/pci/saa7134/saa7134-video.c
-@@ -1219,10 +1219,13 @@ static int saa7134_g_fmt_vid_cap(struct file *file, void *priv,
- 	f->fmt.pix.height       = dev->height;
- 	f->fmt.pix.field        = dev->field;
- 	f->fmt.pix.pixelformat  = dev->fmt->fourcc;
--	f->fmt.pix.bytesperline =
--		(f->fmt.pix.width * dev->fmt->depth) >> 3;
-+	if (dev->fmt->planar)
-+		f->fmt.pix.bytesperline = f->fmt.pix.width;
-+	else
-+		f->fmt.pix.bytesperline =
-+			(f->fmt.pix.width * dev->fmt->depth) / 8;
- 	f->fmt.pix.sizeimage =
--		f->fmt.pix.height * f->fmt.pix.bytesperline;
-+		(f->fmt.pix.height * f->fmt.pix.width * dev->fmt->depth) / 8;
- 	f->fmt.pix.colorspace   = V4L2_COLORSPACE_SMPTE170M;
- 	return 0;
- }
-@@ -1298,10 +1301,13 @@ static int saa7134_try_fmt_vid_cap(struct file *file, void *priv,
- 	if (f->fmt.pix.height > maxh)
- 		f->fmt.pix.height = maxh;
- 	f->fmt.pix.width &= ~0x03;
--	f->fmt.pix.bytesperline =
--		(f->fmt.pix.width * fmt->depth) >> 3;
-+	if (fmt->planar)
-+		f->fmt.pix.bytesperline = f->fmt.pix.width;
-+	else
-+		f->fmt.pix.bytesperline =
-+			(f->fmt.pix.width * fmt->depth) / 8;
- 	f->fmt.pix.sizeimage =
--		f->fmt.pix.height * f->fmt.pix.bytesperline;
-+		(f->fmt.pix.height * f->fmt.pix.width * fmt->depth) / 8;
- 	f->fmt.pix.colorspace   = V4L2_COLORSPACE_SMPTE170M;
- 
- 	return 0;
-diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
-index 7d28899f89ce..6efe9d002961 100644
---- a/drivers/media/platform/coda/coda-bit.c
-+++ b/drivers/media/platform/coda/coda-bit.c
-@@ -1342,7 +1342,7 @@ static void coda_finish_encode(struct coda_ctx *ctx)
- 
- 	/* Calculate bytesused field */
- 	if (dst_buf->sequence == 0) {
--		vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
-+		vb2_set_plane_payload(&dst_buf->vb2_buf, 0, wr_ptr - start_ptr +
- 					ctx->vpu_header_size[0] +
- 					ctx->vpu_header_size[1] +
- 					ctx->vpu_header_size[2]);
-diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
-index 2d782ce94a67..7ae89c684783 100644
---- a/drivers/media/platform/coda/coda-common.c
-+++ b/drivers/media/platform/coda/coda-common.c
-@@ -2118,14 +2118,12 @@ static int coda_probe(struct platform_device *pdev)
- 
- 	pdev_id = of_id ? of_id->data : platform_get_device_id(pdev);
- 
--	if (of_id) {
-+	if (of_id)
- 		dev->devtype = of_id->data;
--	} else if (pdev_id) {
-+	else if (pdev_id)
- 		dev->devtype = &coda_devdata[pdev_id->driver_data];
--	} else {
--		ret = -EINVAL;
--		goto err_v4l2_register;
--	}
-+	else
-+		return -EINVAL;
- 
- 	spin_lock_init(&dev->irqlock);
- 	INIT_LIST_HEAD(&dev->instances);
-diff --git a/drivers/media/platform/vsp1/vsp1_sru.c b/drivers/media/platform/vsp1/vsp1_sru.c
-index 6310acab60e7..d41ae950d1a1 100644
---- a/drivers/media/platform/vsp1/vsp1_sru.c
-+++ b/drivers/media/platform/vsp1/vsp1_sru.c
-@@ -154,6 +154,7 @@ static int sru_s_stream(struct v4l2_subdev *subdev, int enable)
- 	mutex_lock(sru->ctrls.lock);
- 	ctrl0 |= vsp1_sru_read(sru, VI6_SRU_CTRL0)
- 	       & (VI6_SRU_CTRL0_PARAM0_MASK | VI6_SRU_CTRL0_PARAM1_MASK);
-+	vsp1_sru_write(sru, VI6_SRU_CTRL0, ctrl0);
- 	mutex_unlock(sru->ctrls.lock);
- 
- 	vsp1_sru_write(sru, VI6_SRU_CTRL1, VI6_SRU_CTRL1_PARAM5);
-diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
-index 9e29e70a78d7..d8988801dde0 100644
---- a/drivers/media/usb/au0828/au0828-core.c
-+++ b/drivers/media/usb/au0828/au0828-core.c
-@@ -192,7 +192,7 @@ static void au0828_usb_disconnect(struct usb_interface *interface)
- 	   Set the status so poll routines can check and avoid
- 	   access after disconnect.
- 	*/
--	dev->dev_state = DEV_DISCONNECTED;
-+	set_bit(DEV_DISCONNECTED, &dev->dev_state);
- 
- 	au0828_rc_unregister(dev);
- 	/* Digital TV */
-diff --git a/drivers/media/usb/au0828/au0828-input.c b/drivers/media/usb/au0828/au0828-input.c
-index b0f067971979..3d6687f0407d 100644
---- a/drivers/media/usb/au0828/au0828-input.c
-+++ b/drivers/media/usb/au0828/au0828-input.c
-@@ -130,7 +130,7 @@ static int au0828_get_key_au8522(struct au0828_rc *ir)
- 	bool first = true;
- 
- 	/* do nothing if device is disconnected */
--	if (ir->dev->dev_state == DEV_DISCONNECTED)
-+	if (test_bit(DEV_DISCONNECTED, &ir->dev->dev_state))
- 		return 0;
- 
- 	/* Check IR int */
-@@ -260,7 +260,7 @@ static void au0828_rc_stop(struct rc_dev *rc)
- 	cancel_delayed_work_sync(&ir->work);
- 
- 	/* do nothing if device is disconnected */
--	if (ir->dev->dev_state != DEV_DISCONNECTED) {
-+	if (!test_bit(DEV_DISCONNECTED, &ir->dev->dev_state)) {
- 		/* Disable IR */
- 		au8522_rc_clear(ir, 0xe0, 1 << 4);
- 	}
-diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
-index a13625722848..8bc69af874a0 100644
---- a/drivers/media/usb/au0828/au0828-video.c
-+++ b/drivers/media/usb/au0828/au0828-video.c
-@@ -104,14 +104,13 @@ static inline void print_err_status(struct au0828_dev *dev,
- 
- static int check_dev(struct au0828_dev *dev)
- {
--	if (dev->dev_state & DEV_DISCONNECTED) {
-+	if (test_bit(DEV_DISCONNECTED, &dev->dev_state)) {
- 		pr_info("v4l2 ioctl: device not present\n");
- 		return -ENODEV;
- 	}
- 
--	if (dev->dev_state & DEV_MISCONFIGURED) {
--		pr_info("v4l2 ioctl: device is misconfigured; "
--		       "close and open it again\n");
-+	if (test_bit(DEV_MISCONFIGURED, &dev->dev_state)) {
-+		pr_info("v4l2 ioctl: device is misconfigured; close and open it again\n");
- 		return -EIO;
- 	}
- 	return 0;
-@@ -519,8 +518,8 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb)
- 	if (!dev)
- 		return 0;
- 
--	if ((dev->dev_state & DEV_DISCONNECTED) ||
--	    (dev->dev_state & DEV_MISCONFIGURED))
-+	if (test_bit(DEV_DISCONNECTED, &dev->dev_state) ||
-+	    test_bit(DEV_MISCONFIGURED, &dev->dev_state))
- 		return 0;
- 
- 	if (urb->status < 0) {
-@@ -822,10 +821,10 @@ static int au0828_stream_interrupt(struct au0828_dev *dev)
- 	int ret = 0;
- 
- 	dev->stream_state = STREAM_INTERRUPT;
--	if (dev->dev_state == DEV_DISCONNECTED)
-+	if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
- 		return -ENODEV;
- 	else if (ret) {
--		dev->dev_state = DEV_MISCONFIGURED;
-+		set_bit(DEV_MISCONFIGURED, &dev->dev_state);
- 		dprintk(1, "%s device is misconfigured!\n", __func__);
- 		return ret;
- 	}
-@@ -1014,7 +1013,7 @@ static int au0828_v4l2_open(struct file *filp)
- 	int ret;
- 
- 	dprintk(1,
--		"%s called std_set %d dev_state %d stream users %d users %d\n",
-+		"%s called std_set %d dev_state %ld stream users %d users %d\n",
- 		__func__, dev->std_set_in_tuner_core, dev->dev_state,
- 		dev->streaming_users, dev->users);
- 
-@@ -1033,7 +1032,7 @@ static int au0828_v4l2_open(struct file *filp)
- 		au0828_analog_stream_enable(dev);
- 		au0828_analog_stream_reset(dev);
- 		dev->stream_state = STREAM_OFF;
--		dev->dev_state |= DEV_INITIALIZED;
-+		set_bit(DEV_INITIALIZED, &dev->dev_state);
- 	}
- 	dev->users++;
- 	mutex_unlock(&dev->lock);
-@@ -1047,7 +1046,7 @@ static int au0828_v4l2_close(struct file *filp)
- 	struct video_device *vdev = video_devdata(filp);
- 
- 	dprintk(1,
--		"%s called std_set %d dev_state %d stream users %d users %d\n",
-+		"%s called std_set %d dev_state %ld stream users %d users %d\n",
- 		__func__, dev->std_set_in_tuner_core, dev->dev_state,
- 		dev->streaming_users, dev->users);
- 
-@@ -1063,7 +1062,7 @@ static int au0828_v4l2_close(struct file *filp)
- 		del_timer_sync(&dev->vbi_timeout);
- 	}
- 
--	if (dev->dev_state == DEV_DISCONNECTED)
-+	if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
- 		goto end;
- 
- 	if (dev->users == 1) {
-@@ -1092,7 +1091,7 @@ static void au0828_init_tuner(struct au0828_dev *dev)
- 		.type = V4L2_TUNER_ANALOG_TV,
- 	};
- 
--	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
-+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
- 		dev->std_set_in_tuner_core, dev->dev_state);
- 
- 	if (dev->std_set_in_tuner_core)
-@@ -1164,7 +1163,7 @@ static int vidioc_querycap(struct file *file, void  *priv,
- 	struct video_device *vdev = video_devdata(file);
- 	struct au0828_dev *dev = video_drvdata(file);
- 
--	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
-+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
- 		dev->std_set_in_tuner_core, dev->dev_state);
- 
- 	strlcpy(cap->driver, "au0828", sizeof(cap->driver));
-@@ -1207,7 +1206,7 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
- {
- 	struct au0828_dev *dev = video_drvdata(file);
- 
--	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
-+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
- 		dev->std_set_in_tuner_core, dev->dev_state);
- 
- 	f->fmt.pix.width = dev->width;
-@@ -1226,7 +1225,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
- {
- 	struct au0828_dev *dev = video_drvdata(file);
- 
--	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
-+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
- 		dev->std_set_in_tuner_core, dev->dev_state);
- 
- 	return au0828_set_format(dev, VIDIOC_TRY_FMT, f);
-@@ -1238,7 +1237,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
- 	struct au0828_dev *dev = video_drvdata(file);
- 	int rc;
- 
--	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
-+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
- 		dev->std_set_in_tuner_core, dev->dev_state);
- 
- 	rc = check_dev(dev);
-@@ -1260,7 +1259,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id norm)
- {
- 	struct au0828_dev *dev = video_drvdata(file);
- 
--	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
-+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
- 		dev->std_set_in_tuner_core, dev->dev_state);
- 
- 	if (norm == dev->std)
-@@ -1292,7 +1291,7 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
- {
- 	struct au0828_dev *dev = video_drvdata(file);
- 
--	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
-+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
- 		dev->std_set_in_tuner_core, dev->dev_state);
- 
- 	*norm = dev->std;
-@@ -1315,7 +1314,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
- 		[AU0828_VMUX_DEBUG] = "tv debug"
- 	};
- 
--	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
-+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
- 		dev->std_set_in_tuner_core, dev->dev_state);
- 
- 	tmp = input->index;
-@@ -1345,7 +1344,7 @@ static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
- {
- 	struct au0828_dev *dev = video_drvdata(file);
- 
--	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
-+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
- 		dev->std_set_in_tuner_core, dev->dev_state);
- 
- 	*i = dev->ctrl_input;
-@@ -1356,7 +1355,7 @@ static void au0828_s_input(struct au0828_dev *dev, int index)
- {
- 	int i;
- 
--	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
-+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
- 		dev->std_set_in_tuner_core, dev->dev_state);
- 
- 	switch (AUVI_INPUT(index).type) {
-@@ -1441,7 +1440,7 @@ static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a)
- {
- 	struct au0828_dev *dev = video_drvdata(file);
- 
--	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
-+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
- 		dev->std_set_in_tuner_core, dev->dev_state);
- 
- 	a->index = dev->ctrl_ainput;
-@@ -1461,7 +1460,7 @@ static int vidioc_s_audio(struct file *file, void *priv, const struct v4l2_audio
- 	if (a->index != dev->ctrl_ainput)
- 		return -EINVAL;
- 
--	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
-+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
- 		dev->std_set_in_tuner_core, dev->dev_state);
- 	return 0;
- }
-@@ -1473,7 +1472,7 @@ static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
- 	if (t->index != 0)
- 		return -EINVAL;
- 
--	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
-+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
- 		dev->std_set_in_tuner_core, dev->dev_state);
- 
- 	strcpy(t->name, "Auvitek tuner");
-@@ -1493,7 +1492,7 @@ static int vidioc_s_tuner(struct file *file, void *priv,
- 	if (t->index != 0)
- 		return -EINVAL;
- 
--	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
-+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
- 		dev->std_set_in_tuner_core, dev->dev_state);
- 
- 	au0828_init_tuner(dev);
-@@ -1515,7 +1514,7 @@ static int vidioc_g_frequency(struct file *file, void *priv,
- 
- 	if (freq->tuner != 0)
- 		return -EINVAL;
--	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
-+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
- 		dev->std_set_in_tuner_core, dev->dev_state);
- 	freq->frequency = dev->ctrl_freq;
- 	return 0;
-@@ -1530,7 +1529,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
- 	if (freq->tuner != 0)
- 		return -EINVAL;
- 
--	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
-+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
- 		dev->std_set_in_tuner_core, dev->dev_state);
- 
- 	au0828_init_tuner(dev);
-@@ -1556,7 +1555,7 @@ static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
- {
- 	struct au0828_dev *dev = video_drvdata(file);
- 
--	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
-+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
- 		dev->std_set_in_tuner_core, dev->dev_state);
- 
- 	format->fmt.vbi.samples_per_line = dev->vbi_width;
-@@ -1582,7 +1581,7 @@ static int vidioc_cropcap(struct file *file, void *priv,
- 	if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- 		return -EINVAL;
- 
--	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
-+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
- 		dev->std_set_in_tuner_core, dev->dev_state);
- 
- 	cc->bounds.left = 0;
-@@ -1604,7 +1603,7 @@ static int vidioc_g_register(struct file *file, void *priv,
- {
- 	struct au0828_dev *dev = video_drvdata(file);
- 
--	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
-+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
- 		dev->std_set_in_tuner_core, dev->dev_state);
- 
- 	reg->val = au0828_read(dev, reg->reg);
-@@ -1617,7 +1616,7 @@ static int vidioc_s_register(struct file *file, void *priv,
- {
- 	struct au0828_dev *dev = video_drvdata(file);
- 
--	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
-+	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
- 		dev->std_set_in_tuner_core, dev->dev_state);
- 
- 	return au0828_writereg(dev, reg->reg, reg->val);
-diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
-index 8276072bc55a..b28a05d76618 100644
---- a/drivers/media/usb/au0828/au0828.h
-+++ b/drivers/media/usb/au0828/au0828.h
-@@ -21,6 +21,7 @@
- 
- #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
- 
-+#include <linux/bitops.h>
- #include <linux/usb.h>
- #include <linux/i2c.h>
- #include <linux/i2c-algo-bit.h>
-@@ -122,9 +123,9 @@ enum au0828_stream_state {
- 
- /* device state */
- enum au0828_dev_state {
--	DEV_INITIALIZED = 0x01,
--	DEV_DISCONNECTED = 0x02,
--	DEV_MISCONFIGURED = 0x04
-+	DEV_INITIALIZED = 0,
-+	DEV_DISCONNECTED = 1,
-+	DEV_MISCONFIGURED = 2
- };
- 
- struct au0828_dev;
-@@ -248,7 +249,7 @@ struct au0828_dev {
- 	int input_type;
- 	int std_set_in_tuner_core;
- 	unsigned int ctrl_input;
--	enum au0828_dev_state dev_state;
-+	long unsigned int dev_state; /* defined at enum au0828_dev_state */;
- 	enum au0828_stream_state stream_state;
- 	wait_queue_head_t open;
- 
-diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
-index 086cf1c7bd7d..18aed5dd325e 100644
---- a/drivers/media/usb/pwc/pwc-if.c
-+++ b/drivers/media/usb/pwc/pwc-if.c
-@@ -91,6 +91,7 @@ static const struct usb_device_id pwc_device_table [] = {
- 	{ USB_DEVICE(0x0471, 0x0312) },
- 	{ USB_DEVICE(0x0471, 0x0313) }, /* the 'new' 720K */
- 	{ USB_DEVICE(0x0471, 0x0329) }, /* Philips SPC 900NC PC Camera */
-+	{ USB_DEVICE(0x0471, 0x032C) }, /* Philips SPC 880NC PC Camera */
- 	{ USB_DEVICE(0x069A, 0x0001) }, /* Askey */
- 	{ USB_DEVICE(0x046D, 0x08B0) }, /* Logitech QuickCam Pro 3000 */
- 	{ USB_DEVICE(0x046D, 0x08B1) }, /* Logitech QuickCam Notebook Pro */
-@@ -810,6 +811,11 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
- 			name = "Philips SPC 900NC webcam";
- 			type_id = 740;
- 			break;
-+		case 0x032C:
-+			PWC_INFO("Philips SPC 880NC USB webcam detected.\n");
-+			name = "Philips SPC 880NC webcam";
-+			type_id = 740;
-+			break;
- 		default:
- 			return -ENODEV;
- 			break;
-diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
-index de9ff3bb8edd..6996ab8db108 100644
---- a/drivers/media/usb/usbvision/usbvision-video.c
-+++ b/drivers/media/usb/usbvision/usbvision-video.c
-@@ -1461,13 +1461,6 @@ static int usbvision_probe(struct usb_interface *intf,
- 	printk(KERN_INFO "%s: %s found\n", __func__,
- 				usbvision_device_data[model].model_string);
- 
--	/*
--	 * this is a security check.
--	 * an exploit using an incorrect bInterfaceNumber is known
--	 */
--	if (ifnum >= USB_MAXINTERFACES || !dev->actconfig->interface[ifnum])
--		return -ENODEV;
--
- 	if (usbvision_device_data[model].interface >= 0)
- 		interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0];
- 	else if (ifnum < dev->actconfig->desc.bNumInterfaces)
-diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
-index 8fd84a67478a..019644ff627d 100644
---- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
-+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
-@@ -415,7 +415,8 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
- 		get_user(kp->index, &up->index) ||
- 		get_user(kp->type, &up->type) ||
- 		get_user(kp->flags, &up->flags) ||
--		get_user(kp->memory, &up->memory))
-+		get_user(kp->memory, &up->memory) ||
-+		get_user(kp->length, &up->length))
- 			return -EFAULT;
- 
- 	if (V4L2_TYPE_IS_OUTPUT(kp->type))
-@@ -427,9 +428,6 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
- 			return -EFAULT;
- 
- 	if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
--		if (get_user(kp->length, &up->length))
--			return -EFAULT;
--
- 		num_planes = kp->length;
- 		if (num_planes == 0) {
- 			kp->m.planes = NULL;
-@@ -462,16 +460,14 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
- 	} else {
- 		switch (kp->memory) {
- 		case V4L2_MEMORY_MMAP:
--			if (get_user(kp->length, &up->length) ||
--				get_user(kp->m.offset, &up->m.offset))
-+			if (get_user(kp->m.offset, &up->m.offset))
- 				return -EFAULT;
- 			break;
- 		case V4L2_MEMORY_USERPTR:
- 			{
- 			compat_long_t tmp;
- 
--			if (get_user(kp->length, &up->length) ||
--			    get_user(tmp, &up->m.userptr))
-+			if (get_user(tmp, &up->m.userptr))
- 				return -EFAULT;
- 
- 			kp->m.userptr = (unsigned long)compat_ptr(tmp);
-@@ -513,7 +509,8 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
- 		copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) ||
- 		put_user(kp->sequence, &up->sequence) ||
- 		put_user(kp->reserved2, &up->reserved2) ||
--		put_user(kp->reserved, &up->reserved))
-+		put_user(kp->reserved, &up->reserved) ||
-+		put_user(kp->length, &up->length))
- 			return -EFAULT;
- 
- 	if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
-@@ -536,13 +533,11 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
- 	} else {
- 		switch (kp->memory) {
- 		case V4L2_MEMORY_MMAP:
--			if (put_user(kp->length, &up->length) ||
--				put_user(kp->m.offset, &up->m.offset))
-+			if (put_user(kp->m.offset, &up->m.offset))
- 				return -EFAULT;
- 			break;
- 		case V4L2_MEMORY_USERPTR:
--			if (put_user(kp->length, &up->length) ||
--				put_user(kp->m.userptr, &up->m.userptr))
-+			if (put_user(kp->m.userptr, &up->m.userptr))
- 				return -EFAULT;
- 			break;
- 		case V4L2_MEMORY_OVERLAY:
-diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
-index ff8953ae52d1..d7d7c52a3060 100644
---- a/drivers/media/v4l2-core/videobuf2-core.c
-+++ b/drivers/media/v4l2-core/videobuf2-core.c
-@@ -1643,7 +1643,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
-  * Will sleep if required for nonblocking == false.
-  */
- static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
--				int nonblocking)
-+			     void *pb, int nonblocking)
- {
- 	unsigned long flags;
- 	int ret;
-@@ -1664,10 +1664,10 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
- 	/*
- 	 * Only remove the buffer from done_list if v4l2_buffer can handle all
- 	 * the planes.
--	 * Verifying planes is NOT necessary since it already has been checked
--	 * before the buffer is queued/prepared. So it can never fail.
- 	 */
--	list_del(&(*vb)->done_entry);
-+	ret = call_bufop(q, verify_planes_array, *vb, pb);
-+	if (!ret)
-+		list_del(&(*vb)->done_entry);
- 	spin_unlock_irqrestore(&q->done_lock, flags);
- 
- 	return ret;
-@@ -1746,7 +1746,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
- 	struct vb2_buffer *vb = NULL;
- 	int ret;
- 
--	ret = __vb2_get_done_vb(q, &vb, nonblocking);
-+	ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
- 	if (ret < 0)
- 		return ret;
- 
-@@ -2293,6 +2293,16 @@ unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
- 		return POLLERR;
- 
- 	/*
-+	 * If this quirk is set and QBUF hasn't been called yet then
-+	 * return POLLERR as well. This only affects capture queues, output
-+	 * queues will always initialize waiting_for_buffers to false.
-+	 * This quirk is set by V4L2 for backwards compatibility reasons.
-+	 */
-+	if (q->quirk_poll_must_check_waiting_for_buffers &&
-+	    q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
-+		return POLLERR;
-+
-+	/*
- 	 * For output streams you can call write() as long as there are fewer
- 	 * buffers queued than there are buffers available.
- 	 */
-diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
-index dbec5923fcf0..3c3b517f1d1c 100644
---- a/drivers/media/v4l2-core/videobuf2-memops.c
-+++ b/drivers/media/v4l2-core/videobuf2-memops.c
-@@ -49,7 +49,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
- 	vec = frame_vector_create(nr);
- 	if (!vec)
- 		return ERR_PTR(-ENOMEM);
--	ret = get_vaddr_frames(start, nr, write, 1, vec);
-+	ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
- 	if (ret < 0)
- 		goto out_destroy;
- 	/* We accept only complete set of PFNs */
-diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
-index 91f552124050..0b1b8c7b6ce5 100644
---- a/drivers/media/v4l2-core/videobuf2-v4l2.c
-+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
-@@ -765,6 +765,12 @@ int vb2_queue_init(struct vb2_queue *q)
- 	q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
- 	q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK)
- 			== V4L2_BUF_FLAG_TIMESTAMP_COPY;
-+	/*
-+	 * For compatibility with vb1: if QBUF hasn't been called yet, then
-+	 * return POLLERR as well. This only affects capture queues, output
-+	 * queues will always initialize waiting_for_buffers to false.
-+	 */
-+	q->quirk_poll_must_check_waiting_for_buffers = true;
- 
- 	return vb2_core_queue_init(q);
- }
-@@ -818,14 +824,6 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
- 			poll_wait(file, &fh->wait, wait);
- 	}
- 
--	/*
--	 * For compatibility with vb1: if QBUF hasn't been called yet, then
--	 * return POLLERR as well. This only affects capture queues, output
--	 * queues will always initialize waiting_for_buffers to false.
--	 */
--	if (q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
--		return POLLERR;
--
- 	return res | vb2_core_poll(q, file, wait);
- }
- EXPORT_SYMBOL_GPL(vb2_poll);
-diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
-index 1743788f1595..1bbbe877ba7e 100644
---- a/drivers/mfd/intel-lpss.c
-+++ b/drivers/mfd/intel-lpss.c
-@@ -453,6 +453,7 @@ int intel_lpss_probe(struct device *dev,
- err_remove_ltr:
- 	intel_lpss_debugfs_remove(lpss);
- 	intel_lpss_ltr_hide(lpss);
-+	intel_lpss_unregister_clock(lpss);
- 
- err_clk_register:
- 	ida_simple_remove(&intel_lpss_devid_ida, lpss->devid);
-diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
-index b7b3e8ee64f2..c30290f33430 100644
---- a/drivers/mfd/omap-usb-tll.c
-+++ b/drivers/mfd/omap-usb-tll.c
-@@ -269,6 +269,8 @@ static int usbtll_omap_probe(struct platform_device *pdev)
- 
- 		if (IS_ERR(tll->ch_clk[i]))
- 			dev_dbg(dev, "can't get clock : %s\n", clkname);
-+		else
-+			clk_prepare(tll->ch_clk[i]);
- 	}
- 
- 	pm_runtime_put_sync(dev);
-@@ -301,9 +303,12 @@ static int usbtll_omap_remove(struct platform_device *pdev)
- 	tll_dev = NULL;
- 	spin_unlock(&tll_lock);
- 
--	for (i = 0; i < tll->nch; i++)
--		if (!IS_ERR(tll->ch_clk[i]))
-+	for (i = 0; i < tll->nch; i++) {
-+		if (!IS_ERR(tll->ch_clk[i])) {
-+			clk_unprepare(tll->ch_clk[i]);
- 			clk_put(tll->ch_clk[i]);
-+		}
-+	}
- 
- 	pm_runtime_disable(&pdev->dev);
- 	return 0;
-@@ -420,7 +425,7 @@ int omap_tll_enable(struct usbhs_omap_platform_data *pdata)
- 			if (IS_ERR(tll->ch_clk[i]))
- 				continue;
- 
--			r = clk_prepare_enable(tll->ch_clk[i]);
-+			r = clk_enable(tll->ch_clk[i]);
- 			if (r) {
- 				dev_err(tll_dev,
- 				 "Error enabling ch %d clock: %d\n", i, r);
-@@ -448,7 +453,7 @@ int omap_tll_disable(struct usbhs_omap_platform_data *pdata)
- 	for (i = 0; i < tll->nch; i++) {
- 		if (omap_usb_mode_needs_tll(pdata->port_mode[i])) {
- 			if (!IS_ERR(tll->ch_clk[i]))
--				clk_disable_unprepare(tll->ch_clk[i]);
-+				clk_disable(tll->ch_clk[i]);
- 		}
- 	}
- 
-diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
-index 054fc10cb3b6..b22c03264270 100644
---- a/drivers/misc/Kconfig
-+++ b/drivers/misc/Kconfig
-@@ -440,7 +440,7 @@ config ARM_CHARLCD
- 	  still useful.
- 
- config BMP085
--	bool
-+	tristate
- 	depends on SYSFS
- 
- config BMP085_I2C
-diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
-index 15e88078ba1e..f1a0b99f5a9a 100644
---- a/drivers/misc/ad525x_dpot.c
-+++ b/drivers/misc/ad525x_dpot.c
-@@ -216,7 +216,7 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
- 			 */
- 			value = swab16(value);
- 
--			if (dpot->uid == DPOT_UID(AD5271_ID))
-+			if (dpot->uid == DPOT_UID(AD5274_ID))
- 				value = value >> 2;
- 		return value;
- 	default:
-diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
-index 09a406058c46..efbb6945eb18 100644
---- a/drivers/misc/cxl/irq.c
-+++ b/drivers/misc/cxl/irq.c
-@@ -288,7 +288,6 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
- void cxl_unmap_irq(unsigned int virq, void *cookie)
- {
- 	free_irq(virq, cookie);
--	irq_dispose_mapping(virq);
- }
- 
- static int cxl_register_one_irq(struct cxl *adapter,
-diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
-index cd0403f09267..e79c0371ee6f 100644
---- a/drivers/misc/mei/amthif.c
-+++ b/drivers/misc/mei/amthif.c
-@@ -417,8 +417,10 @@ int mei_amthif_irq_read_msg(struct mei_cl *cl,
- 
- 	dev = cl->dev;
- 
--	if (dev->iamthif_state != MEI_IAMTHIF_READING)
-+	if (dev->iamthif_state != MEI_IAMTHIF_READING) {
-+		mei_irq_discard_msg(dev, mei_hdr);
- 		return 0;
-+	}
- 
- 	ret = mei_cl_irq_read_msg(cl, mei_hdr, cmpl_list);
- 	if (ret)
-diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
-index 0b05aa938799..a77643954523 100644
---- a/drivers/misc/mei/bus.c
-+++ b/drivers/misc/mei/bus.c
-@@ -53,6 +53,11 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
- 	bus = cl->dev;
- 
- 	mutex_lock(&bus->device_lock);
-+	if (bus->dev_state != MEI_DEV_ENABLED) {
-+		rets = -ENODEV;
-+		goto out;
-+	}
-+
- 	if (!mei_cl_is_connected(cl)) {
- 		rets = -ENODEV;
- 		goto out;
-@@ -109,6 +114,10 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
- 	bus = cl->dev;
- 
- 	mutex_lock(&bus->device_lock);
-+	if (bus->dev_state != MEI_DEV_ENABLED) {
-+		rets = -ENODEV;
-+		goto out;
-+	}
- 
- 	cb = mei_cl_read_cb(cl, NULL);
- 	if (cb)
-@@ -213,17 +222,23 @@ EXPORT_SYMBOL_GPL(mei_cldev_recv);
- static void mei_cl_bus_event_work(struct work_struct *work)
- {
- 	struct mei_cl_device *cldev;
-+	struct mei_device *bus;
- 
- 	cldev = container_of(work, struct mei_cl_device, event_work);
- 
-+	bus = cldev->bus;
-+
- 	if (cldev->event_cb)
- 		cldev->event_cb(cldev, cldev->events, cldev->event_context);
- 
- 	cldev->events = 0;
- 
- 	/* Prepare for the next read */
--	if (cldev->events_mask & BIT(MEI_CL_EVENT_RX))
-+	if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) {
-+		mutex_lock(&bus->device_lock);
- 		mei_cl_read_start(cldev->cl, 0, NULL);
-+		mutex_unlock(&bus->device_lock);
-+	}
- }
- 
- /**
-@@ -287,6 +302,7 @@ int mei_cldev_register_event_cb(struct mei_cl_device *cldev,
- 				unsigned long events_mask,
- 				mei_cldev_event_cb_t event_cb, void *context)
- {
-+	struct mei_device *bus = cldev->bus;
- 	int ret;
- 
- 	if (cldev->event_cb)
-@@ -299,15 +315,17 @@ int mei_cldev_register_event_cb(struct mei_cl_device *cldev,
- 	INIT_WORK(&cldev->event_work, mei_cl_bus_event_work);
- 
- 	if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) {
-+		mutex_lock(&bus->device_lock);
- 		ret = mei_cl_read_start(cldev->cl, 0, NULL);
-+		mutex_unlock(&bus->device_lock);
- 		if (ret && ret != -EBUSY)
- 			return ret;
- 	}
- 
- 	if (cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF)) {
--		mutex_lock(&cldev->cl->dev->device_lock);
-+		mutex_lock(&bus->device_lock);
- 		ret = mei_cl_notify_request(cldev->cl, NULL, event_cb ? 1 : 0);
--		mutex_unlock(&cldev->cl->dev->device_lock);
-+		mutex_unlock(&bus->device_lock);
- 		if (ret)
- 			return ret;
- 	}
-diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
-index a6c87c713193..958af84884b5 100644
---- a/drivers/misc/mei/client.c
-+++ b/drivers/misc/mei/client.c
-@@ -1735,6 +1735,10 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
- 			wake_up(&cl->wait);
- 
- 		break;
-+	case MEI_FOP_DISCONNECT_RSP:
-+		mei_io_cb_free(cb);
-+		mei_cl_set_disconnected(cl);
-+		break;
- 	default:
- 		BUG_ON(0);
- 	}
-diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
-index e7b7aad0999b..fd8a9f057ea6 100644
---- a/drivers/misc/mei/hbm.c
-+++ b/drivers/misc/mei/hbm.c
-@@ -873,8 +873,7 @@ static int mei_hbm_fw_disconnect_req(struct mei_device *dev,
- 		cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT_RSP, NULL);
- 		if (!cb)
- 			return -ENOMEM;
--		cl_dbg(dev, cl, "add disconnect response as first\n");
--		list_add(&cb->list, &dev->ctrl_wr_list.list);
-+		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
- 	}
- 	return 0;
- }
-diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
-index 64b568a0268d..d1df797c7568 100644
---- a/drivers/misc/mei/interrupt.c
-+++ b/drivers/misc/mei/interrupt.c
-@@ -76,7 +76,6 @@ static inline int mei_cl_hbm_equal(struct mei_cl *cl,
-  * @dev: mei device
-  * @hdr: message header
-  */
--static inline
- void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr)
- {
- 	/*
-@@ -184,10 +183,7 @@ static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
- 		return -EMSGSIZE;
- 
- 	ret = mei_hbm_cl_disconnect_rsp(dev, cl);
--	mei_cl_set_disconnected(cl);
--	mei_io_cb_free(cb);
--	mei_me_cl_put(cl->me_cl);
--	cl->me_cl = NULL;
-+	list_move_tail(&cb->list, &cmpl_list->list);
- 
- 	return ret;
- }
-diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
-index 4250555d5e72..1b06e2fd6858 100644
---- a/drivers/misc/mei/mei_dev.h
-+++ b/drivers/misc/mei/mei_dev.h
-@@ -782,6 +782,8 @@ bool mei_hbuf_acquire(struct mei_device *dev);
- 
- bool mei_write_is_idle(struct mei_device *dev);
- 
-+void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr);
-+
- #if IS_ENABLED(CONFIG_DEBUG_FS)
- int mei_dbgfs_register(struct mei_device *dev, const char *name);
- void mei_dbgfs_deregister(struct mei_device *dev);
-diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
-index 8310b4dbff06..6a451bd65bf3 100644
---- a/drivers/misc/mic/scif/scif_rma.c
-+++ b/drivers/misc/mic/scif/scif_rma.c
-@@ -1511,7 +1511,7 @@ off_t scif_register_pinned_pages(scif_epd_t epd,
- 	if ((map_flags & SCIF_MAP_FIXED) &&
- 	    ((ALIGN(offset, PAGE_SIZE) != offset) ||
- 	    (offset < 0) ||
--	    (offset + (off_t)len < offset)))
-+	    (len > LONG_MAX - offset)))
- 		return -EINVAL;
- 
- 	might_sleep();
-@@ -1614,7 +1614,7 @@ off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset,
- 	if ((map_flags & SCIF_MAP_FIXED) &&
- 	    ((ALIGN(offset, PAGE_SIZE) != offset) ||
- 	    (offset < 0) ||
--	    (offset + (off_t)len < offset)))
-+	    (len > LONG_MAX - offset)))
- 		return -EINVAL;
- 
- 	/* Unsupported protection requested */
-@@ -1732,7 +1732,8 @@ scif_unregister(scif_epd_t epd, off_t offset, size_t len)
- 
- 	/* Offset is not page aligned or offset+len wraps around */
- 	if ((ALIGN(offset, PAGE_SIZE) != offset) ||
--	    (offset + (off_t)len < offset))
-+	    (offset < 0) ||
-+	    (len > LONG_MAX - offset))
- 		return -EINVAL;
- 
- 	err = scif_verify_epd(ep);
-diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
-index fe207e542032..7630b6b7eb75 100644
---- a/drivers/mmc/card/block.c
-+++ b/drivers/mmc/card/block.c
-@@ -86,7 +86,6 @@ static int max_devices;
- 
- /* TODO: Replace these with struct ida */
- static DECLARE_BITMAP(dev_use, MAX_DEVICES);
--static DECLARE_BITMAP(name_use, MAX_DEVICES);
- 
- /*
-  * There is one mmc_blk_data per slot.
-@@ -105,7 +104,6 @@ struct mmc_blk_data {
- 	unsigned int	usage;
- 	unsigned int	read_only;
- 	unsigned int	part_type;
--	unsigned int	name_idx;
- 	unsigned int	reset_done;
- #define MMC_BLK_READ		BIT(0)
- #define MMC_BLK_WRITE		BIT(1)
-@@ -589,6 +587,14 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
- 	struct mmc_card *card;
- 	int err = 0, ioc_err = 0;
- 
-+	/*
-+	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
-+	 * whole block device, not on a partition.  This prevents overspray
-+	 * between sibling partitions.
-+	 */
-+	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
-+		return -EPERM;
-+
- 	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
- 	if (IS_ERR(idata))
- 		return PTR_ERR(idata);
-@@ -631,6 +637,14 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
- 	int i, err = 0, ioc_err = 0;
- 	__u64 num_of_cmds;
- 
-+	/*
-+	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
-+	 * whole block device, not on a partition.  This prevents overspray
-+	 * between sibling partitions.
-+	 */
-+	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
-+		return -EPERM;
-+
- 	if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
- 			   sizeof(num_of_cmds)))
- 		return -EFAULT;
-@@ -688,14 +702,6 @@ cmd_err:
- static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
- 	unsigned int cmd, unsigned long arg)
- {
--	/*
--	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
--	 * whole block device, not on a partition.  This prevents overspray
--	 * between sibling partitions.
--	 */
--	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
--		return -EPERM;
--
- 	switch (cmd) {
- 	case MMC_IOC_CMD:
- 		return mmc_blk_ioctl_cmd(bdev,
-@@ -2194,19 +2200,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
- 		goto out;
- 	}
- 
--	/*
--	 * !subname implies we are creating main mmc_blk_data that will be
--	 * associated with mmc_card with dev_set_drvdata. Due to device
--	 * partitions, devidx will not coincide with a per-physical card
--	 * index anymore so we keep track of a name index.
--	 */
--	if (!subname) {
--		md->name_idx = find_first_zero_bit(name_use, max_devices);
--		__set_bit(md->name_idx, name_use);
--	} else
--		md->name_idx = ((struct mmc_blk_data *)
--				dev_to_disk(parent)->private_data)->name_idx;
--
- 	md->area_type = area_type;
- 
- 	/*
-@@ -2256,7 +2249,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
- 	 */
- 
- 	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
--		 "mmcblk%u%s", md->name_idx, subname ? subname : "");
-+		 "mmcblk%u%s", card->host->index, subname ? subname : "");
- 
- 	if (mmc_card_mmc(card))
- 		blk_queue_logical_block_size(md->queue.queue,
-@@ -2410,7 +2403,6 @@ static void mmc_blk_remove_parts(struct mmc_card *card,
- 	struct list_head *pos, *q;
- 	struct mmc_blk_data *part_md;
- 
--	__clear_bit(md->name_idx, name_use);
- 	list_for_each_safe(pos, q, &md->part) {
- 		part_md = list_entry(pos, struct mmc_blk_data, part);
- 		list_del(pos);
-@@ -2502,11 +2494,12 @@ static const struct mmc_fixup blk_fixups[] =
- 		  MMC_QUIRK_BLK_NO_CMD23),
- 
- 	/*
--	 * Some Micron MMC cards needs longer data read timeout than
--	 * indicated in CSD.
-+	 * Some MMC cards need longer data read timeout than indicated in CSD.
- 	 */
- 	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
- 		  MMC_QUIRK_LONG_READ_TIME),
-+	MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
-+		  MMC_QUIRK_LONG_READ_TIME),
- 
- 	/*
- 	 * On these Samsung MoviNAND parts, performing secure erase or
-diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
-index f95d41ffc766..228a81bcea49 100644
---- a/drivers/mmc/core/core.c
-+++ b/drivers/mmc/core/core.c
-@@ -868,11 +868,11 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
- 	/*
- 	 * Some cards require longer data read timeout than indicated in CSD.
- 	 * Address this by setting the read timeout to a "reasonably high"
--	 * value. For the cards tested, 300ms has proven enough. If necessary,
-+	 * value. For the cards tested, 600ms has proven enough. If necessary,
- 	 * this value can be increased if other problematic cards require this.
- 	 */
- 	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
--		data->timeout_ns = 300000000;
-+		data->timeout_ns = 600000000;
- 		data->timeout_clks = 0;
- 	}
- 
-diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
-index bf49e44571f2..07a419fedd43 100644
---- a/drivers/mmc/core/mmc.c
-+++ b/drivers/mmc/core/mmc.c
-@@ -333,6 +333,9 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
- 	}
- }
- 
-+/* Minimum partition switch timeout in milliseconds */
-+#define MMC_MIN_PART_SWITCH_TIME	300
-+
- /*
-  * Decode extended CSD.
-  */
-@@ -397,6 +400,10 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
- 
- 		/* EXT_CSD value is in units of 10ms, but we store in ms */
- 		card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
-+		/* Some eMMC set the value too low so set a minimum */
-+		if (card->ext_csd.part_time &&
-+		    card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME)
-+			card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME;
- 
- 		/* Sleep / awake timeout in 100ns units */
- 		if (sa_shift > 0 && sa_shift <= 0x17)
-diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
-index 1526b8a10b09..3b944fc70eec 100644
---- a/drivers/mmc/host/Kconfig
-+++ b/drivers/mmc/host/Kconfig
-@@ -97,6 +97,7 @@ config MMC_RICOH_MMC
- config MMC_SDHCI_ACPI
- 	tristate "SDHCI support for ACPI enumerated SDHCI controllers"
- 	depends on MMC_SDHCI && ACPI
-+	select IOSF_MBI if X86
- 	help
- 	  This selects support for ACPI enumerated SDHCI controllers,
- 	  identified by ACPI Compatibility ID PNP0D40 or specific
-diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
-index 851ccd9ac868..25c179592125 100644
---- a/drivers/mmc/host/atmel-mci.c
-+++ b/drivers/mmc/host/atmel-mci.c
-@@ -2443,7 +2443,7 @@ static int atmci_configure_dma(struct atmel_mci *host)
- 		struct mci_platform_data *pdata = host->pdev->dev.platform_data;
- 		dma_cap_mask_t mask;
- 
--		if (!pdata->dma_filter)
-+		if (!pdata || !pdata->dma_filter)
- 			return -ENODEV;
- 
- 		dma_cap_zero(mask);
-diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
-index 3446097a43c0..e77d79c8cd9f 100644
---- a/drivers/mmc/host/mmc_spi.c
-+++ b/drivers/mmc/host/mmc_spi.c
-@@ -1442,6 +1442,12 @@ static int mmc_spi_probe(struct spi_device *spi)
- 					     host->pdata->cd_debounce);
- 		if (status != 0)
- 			goto fail_add_host;
-+
-+		/* The platform has a CD GPIO signal that may support
-+		 * interrupts, so let mmc_gpiod_request_cd_irq() decide
-+		 * if polling is needed or not.
-+		 */
-+		mmc->caps &= ~MMC_CAP_NEEDS_POLL;
- 		mmc_gpiod_request_cd_irq(mmc);
- 	}
- 
-diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
-index a5cda926d38e..e517be7f03bf 100644
---- a/drivers/mmc/host/sdhci-acpi.c
-+++ b/drivers/mmc/host/sdhci-acpi.c
-@@ -41,6 +41,11 @@
- #include <linux/mmc/pm.h>
- #include <linux/mmc/slot-gpio.h>
- 
-+#ifdef CONFIG_X86
-+#include <asm/cpu_device_id.h>
-+#include <asm/iosf_mbi.h>
-+#endif
-+
- #include "sdhci.h"
- 
- enum {
-@@ -146,6 +151,75 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
- 	.ops = &sdhci_acpi_ops_int,
- };
- 
-+#ifdef CONFIG_X86
-+
-+static bool sdhci_acpi_byt(void)
-+{
-+	static const struct x86_cpu_id byt[] = {
-+		{ X86_VENDOR_INTEL, 6, 0x37 },
-+		{}
-+	};
-+
-+	return x86_match_cpu(byt);
-+}
-+
-+#define BYT_IOSF_SCCEP			0x63
-+#define BYT_IOSF_OCP_NETCTRL0		0x1078
-+#define BYT_IOSF_OCP_TIMEOUT_BASE	GENMASK(10, 8)
-+
-+static void sdhci_acpi_byt_setting(struct device *dev)
-+{
-+	u32 val = 0;
-+
-+	if (!sdhci_acpi_byt())
-+		return;
-+
-+	if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
-+			  &val)) {
-+		dev_err(dev, "%s read error\n", __func__);
-+		return;
-+	}
-+
-+	if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
-+		return;
-+
-+	val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;
-+
-+	if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0,
-+			   val)) {
-+		dev_err(dev, "%s write error\n", __func__);
-+		return;
-+	}
-+
-+	dev_dbg(dev, "%s completed\n", __func__);
-+}
-+
-+static bool sdhci_acpi_byt_defer(struct device *dev)
-+{
-+	if (!sdhci_acpi_byt())
-+		return false;
-+
-+	if (!iosf_mbi_available())
-+		return true;
-+
-+	sdhci_acpi_byt_setting(dev);
-+
-+	return false;
-+}
-+
-+#else
-+
-+static inline void sdhci_acpi_byt_setting(struct device *dev)
-+{
-+}
-+
-+static inline bool sdhci_acpi_byt_defer(struct device *dev)
-+{
-+	return false;
-+}
-+
-+#endif
-+
- static int bxt_get_cd(struct mmc_host *mmc)
- {
- 	int gpio_cd = mmc_gpio_get_cd(mmc);
-@@ -233,7 +307,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
- 	.chip    = &sdhci_acpi_chip_int,
- 	.caps    = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
- 		   MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
--		   MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
-+		   MMC_CAP_WAIT_WHILE_BUSY,
- 	.caps2   = MMC_CAP2_HC_ERASE_SZ,
- 	.flags   = SDHCI_ACPI_RUNTIME_PM,
- 	.quirks  = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
-@@ -248,7 +322,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
- 		   SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
- 	.quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
- 	.caps    = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD |
--		   MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
-+		   MMC_CAP_WAIT_WHILE_BUSY,
- 	.flags   = SDHCI_ACPI_RUNTIME_PM,
- 	.pm_caps = MMC_PM_KEEP_POWER,
- 	.probe_slot	= sdhci_acpi_sdio_probe_slot,
-@@ -260,7 +334,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
- 	.quirks  = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
- 	.quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
- 		   SDHCI_QUIRK2_STOP_WITH_TC,
--	.caps    = MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
-+	.caps    = MMC_CAP_WAIT_WHILE_BUSY,
- 	.probe_slot	= sdhci_acpi_sd_probe_slot,
- };
- 
-@@ -322,7 +396,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
- {
- 	struct device *dev = &pdev->dev;
- 	acpi_handle handle = ACPI_HANDLE(dev);
--	struct acpi_device *device;
-+	struct acpi_device *device, *child;
- 	struct sdhci_acpi_host *c;
- 	struct sdhci_host *host;
- 	struct resource *iomem;
-@@ -334,9 +408,17 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
- 	if (acpi_bus_get_device(handle, &device))
- 		return -ENODEV;
- 
-+	/* Power on the SDHCI controller and its children */
-+	acpi_device_fix_up_power(device);
-+	list_for_each_entry(child, &device->children, node)
-+		acpi_device_fix_up_power(child);
-+
- 	if (acpi_bus_get_status(device) || !device->status.present)
- 		return -ENODEV;
- 
-+	if (sdhci_acpi_byt_defer(dev))
-+		return -EPROBE_DEFER;
-+
- 	hid = acpi_device_hid(device);
- 	uid = device->pnp.unique_id;
- 
-@@ -460,6 +542,8 @@ static int sdhci_acpi_resume(struct device *dev)
- {
- 	struct sdhci_acpi_host *c = dev_get_drvdata(dev);
- 
-+	sdhci_acpi_byt_setting(&c->pdev->dev);
-+
- 	return sdhci_resume_host(c->host);
- }
- 
-@@ -483,6 +567,8 @@ static int sdhci_acpi_runtime_resume(struct device *dev)
- {
- 	struct sdhci_acpi_host *c = dev_get_drvdata(dev);
- 
-+	sdhci_acpi_byt_setting(&c->pdev->dev);
-+
- 	return sdhci_runtime_resume_host(c->host);
- }
- 
-diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
-index df3b8eced8c4..807f930a7c14 100644
---- a/drivers/mmc/host/sdhci-pci-core.c
-+++ b/drivers/mmc/host/sdhci-pci-core.c
-@@ -361,7 +361,6 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
- {
- 	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
- 				 MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
--				 MMC_CAP_BUS_WIDTH_TEST |
- 				 MMC_CAP_WAIT_WHILE_BUSY;
- 	slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ;
- 	slot->hw_reset = sdhci_pci_int_hw_reset;
-@@ -377,19 +376,18 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
- static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
- {
- 	slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
--				 MMC_CAP_BUS_WIDTH_TEST |
- 				 MMC_CAP_WAIT_WHILE_BUSY;
- 	return 0;
- }
- 
- static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
- {
--	slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST |
--				 MMC_CAP_WAIT_WHILE_BUSY;
-+	slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
- 	slot->cd_con_id = NULL;
- 	slot->cd_idx = 0;
- 	slot->cd_override_level = true;
- 	if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
-+	    slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD ||
- 	    slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD)
- 		slot->host->mmc_host_ops.get_cd = bxt_get_cd;
- 
-@@ -1173,6 +1171,30 @@ static const struct pci_device_id pci_ids[] = {
- 
- 	{
- 		.vendor		= PCI_VENDOR_ID_INTEL,
-+		.device		= PCI_DEVICE_ID_INTEL_BXTM_EMMC,
-+		.subvendor	= PCI_ANY_ID,
-+		.subdevice	= PCI_ANY_ID,
-+		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_emmc,
-+	},
-+
-+	{
-+		.vendor		= PCI_VENDOR_ID_INTEL,
-+		.device		= PCI_DEVICE_ID_INTEL_BXTM_SDIO,
-+		.subvendor	= PCI_ANY_ID,
-+		.subdevice	= PCI_ANY_ID,
-+		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_sdio,
-+	},
-+
-+	{
-+		.vendor		= PCI_VENDOR_ID_INTEL,
-+		.device		= PCI_DEVICE_ID_INTEL_BXTM_SD,
-+		.subvendor	= PCI_ANY_ID,
-+		.subdevice	= PCI_ANY_ID,
-+		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_sd,
-+	},
-+
-+	{
-+		.vendor		= PCI_VENDOR_ID_INTEL,
- 		.device		= PCI_DEVICE_ID_INTEL_APL_EMMC,
- 		.subvendor	= PCI_ANY_ID,
- 		.subdevice	= PCI_ANY_ID,
-diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
-index d1a0b4db60db..89e7151684a1 100644
---- a/drivers/mmc/host/sdhci-pci.h
-+++ b/drivers/mmc/host/sdhci-pci.h
-@@ -28,6 +28,9 @@
- #define PCI_DEVICE_ID_INTEL_BXT_SD	0x0aca
- #define PCI_DEVICE_ID_INTEL_BXT_EMMC	0x0acc
- #define PCI_DEVICE_ID_INTEL_BXT_SDIO	0x0ad0
-+#define PCI_DEVICE_ID_INTEL_BXTM_SD	0x1aca
-+#define PCI_DEVICE_ID_INTEL_BXTM_EMMC	0x1acc
-+#define PCI_DEVICE_ID_INTEL_BXTM_SDIO	0x1ad0
- #define PCI_DEVICE_ID_INTEL_APL_SD	0x5aca
- #define PCI_DEVICE_ID_INTEL_APL_EMMC	0x5acc
- #define PCI_DEVICE_ID_INTEL_APL_SDIO	0x5ad0
-diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
-index f5edf9d3a18a..452bf500c82e 100644
---- a/drivers/mmc/host/sdhci-pxav3.c
-+++ b/drivers/mmc/host/sdhci-pxav3.c
-@@ -137,6 +137,10 @@ static int armada_38x_quirks(struct platform_device *pdev,
- 
- 	host->quirks &= ~SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
- 	host->quirks |= SDHCI_QUIRK_MISSING_CAPS;
-+
-+	host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
-+	host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
-+
- 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- 					   "conf-sdio3");
- 	if (res) {
-@@ -150,7 +154,6 @@ static int armada_38x_quirks(struct platform_device *pdev,
- 		 * Configuration register, if the adjustment is not done,
- 		 * remove them from the capabilities.
- 		 */
--		host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
- 		host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50);
- 
- 		dev_warn(&pdev->dev, "conf-sdio3 register not found: disabling SDR50 and DDR50 modes.\nConsider updating your dtb\n");
-@@ -161,7 +164,6 @@ static int armada_38x_quirks(struct platform_device *pdev,
- 	 * controller has different capabilities than the ones shown
- 	 * in its registers
- 	 */
--	host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
- 	if (of_property_read_bool(np, "no-1-8-v")) {
- 		host->caps &= ~SDHCI_CAN_VDD_180;
- 		host->mmc->caps &= ~MMC_CAP_1_8V_DDR;
-@@ -307,8 +309,30 @@ static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
- 		__func__, uhs, ctrl_2);
- }
- 
-+static void pxav3_set_power(struct sdhci_host *host, unsigned char mode,
-+			    unsigned short vdd)
-+{
-+	struct mmc_host *mmc = host->mmc;
-+	u8 pwr = host->pwr;
-+
-+	sdhci_set_power(host, mode, vdd);
-+
-+	if (host->pwr == pwr)
-+		return;
-+
-+	if (host->pwr == 0)
-+		vdd = 0;
-+
-+	if (!IS_ERR(mmc->supply.vmmc)) {
-+		spin_unlock_irq(&host->lock);
-+		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
-+		spin_lock_irq(&host->lock);
-+	}
-+}
-+
- static const struct sdhci_ops pxav3_sdhci_ops = {
- 	.set_clock = sdhci_set_clock,
-+	.set_power = pxav3_set_power,
- 	.platform_send_init_74_clocks = pxav3_gen_init_74_clocks,
- 	.get_max_clock = sdhci_pltfm_clk_get_max_clock,
- 	.set_bus_width = sdhci_set_bus_width,
-diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
-index 83c4bf7bc16c..0004721cd213 100644
---- a/drivers/mmc/host/sdhci-tegra.c
-+++ b/drivers/mmc/host/sdhci-tegra.c
-@@ -147,10 +147,16 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
- 	/* Advertise UHS modes as supported by host */
- 	if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
- 		misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
-+	else
-+		misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR50;
- 	if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
- 		misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
-+	else
-+		misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_DDR50;
- 	if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
- 		misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
-+	else
-+		misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR104;
- 	sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
- 
- 	clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
-@@ -188,7 +194,7 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
- 	unsigned long host_clk;
- 
- 	if (!clock)
--		return;
-+		return sdhci_set_clock(host, clock);
- 
- 	host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
- 	clk_set_rate(pltfm_host->clk, host_clk);
-@@ -335,6 +341,10 @@ static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
- 
- static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
- 	.pdata = &sdhci_tegra114_pdata,
-+};
-+
-+static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
-+	.pdata = &sdhci_tegra114_pdata,
- 	.nvquirks = NVQUIRK_ENABLE_SDR50 |
- 		    NVQUIRK_ENABLE_DDR50 |
- 		    NVQUIRK_ENABLE_SDR104,
-@@ -357,7 +367,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
- 
- static const struct of_device_id sdhci_tegra_dt_match[] = {
- 	{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
--	{ .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra114 },
-+	{ .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
- 	{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
- 	{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
- 	{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
-diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
-index add9fdfd1d8f..6d485b5fa5ca 100644
---- a/drivers/mmc/host/sdhci.c
-+++ b/drivers/mmc/host/sdhci.c
-@@ -465,8 +465,6 @@ static void sdhci_adma_mark_end(void *desc)
- static int sdhci_adma_table_pre(struct sdhci_host *host,
- 	struct mmc_data *data)
- {
--	int direction;
--
- 	void *desc;
- 	void *align;
- 	dma_addr_t addr;
-@@ -483,20 +481,9 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
- 	 * We currently guess that it is LE.
- 	 */
- 
--	if (data->flags & MMC_DATA_READ)
--		direction = DMA_FROM_DEVICE;
--	else
--		direction = DMA_TO_DEVICE;
--
--	host->align_addr = dma_map_single(mmc_dev(host->mmc),
--		host->align_buffer, host->align_buffer_sz, direction);
--	if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
--		goto fail;
--	BUG_ON(host->align_addr & SDHCI_ADMA2_MASK);
--
- 	host->sg_count = sdhci_pre_dma_transfer(host, data);
- 	if (host->sg_count < 0)
--		goto unmap_align;
-+		return -EINVAL;
- 
- 	desc = host->adma_table;
- 	align = host->align_buffer;
-@@ -570,22 +557,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
- 		/* nop, end, valid */
- 		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
- 	}
--
--	/*
--	 * Resync align buffer as we might have changed it.
--	 */
--	if (data->flags & MMC_DATA_WRITE) {
--		dma_sync_single_for_device(mmc_dev(host->mmc),
--			host->align_addr, host->align_buffer_sz, direction);
--	}
--
- 	return 0;
--
--unmap_align:
--	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
--		host->align_buffer_sz, direction);
--fail:
--	return -EINVAL;
- }
- 
- static void sdhci_adma_table_post(struct sdhci_host *host,
-@@ -605,9 +577,6 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
- 	else
- 		direction = DMA_TO_DEVICE;
- 
--	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
--		host->align_buffer_sz, direction);
--
- 	/* Do a quick scan of the SG list for any unaligned mappings */
- 	has_unaligned = false;
- 	for_each_sg(data->sg, sg, host->sg_count, i)
-@@ -666,9 +635,20 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
- 	if (!data)
- 		target_timeout = cmd->busy_timeout * 1000;
- 	else {
--		target_timeout = data->timeout_ns / 1000;
--		if (host->clock)
--			target_timeout += data->timeout_clks / host->clock;
-+		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
-+		if (host->clock && data->timeout_clks) {
-+			unsigned long long val;
-+
-+			/*
-+			 * data->timeout_clks is in units of clock cycles.
-+			 * host->clock is in Hz.  target_timeout is in us.
-+			 * Hence, us = 1000000 * cycles / Hz.  Round up.
-+			 */
-+			val = 1000000 * data->timeout_clks;
-+			if (do_div(val, host->clock))
-+				target_timeout++;
-+			target_timeout += val;
-+		}
- 	}
- 
- 	/*
-@@ -1003,6 +983,9 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
- 
- 	WARN_ON(host->cmd);
- 
-+	/* Initially, a command has no error */
-+	cmd->error = 0;
-+
- 	/* Wait max 10 ms */
- 	timeout = 10;
- 
-@@ -1097,8 +1080,6 @@ static void sdhci_finish_command(struct sdhci_host *host)
- 		}
- 	}
- 
--	host->cmd->error = 0;
--
- 	/* Finished CMD23, now send actual command. */
- 	if (host->cmd == host->mrq->sbc) {
- 		host->cmd = NULL;
-@@ -1269,10 +1250,24 @@ clock_set:
- }
- EXPORT_SYMBOL_GPL(sdhci_set_clock);
- 
--static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
--			    unsigned short vdd)
-+static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
-+				unsigned short vdd)
- {
- 	struct mmc_host *mmc = host->mmc;
-+
-+	spin_unlock_irq(&host->lock);
-+	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
-+	spin_lock_irq(&host->lock);
-+
-+	if (mode != MMC_POWER_OFF)
-+		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
-+	else
-+		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
-+}
-+
-+void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
-+		     unsigned short vdd)
-+{
- 	u8 pwr = 0;
- 
- 	if (mode != MMC_POWER_OFF) {
-@@ -1304,7 +1299,6 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
- 		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
- 		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
- 			sdhci_runtime_pm_bus_off(host);
--		vdd = 0;
- 	} else {
- 		/*
- 		 * Spec says that we should clear the power reg before setting
-@@ -1335,12 +1329,20 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
- 		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
- 			mdelay(10);
- 	}
-+}
-+EXPORT_SYMBOL_GPL(sdhci_set_power);
- 
--	if (!IS_ERR(mmc->supply.vmmc)) {
--		spin_unlock_irq(&host->lock);
--		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
--		spin_lock_irq(&host->lock);
--	}
-+static void __sdhci_set_power(struct sdhci_host *host, unsigned char mode,
-+			      unsigned short vdd)
-+{
-+	struct mmc_host *mmc = host->mmc;
-+
-+	if (host->ops->set_power)
-+		host->ops->set_power(host, mode, vdd);
-+	else if (!IS_ERR(mmc->supply.vmmc))
-+		sdhci_set_power_reg(host, mode, vdd);
-+	else
-+		sdhci_set_power(host, mode, vdd);
- }
- 
- /*****************************************************************************\
-@@ -1490,7 +1492,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
- 		}
- 	}
- 
--	sdhci_set_power(host, ios->power_mode, ios->vdd);
-+	__sdhci_set_power(host, ios->power_mode, ios->vdd);
- 
- 	if (host->ops->platform_send_init_74_clocks)
- 		host->ops->platform_send_init_74_clocks(host, ios->power_mode);
-@@ -2114,14 +2116,13 @@ static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
- 	struct sdhci_host *host = mmc_priv(mmc);
- 	struct mmc_data *data = mrq->data;
- 
--	if (host->flags & SDHCI_REQ_USE_DMA) {
--		if (data->host_cookie == COOKIE_GIVEN ||
--				data->host_cookie == COOKIE_MAPPED)
--			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
--					 data->flags & MMC_DATA_WRITE ?
--					 DMA_TO_DEVICE : DMA_FROM_DEVICE);
--		data->host_cookie = COOKIE_UNMAPPED;
--	}
-+	if (data->host_cookie == COOKIE_GIVEN ||
-+	    data->host_cookie == COOKIE_MAPPED)
-+		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-+			     data->flags & MMC_DATA_WRITE ?
-+			       DMA_TO_DEVICE : DMA_FROM_DEVICE);
-+
-+	data->host_cookie = COOKIE_UNMAPPED;
- }
- 
- static int sdhci_pre_dma_transfer(struct sdhci_host *host,
-@@ -2238,6 +2239,22 @@ static void sdhci_tasklet_finish(unsigned long param)
- 	mrq = host->mrq;
- 
- 	/*
-+	 * Always unmap the data buffers if they were mapped by
-+	 * sdhci_prepare_data() whenever we finish with a request.
-+	 * This avoids leaking DMA mappings on error.
-+	 */
-+	if (host->flags & SDHCI_REQ_USE_DMA) {
-+		struct mmc_data *data = mrq->data;
-+
-+		if (data && data->host_cookie == COOKIE_MAPPED) {
-+			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-+				     (data->flags & MMC_DATA_READ) ?
-+				     DMA_FROM_DEVICE : DMA_TO_DEVICE);
-+			data->host_cookie = COOKIE_UNMAPPED;
-+		}
-+	}
-+
-+	/*
- 	 * The controller needs a reset of internal state machines
- 	 * upon error conditions.
- 	 */
-@@ -2322,13 +2339,30 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
- 		return;
- 	}
- 
--	if (intmask & SDHCI_INT_TIMEOUT)
--		host->cmd->error = -ETIMEDOUT;
--	else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
--			SDHCI_INT_INDEX))
--		host->cmd->error = -EILSEQ;
-+	if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
-+		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
-+		if (intmask & SDHCI_INT_TIMEOUT)
-+			host->cmd->error = -ETIMEDOUT;
-+		else
-+			host->cmd->error = -EILSEQ;
-+
-+		/*
-+		 * If this command initiates a data phase and a response
-+		 * CRC error is signalled, the card can start transferring
-+		 * data - the card may have received the command without
-+		 * error.  We must not terminate the mmc_request early.
-+		 *
-+		 * If the card did not receive the command or returned an
-+		 * error which prevented it sending data, the data phase
-+		 * will time out.
-+		 */
-+		if (host->cmd->data &&
-+		    (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
-+		     SDHCI_INT_CRC) {
-+			host->cmd = NULL;
-+			return;
-+		}
- 
--	if (host->cmd->error) {
- 		tasklet_schedule(&host->finish_tasklet);
- 		return;
- 	}
-@@ -2967,14 +3001,21 @@ int sdhci_add_host(struct sdhci_host *host)
- 						      &host->adma_addr,
- 						      GFP_KERNEL);
- 		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
--		host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
-+		host->align_buffer = dma_alloc_coherent(mmc_dev(mmc),
-+							host->align_buffer_sz,
-+							&host->align_addr,
-+							GFP_KERNEL);
- 		if (!host->adma_table || !host->align_buffer) {
- 			if (host->adma_table)
- 				dma_free_coherent(mmc_dev(mmc),
- 						  host->adma_table_sz,
- 						  host->adma_table,
- 						  host->adma_addr);
--			kfree(host->align_buffer);
-+			if (host->align_buffer)
-+				dma_free_coherent(mmc_dev(mmc),
-+						  host->align_buffer_sz,
-+						  host->align_buffer,
-+						  host->align_addr);
- 			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
- 				mmc_hostname(mmc));
- 			host->flags &= ~SDHCI_USE_ADMA;
-@@ -2986,10 +3027,14 @@ int sdhci_add_host(struct sdhci_host *host)
- 			host->flags &= ~SDHCI_USE_ADMA;
- 			dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
- 					  host->adma_table, host->adma_addr);
--			kfree(host->align_buffer);
-+			dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz,
-+					  host->align_buffer, host->align_addr);
- 			host->adma_table = NULL;
- 			host->align_buffer = NULL;
- 		}
-+
-+		/* dma_alloc_coherent returns page aligned and sized buffers */
-+		BUG_ON(host->align_addr & SDHCI_ADMA2_MASK);
- 	}
- 
- 	/*
-@@ -3072,14 +3117,14 @@ int sdhci_add_host(struct sdhci_host *host)
- 		if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
- 			host->timeout_clk *= 1000;
- 
-+		if (override_timeout_clk)
-+			host->timeout_clk = override_timeout_clk;
-+
- 		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
- 			host->ops->get_max_timeout_count(host) : 1 << 27;
- 		mmc->max_busy_timeout /= host->timeout_clk;
- 	}
- 
--	if (override_timeout_clk)
--		host->timeout_clk = override_timeout_clk;
--
- 	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
- 	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
- 
-@@ -3452,7 +3497,9 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
- 	if (host->adma_table)
- 		dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
- 				  host->adma_table, host->adma_addr);
--	kfree(host->align_buffer);
-+	if (host->align_buffer)
-+		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz,
-+				  host->align_buffer, host->align_addr);
- 
- 	host->adma_table = NULL;
- 	host->align_buffer = NULL;
-diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
-index 0115e9907bf8..033d72b5bbd5 100644
---- a/drivers/mmc/host/sdhci.h
-+++ b/drivers/mmc/host/sdhci.h
-@@ -529,6 +529,8 @@ struct sdhci_ops {
- #endif
- 
- 	void	(*set_clock)(struct sdhci_host *host, unsigned int clock);
-+	void	(*set_power)(struct sdhci_host *host, unsigned char mode,
-+			     unsigned short vdd);
- 
- 	int		(*enable_dma)(struct sdhci_host *host);
- 	unsigned int	(*get_max_clock)(struct sdhci_host *host);
-@@ -660,6 +662,8 @@ static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host)
- }
- 
- void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
-+void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
-+		     unsigned short vdd);
- void sdhci_set_bus_width(struct sdhci_host *host, int width);
- void sdhci_reset(struct sdhci_host *host, u8 mask);
- void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
-diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
-index 844fc07d22cd..f7009c1cb90c 100644
---- a/drivers/mtd/nand/brcmnand/brcmnand.c
-+++ b/drivers/mtd/nand/brcmnand/brcmnand.c
-@@ -311,6 +311,36 @@ static const u16 brcmnand_regs_v60[] = {
- 	[BRCMNAND_FC_BASE]		= 0x400,
- };
- 
-+/* BRCMNAND v7.1 */
-+static const u16 brcmnand_regs_v71[] = {
-+	[BRCMNAND_CMD_START]		=  0x04,
-+	[BRCMNAND_CMD_EXT_ADDRESS]	=  0x08,
-+	[BRCMNAND_CMD_ADDRESS]		=  0x0c,
-+	[BRCMNAND_INTFC_STATUS]		=  0x14,
-+	[BRCMNAND_CS_SELECT]		=  0x18,
-+	[BRCMNAND_CS_XOR]		=  0x1c,
-+	[BRCMNAND_LL_OP]		=  0x20,
-+	[BRCMNAND_CS0_BASE]		=  0x50,
-+	[BRCMNAND_CS1_BASE]		=     0,
-+	[BRCMNAND_CORR_THRESHOLD]	=  0xdc,
-+	[BRCMNAND_CORR_THRESHOLD_EXT]	=  0xe0,
-+	[BRCMNAND_UNCORR_COUNT]		=  0xfc,
-+	[BRCMNAND_CORR_COUNT]		= 0x100,
-+	[BRCMNAND_CORR_EXT_ADDR]	= 0x10c,
-+	[BRCMNAND_CORR_ADDR]		= 0x110,
-+	[BRCMNAND_UNCORR_EXT_ADDR]	= 0x114,
-+	[BRCMNAND_UNCORR_ADDR]		= 0x118,
-+	[BRCMNAND_SEMAPHORE]		= 0x150,
-+	[BRCMNAND_ID]			= 0x194,
-+	[BRCMNAND_ID_EXT]		= 0x198,
-+	[BRCMNAND_LL_RDATA]		= 0x19c,
-+	[BRCMNAND_OOB_READ_BASE]	= 0x200,
-+	[BRCMNAND_OOB_READ_10_BASE]	=     0,
-+	[BRCMNAND_OOB_WRITE_BASE]	= 0x280,
-+	[BRCMNAND_OOB_WRITE_10_BASE]	=     0,
-+	[BRCMNAND_FC_BASE]		= 0x400,
-+};
-+
- enum brcmnand_cs_reg {
- 	BRCMNAND_CS_CFG_EXT = 0,
- 	BRCMNAND_CS_CFG,
-@@ -406,7 +436,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
- 	}
- 
- 	/* Register offsets */
--	if (ctrl->nand_version >= 0x0600)
-+	if (ctrl->nand_version >= 0x0701)
-+		ctrl->reg_offsets = brcmnand_regs_v71;
-+	else if (ctrl->nand_version >= 0x0600)
- 		ctrl->reg_offsets = brcmnand_regs_v60;
- 	else if (ctrl->nand_version >= 0x0500)
- 		ctrl->reg_offsets = brcmnand_regs_v50;
-diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
-index f2c8ff398d6c..171d146645ba 100644
---- a/drivers/mtd/nand/nand_base.c
-+++ b/drivers/mtd/nand/nand_base.c
-@@ -4021,7 +4021,6 @@ static int nand_dt_init(struct nand_chip *chip)
-  * This is the first phase of the normal nand_scan() function. It reads the
-  * flash ID and sets up MTD fields accordingly.
-  *
-- * The mtd->owner field must be set to the module of the caller.
-  */
- int nand_scan_ident(struct mtd_info *mtd, int maxchips,
- 		    struct nand_flash_dev *table)
-@@ -4443,19 +4442,12 @@ EXPORT_SYMBOL(nand_scan_tail);
-  *
-  * This fills out all the uninitialized function pointers with the defaults.
-  * The flash ID is read and the mtd/chip structures are filled with the
-- * appropriate values. The mtd->owner field must be set to the module of the
-- * caller.
-+ * appropriate values.
-  */
- int nand_scan(struct mtd_info *mtd, int maxchips)
- {
- 	int ret;
- 
--	/* Many callers got this wrong, so check for it for a while... */
--	if (!mtd->owner && caller_is_module()) {
--		pr_crit("%s called with NULL mtd->owner!\n", __func__);
--		BUG();
--	}
--
- 	ret = nand_scan_ident(mtd, maxchips, NULL);
- 	if (!ret)
- 		ret = nand_scan_tail(mtd);
-diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
-index 86fc245dc71a..fd78644469fa 100644
---- a/drivers/mtd/nand/pxa3xx_nand.c
-+++ b/drivers/mtd/nand/pxa3xx_nand.c
-@@ -1738,7 +1738,7 @@ static int alloc_nand_resource(struct platform_device *pdev)
- 	if (ret < 0)
- 		return ret;
- 
--	if (use_dma) {
-+	if (!np && use_dma) {
- 		r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- 		if (r == NULL) {
- 			dev_err(&pdev->dev,
-diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
-index 43b3392ffee7..652d01832873 100644
---- a/drivers/mtd/onenand/onenand_base.c
-+++ b/drivers/mtd/onenand/onenand_base.c
-@@ -2599,6 +2599,7 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
-  */
- static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
- {
-+	struct onenand_chip *this = mtd->priv;
- 	int ret;
- 
- 	ret = onenand_block_isbad(mtd, ofs);
-@@ -2610,7 +2611,7 @@ static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
- 	}
- 
- 	onenand_get_device(mtd, FL_WRITING);
--	ret = mtd_block_markbad(mtd, ofs);
-+	ret = this->block_markbad(mtd, ofs);
- 	onenand_release_device(mtd);
- 	return ret;
- }
-diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
-index ed0c19c558b5..3028c06547c1 100644
---- a/drivers/mtd/spi-nor/spi-nor.c
-+++ b/drivers/mtd/spi-nor/spi-nor.c
-@@ -1100,45 +1100,6 @@ static int spansion_quad_enable(struct spi_nor *nor)
- 	return 0;
- }
- 
--static int micron_quad_enable(struct spi_nor *nor)
--{
--	int ret;
--	u8 val;
--
--	ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
--	if (ret < 0) {
--		dev_err(nor->dev, "error %d reading EVCR\n", ret);
--		return ret;
--	}
--
--	write_enable(nor);
--
--	/* set EVCR, enable quad I/O */
--	nor->cmd_buf[0] = val & ~EVCR_QUAD_EN_MICRON;
--	ret = nor->write_reg(nor, SPINOR_OP_WD_EVCR, nor->cmd_buf, 1);
--	if (ret < 0) {
--		dev_err(nor->dev, "error while writing EVCR register\n");
--		return ret;
--	}
--
--	ret = spi_nor_wait_till_ready(nor);
--	if (ret)
--		return ret;
--
--	/* read EVCR and check it */
--	ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
--	if (ret < 0) {
--		dev_err(nor->dev, "error %d reading EVCR\n", ret);
--		return ret;
--	}
--	if (val & EVCR_QUAD_EN_MICRON) {
--		dev_err(nor->dev, "Micron EVCR Quad bit not clear\n");
--		return -EINVAL;
--	}
--
--	return 0;
--}
--
- static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
- {
- 	int status;
-@@ -1152,12 +1113,7 @@ static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
- 		}
- 		return status;
- 	case SNOR_MFR_MICRON:
--		status = micron_quad_enable(nor);
--		if (status) {
--			dev_err(nor->dev, "Micron quad-read not enabled\n");
--			return -EINVAL;
--		}
--		return status;
-+		return 0;
- 	default:
- 		status = spansion_quad_enable(nor);
- 		if (status) {
-diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
-index 5b9834cf2820..96fddb016bf1 100644
---- a/drivers/mtd/ubi/eba.c
-+++ b/drivers/mtd/ubi/eba.c
-@@ -426,8 +426,25 @@ retry:
- 						 pnum, vol_id, lnum);
- 					err = -EBADMSG;
- 				} else {
--					err = -EINVAL;
--					ubi_ro_mode(ubi);
-+					/*
-+					 * Ending up here in the non-Fastmap case
-+					 * is a clear bug as the VID header had to
-+					 * be present at scan time to have it referenced.
-+					 * With fastmap the story is more complicated.
-+					 * Fastmap has the mapping info without the need
-+					 * of a full scan. So the LEB could have been
-+					 * unmapped, Fastmap cannot know this and keeps
-+					 * the LEB referenced.
-+					 * This is valid and works as the layer above UBI
-+					 * has to do bookkeeping about used/referenced
-+					 * LEBs in any case.
-+					 */
-+					if (ubi->fast_attach) {
-+						err = -EBADMSG;
-+					} else {
-+						err = -EINVAL;
-+						ubi_ro_mode(ubi);
-+					}
- 				}
- 			}
- 			goto out_free;
-diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
-index 263b439e21a8..990898b9dc72 100644
---- a/drivers/mtd/ubi/fastmap.c
-+++ b/drivers/mtd/ubi/fastmap.c
-@@ -1058,6 +1058,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
- 	ubi_msg(ubi, "fastmap WL pool size: %d",
- 		ubi->fm_wl_pool.max_size);
- 	ubi->fm_disabled = 0;
-+	ubi->fast_attach = 1;
- 
- 	ubi_free_vid_hdr(ubi, vh);
- 	kfree(ech);
-diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
-index 2974b67f6c6c..de1ea2e4c37d 100644
---- a/drivers/mtd/ubi/ubi.h
-+++ b/drivers/mtd/ubi/ubi.h
-@@ -462,6 +462,7 @@ struct ubi_debug_info {
-  * @fm_eba_sem: allows ubi_update_fastmap() to block EBA table changes
-  * @fm_work: fastmap work queue
-  * @fm_work_scheduled: non-zero if fastmap work was scheduled
-+ * @fast_attach: non-zero if UBI was attached by fastmap
-  *
-  * @used: RB-tree of used physical eraseblocks
-  * @erroneous: RB-tree of erroneous used physical eraseblocks
-@@ -570,6 +571,7 @@ struct ubi_device {
- 	size_t fm_size;
- 	struct work_struct fm_work;
- 	int fm_work_scheduled;
-+	int fast_attach;
- 
- 	/* Wear-leveling sub-system's stuff */
- 	struct rb_root used;
-diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
-index b7f1a9919033..5ec8195b02c0 100644
---- a/drivers/net/bonding/bond_main.c
-+++ b/drivers/net/bonding/bond_main.c
-@@ -3308,6 +3308,30 @@ static int bond_close(struct net_device *bond_dev)
- 	return 0;
- }
- 
-+/* fold stats, assuming all rtnl_link_stats64 fields are u64, but
-+ * that some drivers can provide 32bit values only.
-+ */
-+static void bond_fold_stats(struct rtnl_link_stats64 *_res,
-+			    const struct rtnl_link_stats64 *_new,
-+			    const struct rtnl_link_stats64 *_old)
-+{
-+	const u64 *new = (const u64 *)_new;
-+	const u64 *old = (const u64 *)_old;
-+	u64 *res = (u64 *)_res;
-+	int i;
-+
-+	for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
-+		u64 nv = new[i];
-+		u64 ov = old[i];
-+
-+		/* detects if this particular field is 32bit only */
-+		if (((nv | ov) >> 32) == 0)
-+			res[i] += (u32)nv - (u32)ov;
-+		else
-+			res[i] += nv - ov;
-+	}
-+}
-+
- static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
- 						struct rtnl_link_stats64 *stats)
- {
-@@ -3316,43 +3340,23 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
- 	struct list_head *iter;
- 	struct slave *slave;
- 
-+	spin_lock(&bond->stats_lock);
- 	memcpy(stats, &bond->bond_stats, sizeof(*stats));
- 
--	bond_for_each_slave(bond, slave, iter) {
--		const struct rtnl_link_stats64 *sstats =
-+	rcu_read_lock();
-+	bond_for_each_slave_rcu(bond, slave, iter) {
-+		const struct rtnl_link_stats64 *new =
- 			dev_get_stats(slave->dev, &temp);
--		struct rtnl_link_stats64 *pstats = &slave->slave_stats;
--
--		stats->rx_packets +=  sstats->rx_packets - pstats->rx_packets;
--		stats->rx_bytes += sstats->rx_bytes - pstats->rx_bytes;
--		stats->rx_errors += sstats->rx_errors - pstats->rx_errors;
--		stats->rx_dropped += sstats->rx_dropped - pstats->rx_dropped;
--
--		stats->tx_packets += sstats->tx_packets - pstats->tx_packets;;
--		stats->tx_bytes += sstats->tx_bytes - pstats->tx_bytes;
--		stats->tx_errors += sstats->tx_errors - pstats->tx_errors;
--		stats->tx_dropped += sstats->tx_dropped - pstats->tx_dropped;
--
--		stats->multicast += sstats->multicast - pstats->multicast;
--		stats->collisions += sstats->collisions - pstats->collisions;
--
--		stats->rx_length_errors += sstats->rx_length_errors - pstats->rx_length_errors;
--		stats->rx_over_errors += sstats->rx_over_errors - pstats->rx_over_errors;
--		stats->rx_crc_errors += sstats->rx_crc_errors - pstats->rx_crc_errors;
--		stats->rx_frame_errors += sstats->rx_frame_errors - pstats->rx_frame_errors;
--		stats->rx_fifo_errors += sstats->rx_fifo_errors - pstats->rx_fifo_errors;
--		stats->rx_missed_errors += sstats->rx_missed_errors - pstats->rx_missed_errors;
--
--		stats->tx_aborted_errors += sstats->tx_aborted_errors - pstats->tx_aborted_errors;
--		stats->tx_carrier_errors += sstats->tx_carrier_errors - pstats->tx_carrier_errors;
--		stats->tx_fifo_errors += sstats->tx_fifo_errors - pstats->tx_fifo_errors;
--		stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors - pstats->tx_heartbeat_errors;
--		stats->tx_window_errors += sstats->tx_window_errors - pstats->tx_window_errors;
-+
-+		bond_fold_stats(stats, new, &slave->slave_stats);
- 
- 		/* save off the slave stats for the next run */
--		memcpy(pstats, sstats, sizeof(*sstats));
-+		memcpy(&slave->slave_stats, new, sizeof(*new));
- 	}
-+	rcu_read_unlock();
-+
- 	memcpy(&bond->bond_stats, stats, sizeof(*stats));
-+	spin_unlock(&bond->stats_lock);
- 
- 	return stats;
- }
-@@ -4166,6 +4170,7 @@ void bond_setup(struct net_device *bond_dev)
- 	struct bonding *bond = netdev_priv(bond_dev);
- 
- 	spin_lock_init(&bond->mode_lock);
-+	spin_lock_init(&bond->stats_lock);
- 	bond->params = bonding_defaults;
- 
- 	/* Initialize pointers */
-diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
-index 141c2a42d7ed..910c12e2638e 100644
---- a/drivers/net/can/dev.c
-+++ b/drivers/net/can/dev.c
-@@ -696,11 +696,17 @@ int can_change_mtu(struct net_device *dev, int new_mtu)
- 	/* allow change of MTU according to the CANFD ability of the device */
- 	switch (new_mtu) {
- 	case CAN_MTU:
-+		/* 'CANFD-only' controllers can not switch to CAN_MTU */
-+		if (priv->ctrlmode_static & CAN_CTRLMODE_FD)
-+			return -EINVAL;
-+
- 		priv->ctrlmode &= ~CAN_CTRLMODE_FD;
- 		break;
- 
- 	case CANFD_MTU:
--		if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD))
-+		/* check for potential CANFD ability */
-+		if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) &&
-+		    !(priv->ctrlmode_static & CAN_CTRLMODE_FD))
- 			return -EINVAL;
- 
- 		priv->ctrlmode |= CAN_CTRLMODE_FD;
-@@ -782,6 +788,35 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
- 				= { .len = sizeof(struct can_bittiming_const) },
- };
- 
-+static int can_validate(struct nlattr *tb[], struct nlattr *data[])
-+{
-+	bool is_can_fd = false;
-+
-+	/* Make sure that valid CAN FD configurations always consist of
-+	 * - nominal/arbitration bittiming
-+	 * - data bittiming
-+	 * - control mode with CAN_CTRLMODE_FD set
-+	 */
-+
-+	if (data[IFLA_CAN_CTRLMODE]) {
-+		struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
-+
-+		is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
-+	}
-+
-+	if (is_can_fd) {
-+		if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
-+			return -EOPNOTSUPP;
-+	}
-+
-+	if (data[IFLA_CAN_DATA_BITTIMING]) {
-+		if (!is_can_fd || !data[IFLA_CAN_BITTIMING])
-+			return -EOPNOTSUPP;
-+	}
-+
-+	return 0;
-+}
-+
- static int can_changelink(struct net_device *dev,
- 			  struct nlattr *tb[], struct nlattr *data[])
- {
-@@ -813,19 +848,31 @@ static int can_changelink(struct net_device *dev,
- 
- 	if (data[IFLA_CAN_CTRLMODE]) {
- 		struct can_ctrlmode *cm;
-+		u32 ctrlstatic;
-+		u32 maskedflags;
- 
- 		/* Do not allow changing controller mode while running */
- 		if (dev->flags & IFF_UP)
- 			return -EBUSY;
- 		cm = nla_data(data[IFLA_CAN_CTRLMODE]);
-+		ctrlstatic = priv->ctrlmode_static;
-+		maskedflags = cm->flags & cm->mask;
-+
-+		/* check whether provided bits are allowed to be passed */
-+		if (cm->mask & ~(priv->ctrlmode_supported | ctrlstatic))
-+			return -EOPNOTSUPP;
-+
-+		/* do not check for static fd-non-iso if 'fd' is disabled */
-+		if (!(maskedflags & CAN_CTRLMODE_FD))
-+			ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
- 
--		/* check whether changed bits are allowed to be modified */
--		if (cm->mask & ~priv->ctrlmode_supported)
-+		/* make sure static options are provided by configuration */
-+		if ((maskedflags & ctrlstatic) != ctrlstatic)
- 			return -EOPNOTSUPP;
- 
- 		/* clear bits to be modified and copy the flag values */
- 		priv->ctrlmode &= ~cm->mask;
--		priv->ctrlmode |= (cm->flags & cm->mask);
-+		priv->ctrlmode |= maskedflags;
- 
- 		/* CAN_CTRLMODE_FD can only be set when driver supports FD */
- 		if (priv->ctrlmode & CAN_CTRLMODE_FD)
-@@ -966,6 +1013,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
- 	.maxtype	= IFLA_CAN_MAX,
- 	.policy		= can_policy,
- 	.setup		= can_setup,
-+	.validate	= can_validate,
- 	.newlink	= can_newlink,
- 	.changelink	= can_changelink,
- 	.get_size	= can_get_size,
-diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
-index 39cf911f7a1e..195f15edb32e 100644
---- a/drivers/net/can/m_can/m_can.c
-+++ b/drivers/net/can/m_can/m_can.c
-@@ -955,7 +955,7 @@ static struct net_device *alloc_m_can_dev(void)
- 	priv->can.do_get_berr_counter = m_can_get_berr_counter;
- 
- 	/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.1 */
--	priv->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO;
-+	can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
- 
- 	/* CAN_CTRLMODE_FD_NON_ISO can not be changed with M_CAN IP v3.0.1 */
- 	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
-diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
-index 8f76f4558a88..2ff465848b65 100644
---- a/drivers/net/ethernet/atheros/atlx/atl2.c
-+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
-@@ -1412,7 +1412,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
- 
- 	err = -EIO;
- 
--	netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX;
-+	netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
- 	netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
- 
- 	/* Init PHY as early as possible due to power saving issue  */
-diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
-index d7e01a74e927..6746fd03cb3a 100644
---- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
-+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
-@@ -1197,7 +1197,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
- 			dev->stats.tx_bytes += tx_cb_ptr->skb->len;
- 			dma_unmap_single(&dev->dev,
- 					 dma_unmap_addr(tx_cb_ptr, dma_addr),
--					 tx_cb_ptr->skb->len,
-+					 dma_unmap_len(tx_cb_ptr, dma_len),
- 					 DMA_TO_DEVICE);
- 			bcmgenet_free_cb(tx_cb_ptr);
- 		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
-@@ -1308,7 +1308,7 @@ static int bcmgenet_xmit_single(struct net_device *dev,
- 	}
- 
- 	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
--	dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
-+	dma_unmap_len_set(tx_cb_ptr, dma_len, skb_len);
- 	length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
- 			(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
- 			DMA_TX_APPEND_CRC;
-diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
-index 767347b1f631..f50bdbfaae7c 100644
---- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
-+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
-@@ -519,6 +519,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
- 		nicvf_config_vlan_stripping(nic, nic->netdev->features);
- 
- 	/* Enable Receive queue */
-+	memset(&rq_cfg, 0, sizeof(struct rq_cfg));
- 	rq_cfg.ena = 1;
- 	rq_cfg.tcp_ena = 0;
- 	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
-@@ -551,6 +552,7 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
- 			      qidx, (u64)(cq->dmem.phys_base));
- 
- 	/* Enable Completion queue */
-+	memset(&cq_cfg, 0, sizeof(struct cq_cfg));
- 	cq_cfg.ena = 1;
- 	cq_cfg.reset = 0;
- 	cq_cfg.caching = 0;
-@@ -599,6 +601,7 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
- 			      qidx, (u64)(sq->dmem.phys_base));
- 
- 	/* Enable send queue  & set queue size */
-+	memset(&sq_cfg, 0, sizeof(struct sq_cfg));
- 	sq_cfg.ena = 1;
- 	sq_cfg.reset = 0;
- 	sq_cfg.ldwb = 0;
-@@ -635,6 +638,7 @@ static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
- 
- 	/* Enable RBDR  & set queue size */
- 	/* Buffer size should be in multiples of 128 bytes */
-+	memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg));
- 	rbdr_cfg.ena = 1;
- 	rbdr_cfg.reset = 0;
- 	rbdr_cfg.ldwb = 0;
-diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
-index 41c81f6ec630..0d6e8c177650 100644
---- a/drivers/net/ethernet/freescale/fec_main.c
-+++ b/drivers/net/ethernet/freescale/fec_main.c
-@@ -1566,9 +1566,15 @@ fec_enet_rx(struct net_device *ndev, int budget)
- 	struct fec_enet_private *fep = netdev_priv(ndev);
- 
- 	for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
--		clear_bit(queue_id, &fep->work_rx);
--		pkt_received += fec_enet_rx_queue(ndev,
-+		int ret;
-+
-+		ret = fec_enet_rx_queue(ndev,
- 					budget - pkt_received, queue_id);
-+
-+		if (ret < budget - pkt_received)
-+			clear_bit(queue_id, &fep->work_rx);
-+
-+		pkt_received += ret;
- 	}
- 	return pkt_received;
- }
-diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
-index b0ae69f84493..2ee05cebea75 100644
---- a/drivers/net/ethernet/marvell/mvneta.c
-+++ b/drivers/net/ethernet/marvell/mvneta.c
-@@ -3070,17 +3070,17 @@ static int mvneta_stop(struct net_device *dev)
- 	struct mvneta_port *pp = netdev_priv(dev);
- 
- 	/* Inform that we are stopping so we don't want to setup the
--	 * driver for new CPUs in the notifiers
-+	 * driver for new CPUs in the notifiers. The code of the
-+	 * notifier for CPU online is protected by the same spinlock,
-+	 * so when we get the lock, the notifer work is done.
- 	 */
- 	spin_lock(&pp->lock);
- 	pp->is_stopped = true;
-+	spin_unlock(&pp->lock);
-+
- 	mvneta_stop_dev(pp);
- 	mvneta_mdio_remove(pp);
- 	unregister_cpu_notifier(&pp->cpu_notifier);
--	/* Now that the notifier are unregistered, we can release le
--	 * lock
--	 */
--	spin_unlock(&pp->lock);
- 	on_each_cpu(mvneta_percpu_disable, pp, true);
- 	free_percpu_irq(dev->irq, pp->ports);
- 	mvneta_cleanup_rxqs(pp);
-@@ -3612,6 +3612,7 @@ static int mvneta_probe(struct platform_device *pdev)
- 	dev->ethtool_ops = &mvneta_eth_tool_ops;
- 
- 	pp = netdev_priv(dev);
-+	spin_lock_init(&pp->lock);
- 	pp->phy_node = phy_node;
- 	pp->phy_interface = phy_mode;
- 
-@@ -3720,7 +3721,7 @@ static int mvneta_probe(struct platform_device *pdev)
- 	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
- 	dev->hw_features |= dev->features;
- 	dev->vlan_features |= dev->features;
--	dev->priv_flags |= IFF_UNICAST_FLT;
-+	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
- 	dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
- 
- 	err = register_netdev(dev);
-diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
-index 41440b2b20a3..03ef9aca21e4 100644
---- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
-@@ -704,7 +704,7 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
- 
- 	if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS)
- 		return -1;
--	hw_checksum = csum_add(hw_checksum, (__force __wsum)(ipv6h->nexthdr << 8));
-+	hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));
- 
- 	csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
- 				       sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
-diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
-index e0946ab22010..0debb611da8b 100644
---- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
-@@ -402,7 +402,6 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
- 	u32 packets = 0;
- 	u32 bytes = 0;
- 	int factor = priv->cqe_factor;
--	u64 timestamp = 0;
- 	int done = 0;
- 	int budget = priv->tx_work_limit;
- 	u32 last_nr_txbb;
-@@ -442,9 +441,12 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
- 		new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
- 
- 		do {
-+			u64 timestamp = 0;
-+
- 			txbbs_skipped += last_nr_txbb;
- 			ring_index = (ring_index + last_nr_txbb) & size_mask;
--			if (ring->tx_info[ring_index].ts_requested)
-+
-+			if (unlikely(ring->tx_info[ring_index].ts_requested))
- 				timestamp = mlx4_en_get_cqe_ts(cqe);
- 
- 			/* free next descriptor */
-diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
-index 25ce1b030a00..cd9b2b28df88 100644
---- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
-@@ -3141,7 +3141,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
- 		case QP_TRANS_RTS2RTS:
- 		case QP_TRANS_SQD2SQD:
- 		case QP_TRANS_SQD2RTS:
--			if (slave != mlx4_master_func_num(dev))
-+			if (slave != mlx4_master_func_num(dev)) {
- 				if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
- 					port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
- 					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
-@@ -3160,6 +3160,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
- 					if (qp_ctx->alt_path.mgid_index >= num_gids)
- 						return -EINVAL;
- 				}
-+			}
- 			break;
- 		default:
- 			break;
-diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
-index 402994bf7e16..e293a2ec2775 100644
---- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
-+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
-@@ -1389,24 +1389,50 @@ static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
- 	return 0;
- }
- 
--static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
-+static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
- {
--	struct mlx5e_priv *priv = netdev_priv(netdev);
- 	struct mlx5_core_dev *mdev = priv->mdev;
--	int hw_mtu;
-+	u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
- 	int err;
- 
--	err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
-+	err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
- 	if (err)
- 		return err;
- 
--	mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
-+	/* Update vport context MTU */
-+	mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
-+	return 0;
-+}
-+
-+static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
-+{
-+	struct mlx5_core_dev *mdev = priv->mdev;
-+	u16 hw_mtu = 0;
-+	int err;
-+
-+	err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
-+	if (err || !hw_mtu) /* fallback to port oper mtu */
-+		mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
-+
-+	*mtu = MLX5E_HW2SW_MTU(hw_mtu);
-+}
-+
-+static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
-+{
-+	struct mlx5e_priv *priv = netdev_priv(netdev);
-+	u16 mtu;
-+	int err;
- 
--	if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
--		netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
--			    __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);
-+	err = mlx5e_set_mtu(priv, netdev->mtu);
-+	if (err)
-+		return err;
- 
--	netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);
-+	mlx5e_query_mtu(priv, &mtu);
-+	if (mtu != netdev->mtu)
-+		netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
-+			    __func__, mtu, netdev->mtu);
-+
-+	netdev->mtu = mtu;
- 	return 0;
- }
- 
-@@ -1906,22 +1932,27 @@ static int mlx5e_set_features(struct net_device *netdev,
- 	return err;
- }
- 
-+#define MXL5_HW_MIN_MTU 64
-+#define MXL5E_MIN_MTU (MXL5_HW_MIN_MTU + ETH_FCS_LEN)
-+
- static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
- {
- 	struct mlx5e_priv *priv = netdev_priv(netdev);
- 	struct mlx5_core_dev *mdev = priv->mdev;
- 	bool was_opened;
--	int max_mtu;
-+	u16 max_mtu;
-+	u16 min_mtu;
- 	int err = 0;
- 
- 	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
- 
- 	max_mtu = MLX5E_HW2SW_MTU(max_mtu);
-+	min_mtu = MLX5E_HW2SW_MTU(MXL5E_MIN_MTU);
- 
--	if (new_mtu > max_mtu) {
-+	if (new_mtu > max_mtu || new_mtu < min_mtu) {
- 		netdev_err(netdev,
--			   "%s: Bad MTU (%d) > (%d) Max\n",
--			   __func__, new_mtu, max_mtu);
-+			   "%s: Bad MTU (%d), valid range is: [%d..%d]\n",
-+			   __func__, new_mtu, min_mtu, max_mtu);
- 		return -EINVAL;
- 	}
- 
-diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
-index 6f68dba8d7ed..cc901852f1a9 100644
---- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
-+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
-@@ -957,33 +957,6 @@ unlock_fg:
- 	return rule;
- }
- 
--static struct mlx5_flow_rule *add_rule_to_auto_fg(struct mlx5_flow_table *ft,
--						  u8 match_criteria_enable,
--						  u32 *match_criteria,
--						  u32 *match_value,
--						  u8 action,
--						  u32 flow_tag,
--						  struct mlx5_flow_destination *dest)
--{
--	struct mlx5_flow_rule *rule;
--	struct mlx5_flow_group *g;
--
--	g = create_autogroup(ft, match_criteria_enable, match_criteria);
--	if (IS_ERR(g))
--		return (void *)g;
--
--	rule = add_rule_fg(g, match_value,
--			   action, flow_tag, dest);
--	if (IS_ERR(rule)) {
--		/* Remove assumes refcount > 0 and autogroup creates a group
--		 * with a refcount = 0.
--		 */
--		tree_get_node(&g->node);
--		tree_remove_node(&g->node);
--	}
--	return rule;
--}
--
- struct mlx5_flow_rule *
- mlx5_add_flow_rule(struct mlx5_flow_table *ft,
- 		   u8 match_criteria_enable,
-@@ -1008,8 +981,23 @@ mlx5_add_flow_rule(struct mlx5_flow_table *ft,
- 				goto unlock;
- 		}
- 
--	rule = add_rule_to_auto_fg(ft, match_criteria_enable, match_criteria,
--				   match_value, action, flow_tag, dest);
-+	g = create_autogroup(ft, match_criteria_enable, match_criteria);
-+	if (IS_ERR(g)) {
-+		rule = (void *)g;
-+		goto unlock;
-+	}
-+
-+	rule = add_rule_fg(g, match_value,
-+			   action, flow_tag, dest);
-+	if (IS_ERR(rule)) {
-+		/* Remove assumes refcount > 0 and autogroup creates a group
-+		 * with a refcount = 0.
-+		 */
-+		unlock_ref_node(&ft->node);
-+		tree_get_node(&g->node);
-+		tree_remove_node(&g->node);
-+		return rule;
-+	}
- unlock:
- 	unlock_ref_node(&ft->node);
- 	return rule;
-diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
-index 1545a944c309..b86fe50d5d93 100644
---- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
-+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
-@@ -423,6 +423,10 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
- 					 HCA_CAP_OPMOD_GET_CUR);
- 		if (err)
- 			return err;
-+		err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
-+					 HCA_CAP_OPMOD_GET_MAX);
-+		if (err)
-+			return err;
- 	} else {
- 		return 0;
- 	}
-diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
-index a87e773e93f3..53a793bc2e3d 100644
---- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
-+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
-@@ -246,8 +246,8 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
- }
- EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
- 
--static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
--				int *max_mtu, int *oper_mtu, u8 port)
-+static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
-+				u16 *max_mtu, u16 *oper_mtu, u8 port)
- {
- 	u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
- 	u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
-@@ -267,7 +267,7 @@ static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
- 		*admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
- }
- 
--int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
-+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
- {
- 	u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
- 	u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
-@@ -282,14 +282,14 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
- }
- EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
- 
--void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
-+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu,
- 			     u8 port)
- {
- 	mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port);
- }
- EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
- 
--void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
-+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
- 			      u8 port)
- {
- 	mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port);
-diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
-index c7398b95aecd..6d5f56e73b5d 100644
---- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
-+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
-@@ -196,6 +196,46 @@ int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
- }
- EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);
- 
-+int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
-+{
-+	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
-+	u32 *out;
-+	int err;
-+
-+	out = mlx5_vzalloc(outlen);
-+	if (!out)
-+		return -ENOMEM;
-+
-+	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
-+	if (!err)
-+		*mtu = MLX5_GET(query_nic_vport_context_out, out,
-+				nic_vport_context.mtu);
-+
-+	kvfree(out);
-+	return err;
-+}
-+EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);
-+
-+int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
-+{
-+	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
-+	void *in;
-+	int err;
-+
-+	in = mlx5_vzalloc(inlen);
-+	if (!in)
-+		return -ENOMEM;
-+
-+	MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
-+	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
-+
-+	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
-+
-+	kvfree(in);
-+	return err;
-+}
-+EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);
-+
- int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
- 				  u32 vport,
- 				  enum mlx5_list_type list_type,
-diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
-index 3b89ed2f3c76..65a115fc0c96 100644
---- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
-+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
-@@ -118,6 +118,8 @@ struct mlxsw_sp {
- #define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
- 		unsigned int interval; /* ms */
- 	} fdb_notify;
-+#define MLXSW_SP_MIN_AGEING_TIME 10
-+#define MLXSW_SP_MAX_AGEING_TIME 1000000
- #define MLXSW_SP_DEFAULT_AGEING_TIME 300
- 	u32 ageing_time;
- 	struct mlxsw_sp_upper master_bridge;
-diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
-index 7b56098acc58..e1c74efff51a 100644
---- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
-+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
-@@ -311,8 +311,13 @@ static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
- 	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
- 	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
- 
--	if (switchdev_trans_ph_prepare(trans))
--		return 0;
-+	if (switchdev_trans_ph_prepare(trans)) {
-+		if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
-+		    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
-+			return -ERANGE;
-+		else
-+			return 0;
-+	}
- 
- 	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
- }
-diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
-index 46bbea8e023c..55007f1e6bbc 100644
---- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
-+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
-@@ -566,6 +566,7 @@ struct qlcnic_adapter_stats {
- 	u64  tx_dma_map_error;
- 	u64  spurious_intr;
- 	u64  mac_filter_limit_overrun;
-+	u64  mbx_spurious_intr;
- };
- 
- /*
-@@ -1099,7 +1100,7 @@ struct qlcnic_mailbox {
- 	unsigned long		status;
- 	spinlock_t		queue_lock;	/* Mailbox queue lock */
- 	spinlock_t		aen_lock;	/* Mailbox response/AEN lock */
--	atomic_t		rsp_status;
-+	u32			rsp_status;
- 	u32			num_cmds;
- };
- 
-diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
-index 37a731be7d39..f9640d5ce6ba 100644
---- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
-+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
-@@ -491,7 +491,7 @@ irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
- 
- static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx)
- {
--	atomic_set(&mbx->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
-+	mbx->rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
- 	complete(&mbx->completion);
- }
- 
-@@ -510,7 +510,7 @@ static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
- 	if (event &  QLCNIC_MBX_ASYNC_EVENT) {
- 		__qlcnic_83xx_process_aen(adapter);
- 	} else {
--		if (atomic_read(&mbx->rsp_status) != rsp_status)
-+		if (mbx->rsp_status != rsp_status)
- 			qlcnic_83xx_notify_mbx_response(mbx);
- 	}
- out:
-@@ -1023,7 +1023,7 @@ static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
- 		if (event &  QLCNIC_MBX_ASYNC_EVENT) {
- 			__qlcnic_83xx_process_aen(adapter);
- 		} else {
--			if (atomic_read(&mbx->rsp_status) != rsp_status)
-+			if (mbx->rsp_status != rsp_status)
- 				qlcnic_83xx_notify_mbx_response(mbx);
- 		}
- 	}
-@@ -2338,9 +2338,9 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
- 
- static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
- {
-+	u32 mask, resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
- 	struct qlcnic_adapter *adapter = data;
- 	struct qlcnic_mailbox *mbx;
--	u32 mask, resp, event;
- 	unsigned long flags;
- 
- 	mbx = adapter->ahw->mailbox;
-@@ -2350,10 +2350,14 @@ static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
- 		goto out;
- 
- 	event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
--	if (event &  QLCNIC_MBX_ASYNC_EVENT)
-+	if (event &  QLCNIC_MBX_ASYNC_EVENT) {
- 		__qlcnic_83xx_process_aen(adapter);
--	else
--		qlcnic_83xx_notify_mbx_response(mbx);
-+	} else {
-+		if (mbx->rsp_status != rsp_status)
-+			qlcnic_83xx_notify_mbx_response(mbx);
-+		else
-+			adapter->stats.mbx_spurious_intr++;
-+	}
- 
- out:
- 	mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
-@@ -4050,10 +4054,10 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
- 	struct qlcnic_adapter *adapter = mbx->adapter;
- 	const struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
- 	struct device *dev = &adapter->pdev->dev;
--	atomic_t *rsp_status = &mbx->rsp_status;
- 	struct list_head *head = &mbx->cmd_q;
- 	struct qlcnic_hardware_context *ahw;
- 	struct qlcnic_cmd_args *cmd = NULL;
-+	unsigned long flags;
- 
- 	ahw = adapter->ahw;
- 
-@@ -4063,7 +4067,9 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
- 			return;
- 		}
- 
--		atomic_set(rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
-+		spin_lock_irqsave(&mbx->aen_lock, flags);
-+		mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT;
-+		spin_unlock_irqrestore(&mbx->aen_lock, flags);
- 
- 		spin_lock(&mbx->queue_lock);
- 
-diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
-index 494e8105adee..0a2318cad34d 100644
---- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
-+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
-@@ -59,7 +59,8 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
- 	 QLC_OFF(stats.mac_filter_limit_overrun)},
- 	{"spurious intr", QLC_SIZEOF(stats.spurious_intr),
- 	 QLC_OFF(stats.spurious_intr)},
--
-+	{"mbx spurious intr", QLC_SIZEOF(stats.mbx_spurious_intr),
-+	 QLC_OFF(stats.mbx_spurious_intr)},
- };
- 
- static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
-diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
-index 997976426799..b28e73ea2c25 100644
---- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
-+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
-@@ -1648,7 +1648,18 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
- 		return;
- 	}
- 	skb_reserve(new_skb, NET_IP_ALIGN);
-+
-+	pci_dma_sync_single_for_cpu(qdev->pdev,
-+				    dma_unmap_addr(sbq_desc, mapaddr),
-+				    dma_unmap_len(sbq_desc, maplen),
-+				    PCI_DMA_FROMDEVICE);
-+
- 	memcpy(skb_put(new_skb, length), skb->data, length);
-+
-+	pci_dma_sync_single_for_device(qdev->pdev,
-+				       dma_unmap_addr(sbq_desc, mapaddr),
-+				       dma_unmap_len(sbq_desc, maplen),
-+				       PCI_DMA_FROMDEVICE);
- 	skb = new_skb;
- 
- 	/* Frame error, so drop the packet. */
-diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
-index 738449992876..01f6d5bbd420 100644
---- a/drivers/net/ethernet/renesas/sh_eth.c
-+++ b/drivers/net/ethernet/renesas/sh_eth.c
-@@ -1136,11 +1136,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
- 			break;
- 		sh_eth_set_receive_align(skb);
- 
--		/* RX descriptor */
--		rxdesc = &mdp->rx_ring[i];
- 		/* The size of the buffer is a multiple of 32 bytes. */
- 		buf_len = ALIGN(mdp->rx_buf_sz, 32);
--		rxdesc->len = cpu_to_le32(buf_len << 16);
- 		dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len,
- 					  DMA_FROM_DEVICE);
- 		if (dma_mapping_error(&ndev->dev, dma_addr)) {
-@@ -1148,6 +1145,10 @@ static void sh_eth_ring_format(struct net_device *ndev)
- 			break;
- 		}
- 		mdp->rx_skbuff[i] = skb;
-+
-+		/* RX descriptor */
-+		rxdesc = &mdp->rx_ring[i];
-+		rxdesc->len = cpu_to_le32(buf_len << 16);
- 		rxdesc->addr = cpu_to_le32(dma_addr);
- 		rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP);
- 
-@@ -1163,7 +1164,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
- 	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
- 
- 	/* Mark the last entry as wrapping the ring. */
--	rxdesc->status |= cpu_to_le32(RD_RDLE);
-+	if (rxdesc)
-+		rxdesc->status |= cpu_to_le32(RD_RDLE);
- 
- 	memset(mdp->tx_ring, 0, tx_ringsize);
- 
-diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
-index 166a7fc87e2f..f39e7198e818 100644
---- a/drivers/net/ethernet/rocker/rocker.c
-+++ b/drivers/net/ethernet/rocker/rocker.c
-@@ -239,6 +239,7 @@ struct rocker {
- 	struct {
- 		u64 id;
- 	} hw;
-+	unsigned long ageing_time;
- 	spinlock_t cmd_ring_lock;		/* for cmd ring accesses */
- 	struct rocker_dma_ring_info cmd_ring;
- 	struct rocker_dma_ring_info event_ring;
-@@ -3704,7 +3705,7 @@ static void rocker_fdb_cleanup(unsigned long data)
- 	struct rocker_port *rocker_port;
- 	struct rocker_fdb_tbl_entry *entry;
- 	struct hlist_node *tmp;
--	unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME;
-+	unsigned long next_timer = jiffies + rocker->ageing_time;
- 	unsigned long expires;
- 	unsigned long lock_flags;
- 	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE |
-@@ -4367,8 +4368,12 @@ static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port,
- 					  struct switchdev_trans *trans,
- 					  u32 ageing_time)
- {
-+	struct rocker *rocker = rocker_port->rocker;
-+
- 	if (!switchdev_trans_ph_prepare(trans)) {
- 		rocker_port->ageing_time = clock_t_to_jiffies(ageing_time);
-+		if (rocker_port->ageing_time < rocker->ageing_time)
-+			rocker->ageing_time = rocker_port->ageing_time;
- 		mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies);
- 	}
- 
-@@ -5206,10 +5211,13 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
- 		goto err_init_tbls;
- 	}
- 
-+	rocker->ageing_time = BR_DEFAULT_AGEING_TIME;
- 	setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
- 		    (unsigned long) rocker);
- 	mod_timer(&rocker->fdb_cleanup_timer, jiffies);
- 
-+	rocker->ageing_time = BR_DEFAULT_AGEING_TIME;
-+
- 	err = rocker_probe_ports(rocker);
- 	if (err) {
- 		dev_err(&pdev->dev, "failed to probe ports\n");
-diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
-index 696852eb23c3..7a3f990c1935 100644
---- a/drivers/net/irda/irtty-sir.c
-+++ b/drivers/net/irda/irtty-sir.c
-@@ -430,16 +430,6 @@ static int irtty_open(struct tty_struct *tty)
- 
- 	/* Module stuff handled via irda_ldisc.owner - Jean II */
- 
--	/* First make sure we're not already connected. */
--	if (tty->disc_data != NULL) {
--		priv = tty->disc_data;
--		if (priv && priv->magic == IRTTY_MAGIC) {
--			ret = -EEXIST;
--			goto out;
--		}
--		tty->disc_data = NULL;		/* ### */
--	}
--
- 	/* stop the underlying  driver */
- 	irtty_stop_receiver(tty, TRUE);
- 	if (tty->ops->stop)
-diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
-index d636d051fac8..95394edd1ed5 100644
---- a/drivers/net/macvtap.c
-+++ b/drivers/net/macvtap.c
-@@ -760,6 +760,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
- 			macvtap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
- 		if (copylen > good_linear)
- 			copylen = good_linear;
-+		else if (copylen < ETH_HLEN)
-+			copylen = ETH_HLEN;
- 		linear = copylen;
- 		i = *from;
- 		iov_iter_advance(&i, copylen);
-@@ -769,10 +771,11 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
- 
- 	if (!zerocopy) {
- 		copylen = len;
--		if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > good_linear)
-+		linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
-+		if (linear > good_linear)
- 			linear = good_linear;
--		else
--			linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
-+		else if (linear < ETH_HLEN)
-+			linear = ETH_HLEN;
- 	}
- 
- 	skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
-diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
-index d61da9ece3ba..aafe237b25ac 100644
---- a/drivers/net/ppp/ppp_generic.c
-+++ b/drivers/net/ppp/ppp_generic.c
-@@ -575,7 +575,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
- 
- static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
- {
--	struct ppp_file *pf = file->private_data;
-+	struct ppp_file *pf;
- 	struct ppp *ppp;
- 	int err = -EFAULT, val, val2, i;
- 	struct ppp_idle idle;
-@@ -585,9 +585,14 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
- 	void __user *argp = (void __user *)arg;
- 	int __user *p = argp;
- 
--	if (!pf)
--		return ppp_unattached_ioctl(current->nsproxy->net_ns,
--					pf, file, cmd, arg);
-+	mutex_lock(&ppp_mutex);
-+
-+	pf = file->private_data;
-+	if (!pf) {
-+		err = ppp_unattached_ioctl(current->nsproxy->net_ns,
-+					   pf, file, cmd, arg);
-+		goto out;
-+	}
- 
- 	if (cmd == PPPIOCDETACH) {
- 		/*
-@@ -602,7 +607,6 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
- 		 * this fd and reopening /dev/ppp.
- 		 */
- 		err = -EINVAL;
--		mutex_lock(&ppp_mutex);
- 		if (pf->kind == INTERFACE) {
- 			ppp = PF_TO_PPP(pf);
- 			rtnl_lock();
-@@ -616,15 +620,13 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
- 		} else
- 			pr_warn("PPPIOCDETACH file->f_count=%ld\n",
- 				atomic_long_read(&file->f_count));
--		mutex_unlock(&ppp_mutex);
--		return err;
-+		goto out;
- 	}
- 
- 	if (pf->kind == CHANNEL) {
- 		struct channel *pch;
- 		struct ppp_channel *chan;
- 
--		mutex_lock(&ppp_mutex);
- 		pch = PF_TO_CHANNEL(pf);
- 
- 		switch (cmd) {
-@@ -646,17 +648,16 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
- 				err = chan->ops->ioctl(chan, cmd, arg);
- 			up_read(&pch->chan_sem);
- 		}
--		mutex_unlock(&ppp_mutex);
--		return err;
-+		goto out;
- 	}
- 
- 	if (pf->kind != INTERFACE) {
- 		/* can't happen */
- 		pr_err("PPP: not interface or channel??\n");
--		return -EINVAL;
-+		err = -EINVAL;
-+		goto out;
- 	}
- 
--	mutex_lock(&ppp_mutex);
- 	ppp = PF_TO_PPP(pf);
- 	switch (cmd) {
- 	case PPPIOCSMRU:
-@@ -831,7 +832,10 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
- 	default:
- 		err = -ENOTTY;
- 	}
-+
-+out:
- 	mutex_unlock(&ppp_mutex);
-+
- 	return err;
- }
- 
-@@ -844,7 +848,6 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
- 	struct ppp_net *pn;
- 	int __user *p = (int __user *)arg;
- 
--	mutex_lock(&ppp_mutex);
- 	switch (cmd) {
- 	case PPPIOCNEWUNIT:
- 		/* Create a new ppp unit */
-@@ -894,7 +897,7 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
- 	default:
- 		err = -ENOTTY;
- 	}
--	mutex_unlock(&ppp_mutex);
-+
- 	return err;
- }
- 
-@@ -2304,7 +2307,7 @@ int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
- 
- 	pch->ppp = NULL;
- 	pch->chan = chan;
--	pch->chan_net = net;
-+	pch->chan_net = get_net(net);
- 	chan->ppp = pch;
- 	init_ppp_file(&pch->file, CHANNEL);
- 	pch->file.hdrlen = chan->hdrlen;
-@@ -2401,6 +2404,8 @@ ppp_unregister_channel(struct ppp_channel *chan)
- 	spin_lock_bh(&pn->all_channels_lock);
- 	list_del(&pch->list);
- 	spin_unlock_bh(&pn->all_channels_lock);
-+	put_net(pch->chan_net);
-+	pch->chan_net = NULL;
- 
- 	pch->file.dead = 1;
- 	wake_up_interruptible(&pch->file.rwait);
-diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
-index 01f08a7751f7..e7034c55e796 100644
---- a/drivers/net/rionet.c
-+++ b/drivers/net/rionet.c
-@@ -280,7 +280,7 @@ static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbo
- 	struct net_device *ndev = dev_id;
- 	struct rionet_private *rnet = netdev_priv(ndev);
- 
--	spin_lock(&rnet->lock);
-+	spin_lock(&rnet->tx_lock);
- 
- 	if (netif_msg_intr(rnet))
- 		printk(KERN_INFO
-@@ -299,7 +299,7 @@ static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbo
- 	if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
- 		netif_wake_queue(ndev);
- 
--	spin_unlock(&rnet->lock);
-+	spin_unlock(&rnet->tx_lock);
- }
- 
- static int rionet_open(struct net_device *ndev)
-diff --git a/drivers/net/tun.c b/drivers/net/tun.c
-index 88bb8cc3555b..81ecc2ed8137 100644
---- a/drivers/net/tun.c
-+++ b/drivers/net/tun.c
-@@ -621,7 +621,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
- 
- 	/* Re-attach the filter to persist device */
- 	if (!skip_filter && (tun->filter_attached == true)) {
--		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
-+		err = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
-+					 lockdep_rtnl_is_held());
- 		if (!err)
- 			goto out;
- 	}
-@@ -1000,7 +1001,6 @@ static void tun_net_init(struct net_device *dev)
- 		/* Zero header length */
- 		dev->type = ARPHRD_NONE;
- 		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
--		dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
- 		break;
- 
- 	case IFF_TAP:
-@@ -1012,7 +1012,6 @@ static void tun_net_init(struct net_device *dev)
- 
- 		eth_hw_addr_random(dev);
- 
--		dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
- 		break;
- 	}
- }
-@@ -1466,6 +1465,8 @@ static void tun_setup(struct net_device *dev)
- 
- 	dev->ethtool_ops = &tun_ethtool_ops;
- 	dev->destructor = tun_free_netdev;
-+	/* We prefer our own queue length */
-+	dev->tx_queue_len = TUN_READQ_SIZE;
- }
- 
- /* Trivial set of netlink ops to allow deleting tun or tap
-@@ -1807,7 +1808,7 @@ static void tun_detach_filter(struct tun_struct *tun, int n)
- 
- 	for (i = 0; i < n; i++) {
- 		tfile = rtnl_dereference(tun->tfiles[i]);
--		sk_detach_filter(tfile->socket.sk);
-+		__sk_detach_filter(tfile->socket.sk, lockdep_rtnl_is_held());
- 	}
- 
- 	tun->filter_attached = false;
-@@ -1820,7 +1821,8 @@ static int tun_attach_filter(struct tun_struct *tun)
- 
- 	for (i = 0; i < tun->numqueues; i++) {
- 		tfile = rtnl_dereference(tun->tfiles[i]);
--		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
-+		ret = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
-+					 lockdep_rtnl_is_held());
- 		if (ret) {
- 			tun_detach_filter(tun, i);
- 			return ret;
-diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
-index 0c5c22b84da8..7de5ab589e4e 100644
---- a/drivers/net/usb/asix_common.c
-+++ b/drivers/net/usb/asix_common.c
-@@ -66,7 +66,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
- 	 * buffer.
- 	 */
- 	if (rx->remaining && (rx->remaining + sizeof(u32) <= skb->len)) {
--		offset = ((rx->remaining + 1) & 0xfffe) + sizeof(u32);
-+		offset = ((rx->remaining + 1) & 0xfffe);
- 		rx->header = get_unaligned_le32(skb->data + offset);
- 		offset = 0;
- 
-diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
-index bdd83d95ec0a..96a5028621c8 100644
---- a/drivers/net/usb/cdc_mbim.c
-+++ b/drivers/net/usb/cdc_mbim.c
-@@ -617,8 +617,13 @@ static const struct usb_device_id mbim_devs[] = {
- 	{ USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
- 	  .driver_info = (unsigned long)&cdc_mbim_info,
- 	},
--	/* Huawei E3372 fails unless NDP comes after the IP packets */
--	{ USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
-+
-+	/* Some Huawei devices, ME906s-158 (12d1:15c1) and E3372
-+	 * (12d1:157d), are known to fail unless the NDP is placed
-+	 * after the IP packets.  Applying the quirk to all Huawei
-+	 * devices is broader than necessary, but harmless.
-+	 */
-+	{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
- 	  .driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end,
- 	},
- 	/* default entry */
-diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
-index a3a4ccf7cf52..1232a8c608b4 100644
---- a/drivers/net/usb/qmi_wwan.c
-+++ b/drivers/net/usb/qmi_wwan.c
-@@ -844,6 +844,7 @@ static const struct usb_device_id products[] = {
- 	{QMI_FIXED_INTF(0x19d2, 0x1426, 2)},	/* ZTE MF91 */
- 	{QMI_FIXED_INTF(0x19d2, 0x1428, 2)},	/* Telewell TW-LTE 4G v2 */
- 	{QMI_FIXED_INTF(0x19d2, 0x2002, 4)},	/* ZTE (Vodafone) K3765-Z */
-+	{QMI_FIXED_INTF(0x2001, 0x7e19, 4)},	/* D-Link DWM-221 B1 */
- 	{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
- 	{QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
- 	{QMI_FIXED_INTF(0x1199, 0x68a2, 8)},	/* Sierra Wireless MC7710 in QMI mode */
-diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
-index 44541dbc5c28..69b994f3b8c5 100644
---- a/drivers/net/wan/farsync.c
-+++ b/drivers/net/wan/farsync.c
-@@ -2516,7 +2516,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-                 dev->mem_start   = card->phys_mem
-                                  + BUF_OFFSET ( txBuffer[i][0][0]);
-                 dev->mem_end     = card->phys_mem
--                                 + BUF_OFFSET ( txBuffer[i][NUM_TX_BUFFER][0]);
-+                                 + BUF_OFFSET ( txBuffer[i][NUM_TX_BUFFER - 1][LEN_RX_BUFFER - 1]);
-                 dev->base_addr   = card->pci_conf;
-                 dev->irq         = card->irq;
- 
-diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
-index 91afa3ae414c..a192d451dab2 100644
---- a/drivers/net/wireless/ath/ath10k/htt_rx.c
-+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
-@@ -2143,11 +2143,7 @@ EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
- void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
- 					     struct sk_buff *skb)
- {
--	struct ath10k_pktlog_10_4_hdr *hdr =
--		(struct ath10k_pktlog_10_4_hdr *)skb->data;
--
--	trace_ath10k_htt_pktlog(ar, hdr->payload,
--				sizeof(*hdr) + __le16_to_cpu(hdr->size));
-+	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
- 	dev_kfree_skb_any(skb);
- }
- EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
-diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
-index 8f8793004b9f..1b271b99c49e 100644
---- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
-+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
-@@ -274,6 +274,9 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
- 	};
- 	static const int inc[4] = { 0, 100, 0, 0 };
- 
-+	memset(&mask_m, 0, sizeof(int8_t) * 123);
-+	memset(&mask_p, 0, sizeof(int8_t) * 123);
-+
- 	cur_bin = -6000;
- 	upper = bin + 100;
- 	lower = bin - 100;
-@@ -424,14 +427,9 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
- 	int tmp, new;
- 	int i;
- 
--	int8_t mask_m[123];
--	int8_t mask_p[123];
- 	int cur_bb_spur;
- 	bool is2GHz = IS_CHAN_2GHZ(chan);
- 
--	memset(&mask_m, 0, sizeof(int8_t) * 123);
--	memset(&mask_p, 0, sizeof(int8_t) * 123);
--
- 	for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
- 		cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
- 		if (AR_NO_SPUR == cur_bb_spur)
-diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
-index db6624527d99..53d7445a5d12 100644
---- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
-+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
-@@ -178,14 +178,9 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
- 	int i;
- 	struct chan_centers centers;
- 
--	int8_t mask_m[123];
--	int8_t mask_p[123];
- 	int cur_bb_spur;
- 	bool is2GHz = IS_CHAN_2GHZ(chan);
- 
--	memset(&mask_m, 0, sizeof(int8_t) * 123);
--	memset(&mask_p, 0, sizeof(int8_t) * 123);
--
- 	ath9k_hw_get_channel_centers(ah, chan, &centers);
- 	freq = centers.synth_center;
- 
-diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
-index 73fb4232f9f2..a794157a147d 100644
---- a/drivers/net/wireless/ath/ath9k/eeprom.c
-+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
-@@ -477,10 +477,9 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
- 
- 	if (match) {
- 		if (AR_SREV_9287(ah)) {
--			/* FIXME: array overrun? */
- 			for (i = 0; i < numXpdGains; i++) {
- 				minPwrT4[i] = data_9287[idxL].pwrPdg[i][0];
--				maxPwrT4[i] = data_9287[idxL].pwrPdg[i][4];
-+				maxPwrT4[i] = data_9287[idxL].pwrPdg[i][intercepts - 1];
- 				ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
- 						data_9287[idxL].pwrPdg[i],
- 						data_9287[idxL].vpdPdg[i],
-@@ -490,7 +489,7 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
- 		} else if (eeprom_4k) {
- 			for (i = 0; i < numXpdGains; i++) {
- 				minPwrT4[i] = data_4k[idxL].pwrPdg[i][0];
--				maxPwrT4[i] = data_4k[idxL].pwrPdg[i][4];
-+				maxPwrT4[i] = data_4k[idxL].pwrPdg[i][intercepts - 1];
- 				ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
- 						data_4k[idxL].pwrPdg[i],
- 						data_4k[idxL].vpdPdg[i],
-@@ -500,7 +499,7 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
- 		} else {
- 			for (i = 0; i < numXpdGains; i++) {
- 				minPwrT4[i] = data_def[idxL].pwrPdg[i][0];
--				maxPwrT4[i] = data_def[idxL].pwrPdg[i][4];
-+				maxPwrT4[i] = data_def[idxL].pwrPdg[i][intercepts - 1];
- 				ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
- 						data_def[idxL].pwrPdg[i],
- 						data_def[idxL].vpdPdg[i],
-diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
-index 2ca783fa50cf..7e269f9aa607 100644
---- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
-+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
-@@ -32,7 +32,7 @@
- #define BRCMF_FLOWRING_LOW		(BRCMF_FLOWRING_HIGH - 256)
- #define BRCMF_FLOWRING_INVALID_IFIDX	0xff
- 
--#define BRCMF_FLOWRING_HASH_AP(da, fifo, ifidx) (da[5] + fifo + ifidx * 16)
-+#define BRCMF_FLOWRING_HASH_AP(da, fifo, ifidx) (da[5] * 2 + fifo + ifidx * 16)
- #define BRCMF_FLOWRING_HASH_STA(fifo, ifidx) (fifo + ifidx * 16)
- 
- static const u8 brcmf_flowring_prio2fifo[] = {
-@@ -68,7 +68,7 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
- 			  u8 prio, u8 ifidx)
- {
- 	struct brcmf_flowring_hash *hash;
--	u8 hash_idx;
-+	u16 hash_idx;
- 	u32 i;
- 	bool found;
- 	bool sta;
-@@ -88,6 +88,7 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
- 	}
- 	hash_idx =  sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
- 			  BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
-+	hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
- 	found = false;
- 	hash = flow->hash;
- 	for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
-@@ -98,6 +99,7 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
- 			break;
- 		}
- 		hash_idx++;
-+		hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
- 	}
- 	if (found)
- 		return hash[hash_idx].flowid;
-@@ -111,7 +113,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
- {
- 	struct brcmf_flowring_ring *ring;
- 	struct brcmf_flowring_hash *hash;
--	u8 hash_idx;
-+	u16 hash_idx;
- 	u32 i;
- 	bool found;
- 	u8 fifo;
-@@ -131,6 +133,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
- 	}
- 	hash_idx =  sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
- 			  BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
-+	hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
- 	found = false;
- 	hash = flow->hash;
- 	for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
-@@ -140,6 +143,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
- 			break;
- 		}
- 		hash_idx++;
-+		hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
- 	}
- 	if (found) {
- 		for (i = 0; i < flow->nrofrings; i++) {
-@@ -169,7 +173,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
- }
- 
- 
--u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid)
-+u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u16 flowid)
- {
- 	struct brcmf_flowring_ring *ring;
- 
-@@ -179,7 +183,7 @@ u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid)
- }
- 
- 
--static void brcmf_flowring_block(struct brcmf_flowring *flow, u8 flowid,
-+static void brcmf_flowring_block(struct brcmf_flowring *flow, u16 flowid,
- 				 bool blocked)
- {
- 	struct brcmf_flowring_ring *ring;
-@@ -228,10 +232,10 @@ static void brcmf_flowring_block(struct brcmf_flowring *flow, u8 flowid,
- }
- 
- 
--void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid)
-+void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid)
- {
- 	struct brcmf_flowring_ring *ring;
--	u8 hash_idx;
-+	u16 hash_idx;
- 	struct sk_buff *skb;
- 
- 	ring = flow->rings[flowid];
-@@ -253,7 +257,7 @@ void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid)
- }
- 
- 
--u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
-+u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u16 flowid,
- 			   struct sk_buff *skb)
- {
- 	struct brcmf_flowring_ring *ring;
-@@ -279,7 +283,7 @@ u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
- }
- 
- 
--struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid)
-+struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u16 flowid)
- {
- 	struct brcmf_flowring_ring *ring;
- 	struct sk_buff *skb;
-@@ -300,7 +304,7 @@ struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid)
- }
- 
- 
--void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
-+void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u16 flowid,
- 			     struct sk_buff *skb)
- {
- 	struct brcmf_flowring_ring *ring;
-@@ -311,7 +315,7 @@ void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
- }
- 
- 
--u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid)
-+u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u16 flowid)
- {
- 	struct brcmf_flowring_ring *ring;
- 
-@@ -326,7 +330,7 @@ u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid)
- }
- 
- 
--void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid)
-+void brcmf_flowring_open(struct brcmf_flowring *flow, u16 flowid)
- {
- 	struct brcmf_flowring_ring *ring;
- 
-@@ -340,10 +344,10 @@ void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid)
- }
- 
- 
--u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u8 flowid)
-+u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u16 flowid)
- {
- 	struct brcmf_flowring_ring *ring;
--	u8 hash_idx;
-+	u16 hash_idx;
- 
- 	ring = flow->rings[flowid];
- 	hash_idx = ring->hash_id;
-@@ -384,7 +388,7 @@ void brcmf_flowring_detach(struct brcmf_flowring *flow)
- 	struct brcmf_pub *drvr = bus_if->drvr;
- 	struct brcmf_flowring_tdls_entry *search;
- 	struct brcmf_flowring_tdls_entry *remove;
--	u8 flowid;
-+	u16 flowid;
- 
- 	for (flowid = 0; flowid < flow->nrofrings; flowid++) {
- 		if (flow->rings[flowid])
-@@ -408,7 +412,7 @@ void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx,
- 	struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
- 	struct brcmf_pub *drvr = bus_if->drvr;
- 	u32 i;
--	u8 flowid;
-+	u16 flowid;
- 
- 	if (flow->addr_mode[ifidx] != addr_mode) {
- 		for (i = 0; i < ARRAY_SIZE(flow->hash); i++) {
-@@ -434,7 +438,7 @@ void brcmf_flowring_delete_peer(struct brcmf_flowring *flow, int ifidx,
- 	struct brcmf_flowring_tdls_entry *prev;
- 	struct brcmf_flowring_tdls_entry *search;
- 	u32 i;
--	u8 flowid;
-+	u16 flowid;
- 	bool sta;
- 
- 	sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);
-diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h
-index 95fd1c9675d1..068e68d94999 100644
---- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h
-+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h
-@@ -16,7 +16,7 @@
- #define BRCMFMAC_FLOWRING_H
- 
- 
--#define BRCMF_FLOWRING_HASHSIZE		256
-+#define BRCMF_FLOWRING_HASHSIZE		512		/* has to be 2^x */
- #define BRCMF_FLOWRING_INVALID_ID	0xFFFFFFFF
- 
- 
-@@ -24,7 +24,7 @@ struct brcmf_flowring_hash {
- 	u8 mac[ETH_ALEN];
- 	u8 fifo;
- 	u8 ifidx;
--	u8 flowid;
-+	u16 flowid;
- };
- 
- enum ring_status {
-@@ -61,16 +61,16 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
- 			  u8 prio, u8 ifidx);
- u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
- 			  u8 prio, u8 ifidx);
--void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid);
--void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid);
--u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid);
--u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
-+void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid);
-+void brcmf_flowring_open(struct brcmf_flowring *flow, u16 flowid);
-+u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u16 flowid);
-+u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u16 flowid,
- 			   struct sk_buff *skb);
--struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid);
--void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
-+struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u16 flowid);
-+void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u16 flowid,
- 			     struct sk_buff *skb);
--u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid);
--u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u8 flowid);
-+u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u16 flowid);
-+u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u16 flowid);
- struct brcmf_flowring *brcmf_flowring_attach(struct device *dev, u16 nrofrings);
- void brcmf_flowring_detach(struct brcmf_flowring *flow);
- void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx,
-diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
-index c2bdb91746cf..922966734a7f 100644
---- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
-+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
-@@ -677,7 +677,7 @@ static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx,
- }
- 
- 
--static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u8 flowid)
-+static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u16 flowid)
- {
- 	struct brcmf_flowring *flow = msgbuf->flow;
- 	struct brcmf_commonring *commonring;
-@@ -1310,7 +1310,7 @@ int brcmf_proto_msgbuf_rx_trigger(struct device *dev)
- }
- 
- 
--void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid)
-+void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid)
- {
- 	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
- 	struct msgbuf_tx_flowring_delete_req *delete;
-@@ -1415,6 +1415,13 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
- 	u32 count;
- 
- 	if_msgbuf = drvr->bus_if->msgbuf;
-+
-+	if (if_msgbuf->nrof_flowrings >= BRCMF_FLOWRING_HASHSIZE) {
-+		brcmf_err("driver not configured for this many flowrings %d\n",
-+			  if_msgbuf->nrof_flowrings);
-+		if_msgbuf->nrof_flowrings = BRCMF_FLOWRING_HASHSIZE - 1;
-+	}
-+
- 	msgbuf = kzalloc(sizeof(*msgbuf), GFP_KERNEL);
- 	if (!msgbuf)
- 		goto fail;
-diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
-index 3d513e407e3d..ee6906a3c3f6 100644
---- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
-+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
-@@ -33,7 +33,7 @@
- 
- 
- int brcmf_proto_msgbuf_rx_trigger(struct device *dev);
--void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid);
-+void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid);
- int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr);
- void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr);
- #else
-diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
-index d70a1716f3e0..1486f33a743e 100644
---- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
-+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
-@@ -1143,6 +1143,8 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
- 	/* the fw is stopped, the aux sta is dead: clean up driver state */
- 	iwl_mvm_del_aux_sta(mvm);
- 
-+	iwl_free_fw_paging(mvm);
-+
- 	/*
- 	 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
- 	 * won't be called in this case).
-diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
-index e80be9a59520..89ea70deeb84 100644
---- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
-+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
-@@ -684,8 +684,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
- 	for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
- 		kfree(mvm->nvm_sections[i].data);
- 
--	iwl_free_fw_paging(mvm);
--
- 	iwl_mvm_tof_clean(mvm);
- 
- 	ieee80211_free_hw(mvm->hw);
-diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
-index 5a854c609477..1198caac35c8 100644
---- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
-+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
-@@ -731,8 +731,8 @@ static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
- 	 */
- 	val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
- 	if (val & (BIT(1) | BIT(17))) {
--		IWL_INFO(trans,
--			 "can't access the RSA semaphore it is write protected\n");
-+		IWL_DEBUG_INFO(trans,
-+			       "can't access the RSA semaphore it is write protected\n");
- 		return 0;
- 	}
- 
-diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
-index ff3ee9dfbbd5..23bae87d4d3d 100644
---- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
-+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
-@@ -607,11 +607,13 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
- 
- 	case EVENT_PS_AWAKE:
- 		mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n");
--		if (!adapter->pps_uapsd_mode && priv->port_open &&
-+		if (!adapter->pps_uapsd_mode &&
-+		    (priv->port_open ||
-+		     (priv->bss_mode == NL80211_IFTYPE_ADHOC)) &&
- 		    priv->media_connected && adapter->sleep_period.period) {
--				adapter->pps_uapsd_mode = true;
--				mwifiex_dbg(adapter, EVENT,
--					    "event: PPS/UAPSD mode activated\n");
-+			adapter->pps_uapsd_mode = true;
-+			mwifiex_dbg(adapter, EVENT,
-+				    "event: PPS/UAPSD mode activated\n");
- 		}
- 		adapter->tx_lock_flag = false;
- 		if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) {
-diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
-index 6a4fc5d183cf..d7db6f23e728 100644
---- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
-+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
-@@ -314,6 +314,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
- 			mwifiex_dbg(adapter, ERROR,
- 				    "Attempt to reconnect on csa closed chan(%d)\n",
- 				    bss_desc->channel);
-+			ret = -1;
- 			goto done;
- 		}
- 
-diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
-index acccd6734e3b..499e5a741c62 100644
---- a/drivers/net/wireless/marvell/mwifiex/wmm.c
-+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
-@@ -475,7 +475,8 @@ mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
- 		priv = adapter->priv[i];
- 		if (!priv)
- 			continue;
--		if (!priv->port_open)
-+		if (!priv->port_open &&
-+		    (priv->bss_mode != NL80211_IFTYPE_ADHOC))
- 			continue;
- 		if (adapter->if_ops.is_port_ready &&
- 		    !adapter->if_ops.is_port_ready(priv))
-@@ -1099,7 +1100,8 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
- 
- 			priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv;
- 
--			if (!priv_tmp->port_open ||
-+			if (((priv_tmp->bss_mode != NL80211_IFTYPE_ADHOC) &&
-+			     !priv_tmp->port_open) ||
- 			    (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0))
- 				continue;
- 
-diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
-index bf9afbf46c1b..4b0bb6b4f6f1 100644
---- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
-+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
-@@ -1026,6 +1026,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
- 	{ USB_DEVICE(0x0411, 0x01a2) },
- 	{ USB_DEVICE(0x0411, 0x01ee) },
- 	{ USB_DEVICE(0x0411, 0x01a8) },
-+	{ USB_DEVICE(0x0411, 0x01fd) },
- 	/* Corega */
- 	{ USB_DEVICE(0x07aa, 0x002f) },
- 	{ USB_DEVICE(0x07aa, 0x003c) },
-diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c
-index 28f7010e7108..1aca77719521 100644
---- a/drivers/net/wireless/realtek/rtlwifi/rc.c
-+++ b/drivers/net/wireless/realtek/rtlwifi/rc.c
-@@ -41,7 +41,7 @@ static u8 _rtl_rc_get_highest_rix(struct rtl_priv *rtlpriv,
- 	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
- 	struct rtl_phy *rtlphy = &(rtlpriv->phy);
- 	struct rtl_sta_info *sta_entry = NULL;
--	u8 wireless_mode = 0;
-+	u16 wireless_mode = 0;
- 
- 	/*
- 	 *this rate is no use for true rate, firmware
-@@ -99,7 +99,7 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
- {
- 	struct rtl_mac *mac = rtl_mac(rtlpriv);
- 	struct rtl_sta_info *sta_entry = NULL;
--	u8 wireless_mode = 0;
-+	u16 wireless_mode = 0;
- 	u8 sgi_20 = 0, sgi_40 = 0, sgi_80 = 0;
- 
- 	if (sta) {
-diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
-index bbb789f8990b..5e5719b26774 100644
---- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
-+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
-@@ -3855,7 +3855,7 @@ void rtl8821ae_update_channel_access_setting(struct ieee80211_hw *hw)
- {
- 	struct rtl_priv *rtlpriv = rtl_priv(hw);
- 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
--	u8 wireless_mode = mac->mode;
-+	u16 wireless_mode = mac->mode;
- 	u8 sifs_timer, r2t_sifs;
- 
- 	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
-diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
-index 4544752a2ba8..84397b190cc0 100644
---- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
-+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
-@@ -1323,14 +1323,13 @@ struct rtl_tid_data {
- 
- struct rtl_sta_info {
- 	struct list_head list;
--	u8 ratr_index;
--	u8 wireless_mode;
--	u8 mimo_ps;
--	u8 mac_addr[ETH_ALEN];
- 	struct rtl_tid_data tids[MAX_TID_COUNT];
--
- 	/* just used for ap adhoc or mesh*/
- 	struct rssi_sta rssi_stat;
-+	u16 wireless_mode;
-+	u8 ratr_index;
-+	u8 mimo_ps;
-+	u8 mac_addr[ETH_ALEN];
- } __packed;
- 
- struct rtl_priv;
-diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
-index c96405498bf4..4b59f67724de 100644
---- a/drivers/net/wireless/ti/wlcore/event.c
-+++ b/drivers/net/wireless/ti/wlcore/event.c
-@@ -38,7 +38,7 @@
- 
- int wlcore_event_fw_logger(struct wl1271 *wl)
- {
--	u32 ret;
-+	int ret;
- 	struct fw_logger_information fw_log;
- 	u8  *buffer;
- 	u32 internal_fw_addrbase = WL18XX_DATA_RAM_BASE_ADDRESS;
-diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
-index 1f44a151d206..d5a099b022e4 100644
---- a/drivers/nfc/st21nfca/i2c.c
-+++ b/drivers/nfc/st21nfca/i2c.c
-@@ -524,8 +524,10 @@ static int st21nfca_hci_i2c_acpi_request_resources(struct i2c_client *client)
- 	/* Get EN GPIO from ACPI */
- 	gpiod_ena = devm_gpiod_get_index(dev, ST21NFCA_GPIO_NAME_EN, 1,
- 					 GPIOD_OUT_LOW);
--	if (!IS_ERR(gpiod_ena))
--		phy->gpio_ena = desc_to_gpio(gpiod_ena);
-+	if (!IS_ERR(gpiod_ena)) {
-+		nfc_err(dev, "Unable to get ENABLE GPIO\n");
-+		return -ENODEV;
-+	}
- 
- 	phy->gpio_ena = desc_to_gpio(gpiod_ena);
- 
-diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
-index 588803ad6847..6ccba0d862df 100644
---- a/drivers/ntb/hw/amd/ntb_hw_amd.c
-+++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
-@@ -357,20 +357,6 @@ static int amd_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
- 	return 0;
- }
- 
--static int amd_ntb_peer_db_addr(struct ntb_dev *ntb,
--				phys_addr_t *db_addr,
--				resource_size_t *db_size)
--{
--	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
--
--	if (db_addr)
--		*db_addr = (phys_addr_t)(ndev->peer_mmio + AMD_DBREQ_OFFSET);
--	if (db_size)
--		*db_size = sizeof(u32);
--
--	return 0;
--}
--
- static int amd_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
- {
- 	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
-@@ -415,20 +401,6 @@ static int amd_ntb_spad_write(struct ntb_dev *ntb,
- 	return 0;
- }
- 
--static int amd_ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
--				  phys_addr_t *spad_addr)
--{
--	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
--
--	if (idx < 0 || idx >= ndev->spad_count)
--		return -EINVAL;
--
--	if (spad_addr)
--		*spad_addr = (phys_addr_t)(ndev->self_mmio + AMD_SPAD_OFFSET +
--					   ndev->peer_spad + (idx << 2));
--	return 0;
--}
--
- static u32 amd_ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
- {
- 	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
-@@ -472,12 +444,10 @@ static const struct ntb_dev_ops amd_ntb_ops = {
- 	.db_clear		= amd_ntb_db_clear,
- 	.db_set_mask		= amd_ntb_db_set_mask,
- 	.db_clear_mask		= amd_ntb_db_clear_mask,
--	.peer_db_addr		= amd_ntb_peer_db_addr,
- 	.peer_db_set		= amd_ntb_peer_db_set,
- 	.spad_count		= amd_ntb_spad_count,
- 	.spad_read		= amd_ntb_spad_read,
- 	.spad_write		= amd_ntb_spad_write,
--	.peer_spad_addr		= amd_ntb_peer_spad_addr,
- 	.peer_spad_read		= amd_ntb_peer_spad_read,
- 	.peer_spad_write	= amd_ntb_peer_spad_write,
- };
-diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
-index c8a37ba4b4f9..6bdc1e7b7503 100644
---- a/drivers/ntb/test/ntb_perf.c
-+++ b/drivers/ntb/test/ntb_perf.c
-@@ -178,7 +178,7 @@ static void perf_copy_callback(void *data)
- 	atomic_dec(&pctx->dma_sync);
- }
- 
--static ssize_t perf_copy(struct pthr_ctx *pctx, char *dst,
-+static ssize_t perf_copy(struct pthr_ctx *pctx, char __iomem *dst,
- 			 char *src, size_t size)
- {
- 	struct perf_ctx *perf = pctx->perf;
-@@ -189,7 +189,8 @@ static ssize_t perf_copy(struct pthr_ctx *pctx, char *dst,
- 	dma_cookie_t cookie;
- 	size_t src_off, dst_off;
- 	struct perf_mw *mw = &perf->mw;
--	u64 vbase, dst_vaddr;
-+	void __iomem *vbase;
-+	void __iomem *dst_vaddr;
- 	dma_addr_t dst_phys;
- 	int retries = 0;
- 
-@@ -204,14 +205,14 @@ static ssize_t perf_copy(struct pthr_ctx *pctx, char *dst,
- 	}
- 
- 	device = chan->device;
--	src_off = (size_t)src & ~PAGE_MASK;
--	dst_off = (size_t)dst & ~PAGE_MASK;
-+	src_off = (uintptr_t)src & ~PAGE_MASK;
-+	dst_off = (uintptr_t __force)dst & ~PAGE_MASK;
- 
- 	if (!is_dma_copy_aligned(device, src_off, dst_off, size))
- 		return -ENODEV;
- 
--	vbase = (u64)(u64 *)mw->vbase;
--	dst_vaddr = (u64)(u64 *)dst;
-+	vbase = mw->vbase;
-+	dst_vaddr = dst;
- 	dst_phys = mw->phys_addr + (dst_vaddr - vbase);
- 
- 	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
-@@ -261,13 +262,13 @@ err_get_unmap:
- 	return 0;
- }
- 
--static int perf_move_data(struct pthr_ctx *pctx, char *dst, char *src,
-+static int perf_move_data(struct pthr_ctx *pctx, char __iomem *dst, char *src,
- 			  u64 buf_size, u64 win_size, u64 total)
- {
- 	int chunks, total_chunks, i;
- 	int copied_chunks = 0;
- 	u64 copied = 0, result;
--	char *tmp = dst;
-+	char __iomem *tmp = dst;
- 	u64 perf, diff_us;
- 	ktime_t kstart, kstop, kdiff;
- 
-@@ -324,7 +325,7 @@ static int ntb_perf_thread(void *data)
- 	struct perf_ctx *perf = pctx->perf;
- 	struct pci_dev *pdev = perf->ntb->pdev;
- 	struct perf_mw *mw = &perf->mw;
--	char *dst;
-+	char __iomem *dst;
- 	u64 win_size, buf_size, total;
- 	void *src;
- 	int rc, node, i;
-@@ -364,7 +365,7 @@ static int ntb_perf_thread(void *data)
- 	if (buf_size > MAX_TEST_SIZE)
- 		buf_size = MAX_TEST_SIZE;
- 
--	dst = (char *)mw->vbase;
-+	dst = (char __iomem *)mw->vbase;
- 
- 	atomic_inc(&perf->tsync);
- 	while (atomic_read(&perf->tsync) != perf->perf_threads)
-diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
-index 5d28e9405f32..cdbab06bf74f 100644
---- a/drivers/nvdimm/bus.c
-+++ b/drivers/nvdimm/bus.c
-@@ -335,7 +335,7 @@ static const struct nd_cmd_desc __nd_cmd_dimm_descs[] = {
- 	[ND_CMD_IMPLEMENTED] = { },
- 	[ND_CMD_SMART] = {
- 		.out_num = 2,
--		.out_sizes = { 4, 8, },
-+		.out_sizes = { 4, 128, },
- 	},
- 	[ND_CMD_SMART_THRESHOLD] = {
- 		.out_num = 2,
-@@ -513,10 +513,10 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
- 
- 	/* fail write commands (when read-only) */
- 	if (read_only)
--		switch (ioctl_cmd) {
--		case ND_IOCTL_VENDOR:
--		case ND_IOCTL_SET_CONFIG_DATA:
--		case ND_IOCTL_ARS_START:
-+		switch (cmd) {
-+		case ND_CMD_VENDOR:
-+		case ND_CMD_SET_CONFIG_DATA:
-+		case ND_CMD_ARS_START:
- 			dev_dbg(&nvdimm_bus->dev, "'%s' command while read-only.\n",
- 					nvdimm ? nvdimm_cmd_name(cmd)
- 					: nvdimm_bus_cmd_name(cmd));
-diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
-index ae81a2f1da50..f0b56b3aac4d 100644
---- a/drivers/nvdimm/pfn_devs.c
-+++ b/drivers/nvdimm/pfn_devs.c
-@@ -315,7 +315,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
- 	} else {
- 		/* from init we validate */
- 		if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
--			return -EINVAL;
-+			return -ENODEV;
- 	}
- 
- 	if (nd_pfn->align > nvdimm_namespace_capacity(ndns)) {
-diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
-index 8d0b54670184..02c574f8ccb3 100644
---- a/drivers/nvdimm/pmem.c
-+++ b/drivers/nvdimm/pmem.c
-@@ -66,22 +66,25 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
- 			unsigned int len, unsigned int off, int rw,
- 			sector_t sector)
- {
-+	int rc = 0;
- 	void *mem = kmap_atomic(page);
- 	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
- 	void __pmem *pmem_addr = pmem->virt_addr + pmem_off;
- 
- 	if (rw == READ) {
- 		if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
--			return -EIO;
--		memcpy_from_pmem(mem + off, pmem_addr, len);
--		flush_dcache_page(page);
-+			rc = -EIO;
-+		else {
-+			memcpy_from_pmem(mem + off, pmem_addr, len);
-+			flush_dcache_page(page);
-+		}
- 	} else {
- 		flush_dcache_page(page);
- 		memcpy_to_pmem(pmem_addr, mem + off, len);
- 	}
- 
- 	kunmap_atomic(mem);
--	return 0;
-+	return rc;
- }
- 
- static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
-@@ -311,9 +314,16 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
- 	 * implementation will limit the pfns advertised through
- 	 * ->direct_access() to those that are included in the memmap.
- 	 */
--	if (nd_pfn->mode == PFN_MODE_PMEM)
--		offset = ALIGN(SZ_8K + 64 * npfns, nd_pfn->align);
--	else if (nd_pfn->mode == PFN_MODE_RAM)
-+	if (nd_pfn->mode == PFN_MODE_PMEM) {
-+		unsigned long memmap_size;
-+
-+		/*
-+		 * vmemmap_populate_hugepages() allocates the memmap array in
-+		 * HPAGE_SIZE chunks.
-+		 */
-+		memmap_size = ALIGN(64 * npfns, PMD_SIZE);
-+		offset = ALIGN(SZ_8K + memmap_size, nd_pfn->align);
-+	} else if (nd_pfn->mode == PFN_MODE_RAM)
- 		offset = ALIGN(SZ_8K, nd_pfn->align);
- 	else
- 		goto err;
-diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c
-index 8ba19bba3156..2bb3c5799ac4 100644
---- a/drivers/nvmem/mxs-ocotp.c
-+++ b/drivers/nvmem/mxs-ocotp.c
-@@ -94,7 +94,7 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
- 	if (ret)
- 		goto close_banks;
- 
--	while (val_size) {
-+	while (val_size >= reg_size) {
- 		if ((offset < OCOTP_DATA_OFFSET) || (offset % 16)) {
- 			/* fill up non-data register */
- 			*buf = 0;
-@@ -103,7 +103,7 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
- 		}
- 
- 		buf++;
--		val_size--;
-+		val_size -= reg_size;
- 		offset += reg_size;
- 	}
- 
-diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
-index 1a3556a9e9ea..ed01c0172e4a 100644
---- a/drivers/of/of_reserved_mem.c
-+++ b/drivers/of/of_reserved_mem.c
-@@ -32,11 +32,13 @@ int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
- 	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
- 	phys_addr_t *res_base)
- {
-+	phys_addr_t base;
- 	/*
- 	 * We use __memblock_alloc_base() because memblock_alloc_base()
- 	 * panic()s on allocation failure.
- 	 */
--	phys_addr_t base = __memblock_alloc_base(size, align, end);
-+	end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
-+	base = __memblock_alloc_base(size, align, end);
- 	if (!base)
- 		return -ENOMEM;
- 
-diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
-index fe600964fa50..88ccfeaa49c7 100644
---- a/drivers/pci/host/pci-imx6.c
-+++ b/drivers/pci/host/pci-imx6.c
-@@ -32,7 +32,7 @@
- #define to_imx6_pcie(x)	container_of(x, struct imx6_pcie, pp)
- 
- struct imx6_pcie {
--	struct gpio_desc	*reset_gpio;
-+	int			reset_gpio;
- 	struct clk		*pcie_bus;
- 	struct clk		*pcie_phy;
- 	struct clk		*pcie;
-@@ -287,10 +287,10 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
- 	usleep_range(200, 500);
- 
- 	/* Some boards don't have PCIe reset GPIO. */
--	if (imx6_pcie->reset_gpio) {
--		gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 0);
-+	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
-+		gpio_set_value_cansleep(imx6_pcie->reset_gpio, 0);
- 		msleep(100);
--		gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 1);
-+		gpio_set_value_cansleep(imx6_pcie->reset_gpio, 1);
- 	}
- 	return 0;
- 
-@@ -561,6 +561,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
- {
- 	struct imx6_pcie *imx6_pcie;
- 	struct pcie_port *pp;
-+	struct device_node *np = pdev->dev.of_node;
- 	struct resource *dbi_base;
- 	int ret;
- 
-@@ -581,8 +582,15 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
- 		return PTR_ERR(pp->dbi_base);
- 
- 	/* Fetch GPIOs */
--	imx6_pcie->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
--							GPIOD_OUT_LOW);
-+	imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
-+	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
-+		ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
-+					    GPIOF_OUT_INIT_LOW, "PCIe reset");
-+		if (ret) {
-+			dev_err(&pdev->dev, "unable to get reset gpio\n");
-+			return ret;
-+		}
-+	}
- 
- 	/* Fetch clocks */
- 	imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
-diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
-index 6d7ab9bb0d5a..6b0056e9c33e 100644
---- a/drivers/pci/probe.c
-+++ b/drivers/pci/probe.c
-@@ -179,6 +179,9 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
- 	u16 orig_cmd;
- 	struct pci_bus_region region, inverted_region;
- 
-+	if (dev->non_compliant_bars)
-+		return 0;
-+
- 	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
- 
- 	/* No printks while decoding is disabled! */
-@@ -1171,6 +1174,7 @@ static void pci_msi_setup_pci_dev(struct pci_dev *dev)
- int pci_setup_device(struct pci_dev *dev)
- {
- 	u32 class;
-+	u16 cmd;
- 	u8 hdr_type;
- 	int pos = 0;
- 	struct pci_bus_region region;
-@@ -1214,6 +1218,16 @@ int pci_setup_device(struct pci_dev *dev)
- 	/* device class may be changed after fixup */
- 	class = dev->class >> 8;
- 
-+	if (dev->non_compliant_bars) {
-+		pci_read_config_word(dev, PCI_COMMAND, &cmd);
-+		if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
-+			dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
-+			cmd &= ~PCI_COMMAND_IO;
-+			cmd &= ~PCI_COMMAND_MEMORY;
-+			pci_write_config_word(dev, PCI_COMMAND, cmd);
-+		}
-+	}
-+
- 	switch (dev->hdr_type) {		    /* header type */
- 	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
- 		if (class == PCI_CLASS_BRIDGE_PCI)
-diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
-index 4c2fa05b4589..944674ee3464 100644
---- a/drivers/pcmcia/db1xxx_ss.c
-+++ b/drivers/pcmcia/db1xxx_ss.c
-@@ -56,6 +56,7 @@ struct db1x_pcmcia_sock {
- 	int	stschg_irq;	/* card-status-change irq */
- 	int	card_irq;	/* card irq */
- 	int	eject_irq;	/* db1200/pb1200 have these */
-+	int	insert_gpio;	/* db1000 carddetect gpio */
- 
- #define BOARD_TYPE_DEFAULT	0	/* most boards */
- #define BOARD_TYPE_DB1200	1	/* IRQs aren't gpios */
-@@ -83,7 +84,7 @@ static int db1200_card_inserted(struct db1x_pcmcia_sock *sock)
- /* carddetect gpio: low-active */
- static int db1000_card_inserted(struct db1x_pcmcia_sock *sock)
- {
--	return !gpio_get_value(irq_to_gpio(sock->insert_irq));
-+	return !gpio_get_value(sock->insert_gpio);
- }
- 
- static int db1x_card_inserted(struct db1x_pcmcia_sock *sock)
-@@ -457,9 +458,15 @@ static int db1x_pcmcia_socket_probe(struct platform_device *pdev)
- 	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "card");
- 	sock->card_irq = r ? r->start : 0;
- 
--	/* insert: irq which triggers on card insertion/ejection */
-+	/* insert: irq which triggers on card insertion/ejection
-+	 * BIG FAT NOTE: on DB1000/1100/1500/1550 we pass a GPIO here!
-+	 */
- 	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "insert");
- 	sock->insert_irq = r ? r->start : -1;
-+	if (sock->board_type == BOARD_TYPE_DEFAULT) {
-+		sock->insert_gpio = r ? r->start : -1;
-+		sock->insert_irq = r ? gpio_to_irq(r->start) : -1;
-+	}
- 
- 	/* stschg: irq which trigger on card status change (optional) */
- 	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "stschg");
-diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
-index 0f5997ceb494..08b1d93da9fe 100644
---- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
-+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
-@@ -779,7 +779,7 @@ static int bcm2835_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
- 		}
- 		if (num_pulls) {
- 			err = of_property_read_u32_index(np, "brcm,pull",
--					(num_funcs > 1) ? i : 0, &pull);
-+					(num_pulls > 1) ? i : 0, &pull);
- 			if (err)
- 				goto out;
- 			err = bcm2835_pctl_dt_node_to_map_pull(pc, np, pin,
-diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
-index a5bb93987378..1029aa7889b5 100644
---- a/drivers/pinctrl/freescale/pinctrl-imx.c
-+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
-@@ -726,19 +726,18 @@ int imx_pinctrl_probe(struct platform_device *pdev,
- 
- 	if (of_property_read_bool(dev_np, "fsl,input-sel")) {
- 		np = of_parse_phandle(dev_np, "fsl,input-sel", 0);
--		if (np) {
--			ipctl->input_sel_base = of_iomap(np, 0);
--			if (IS_ERR(ipctl->input_sel_base)) {
--				of_node_put(np);
--				dev_err(&pdev->dev,
--					"iomuxc input select base address not found\n");
--				return PTR_ERR(ipctl->input_sel_base);
--			}
--		} else {
-+		if (!np) {
- 			dev_err(&pdev->dev, "iomuxc fsl,input-sel property not found\n");
- 			return -EINVAL;
- 		}
-+
-+		ipctl->input_sel_base = of_iomap(np, 0);
- 		of_node_put(np);
-+		if (!ipctl->input_sel_base) {
-+			dev_err(&pdev->dev,
-+				"iomuxc input select base address not found\n");
-+			return -ENOMEM;
-+		}
- 	}
- 
- 	imx_pinctrl_desc.name = dev_name(&pdev->dev);
-diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
-index e96e86d2e745..3878d23ca7a8 100644
---- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
-+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
-@@ -949,7 +949,8 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
- 	struct mtk_pinctrl *pctl = dev_get_drvdata(chip->parent);
- 	int eint_num, virq, eint_offset;
- 	unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask, dbnc;
--	static const unsigned int dbnc_arr[] = {0 , 1, 16, 32, 64, 128, 256};
-+	static const unsigned int debounce_time[] = {500, 1000, 16000, 32000, 64000,
-+						128000, 256000};
- 	const struct mtk_desc_pin *pin;
- 	struct irq_data *d;
- 
-@@ -967,9 +968,9 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
- 	if (!mtk_eint_can_en_debounce(pctl, eint_num))
- 		return -ENOSYS;
- 
--	dbnc = ARRAY_SIZE(dbnc_arr);
--	for (i = 0; i < ARRAY_SIZE(dbnc_arr); i++) {
--		if (debounce <= dbnc_arr[i]) {
-+	dbnc = ARRAY_SIZE(debounce_time);
-+	for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
-+		if (debounce <= debounce_time[i]) {
- 			dbnc = i;
- 			break;
- 		}
-diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
-index 352406108fa0..c8969dd49449 100644
---- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
-+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
-@@ -990,7 +990,7 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
- 		int val;
- 
- 		if (pull)
--			pullidx = data_out ? 1 : 2;
-+			pullidx = data_out ? 2 : 1;
- 
- 		seq_printf(s, " gpio-%-3d (%-20.20s) in  %s %s",
- 			   gpio,
-diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
-index ee69db6ae1c7..e1c0d4e1bb33 100644
---- a/drivers/pinctrl/pinctrl-at91-pio4.c
-+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
-@@ -722,9 +722,11 @@ static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
- 			break;
- 		case PIN_CONFIG_BIAS_PULL_UP:
- 			conf |= ATMEL_PIO_PUEN_MASK;
-+			conf &= (~ATMEL_PIO_PDEN_MASK);
- 			break;
- 		case PIN_CONFIG_BIAS_PULL_DOWN:
- 			conf |= ATMEL_PIO_PDEN_MASK;
-+			conf &= (~ATMEL_PIO_PUEN_MASK);
- 			break;
- 		case PIN_CONFIG_DRIVE_OPEN_DRAIN:
- 			if (arg == 0)
-diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
-index 856f736cb1a6..2673cd9d106e 100644
---- a/drivers/pinctrl/pinctrl-pistachio.c
-+++ b/drivers/pinctrl/pinctrl-pistachio.c
-@@ -469,27 +469,27 @@ static const char * const pistachio_mips_pll_lock_groups[] = {
- 	"mfio83",
- };
- 
--static const char * const pistachio_sys_pll_lock_groups[] = {
-+static const char * const pistachio_audio_pll_lock_groups[] = {
- 	"mfio84",
- };
- 
--static const char * const pistachio_wifi_pll_lock_groups[] = {
-+static const char * const pistachio_rpu_v_pll_lock_groups[] = {
- 	"mfio85",
- };
- 
--static const char * const pistachio_bt_pll_lock_groups[] = {
-+static const char * const pistachio_rpu_l_pll_lock_groups[] = {
- 	"mfio86",
- };
- 
--static const char * const pistachio_rpu_v_pll_lock_groups[] = {
-+static const char * const pistachio_sys_pll_lock_groups[] = {
- 	"mfio87",
- };
- 
--static const char * const pistachio_rpu_l_pll_lock_groups[] = {
-+static const char * const pistachio_wifi_pll_lock_groups[] = {
- 	"mfio88",
- };
- 
--static const char * const pistachio_audio_pll_lock_groups[] = {
-+static const char * const pistachio_bt_pll_lock_groups[] = {
- 	"mfio89",
- };
- 
-@@ -559,12 +559,12 @@ enum pistachio_mux_option {
- 	PISTACHIO_FUNCTION_DREQ4,
- 	PISTACHIO_FUNCTION_DREQ5,
- 	PISTACHIO_FUNCTION_MIPS_PLL_LOCK,
-+	PISTACHIO_FUNCTION_AUDIO_PLL_LOCK,
-+	PISTACHIO_FUNCTION_RPU_V_PLL_LOCK,
-+	PISTACHIO_FUNCTION_RPU_L_PLL_LOCK,
- 	PISTACHIO_FUNCTION_SYS_PLL_LOCK,
- 	PISTACHIO_FUNCTION_WIFI_PLL_LOCK,
- 	PISTACHIO_FUNCTION_BT_PLL_LOCK,
--	PISTACHIO_FUNCTION_RPU_V_PLL_LOCK,
--	PISTACHIO_FUNCTION_RPU_L_PLL_LOCK,
--	PISTACHIO_FUNCTION_AUDIO_PLL_LOCK,
- 	PISTACHIO_FUNCTION_DEBUG_RAW_CCA_IND,
- 	PISTACHIO_FUNCTION_DEBUG_ED_SEC20_CCA_IND,
- 	PISTACHIO_FUNCTION_DEBUG_ED_SEC40_CCA_IND,
-@@ -620,12 +620,12 @@ static const struct pistachio_function pistachio_functions[] = {
- 	FUNCTION(dreq4),
- 	FUNCTION(dreq5),
- 	FUNCTION(mips_pll_lock),
-+	FUNCTION(audio_pll_lock),
-+	FUNCTION(rpu_v_pll_lock),
-+	FUNCTION(rpu_l_pll_lock),
- 	FUNCTION(sys_pll_lock),
- 	FUNCTION(wifi_pll_lock),
- 	FUNCTION(bt_pll_lock),
--	FUNCTION(rpu_v_pll_lock),
--	FUNCTION(rpu_l_pll_lock),
--	FUNCTION(audio_pll_lock),
- 	FUNCTION(debug_raw_cca_ind),
- 	FUNCTION(debug_ed_sec20_cca_ind),
- 	FUNCTION(debug_ed_sec40_cca_ind),
-diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
-index d24e5f1d1525..bd2e657163b8 100644
---- a/drivers/pinctrl/pinctrl-single.c
-+++ b/drivers/pinctrl/pinctrl-single.c
-@@ -1273,9 +1273,9 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
- 
- 		/* Parse pins in each row from LSB */
- 		while (mask) {
--			bit_pos = ffs(mask);
-+			bit_pos = __ffs(mask);
- 			pin_num_from_lsb = bit_pos / pcs->bits_per_pin;
--			mask_pos = ((pcs->fmask) << (bit_pos - 1));
-+			mask_pos = ((pcs->fmask) << bit_pos);
- 			val_pos = val & mask_pos;
- 			submask = mask & mask_pos;
- 
-@@ -1844,7 +1844,7 @@ static int pcs_probe(struct platform_device *pdev)
- 	ret = of_property_read_u32(np, "pinctrl-single,function-mask",
- 				   &pcs->fmask);
- 	if (!ret) {
--		pcs->fshift = ffs(pcs->fmask) - 1;
-+		pcs->fshift = __ffs(pcs->fmask);
- 		pcs->fmax = pcs->fmask >> pcs->fshift;
- 	} else {
- 		/* If mask property doesn't exist, function mux is invalid. */
-diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
-index 181ea98a63b7..2b0d70217bbd 100644
---- a/drivers/pinctrl/sh-pfc/core.c
-+++ b/drivers/pinctrl/sh-pfc/core.c
-@@ -545,7 +545,9 @@ static int sh_pfc_probe(struct platform_device *pdev)
- 			return ret;
- 	}
- 
--	pinctrl_provide_dummies();
-+	/* Enable dummy states for those platforms without pinctrl support */
-+	if (!of_have_populated_dt())
-+		pinctrl_provide_dummies();
- 
- 	ret = sh_pfc_init_ranges(pfc);
- 	if (ret < 0)
-diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
-index 00265f0435a7..8b381d69df86 100644
---- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
-+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
-@@ -485,6 +485,7 @@ static const struct sunxi_pinctrl_desc sun8i_a33_pinctrl_data = {
- 	.pins = sun8i_a33_pins,
- 	.npins = ARRAY_SIZE(sun8i_a33_pins),
- 	.irq_banks = 2,
-+	.irq_bank_base = 1,
- };
- 
- static int sun8i_a33_pinctrl_probe(struct platform_device *pdev)
-diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
-index 7a2465f5e71e..884c2b314567 100644
---- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
-+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
-@@ -578,7 +578,7 @@ static void sunxi_pinctrl_irq_release_resources(struct irq_data *d)
- static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
- {
- 	struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
--	u32 reg = sunxi_irq_cfg_reg(d->hwirq);
-+	u32 reg = sunxi_irq_cfg_reg(d->hwirq, pctl->desc->irq_bank_base);
- 	u8 index = sunxi_irq_cfg_offset(d->hwirq);
- 	unsigned long flags;
- 	u32 regval;
-@@ -625,7 +625,8 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
- static void sunxi_pinctrl_irq_ack(struct irq_data *d)
- {
- 	struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
--	u32 status_reg = sunxi_irq_status_reg(d->hwirq);
-+	u32 status_reg = sunxi_irq_status_reg(d->hwirq,
-+					      pctl->desc->irq_bank_base);
- 	u8 status_idx = sunxi_irq_status_offset(d->hwirq);
- 
- 	/* Clear the IRQ */
-@@ -635,7 +636,7 @@ static void sunxi_pinctrl_irq_ack(struct irq_data *d)
- static void sunxi_pinctrl_irq_mask(struct irq_data *d)
- {
- 	struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
--	u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
-+	u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base);
- 	u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
- 	unsigned long flags;
- 	u32 val;
-@@ -652,7 +653,7 @@ static void sunxi_pinctrl_irq_mask(struct irq_data *d)
- static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
- {
- 	struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
--	u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
-+	u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base);
- 	u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
- 	unsigned long flags;
- 	u32 val;
-@@ -744,7 +745,7 @@ static void sunxi_pinctrl_irq_handler(struct irq_desc *desc)
- 	if (bank == pctl->desc->irq_banks)
- 		return;
- 
--	reg = sunxi_irq_status_reg_from_bank(bank);
-+	reg = sunxi_irq_status_reg_from_bank(bank, pctl->desc->irq_bank_base);
- 	val = readl(pctl->membase + reg);
- 
- 	if (val) {
-@@ -1023,9 +1024,11 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
- 
- 	for (i = 0; i < pctl->desc->irq_banks; i++) {
- 		/* Mask and clear all IRQs before registering a handler */
--		writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i));
-+		writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i,
-+						pctl->desc->irq_bank_base));
- 		writel(0xffffffff,
--			pctl->membase + sunxi_irq_status_reg_from_bank(i));
-+		       pctl->membase + sunxi_irq_status_reg_from_bank(i,
-+						pctl->desc->irq_bank_base));
- 
- 		irq_set_chained_handler_and_data(pctl->irq[i],
- 						 sunxi_pinctrl_irq_handler,
-diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
-index e248e81a0f9e..0afce1ab12d0 100644
---- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
-+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
-@@ -97,6 +97,7 @@ struct sunxi_pinctrl_desc {
- 	int				npins;
- 	unsigned			pin_base;
- 	unsigned			irq_banks;
-+	unsigned			irq_bank_base;
- 	bool				irq_read_needs_mux;
- };
- 
-@@ -233,12 +234,12 @@ static inline u32 sunxi_pull_offset(u16 pin)
- 	return pin_num * PULL_PINS_BITS;
- }
- 
--static inline u32 sunxi_irq_cfg_reg(u16 irq)
-+static inline u32 sunxi_irq_cfg_reg(u16 irq, unsigned bank_base)
- {
- 	u8 bank = irq / IRQ_PER_BANK;
- 	u8 reg = (irq % IRQ_PER_BANK) / IRQ_CFG_IRQ_PER_REG * 0x04;
- 
--	return IRQ_CFG_REG + bank * IRQ_MEM_SIZE + reg;
-+	return IRQ_CFG_REG + (bank_base + bank) * IRQ_MEM_SIZE + reg;
- }
- 
- static inline u32 sunxi_irq_cfg_offset(u16 irq)
-@@ -247,16 +248,16 @@ static inline u32 sunxi_irq_cfg_offset(u16 irq)
- 	return irq_num * IRQ_CFG_IRQ_BITS;
- }
- 
--static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank)
-+static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank, unsigned bank_base)
- {
--	return IRQ_CTRL_REG + bank * IRQ_MEM_SIZE;
-+	return IRQ_CTRL_REG + (bank_base + bank) * IRQ_MEM_SIZE;
- }
- 
--static inline u32 sunxi_irq_ctrl_reg(u16 irq)
-+static inline u32 sunxi_irq_ctrl_reg(u16 irq, unsigned bank_base)
- {
- 	u8 bank = irq / IRQ_PER_BANK;
- 
--	return sunxi_irq_ctrl_reg_from_bank(bank);
-+	return sunxi_irq_ctrl_reg_from_bank(bank, bank_base);
- }
- 
- static inline u32 sunxi_irq_ctrl_offset(u16 irq)
-@@ -265,16 +266,16 @@ static inline u32 sunxi_irq_ctrl_offset(u16 irq)
- 	return irq_num * IRQ_CTRL_IRQ_BITS;
- }
- 
--static inline u32 sunxi_irq_status_reg_from_bank(u8 bank)
-+static inline u32 sunxi_irq_status_reg_from_bank(u8 bank, unsigned bank_base)
- {
--	return IRQ_STATUS_REG + bank * IRQ_MEM_SIZE;
-+	return IRQ_STATUS_REG + (bank_base + bank) * IRQ_MEM_SIZE;
- }
- 
--static inline u32 sunxi_irq_status_reg(u16 irq)
-+static inline u32 sunxi_irq_status_reg(u16 irq, unsigned bank_base)
- {
- 	u8 bank = irq / IRQ_PER_BANK;
- 
--	return sunxi_irq_status_reg_from_bank(bank);
-+	return sunxi_irq_status_reg_from_bank(bank, bank_base);
- }
- 
- static inline u32 sunxi_irq_status_offset(u16 irq)
-diff --git a/drivers/platform/x86/dell-rbtn.c b/drivers/platform/x86/dell-rbtn.c
-index cd410e392550..d33e9ad3218f 100644
---- a/drivers/platform/x86/dell-rbtn.c
-+++ b/drivers/platform/x86/dell-rbtn.c
-@@ -28,6 +28,7 @@ struct rbtn_data {
- 	enum rbtn_type type;
- 	struct rfkill *rfkill;
- 	struct input_dev *input_dev;
-+	bool suspended;
- };
- 
- 
-@@ -220,9 +221,55 @@ static const struct acpi_device_id rbtn_ids[] = {
- 	{ "", 0 },
- };
- 
-+#ifdef CONFIG_PM_SLEEP
-+static void ACPI_SYSTEM_XFACE rbtn_clear_suspended_flag(void *context)
-+{
-+	struct rbtn_data *rbtn_data = context;
-+
-+	rbtn_data->suspended = false;
-+}
-+
-+static int rbtn_suspend(struct device *dev)
-+{
-+	struct acpi_device *device = to_acpi_device(dev);
-+	struct rbtn_data *rbtn_data = acpi_driver_data(device);
-+
-+	rbtn_data->suspended = true;
-+
-+	return 0;
-+}
-+
-+static int rbtn_resume(struct device *dev)
-+{
-+	struct acpi_device *device = to_acpi_device(dev);
-+	struct rbtn_data *rbtn_data = acpi_driver_data(device);
-+	acpi_status status;
-+
-+	/*
-+	 * Upon resume, some BIOSes send an ACPI notification thet triggers
-+	 * an unwanted input event. In order to ignore it, we use a flag
-+	 * that we set at suspend and clear once we have received the extra
-+	 * ACPI notification. Since ACPI notifications are delivered
-+	 * asynchronously to drivers, we clear the flag from the workqueue
-+	 * used to deliver the notifications. This should be enough
-+	 * to have the flag cleared only after we received the extra
-+	 * notification, if any.
-+	 */
-+	status = acpi_os_execute(OSL_NOTIFY_HANDLER,
-+			 rbtn_clear_suspended_flag, rbtn_data);
-+	if (ACPI_FAILURE(status))
-+		rbtn_clear_suspended_flag(rbtn_data);
-+
-+	return 0;
-+}
-+#endif
-+
-+static SIMPLE_DEV_PM_OPS(rbtn_pm_ops, rbtn_suspend, rbtn_resume);
-+
- static struct acpi_driver rbtn_driver = {
- 	.name = "dell-rbtn",
- 	.ids = rbtn_ids,
-+	.drv.pm = &rbtn_pm_ops,
- 	.ops = {
- 		.add = rbtn_add,
- 		.remove = rbtn_remove,
-@@ -384,6 +431,15 @@ static void rbtn_notify(struct acpi_device *device, u32 event)
- {
- 	struct rbtn_data *rbtn_data = device->driver_data;
- 
-+	/*
-+	 * Some BIOSes send a notification at resume.
-+	 * Ignore it to prevent unwanted input events.
-+	 */
-+	if (rbtn_data->suspended) {
-+		dev_dbg(&device->dev, "ACPI notification ignored\n");
-+		return;
-+	}
-+
- 	if (event != 0x80) {
- 		dev_info(&device->dev, "Received unknown event (0x%x)\n",
- 			 event);
-diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
-index d78ee151c9e4..be3bc2f4edd4 100644
---- a/drivers/platform/x86/ideapad-laptop.c
-+++ b/drivers/platform/x86/ideapad-laptop.c
-@@ -865,6 +865,20 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
- 		},
- 	},
- 	{
-+		.ident = "Lenovo ideapad Y700-15ISK",
-+		.matches = {
-+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-+			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-15ISK"),
-+		},
-+	},
-+	{
-+		.ident = "Lenovo ideapad Y700 Touch-15ISK",
-+		.matches = {
-+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-+			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700 Touch-15ISK"),
-+		},
-+	},
-+	{
- 		.ident = "Lenovo ideapad Y700-17ISK",
- 		.matches = {
- 			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
-index 73833079bac8..d6baea6a7544 100644
---- a/drivers/platform/x86/toshiba_acpi.c
-+++ b/drivers/platform/x86/toshiba_acpi.c
-@@ -133,7 +133,7 @@ MODULE_LICENSE("GPL");
- /* Field definitions */
- #define HCI_ACCEL_MASK			0x7fff
- #define HCI_HOTKEY_DISABLE		0x0b
--#define HCI_HOTKEY_ENABLE		0x01
-+#define HCI_HOTKEY_ENABLE		0x09
- #define HCI_HOTKEY_SPECIAL_FUNCTIONS	0x10
- #define HCI_LCD_BRIGHTNESS_BITS		3
- #define HCI_LCD_BRIGHTNESS_SHIFT	(16-HCI_LCD_BRIGHTNESS_BITS)
-diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c
-index 423ce087cd9c..5d5adee16886 100644
---- a/drivers/pwm/pwm-brcmstb.c
-+++ b/drivers/pwm/pwm-brcmstb.c
-@@ -274,8 +274,8 @@ static int brcmstb_pwm_probe(struct platform_device *pdev)
- 
- 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- 	p->base = devm_ioremap_resource(&pdev->dev, res);
--	if (!p->base) {
--		ret = -ENOMEM;
-+	if (IS_ERR(p->base)) {
-+		ret = PTR_ERR(p->base);
- 		goto out_clk;
- 	}
- 
-diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
-index 826634ec0d5c..e0679eb399f6 100644
---- a/drivers/pwm/pwm-omap-dmtimer.c
-+++ b/drivers/pwm/pwm-omap-dmtimer.c
-@@ -31,6 +31,7 @@
- #include <linux/time.h>
- 
- #define DM_TIMER_LOAD_MIN 0xfffffffe
-+#define DM_TIMER_MAX      0xffffffff
- 
- struct pwm_omap_dmtimer_chip {
- 	struct pwm_chip chip;
-@@ -46,13 +47,9 @@ to_pwm_omap_dmtimer_chip(struct pwm_chip *chip)
- 	return container_of(chip, struct pwm_omap_dmtimer_chip, chip);
- }
- 
--static int pwm_omap_dmtimer_calc_value(unsigned long clk_rate, int ns)
-+static u32 pwm_omap_dmtimer_get_clock_cycles(unsigned long clk_rate, int ns)
- {
--	u64 c = (u64)clk_rate * ns;
--
--	do_div(c, NSEC_PER_SEC);
--
--	return DM_TIMER_LOAD_MIN - c;
-+	return DIV_ROUND_CLOSEST_ULL((u64)clk_rate * ns, NSEC_PER_SEC);
- }
- 
- static void pwm_omap_dmtimer_start(struct pwm_omap_dmtimer_chip *omap)
-@@ -99,7 +96,8 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
- 				   int duty_ns, int period_ns)
- {
- 	struct pwm_omap_dmtimer_chip *omap = to_pwm_omap_dmtimer_chip(chip);
--	int load_value, match_value;
-+	u32 period_cycles, duty_cycles;
-+	u32 load_value, match_value;
- 	struct clk *fclk;
- 	unsigned long clk_rate;
- 	bool timer_active;
-@@ -117,15 +115,13 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
- 	fclk = omap->pdata->get_fclk(omap->dm_timer);
- 	if (!fclk) {
- 		dev_err(chip->dev, "invalid pmtimer fclk\n");
--		mutex_unlock(&omap->mutex);
--		return -EINVAL;
-+		goto err_einval;
- 	}
- 
- 	clk_rate = clk_get_rate(fclk);
- 	if (!clk_rate) {
- 		dev_err(chip->dev, "invalid pmtimer fclk rate\n");
--		mutex_unlock(&omap->mutex);
--		return -EINVAL;
-+		goto err_einval;
- 	}
- 
- 	dev_dbg(chip->dev, "clk rate: %luHz\n", clk_rate);
-@@ -133,11 +129,45 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
- 	/*
- 	 * Calculate the appropriate load and match values based on the
- 	 * specified period and duty cycle. The load value determines the
--	 * cycle time and the match value determines the duty cycle.
-+	 * period time and the match value determines the duty time.
-+	 *
-+	 * The period lasts for (DM_TIMER_MAX-load_value+1) clock cycles.
-+	 * Similarly, the active time lasts (match_value-load_value+1) cycles.
-+	 * The non-active time is the remainder: (DM_TIMER_MAX-match_value)
-+	 * clock cycles.
-+	 *
-+	 * NOTE: It is required that: load_value <= match_value < DM_TIMER_MAX
-+	 *
-+	 * References:
-+	 *   OMAP4430/60/70 TRM sections 22.2.4.10 and 22.2.4.11
-+	 *   AM335x Sitara TRM sections 20.1.3.5 and 20.1.3.6
- 	 */
--	load_value = pwm_omap_dmtimer_calc_value(clk_rate, period_ns);
--	match_value = pwm_omap_dmtimer_calc_value(clk_rate,
--						  period_ns - duty_ns);
-+	period_cycles = pwm_omap_dmtimer_get_clock_cycles(clk_rate, period_ns);
-+	duty_cycles = pwm_omap_dmtimer_get_clock_cycles(clk_rate, duty_ns);
-+
-+	if (period_cycles < 2) {
-+		dev_info(chip->dev,
-+			 "period %d ns too short for clock rate %lu Hz\n",
-+			 period_ns, clk_rate);
-+		goto err_einval;
-+	}
-+
-+	if (duty_cycles < 1) {
-+		dev_dbg(chip->dev,
-+			"duty cycle %d ns is too short for clock rate %lu Hz\n",
-+			duty_ns, clk_rate);
-+		dev_dbg(chip->dev, "using minimum of 1 clock cycle\n");
-+		duty_cycles = 1;
-+	} else if (duty_cycles >= period_cycles) {
-+		dev_dbg(chip->dev,
-+			"duty cycle %d ns is too long for period %d ns at clock rate %lu Hz\n",
-+			duty_ns, period_ns, clk_rate);
-+		dev_dbg(chip->dev, "using maximum of 1 clock cycle less than period\n");
-+		duty_cycles = period_cycles - 1;
-+	}
-+
-+	load_value = (DM_TIMER_MAX - period_cycles) + 1;
-+	match_value = load_value + duty_cycles - 1;
- 
- 	/*
- 	 * We MUST stop the associated dual-mode timer before attempting to
-@@ -166,6 +196,11 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
- 	mutex_unlock(&omap->mutex);
- 
- 	return 0;
-+
-+err_einval:
-+	mutex_unlock(&omap->mutex);
-+
-+	return -EINVAL;
- }
- 
- static int pwm_omap_dmtimer_set_polarity(struct pwm_chip *chip,
-diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
-index f2e1a39ce0f3..5cf4a97e0304 100644
---- a/drivers/regulator/axp20x-regulator.c
-+++ b/drivers/regulator/axp20x-regulator.c
-@@ -221,10 +221,10 @@ static const struct regulator_desc axp22x_regulators[] = {
- 		 AXP22X_ELDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(1)),
- 	AXP_DESC(AXP22X, ELDO3, "eldo3", "eldoin", 700, 3300, 100,
- 		 AXP22X_ELDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(2)),
--	AXP_DESC_IO(AXP22X, LDO_IO0, "ldo_io0", "ips", 1800, 3300, 100,
-+	AXP_DESC_IO(AXP22X, LDO_IO0, "ldo_io0", "ips", 700, 3300, 100,
- 		    AXP22X_LDO_IO0_V_OUT, 0x1f, AXP20X_GPIO0_CTRL, 0x07,
- 		    AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
--	AXP_DESC_IO(AXP22X, LDO_IO1, "ldo_io1", "ips", 1800, 3300, 100,
-+	AXP_DESC_IO(AXP22X, LDO_IO1, "ldo_io1", "ips", 700, 3300, 100,
- 		    AXP22X_LDO_IO1_V_OUT, 0x1f, AXP20X_GPIO1_CTRL, 0x07,
- 		    AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
- 	AXP_DESC_FIXED(AXP22X, RTC_LDO, "rtc_ldo", "ips", 3000),
-diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
-index 3242ffc0cb25..09dce49609c1 100644
---- a/drivers/regulator/s2mps11.c
-+++ b/drivers/regulator/s2mps11.c
-@@ -306,7 +306,7 @@ static struct regulator_ops s2mps11_buck_ops = {
- 	.enable_mask	= S2MPS11_ENABLE_MASK			\
- }
- 
--#define regulator_desc_s2mps11_buck6_10(num, min, step) {	\
-+#define regulator_desc_s2mps11_buck67810(num, min, step) {	\
- 	.name		= "BUCK"#num,				\
- 	.id		= S2MPS11_BUCK##num,			\
- 	.ops		= &s2mps11_buck_ops,			\
-@@ -322,6 +322,22 @@ static struct regulator_ops s2mps11_buck_ops = {
- 	.enable_mask	= S2MPS11_ENABLE_MASK			\
- }
- 
-+#define regulator_desc_s2mps11_buck9 {				\
-+	.name		= "BUCK9",				\
-+	.id		= S2MPS11_BUCK9,			\
-+	.ops		= &s2mps11_buck_ops,			\
-+	.type		= REGULATOR_VOLTAGE,			\
-+	.owner		= THIS_MODULE,				\
-+	.min_uV		= MIN_3000_MV,				\
-+	.uV_step	= STEP_25_MV,				\
-+	.n_voltages	= S2MPS11_BUCK9_N_VOLTAGES,		\
-+	.ramp_delay	= S2MPS11_RAMP_DELAY,			\
-+	.vsel_reg	= S2MPS11_REG_B9CTRL2,			\
-+	.vsel_mask	= S2MPS11_BUCK9_VSEL_MASK,		\
-+	.enable_reg	= S2MPS11_REG_B9CTRL1,			\
-+	.enable_mask	= S2MPS11_ENABLE_MASK			\
-+}
-+
- static const struct regulator_desc s2mps11_regulators[] = {
- 	regulator_desc_s2mps11_ldo(1, STEP_25_MV),
- 	regulator_desc_s2mps11_ldo(2, STEP_50_MV),
-@@ -366,11 +382,11 @@ static const struct regulator_desc s2mps11_regulators[] = {
- 	regulator_desc_s2mps11_buck1_4(3),
- 	regulator_desc_s2mps11_buck1_4(4),
- 	regulator_desc_s2mps11_buck5,
--	regulator_desc_s2mps11_buck6_10(6, MIN_600_MV, STEP_6_25_MV),
--	regulator_desc_s2mps11_buck6_10(7, MIN_600_MV, STEP_6_25_MV),
--	regulator_desc_s2mps11_buck6_10(8, MIN_600_MV, STEP_6_25_MV),
--	regulator_desc_s2mps11_buck6_10(9, MIN_3000_MV, STEP_25_MV),
--	regulator_desc_s2mps11_buck6_10(10, MIN_750_MV, STEP_12_5_MV),
-+	regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV),
-+	regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_6_25_MV),
-+	regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_6_25_MV),
-+	regulator_desc_s2mps11_buck9,
-+	regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV),
- };
- 
- static struct regulator_ops s2mps14_reg_ops;
-diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
-index 58f5d3b8e981..27343e1c43ef 100644
---- a/drivers/regulator/s5m8767.c
-+++ b/drivers/regulator/s5m8767.c
-@@ -202,9 +202,10 @@ static int s5m8767_get_register(struct s5m8767_info *s5m8767, int reg_id,
- 		}
- 	}
- 
--	if (i < s5m8767->num_regulators)
--		*enable_ctrl =
--		s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
-+	if (i >= s5m8767->num_regulators)
-+		return -EINVAL;
-+
-+	*enable_ctrl = s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
- 
- 	return 0;
- }
-@@ -937,8 +938,12 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
- 			else
- 				regulators[id].vsel_mask = 0xff;
- 
--			s5m8767_get_register(s5m8767, id, &enable_reg,
-+			ret = s5m8767_get_register(s5m8767, id, &enable_reg,
- 					     &enable_val);
-+			if (ret) {
-+				dev_err(s5m8767->dev, "error reading registers\n");
-+				return ret;
-+			}
- 			regulators[id].enable_reg = enable_reg;
- 			regulators[id].enable_mask = S5M8767_ENCTRL_MASK;
- 			regulators[id].enable_val = enable_val;
-diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
-index 535050fc5e9f..08e0ff8c786a 100644
---- a/drivers/rtc/rtc-ds1685.c
-+++ b/drivers/rtc/rtc-ds1685.c
-@@ -187,9 +187,9 @@ ds1685_rtc_end_data_access(struct ds1685_priv *rtc)
-  * Only use this where you are certain another lock will not be held.
-  */
- static inline void
--ds1685_rtc_begin_ctrl_access(struct ds1685_priv *rtc, unsigned long flags)
-+ds1685_rtc_begin_ctrl_access(struct ds1685_priv *rtc, unsigned long *flags)
- {
--	spin_lock_irqsave(&rtc->lock, flags);
-+	spin_lock_irqsave(&rtc->lock, *flags);
- 	ds1685_rtc_switch_to_bank1(rtc);
- }
- 
-@@ -1300,7 +1300,7 @@ ds1685_rtc_sysfs_ctrl_regs_store(struct device *dev,
- {
- 	struct ds1685_priv *rtc = dev_get_drvdata(dev);
- 	u8 reg = 0, bit = 0, tmp;
--	unsigned long flags = 0;
-+	unsigned long flags;
- 	long int val = 0;
- 	const struct ds1685_rtc_ctrl_regs *reg_info =
- 		ds1685_rtc_sysfs_ctrl_regs_lookup(attr->attr.name);
-@@ -1321,7 +1321,7 @@ ds1685_rtc_sysfs_ctrl_regs_store(struct device *dev,
- 	bit = reg_info->bit;
- 
- 	/* Safe to spinlock during a write. */
--	ds1685_rtc_begin_ctrl_access(rtc, flags);
-+	ds1685_rtc_begin_ctrl_access(rtc, &flags);
- 	tmp = rtc->read(rtc, reg);
- 	rtc->write(rtc, reg, (val ? (tmp | bit) : (tmp & ~(bit))));
- 	ds1685_rtc_end_ctrl_access(rtc, flags);
-diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
-index 097325d96db5..b1b4746a0eab 100644
---- a/drivers/rtc/rtc-hym8563.c
-+++ b/drivers/rtc/rtc-hym8563.c
-@@ -144,7 +144,7 @@ static int hym8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
- 	 * it does not seem to carry it over a subsequent write/read.
- 	 * So we'll limit ourself to 100 years, starting at 2000 for now.
- 	 */
--	buf[6] = tm->tm_year - 100;
-+	buf[6] = bin2bcd(tm->tm_year - 100);
- 
- 	/*
- 	 * CTL1 only contains TEST-mode bits apart from stop,
-diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
-index 7184a0eda793..725dccae24e7 100644
---- a/drivers/rtc/rtc-max77686.c
-+++ b/drivers/rtc/rtc-max77686.c
-@@ -465,7 +465,7 @@ static int max77686_rtc_probe(struct platform_device *pdev)
- 
- 	info->virq = regmap_irq_get_virq(max77686->rtc_irq_data,
- 					 MAX77686_RTCIRQ_RTCA1);
--	if (!info->virq) {
-+	if (info->virq <= 0) {
- 		ret = -ENXIO;
- 		goto err_rtc;
- 	}
-diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
-index bd911bafb809..17341feadad1 100644
---- a/drivers/rtc/rtc-rx8025.c
-+++ b/drivers/rtc/rtc-rx8025.c
-@@ -65,7 +65,6 @@
- 
- static const struct i2c_device_id rx8025_id[] = {
- 	{ "rx8025", 0 },
--	{ "rv8803", 1 },
- 	{ }
- };
- MODULE_DEVICE_TABLE(i2c, rx8025_id);
-diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
-index f64c282275b3..e1b86bb01062 100644
---- a/drivers/rtc/rtc-vr41xx.c
-+++ b/drivers/rtc/rtc-vr41xx.c
-@@ -272,12 +272,13 @@ static irqreturn_t rtclong1_interrupt(int irq, void *dev_id)
- }
- 
- static const struct rtc_class_ops vr41xx_rtc_ops = {
--	.release	= vr41xx_rtc_release,
--	.ioctl		= vr41xx_rtc_ioctl,
--	.read_time	= vr41xx_rtc_read_time,
--	.set_time	= vr41xx_rtc_set_time,
--	.read_alarm	= vr41xx_rtc_read_alarm,
--	.set_alarm	= vr41xx_rtc_set_alarm,
-+	.release		= vr41xx_rtc_release,
-+	.ioctl			= vr41xx_rtc_ioctl,
-+	.read_time		= vr41xx_rtc_read_time,
-+	.set_time		= vr41xx_rtc_set_time,
-+	.read_alarm		= vr41xx_rtc_read_alarm,
-+	.set_alarm		= vr41xx_rtc_set_alarm,
-+	.alarm_irq_enable	= vr41xx_rtc_alarm_irq_enable,
- };
- 
- static int rtc_probe(struct platform_device *pdev)
-diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
-index d72867257346..3eff2a69fe08 100644
---- a/drivers/scsi/NCR5380.c
-+++ b/drivers/scsi/NCR5380.c
-@@ -760,7 +760,7 @@ static struct scsi_cmnd *dequeue_next_cmd(struct Scsi_Host *instance)
- 	struct NCR5380_cmd *ncmd;
- 	struct scsi_cmnd *cmd;
- 
--	if (list_empty(&hostdata->autosense)) {
-+	if (hostdata->sensing || list_empty(&hostdata->autosense)) {
- 		list_for_each_entry(ncmd, &hostdata->unissued, list) {
- 			cmd = NCR5380_to_scmd(ncmd);
- 			dsprintk(NDEBUG_QUEUES, instance, "dequeue: cmd=%p target=%d busy=0x%02x lun=%llu\n",
-@@ -793,7 +793,7 @@ static void requeue_cmd(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
- 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
- 	struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
- 
--	if (hostdata->sensing) {
-+	if (hostdata->sensing == cmd) {
- 		scsi_eh_restore_cmnd(cmd, &hostdata->ses);
- 		list_add(&ncmd->list, &hostdata->autosense);
- 		hostdata->sensing = NULL;
-@@ -815,15 +815,17 @@ static void NCR5380_main(struct work_struct *work)
- 	struct NCR5380_hostdata *hostdata =
- 		container_of(work, struct NCR5380_hostdata, main_task);
- 	struct Scsi_Host *instance = hostdata->host;
--	struct scsi_cmnd *cmd;
- 	int done;
- 
- 	do {
- 		done = 1;
- 
- 		spin_lock_irq(&hostdata->lock);
--		while (!hostdata->connected &&
--		       (cmd = dequeue_next_cmd(instance))) {
-+		while (!hostdata->connected && !hostdata->selecting) {
-+			struct scsi_cmnd *cmd = dequeue_next_cmd(instance);
-+
-+			if (!cmd)
-+				break;
- 
- 			dsprintk(NDEBUG_MAIN, instance, "main: dequeued %p\n", cmd);
- 
-@@ -840,8 +842,7 @@ static void NCR5380_main(struct work_struct *work)
- 			 * entire unit.
- 			 */
- 
--			cmd = NCR5380_select(instance, cmd);
--			if (!cmd) {
-+			if (!NCR5380_select(instance, cmd)) {
- 				dsprintk(NDEBUG_MAIN, instance, "main: select complete\n");
- 			} else {
- 				dsprintk(NDEBUG_MAIN | NDEBUG_QUEUES, instance,
-@@ -1056,6 +1057,11 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
- 		/* Reselection interrupt */
- 		goto out;
- 	}
-+	if (!hostdata->selecting) {
-+		/* Command was aborted */
-+		NCR5380_write(MODE_REG, MR_BASE);
-+		goto out;
-+	}
- 	if (err < 0) {
- 		NCR5380_write(MODE_REG, MR_BASE);
- 		shost_printk(KERN_ERR, instance,
-@@ -1759,9 +1765,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
- 	unsigned char msgout = NOP;
- 	int sink = 0;
- 	int len;
--#if defined(PSEUDO_DMA) || defined(REAL_DMA_POLL)
- 	int transfersize;
--#endif
- 	unsigned char *data;
- 	unsigned char phase, tmp, extended_msg[10], old_phase = 0xff;
- 	struct scsi_cmnd *cmd;
-@@ -1798,6 +1802,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
- 				do_abort(instance);
- 				cmd->result = DID_ERROR << 16;
- 				complete_cmd(instance, cmd);
-+				hostdata->connected = NULL;
- 				return;
- #endif
- 			case PHASE_DATAIN:
-@@ -1847,20 +1852,23 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
- 						sink = 1;
- 						do_abort(instance);
- 						cmd->result = DID_ERROR << 16;
--						complete_cmd(instance, cmd);
- 						/* XXX - need to source or sink data here, as appropriate */
- 					} else
- 						cmd->SCp.this_residual -= transfersize - len;
- 				} else
- #endif				/* defined(PSEUDO_DMA) || defined(REAL_DMA_POLL) */
- 				{
--					spin_unlock_irq(&hostdata->lock);
--					NCR5380_transfer_pio(instance, &phase,
--					                     (int *)&cmd->SCp.this_residual,
-+					/* Break up transfer into 3 ms chunks,
-+					 * presuming 6 accesses per handshake.
-+					 */
-+					transfersize = min((unsigned long)cmd->SCp.this_residual,
-+					                   hostdata->accesses_per_ms / 2);
-+					len = transfersize;
-+					NCR5380_transfer_pio(instance, &phase, &len,
- 					                     (unsigned char **)&cmd->SCp.ptr);
--					spin_lock_irq(&hostdata->lock);
-+					cmd->SCp.this_residual -= transfersize - len;
- 				}
--				break;
-+				return;
- 			case PHASE_MSGIN:
- 				len = 1;
- 				data = &tmp;
-@@ -2292,14 +2300,17 @@ static bool list_del_cmd(struct list_head *haystack,
-  * [disconnected -> connected ->]...
-  * [autosense -> connected ->] done
-  *
-- * If cmd is unissued then just remove it.
-- * If cmd is disconnected, try to select the target.
-- * If cmd is connected, try to send an abort message.
-- * If cmd is waiting for autosense, give it a chance to complete but check
-- * that it isn't left connected.
-  * If cmd was not found at all then presumably it has already been completed,
-  * in which case return SUCCESS to try to avoid further EH measures.
-+ *
-  * If the command has not completed yet, we must not fail to find it.
-+ * We have no option but to forget the aborted command (even if it still
-+ * lacks sense data). The mid-layer may re-issue a command that is in error
-+ * recovery (see scsi_send_eh_cmnd), but the logic and data structures in
-+ * this driver are such that a command can appear on one queue only.
-+ *
-+ * The lock protects driver data structures, but EH handlers also use it
-+ * to serialize their own execution and prevent their own re-entry.
-  */
- 
- static int NCR5380_abort(struct scsi_cmnd *cmd)
-@@ -2322,6 +2333,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
- 		         "abort: removed %p from issue queue\n", cmd);
- 		cmd->result = DID_ABORT << 16;
- 		cmd->scsi_done(cmd); /* No tag or busy flag to worry about */
-+		goto out;
- 	}
- 
- 	if (hostdata->selecting == cmd) {
-@@ -2336,58 +2348,21 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
- 	if (list_del_cmd(&hostdata->disconnected, cmd)) {
- 		dsprintk(NDEBUG_ABORT, instance,
- 		         "abort: removed %p from disconnected list\n", cmd);
--		cmd->result = DID_ERROR << 16;
--		if (!hostdata->connected)
--			NCR5380_select(instance, cmd);
--		if (hostdata->connected != cmd) {
--			complete_cmd(instance, cmd);
--			result = FAILED;
--			goto out;
--		}
-+		/* Can't call NCR5380_select() and send ABORT because that
-+		 * means releasing the lock. Need a bus reset.
-+		 */
-+		set_host_byte(cmd, DID_ERROR);
-+		complete_cmd(instance, cmd);
-+		result = FAILED;
-+		goto out;
- 	}
- 
- 	if (hostdata->connected == cmd) {
- 		dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
- 		hostdata->connected = NULL;
--		if (do_abort(instance)) {
--			set_host_byte(cmd, DID_ERROR);
--			complete_cmd(instance, cmd);
--			result = FAILED;
--			goto out;
--		}
--		set_host_byte(cmd, DID_ABORT);
- #ifdef REAL_DMA
- 		hostdata->dma_len = 0;
- #endif
--		if (cmd->cmnd[0] == REQUEST_SENSE)
--			complete_cmd(instance, cmd);
--		else {
--			struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
--
--			/* Perform autosense for this command */
--			list_add(&ncmd->list, &hostdata->autosense);
--		}
--	}
--
--	if (list_find_cmd(&hostdata->autosense, cmd)) {
--		dsprintk(NDEBUG_ABORT, instance,
--		         "abort: found %p on sense queue\n", cmd);
--		spin_unlock_irqrestore(&hostdata->lock, flags);
--		queue_work(hostdata->work_q, &hostdata->main_task);
--		msleep(1000);
--		spin_lock_irqsave(&hostdata->lock, flags);
--		if (list_del_cmd(&hostdata->autosense, cmd)) {
--			dsprintk(NDEBUG_ABORT, instance,
--			         "abort: removed %p from sense queue\n", cmd);
--			set_host_byte(cmd, DID_ABORT);
--			complete_cmd(instance, cmd);
--			goto out;
--		}
--	}
--
--	if (hostdata->connected == cmd) {
--		dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
--		hostdata->connected = NULL;
- 		if (do_abort(instance)) {
- 			set_host_byte(cmd, DID_ERROR);
- 			complete_cmd(instance, cmd);
-@@ -2395,9 +2370,14 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
- 			goto out;
- 		}
- 		set_host_byte(cmd, DID_ABORT);
--#ifdef REAL_DMA
--		hostdata->dma_len = 0;
--#endif
-+		complete_cmd(instance, cmd);
-+		goto out;
-+	}
-+
-+	if (list_del_cmd(&hostdata->autosense, cmd)) {
-+		dsprintk(NDEBUG_ABORT, instance,
-+		         "abort: removed %p from sense queue\n", cmd);
-+		set_host_byte(cmd, DID_ERROR);
- 		complete_cmd(instance, cmd);
- 	}
- 
-@@ -2450,7 +2430,16 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
- 	 * commands!
- 	 */
- 
--	hostdata->selecting = NULL;
-+	if (list_del_cmd(&hostdata->unissued, cmd)) {
-+		cmd->result = DID_RESET << 16;
-+		cmd->scsi_done(cmd);
-+	}
-+
-+	if (hostdata->selecting) {
-+		hostdata->selecting->result = DID_RESET << 16;
-+		complete_cmd(instance, hostdata->selecting);
-+		hostdata->selecting = NULL;
-+	}
- 
- 	list_for_each_entry(ncmd, &hostdata->disconnected, list) {
- 		struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
-@@ -2458,6 +2447,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
- 		set_host_byte(cmd, DID_RESET);
- 		cmd->scsi_done(cmd);
- 	}
-+	INIT_LIST_HEAD(&hostdata->disconnected);
- 
- 	list_for_each_entry(ncmd, &hostdata->autosense, list) {
- 		struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
-@@ -2465,6 +2455,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
- 		set_host_byte(cmd, DID_RESET);
- 		cmd->scsi_done(cmd);
- 	}
-+	INIT_LIST_HEAD(&hostdata->autosense);
- 
- 	if (hostdata->connected) {
- 		set_host_byte(hostdata->connected, DID_RESET);
-@@ -2472,12 +2463,6 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
- 		hostdata->connected = NULL;
- 	}
- 
--	if (hostdata->sensing) {
--		set_host_byte(hostdata->connected, DID_RESET);
--		complete_cmd(instance, hostdata->sensing);
--		hostdata->sensing = NULL;
--	}
--
- 	for (i = 0; i < 8; ++i)
- 		hostdata->busy[i] = 0;
- #ifdef REAL_DMA
-diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
-index 074878b55a0b..d044f3f273be 100644
---- a/drivers/scsi/aacraid/aacraid.h
-+++ b/drivers/scsi/aacraid/aacraid.h
-@@ -944,6 +944,7 @@ struct fib {
- 	 */
- 	struct list_head	fiblink;
- 	void			*data;
-+	u32			vector_no;
- 	struct hw_fib		*hw_fib_va;		/* Actual shared object */
- 	dma_addr_t		hw_fib_pa;		/* physical address of hw_fib*/
- };
-@@ -2113,6 +2114,7 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
- int aac_acquire_irq(struct aac_dev *dev);
- void aac_free_irq(struct aac_dev *dev);
- const char *aac_driverinfo(struct Scsi_Host *);
-+void aac_fib_vector_assign(struct aac_dev *dev);
- struct fib *aac_fib_alloc(struct aac_dev *dev);
- int aac_fib_setup(struct aac_dev *dev);
- void aac_fib_map_free(struct aac_dev *dev);
-diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
-index a1f90fe849c9..4cbf54928640 100644
---- a/drivers/scsi/aacraid/commsup.c
-+++ b/drivers/scsi/aacraid/commsup.c
-@@ -83,13 +83,38 @@ static int fib_map_alloc(struct aac_dev *dev)
- 
- void aac_fib_map_free(struct aac_dev *dev)
- {
--	pci_free_consistent(dev->pdev,
--	  dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
--	  dev->hw_fib_va, dev->hw_fib_pa);
-+	if (dev->hw_fib_va && dev->max_fib_size) {
-+		pci_free_consistent(dev->pdev,
-+		(dev->max_fib_size *
-+		(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)),
-+		dev->hw_fib_va, dev->hw_fib_pa);
-+	}
- 	dev->hw_fib_va = NULL;
- 	dev->hw_fib_pa = 0;
- }
- 
-+void aac_fib_vector_assign(struct aac_dev *dev)
-+{
-+	u32 i = 0;
-+	u32 vector = 1;
-+	struct fib *fibptr = NULL;
-+
-+	for (i = 0, fibptr = &dev->fibs[i];
-+		i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
-+		i++, fibptr++) {
-+		if ((dev->max_msix == 1) ||
-+		  (i > ((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1)
-+			- dev->vector_cap))) {
-+			fibptr->vector_no = 0;
-+		} else {
-+			fibptr->vector_no = vector;
-+			vector++;
-+			if (vector == dev->max_msix)
-+				vector = 1;
-+		}
-+	}
-+}
-+
- /**
-  *	aac_fib_setup	-	setup the fibs
-  *	@dev: Adapter to set up
-@@ -151,6 +176,12 @@ int aac_fib_setup(struct aac_dev * dev)
- 		hw_fib_pa = hw_fib_pa +
- 			dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
- 	}
-+
-+	/*
-+	 *Assign vector numbers to fibs
-+	 */
-+	aac_fib_vector_assign(dev);
-+
- 	/*
- 	 *	Add the fib chain to the free list
- 	 */
-diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
-index 76eaa38ffd6e..8a8e84548d64 100644
---- a/drivers/scsi/aacraid/linit.c
-+++ b/drivers/scsi/aacraid/linit.c
-@@ -1404,8 +1404,18 @@ static int aac_acquire_resources(struct aac_dev *dev)
- 
- 	aac_adapter_enable_int(dev);
- 
--	if (!dev->sync_mode)
-+	/*max msix may change  after EEH
-+	 * Re-assign vectors to fibs
-+	 */
-+	aac_fib_vector_assign(dev);
-+
-+	if (!dev->sync_mode) {
-+		/* After EEH recovery or suspend resume, max_msix count
-+		 * may change, therfore updating in init as well.
-+		 */
- 		aac_adapter_start(dev);
-+		dev->init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix);
-+	}
- 	return 0;
- 
- error_iounmap:
-diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
-index 2aa34ea8ceb1..bc0203f3d243 100644
---- a/drivers/scsi/aacraid/src.c
-+++ b/drivers/scsi/aacraid/src.c
-@@ -156,8 +156,8 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
- 				break;
- 			if (dev->msi_enabled && dev->max_msix > 1)
- 				atomic_dec(&dev->rrq_outstanding[vector_no]);
--			aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
- 			dev->host_rrq[index++] = 0;
-+			aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
- 			if (index == (vector_no + 1) * dev->vector_cap)
- 				index = vector_no * dev->vector_cap;
- 			dev->host_rrq_idx[vector_no] = index;
-@@ -452,36 +452,20 @@ static int aac_src_deliver_message(struct fib *fib)
- #endif
- 
- 	u16 hdr_size = le16_to_cpu(fib->hw_fib_va->header.Size);
-+	u16 vector_no;
- 
- 	atomic_inc(&q->numpending);
- 
- 	if (dev->msi_enabled && fib->hw_fib_va->header.Command != AifRequest &&
- 	    dev->max_msix > 1) {
--		u_int16_t vector_no, first_choice = 0xffff;
--
--		vector_no = dev->fibs_pushed_no % dev->max_msix;
--		do {
--			vector_no += 1;
--			if (vector_no == dev->max_msix)
--				vector_no = 1;
--			if (atomic_read(&dev->rrq_outstanding[vector_no]) <
--			    dev->vector_cap)
--				break;
--			if (0xffff == first_choice)
--				first_choice = vector_no;
--			else if (vector_no == first_choice)
--				break;
--		} while (1);
--		if (vector_no == first_choice)
--			vector_no = 0;
--		atomic_inc(&dev->rrq_outstanding[vector_no]);
--		if (dev->fibs_pushed_no == 0xffffffff)
--			dev->fibs_pushed_no = 0;
--		else
--			dev->fibs_pushed_no++;
-+		vector_no = fib->vector_no;
- 		fib->hw_fib_va->header.Handle += (vector_no << 16);
-+	} else {
-+		vector_no = 0;
- 	}
- 
-+	atomic_inc(&dev->rrq_outstanding[vector_no]);
-+
- 	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
- 		/* Calculate the amount to the fibsize bits */
- 		fibsize = (hdr_size + 127) / 128 - 1;
-diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
-index b846a4683562..fc6a83188c1e 100644
---- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
-+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
-@@ -1336,6 +1336,7 @@ ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev,
- 	case AHC_DEV_Q_TAGGED:
- 		scsi_change_queue_depth(sdev,
- 				dev->openings + dev->active);
-+		break;
- 	default:
- 		/*
- 		 * We allow the OS to queue 2 untagged transactions to
-diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
-index e65478651ca9..389825ba5d96 100644
---- a/drivers/scsi/atari_NCR5380.c
-+++ b/drivers/scsi/atari_NCR5380.c
-@@ -862,7 +862,7 @@ static struct scsi_cmnd *dequeue_next_cmd(struct Scsi_Host *instance)
- 	struct NCR5380_cmd *ncmd;
- 	struct scsi_cmnd *cmd;
- 
--	if (list_empty(&hostdata->autosense)) {
-+	if (hostdata->sensing || list_empty(&hostdata->autosense)) {
- 		list_for_each_entry(ncmd, &hostdata->unissued, list) {
- 			cmd = NCR5380_to_scmd(ncmd);
- 			dsprintk(NDEBUG_QUEUES, instance, "dequeue: cmd=%p target=%d busy=0x%02x lun=%llu\n",
-@@ -901,7 +901,7 @@ static void requeue_cmd(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
- 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
- 	struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
- 
--	if (hostdata->sensing) {
-+	if (hostdata->sensing == cmd) {
- 		scsi_eh_restore_cmnd(cmd, &hostdata->ses);
- 		list_add(&ncmd->list, &hostdata->autosense);
- 		hostdata->sensing = NULL;
-@@ -923,7 +923,6 @@ static void NCR5380_main(struct work_struct *work)
- 	struct NCR5380_hostdata *hostdata =
- 		container_of(work, struct NCR5380_hostdata, main_task);
- 	struct Scsi_Host *instance = hostdata->host;
--	struct scsi_cmnd *cmd;
- 	int done;
- 
- 	/*
-@@ -936,8 +935,11 @@ static void NCR5380_main(struct work_struct *work)
- 		done = 1;
- 
- 		spin_lock_irq(&hostdata->lock);
--		while (!hostdata->connected &&
--		       (cmd = dequeue_next_cmd(instance))) {
-+		while (!hostdata->connected && !hostdata->selecting) {
-+			struct scsi_cmnd *cmd = dequeue_next_cmd(instance);
-+
-+			if (!cmd)
-+				break;
- 
- 			dsprintk(NDEBUG_MAIN, instance, "main: dequeued %p\n", cmd);
- 
-@@ -960,8 +962,7 @@ static void NCR5380_main(struct work_struct *work)
- #ifdef SUPPORT_TAGS
- 			cmd_get_tag(cmd, cmd->cmnd[0] != REQUEST_SENSE);
- #endif
--			cmd = NCR5380_select(instance, cmd);
--			if (!cmd) {
-+			if (!NCR5380_select(instance, cmd)) {
- 				dsprintk(NDEBUG_MAIN, instance, "main: select complete\n");
- 				maybe_release_dma_irq(instance);
- 			} else {
-@@ -1257,6 +1258,11 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
- 		/* Reselection interrupt */
- 		goto out;
- 	}
-+	if (!hostdata->selecting) {
-+		/* Command was aborted */
-+		NCR5380_write(MODE_REG, MR_BASE);
-+		goto out;
-+	}
- 	if (err < 0) {
- 		NCR5380_write(MODE_REG, MR_BASE);
- 		shost_printk(KERN_ERR, instance,
-@@ -1838,9 +1844,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
- 	unsigned char msgout = NOP;
- 	int sink = 0;
- 	int len;
--#if defined(REAL_DMA)
- 	int transfersize;
--#endif
- 	unsigned char *data;
- 	unsigned char phase, tmp, extended_msg[10], old_phase = 0xff;
- 	struct scsi_cmnd *cmd;
-@@ -1909,6 +1913,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
- 				do_abort(instance);
- 				cmd->result = DID_ERROR << 16;
- 				complete_cmd(instance, cmd);
-+				hostdata->connected = NULL;
- 				return;
- #endif
- 			case PHASE_DATAIN:
-@@ -1966,7 +1971,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
- 						sink = 1;
- 						do_abort(instance);
- 						cmd->result = DID_ERROR << 16;
--						complete_cmd(instance, cmd);
- 						/* XXX - need to source or sink data here, as appropriate */
- 					} else {
- #ifdef REAL_DMA
-@@ -1983,18 +1987,22 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
- 				} else
- #endif /* defined(REAL_DMA) */
- 				{
--					spin_unlock_irq(&hostdata->lock);
--					NCR5380_transfer_pio(instance, &phase,
--					                     (int *)&cmd->SCp.this_residual,
-+					/* Break up transfer into 3 ms chunks,
-+					 * presuming 6 accesses per handshake.
-+					 */
-+					transfersize = min((unsigned long)cmd->SCp.this_residual,
-+					                   hostdata->accesses_per_ms / 2);
-+					len = transfersize;
-+					NCR5380_transfer_pio(instance, &phase, &len,
- 					                     (unsigned char **)&cmd->SCp.ptr);
--					spin_lock_irq(&hostdata->lock);
-+					cmd->SCp.this_residual -= transfersize - len;
- 				}
- #if defined(CONFIG_SUN3) && defined(REAL_DMA)
- 				/* if we had intended to dma that command clear it */
- 				if (sun3_dma_setup_done == cmd)
- 					sun3_dma_setup_done = NULL;
- #endif
--				break;
-+				return;
- 			case PHASE_MSGIN:
- 				len = 1;
- 				data = &tmp;
-@@ -2487,14 +2495,17 @@ static bool list_del_cmd(struct list_head *haystack,
-  * [disconnected -> connected ->]...
-  * [autosense -> connected ->] done
-  *
-- * If cmd is unissued then just remove it.
-- * If cmd is disconnected, try to select the target.
-- * If cmd is connected, try to send an abort message.
-- * If cmd is waiting for autosense, give it a chance to complete but check
-- * that it isn't left connected.
-  * If cmd was not found at all then presumably it has already been completed,
-  * in which case return SUCCESS to try to avoid further EH measures.
-+ *
-  * If the command has not completed yet, we must not fail to find it.
-+ * We have no option but to forget the aborted command (even if it still
-+ * lacks sense data). The mid-layer may re-issue a command that is in error
-+ * recovery (see scsi_send_eh_cmnd), but the logic and data structures in
-+ * this driver are such that a command can appear on one queue only.
-+ *
-+ * The lock protects driver data structures, but EH handlers also use it
-+ * to serialize their own execution and prevent their own re-entry.
-  */
- 
- static int NCR5380_abort(struct scsi_cmnd *cmd)
-@@ -2517,6 +2528,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
- 		         "abort: removed %p from issue queue\n", cmd);
- 		cmd->result = DID_ABORT << 16;
- 		cmd->scsi_done(cmd); /* No tag or busy flag to worry about */
-+		goto out;
- 	}
- 
- 	if (hostdata->selecting == cmd) {
-@@ -2531,58 +2543,21 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
- 	if (list_del_cmd(&hostdata->disconnected, cmd)) {
- 		dsprintk(NDEBUG_ABORT, instance,
- 		         "abort: removed %p from disconnected list\n", cmd);
--		cmd->result = DID_ERROR << 16;
--		if (!hostdata->connected)
--			NCR5380_select(instance, cmd);
--		if (hostdata->connected != cmd) {
--			complete_cmd(instance, cmd);
--			result = FAILED;
--			goto out;
--		}
-+		/* Can't call NCR5380_select() and send ABORT because that
-+		 * means releasing the lock. Need a bus reset.
-+		 */
-+		set_host_byte(cmd, DID_ERROR);
-+		complete_cmd(instance, cmd);
-+		result = FAILED;
-+		goto out;
- 	}
- 
- 	if (hostdata->connected == cmd) {
- 		dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
- 		hostdata->connected = NULL;
--		if (do_abort(instance)) {
--			set_host_byte(cmd, DID_ERROR);
--			complete_cmd(instance, cmd);
--			result = FAILED;
--			goto out;
--		}
--		set_host_byte(cmd, DID_ABORT);
- #ifdef REAL_DMA
- 		hostdata->dma_len = 0;
- #endif
--		if (cmd->cmnd[0] == REQUEST_SENSE)
--			complete_cmd(instance, cmd);
--		else {
--			struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
--
--			/* Perform autosense for this command */
--			list_add(&ncmd->list, &hostdata->autosense);
--		}
--	}
--
--	if (list_find_cmd(&hostdata->autosense, cmd)) {
--		dsprintk(NDEBUG_ABORT, instance,
--		         "abort: found %p on sense queue\n", cmd);
--		spin_unlock_irqrestore(&hostdata->lock, flags);
--		queue_work(hostdata->work_q, &hostdata->main_task);
--		msleep(1000);
--		spin_lock_irqsave(&hostdata->lock, flags);
--		if (list_del_cmd(&hostdata->autosense, cmd)) {
--			dsprintk(NDEBUG_ABORT, instance,
--			         "abort: removed %p from sense queue\n", cmd);
--			set_host_byte(cmd, DID_ABORT);
--			complete_cmd(instance, cmd);
--			goto out;
--		}
--	}
--
--	if (hostdata->connected == cmd) {
--		dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
--		hostdata->connected = NULL;
- 		if (do_abort(instance)) {
- 			set_host_byte(cmd, DID_ERROR);
- 			complete_cmd(instance, cmd);
-@@ -2590,9 +2565,14 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
- 			goto out;
- 		}
- 		set_host_byte(cmd, DID_ABORT);
--#ifdef REAL_DMA
--		hostdata->dma_len = 0;
--#endif
-+		complete_cmd(instance, cmd);
-+		goto out;
-+	}
-+
-+	if (list_del_cmd(&hostdata->autosense, cmd)) {
-+		dsprintk(NDEBUG_ABORT, instance,
-+		         "abort: removed %p from sense queue\n", cmd);
-+		set_host_byte(cmd, DID_ERROR);
- 		complete_cmd(instance, cmd);
- 	}
- 
-@@ -2646,7 +2626,16 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
- 	 * commands!
- 	 */
- 
--	hostdata->selecting = NULL;
-+	if (list_del_cmd(&hostdata->unissued, cmd)) {
-+		cmd->result = DID_RESET << 16;
-+		cmd->scsi_done(cmd);
-+	}
-+
-+	if (hostdata->selecting) {
-+		hostdata->selecting->result = DID_RESET << 16;
-+		complete_cmd(instance, hostdata->selecting);
-+		hostdata->selecting = NULL;
-+	}
- 
- 	list_for_each_entry(ncmd, &hostdata->disconnected, list) {
- 		struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
-@@ -2654,6 +2643,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
- 		set_host_byte(cmd, DID_RESET);
- 		cmd->scsi_done(cmd);
- 	}
-+	INIT_LIST_HEAD(&hostdata->disconnected);
- 
- 	list_for_each_entry(ncmd, &hostdata->autosense, list) {
- 		struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
-@@ -2661,6 +2651,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
- 		set_host_byte(cmd, DID_RESET);
- 		cmd->scsi_done(cmd);
- 	}
-+	INIT_LIST_HEAD(&hostdata->autosense);
- 
- 	if (hostdata->connected) {
- 		set_host_byte(hostdata->connected, DID_RESET);
-@@ -2668,12 +2659,6 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
- 		hostdata->connected = NULL;
- 	}
- 
--	if (hostdata->sensing) {
--		set_host_byte(hostdata->connected, DID_RESET);
--		complete_cmd(instance, hostdata->sensing);
--		hostdata->sensing = NULL;
--	}
--
- #ifdef SUPPORT_TAGS
- 	free_all_tags(hostdata);
- #endif
-diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
-index cb9072a841be..069e5c50abd0 100644
---- a/drivers/scsi/be2iscsi/be_main.c
-+++ b/drivers/scsi/be2iscsi/be_main.c
-@@ -4468,6 +4468,7 @@ put_shost:
- 	scsi_host_put(phba->shost);
- free_kset:
- 	iscsi_boot_destroy_kset(phba->boot_kset);
-+	phba->boot_kset = NULL;
- 	return -ENOMEM;
- }
- 
-diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig
-index e5647d59224f..0b331c9c0a8f 100644
---- a/drivers/scsi/device_handler/Kconfig
-+++ b/drivers/scsi/device_handler/Kconfig
-@@ -13,13 +13,13 @@ menuconfig SCSI_DH
- 
- config SCSI_DH_RDAC
- 	tristate "LSI RDAC Device Handler"
--	depends on SCSI_DH
-+	depends on SCSI_DH && SCSI
- 	help
- 	If you have a LSI RDAC select y. Otherwise, say N.
- 
- config SCSI_DH_HP_SW
- 	tristate "HP/COMPAQ MSA Device Handler"
--	depends on SCSI_DH
-+	depends on SCSI_DH && SCSI
- 	help
- 	If you have a HP/COMPAQ MSA device that requires START_STOP to
- 	be sent to start it and cannot upgrade the firmware then select y.
-@@ -27,13 +27,13 @@ config SCSI_DH_HP_SW
- 
- config SCSI_DH_EMC
- 	tristate "EMC CLARiiON Device Handler"
--	depends on SCSI_DH
-+	depends on SCSI_DH && SCSI
- 	help
- 	If you have a EMC CLARiiON select y. Otherwise, say N.
- 
- config SCSI_DH_ALUA
- 	tristate "SPC-3 ALUA Device Handler"
--	depends on SCSI_DH
-+	depends on SCSI_DH && SCSI
- 	help
- 	  SCSI Device handler for generic SPC-3 Asymmetric Logical Unit
- 	  Access (ALUA).
-diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
-index a544366a367e..f57d02c3b6cf 100644
---- a/drivers/scsi/lpfc/lpfc_init.c
-+++ b/drivers/scsi/lpfc/lpfc_init.c
-@@ -2860,7 +2860,7 @@ lpfc_online(struct lpfc_hba *phba)
- 	}
- 
- 	vports = lpfc_create_vport_work_array(phba);
--	if (vports != NULL)
-+	if (vports != NULL) {
- 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- 			struct Scsi_Host *shost;
- 			shost = lpfc_shost_from_vport(vports[i]);
-@@ -2877,7 +2877,8 @@ lpfc_online(struct lpfc_hba *phba)
- 			}
- 			spin_unlock_irq(shost->host_lock);
- 		}
--		lpfc_destroy_vport_work_array(phba, vports);
-+	}
-+	lpfc_destroy_vport_work_array(phba, vports);
- 
- 	lpfc_unblock_mgmt_io(phba);
- 	return 0;
-diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
-index 97a1c1c33b05..00ce3e269a43 100644
---- a/drivers/scsi/megaraid/megaraid_sas_base.c
-+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
-@@ -6282,12 +6282,13 @@ out:
- 	}
- 
- 	for (i = 0; i < ioc->sge_count; i++) {
--		if (kbuff_arr[i])
-+		if (kbuff_arr[i]) {
- 			dma_free_coherent(&instance->pdev->dev,
- 					  le32_to_cpu(kern_sge32[i].length),
- 					  kbuff_arr[i],
- 					  le32_to_cpu(kern_sge32[i].phys_addr));
- 			kbuff_arr[i] = NULL;
-+		}
- 	}
- 
- 	megasas_return_cmd(instance, cmd);
-diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
-index 5d0ec42a9317..634254a52301 100644
---- a/drivers/scsi/qla1280.c
-+++ b/drivers/scsi/qla1280.c
-@@ -4214,7 +4214,7 @@ static struct scsi_host_template qla1280_driver_template = {
- 	.eh_bus_reset_handler	= qla1280_eh_bus_reset,
- 	.eh_host_reset_handler	= qla1280_eh_adapter_reset,
- 	.bios_param		= qla1280_biosparam,
--	.can_queue		= 0xfffff,
-+	.can_queue		= MAX_OUTSTANDING_COMMANDS,
- 	.this_id		= -1,
- 	.sg_tablesize		= SG_ALL,
- 	.use_clustering		= ENABLE_CLUSTERING,
-diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
-index b1bf42b93fcc..1deb6adc411f 100644
---- a/drivers/scsi/scsi.c
-+++ b/drivers/scsi/scsi.c
-@@ -784,8 +784,9 @@ void scsi_attach_vpd(struct scsi_device *sdev)
- 	int pg83_supported = 0;
- 	unsigned char __rcu *vpd_buf, *orig_vpd_buf = NULL;
- 
--	if (sdev->skip_vpd_pages)
-+	if (!scsi_device_supports_vpd(sdev))
- 		return;
-+
- retry_pg0:
- 	vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
- 	if (!vpd_buf)
-diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c
-index c126966130ab..ce79de822e46 100644
---- a/drivers/scsi/scsi_common.c
-+++ b/drivers/scsi/scsi_common.c
-@@ -278,8 +278,16 @@ int scsi_set_sense_information(u8 *buf, int buf_len, u64 info)
- 		ucp[3] = 0;
- 		put_unaligned_be64(info, &ucp[4]);
- 	} else if ((buf[0] & 0x7f) == 0x70) {
--		buf[0] |= 0x80;
--		put_unaligned_be64(info, &buf[3]);
-+		/*
-+		 * Only set the 'VALID' bit if we can represent the value
-+		 * correctly; otherwise just fill out the lower bytes and
-+		 * clear the 'VALID' flag.
-+		 */
-+		if (info <= 0xffffffffUL)
-+			buf[0] |= 0x80;
-+		else
-+			buf[0] &= 0x7f;
-+		put_unaligned_be32((u32)info, &buf[3]);
- 	}
- 
- 	return 0;
-diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
-index 6a820668d442..b7cc6027cb7b 100644
---- a/drivers/scsi/scsi_scan.c
-+++ b/drivers/scsi/scsi_scan.c
-@@ -315,6 +315,7 @@ static void scsi_target_destroy(struct scsi_target *starget)
- 	struct Scsi_Host *shost = dev_to_shost(dev->parent);
- 	unsigned long flags;
- 
-+	BUG_ON(starget->state == STARGET_DEL);
- 	starget->state = STARGET_DEL;
- 	transport_destroy_device(dev);
- 	spin_lock_irqsave(shost->host_lock, flags);
-diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
-index 00bc7218a7f8..9e5f893aa3ad 100644
---- a/drivers/scsi/scsi_sysfs.c
-+++ b/drivers/scsi/scsi_sysfs.c
-@@ -1272,18 +1272,18 @@ static void __scsi_remove_target(struct scsi_target *starget)
- void scsi_remove_target(struct device *dev)
- {
- 	struct Scsi_Host *shost = dev_to_shost(dev->parent);
--	struct scsi_target *starget, *last_target = NULL;
-+	struct scsi_target *starget;
- 	unsigned long flags;
- 
- restart:
- 	spin_lock_irqsave(shost->host_lock, flags);
- 	list_for_each_entry(starget, &shost->__targets, siblings) {
- 		if (starget->state == STARGET_DEL ||
--		    starget == last_target)
-+		    starget->state == STARGET_REMOVE)
- 			continue;
- 		if (starget->dev.parent == dev || &starget->dev == dev) {
- 			kref_get(&starget->reap_ref);
--			last_target = starget;
-+			starget->state = STARGET_REMOVE;
- 			spin_unlock_irqrestore(shost->host_lock, flags);
- 			__scsi_remove_target(starget);
- 			scsi_target_reap(starget);
-diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
-index d749da765df1..974ca5b45f8d 100644
---- a/drivers/scsi/sd.c
-+++ b/drivers/scsi/sd.c
-@@ -648,7 +648,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
- 	 */
- 	if (sdkp->lbprz) {
- 		q->limits.discard_alignment = 0;
--		q->limits.discard_granularity = 1;
-+		q->limits.discard_granularity = logical_block_size;
- 	} else {
- 		q->limits.discard_alignment = sdkp->unmap_alignment *
- 			logical_block_size;
-@@ -1275,18 +1275,19 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
- 	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
- 	struct scsi_device *sdp = sdkp->device;
- 	struct Scsi_Host *host = sdp->host;
-+	sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
- 	int diskinfo[4];
- 
- 	/* default to most commonly used values */
--        diskinfo[0] = 0x40;	/* 1 << 6 */
--       	diskinfo[1] = 0x20;	/* 1 << 5 */
--       	diskinfo[2] = sdkp->capacity >> 11;
--	
-+	diskinfo[0] = 0x40;	/* 1 << 6 */
-+	diskinfo[1] = 0x20;	/* 1 << 5 */
-+	diskinfo[2] = capacity >> 11;
-+
- 	/* override with calculated, extended default, or driver values */
- 	if (host->hostt->bios_param)
--		host->hostt->bios_param(sdp, bdev, sdkp->capacity, diskinfo);
-+		host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
- 	else
--		scsicam_bios_param(bdev, sdkp->capacity, diskinfo);
-+		scsicam_bios_param(bdev, capacity, diskinfo);
- 
- 	geo->heads = diskinfo[0];
- 	geo->sectors = diskinfo[1];
-@@ -2337,14 +2338,6 @@ got_data:
- 	if (sdkp->capacity > 0xffffffff)
- 		sdp->use_16_for_rw = 1;
- 
--	/* Rescale capacity to 512-byte units */
--	if (sector_size == 4096)
--		sdkp->capacity <<= 3;
--	else if (sector_size == 2048)
--		sdkp->capacity <<= 2;
--	else if (sector_size == 1024)
--		sdkp->capacity <<= 1;
--
- 	blk_queue_physical_block_size(sdp->request_queue,
- 				      sdkp->physical_block_size);
- 	sdkp->device->sector_size = sector_size;
-@@ -2795,28 +2788,6 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
- 		sdkp->ws10 = 1;
- }
- 
--static int sd_try_extended_inquiry(struct scsi_device *sdp)
--{
--	/* Attempt VPD inquiry if the device blacklist explicitly calls
--	 * for it.
--	 */
--	if (sdp->try_vpd_pages)
--		return 1;
--	/*
--	 * Although VPD inquiries can go to SCSI-2 type devices,
--	 * some USB ones crash on receiving them, and the pages
--	 * we currently ask for are for SPC-3 and beyond
--	 */
--	if (sdp->scsi_level > SCSI_SPC_2 && !sdp->skip_vpd_pages)
--		return 1;
--	return 0;
--}
--
--static inline u32 logical_to_sectors(struct scsi_device *sdev, u32 blocks)
--{
--	return blocks << (ilog2(sdev->sector_size) - 9);
--}
--
- /**
-  *	sd_revalidate_disk - called the first time a new disk is seen,
-  *	performs disk spin up, read_capacity, etc.
-@@ -2856,7 +2827,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
- 	if (sdkp->media_present) {
- 		sd_read_capacity(sdkp, buffer);
- 
--		if (sd_try_extended_inquiry(sdp)) {
-+		if (scsi_device_supports_vpd(sdp)) {
- 			sd_read_block_provisioning(sdkp);
- 			sd_read_block_limits(sdkp);
- 			sd_read_block_characteristics(sdkp);
-@@ -2900,7 +2871,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
- 	/* Combine with controller limits */
- 	q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
- 
--	set_capacity(disk, sdkp->capacity);
-+	set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
- 	sd_config_write_same(sdkp);
- 	kfree(buffer);
- 
-diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
-index 5f2a84aff29f..654630bb7d0e 100644
---- a/drivers/scsi/sd.h
-+++ b/drivers/scsi/sd.h
-@@ -65,7 +65,7 @@ struct scsi_disk {
- 	struct device	dev;
- 	struct gendisk	*disk;
- 	atomic_t	openers;
--	sector_t	capacity;	/* size in 512-byte sectors */
-+	sector_t	capacity;	/* size in logical blocks */
- 	u32		max_xfer_blocks;
- 	u32		opt_xfer_blocks;
- 	u32		max_ws_blocks;
-@@ -146,6 +146,11 @@ static inline int scsi_medium_access_command(struct scsi_cmnd *scmd)
- 	return 0;
- }
- 
-+static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blocks)
-+{
-+	return blocks << (ilog2(sdev->sector_size) - 9);
-+}
-+
- /*
-  * A DIF-capable target device can be formatted with different
-  * protection schemes.  Currently 0 through 3 are defined:
-diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
-index 5e820674432c..ae7d9bdf409c 100644
---- a/drivers/scsi/sg.c
-+++ b/drivers/scsi/sg.c
-@@ -652,7 +652,8 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
- 	else
- 		hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
- 	hp->dxfer_len = mxsize;
--	if (hp->dxfer_direction == SG_DXFER_TO_DEV)
-+	if ((hp->dxfer_direction == SG_DXFER_TO_DEV) ||
-+	    (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV))
- 		hp->dxferp = (char __user *)buf + cmd_size;
- 	else
- 		hp->dxferp = NULL;
-diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
-index 292c04eec9ad..3ddcabb790a8 100644
---- a/drivers/scsi/storvsc_drv.c
-+++ b/drivers/scsi/storvsc_drv.c
-@@ -914,8 +914,9 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
- 		do_work = true;
- 		process_err_fn = storvsc_remove_lun;
- 		break;
--	case (SRB_STATUS_ABORTED | SRB_STATUS_AUTOSENSE_VALID):
--		if ((asc == 0x2a) && (ascq == 0x9)) {
-+	case SRB_STATUS_ABORTED:
-+		if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID &&
-+		    (asc == 0x2a) && (ascq == 0x9)) {
- 			do_work = true;
- 			process_err_fn = storvsc_device_scan;
- 			/*
-diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
-index 534c58937a56..4a65c5bda146 100644
---- a/drivers/soc/rockchip/pm_domains.c
-+++ b/drivers/soc/rockchip/pm_domains.c
-@@ -419,6 +419,7 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
- 		if (error) {
- 			dev_err(dev, "failed to handle node %s: %d\n",
- 				node->name, error);
-+			of_node_put(node);
- 			goto err_out;
- 		}
- 	}
-diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
-index ab9914ad8365..64232ecbb821 100644
---- a/drivers/spi/spi-pxa2xx.c
-+++ b/drivers/spi/spi-pxa2xx.c
-@@ -111,7 +111,7 @@ static const struct lpss_config lpss_platforms[] = {
- 		.reg_general = -1,
- 		.reg_ssp = 0x20,
- 		.reg_cs_ctrl = 0x24,
--		.reg_capabilities = 0xfc,
-+		.reg_capabilities = -1,
- 		.rx_threshold = 1,
- 		.tx_threshold_lo = 32,
- 		.tx_threshold_hi = 56,
-diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
-index 7cb1b2d710c1..475fb44c1883 100644
---- a/drivers/spi/spi-rockchip.c
-+++ b/drivers/spi/spi-rockchip.c
-@@ -265,7 +265,10 @@ static inline u32 rx_max(struct rockchip_spi *rs)
- static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
- {
- 	u32 ser;
--	struct rockchip_spi *rs = spi_master_get_devdata(spi->master);
-+	struct spi_master *master = spi->master;
-+	struct rockchip_spi *rs = spi_master_get_devdata(master);
-+
-+	pm_runtime_get_sync(rs->dev);
- 
- 	ser = readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) & SER_MASK;
- 
-@@ -290,6 +293,8 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
- 		ser &= ~(1 << spi->chip_select);
- 
- 	writel_relaxed(ser, rs->regs + ROCKCHIP_SPI_SER);
-+
-+	pm_runtime_put_sync(rs->dev);
- }
- 
- static int rockchip_spi_prepare_message(struct spi_master *master,
-diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
-index 64318fcfacf2..5044c6198332 100644
---- a/drivers/spi/spi-ti-qspi.c
-+++ b/drivers/spi/spi-ti-qspi.c
-@@ -94,6 +94,7 @@ struct ti_qspi {
- #define QSPI_FLEN(n)			((n - 1) << 0)
- #define QSPI_WLEN_MAX_BITS		128
- #define QSPI_WLEN_MAX_BYTES		16
-+#define QSPI_WLEN_MASK			QSPI_WLEN(QSPI_WLEN_MAX_BITS)
- 
- /* STATUS REGISTER */
- #define BUSY				0x01
-@@ -224,16 +225,16 @@ static inline int ti_qspi_poll_wc(struct ti_qspi *qspi)
- 	return  -ETIMEDOUT;
- }
- 
--static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
-+static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t,
-+			  int count)
- {
--	int wlen, count, xfer_len;
-+	int wlen, xfer_len;
- 	unsigned int cmd;
- 	const u8 *txbuf;
- 	u32 data;
- 
- 	txbuf = t->tx_buf;
- 	cmd = qspi->cmd | QSPI_WR_SNGL;
--	count = t->len;
- 	wlen = t->bits_per_word >> 3;	/* in bytes */
- 	xfer_len = wlen;
- 
-@@ -293,9 +294,10 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
- 	return 0;
- }
- 
--static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
-+static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
-+			 int count)
- {
--	int wlen, count;
-+	int wlen;
- 	unsigned int cmd;
- 	u8 *rxbuf;
- 
-@@ -312,7 +314,6 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
- 		cmd |= QSPI_RD_SNGL;
- 		break;
- 	}
--	count = t->len;
- 	wlen = t->bits_per_word >> 3;	/* in bytes */
- 
- 	while (count) {
-@@ -343,12 +344,13 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
- 	return 0;
- }
- 
--static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t)
-+static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t,
-+			     int count)
- {
- 	int ret;
- 
- 	if (t->tx_buf) {
--		ret = qspi_write_msg(qspi, t);
-+		ret = qspi_write_msg(qspi, t, count);
- 		if (ret) {
- 			dev_dbg(qspi->dev, "Error while writing\n");
- 			return ret;
-@@ -356,7 +358,7 @@ static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t)
- 	}
- 
- 	if (t->rx_buf) {
--		ret = qspi_read_msg(qspi, t);
-+		ret = qspi_read_msg(qspi, t, count);
- 		if (ret) {
- 			dev_dbg(qspi->dev, "Error while reading\n");
- 			return ret;
-@@ -373,7 +375,8 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
- 	struct spi_device *spi = m->spi;
- 	struct spi_transfer *t;
- 	int status = 0, ret;
--	int frame_length;
-+	unsigned int frame_len_words, transfer_len_words;
-+	int wlen;
- 
- 	/* setup device control reg */
- 	qspi->dc = 0;
-@@ -385,30 +388,38 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
- 	if (spi->mode & SPI_CS_HIGH)
- 		qspi->dc |= QSPI_CSPOL(spi->chip_select);
- 
--	frame_length = (m->frame_length << 3) / spi->bits_per_word;
--
--	frame_length = clamp(frame_length, 0, QSPI_FRAME);
-+	frame_len_words = 0;
-+	list_for_each_entry(t, &m->transfers, transfer_list)
-+		frame_len_words += t->len / (t->bits_per_word >> 3);
-+	frame_len_words = min_t(unsigned int, frame_len_words, QSPI_FRAME);
- 
- 	/* setup command reg */
- 	qspi->cmd = 0;
- 	qspi->cmd |= QSPI_EN_CS(spi->chip_select);
--	qspi->cmd |= QSPI_FLEN(frame_length);
-+	qspi->cmd |= QSPI_FLEN(frame_len_words);
- 
- 	ti_qspi_write(qspi, qspi->dc, QSPI_SPI_DC_REG);
- 
- 	mutex_lock(&qspi->list_lock);
- 
- 	list_for_each_entry(t, &m->transfers, transfer_list) {
--		qspi->cmd |= QSPI_WLEN(t->bits_per_word);
-+		qspi->cmd = ((qspi->cmd & ~QSPI_WLEN_MASK) |
-+			     QSPI_WLEN(t->bits_per_word));
-+
-+		wlen = t->bits_per_word >> 3;
-+		transfer_len_words = min(t->len / wlen, frame_len_words);
- 
--		ret = qspi_transfer_msg(qspi, t);
-+		ret = qspi_transfer_msg(qspi, t, transfer_len_words * wlen);
- 		if (ret) {
- 			dev_dbg(qspi->dev, "transfer message failed\n");
- 			mutex_unlock(&qspi->list_lock);
- 			return -EINVAL;
- 		}
- 
--		m->actual_length += t->len;
-+		m->actual_length += transfer_len_words * wlen;
-+		frame_len_words -= transfer_len_words;
-+		if (frame_len_words == 0)
-+			break;
- 	}
- 
- 	mutex_unlock(&qspi->list_lock);
-diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
-index e237e9f3312d..df560216d702 100644
---- a/drivers/staging/android/ion/ion.c
-+++ b/drivers/staging/android/ion/ion.c
-@@ -251,8 +251,10 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
- 	 * memory coming from the heaps is ready for dma, ie if it has a
- 	 * cached mapping that mapping has been invalidated
- 	 */
--	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
-+	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
- 		sg_dma_address(sg) = sg_phys(sg);
-+		sg_dma_len(sg) = sg->length;
-+	}
- 	mutex_lock(&dev->buffer_lock);
- 	ion_buffer_add(dev, buffer);
- 	mutex_unlock(&dev->buffer_lock);
-diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c
-index b8dcf5a26cc4..58d46893e5ff 100644
---- a/drivers/staging/android/ion/ion_test.c
-+++ b/drivers/staging/android/ion/ion_test.c
-@@ -285,8 +285,8 @@ static int __init ion_test_init(void)
- {
- 	ion_test_pdev = platform_device_register_simple("ion-test",
- 							-1, NULL, 0);
--	if (!ion_test_pdev)
--		return -ENODEV;
-+	if (IS_ERR(ion_test_pdev))
-+		return PTR_ERR(ion_test_pdev);
- 
- 	return platform_driver_probe(&ion_test_platform_driver, ion_test_probe);
- }
-diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
-index 940781183fac..3be10963f98b 100644
---- a/drivers/staging/comedi/drivers/das1800.c
-+++ b/drivers/staging/comedi/drivers/das1800.c
-@@ -567,14 +567,17 @@ static int das1800_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
- 	struct comedi_isadma_desc *desc;
- 	int i;
- 
--	outb(0x0, dev->iobase + DAS1800_STATUS);	/* disable conversions */
--	outb(0x0, dev->iobase + DAS1800_CONTROL_B);	/* disable interrupts and dma */
--	outb(0x0, dev->iobase + DAS1800_CONTROL_A);	/* disable and clear fifo and stop triggering */
--
--	for (i = 0; i < 2; i++) {
--		desc = &dma->desc[i];
--		if (desc->chan)
--			comedi_isadma_disable(desc->chan);
-+	/* disable and stop conversions */
-+	outb(0x0, dev->iobase + DAS1800_STATUS);
-+	outb(0x0, dev->iobase + DAS1800_CONTROL_B);
-+	outb(0x0, dev->iobase + DAS1800_CONTROL_A);
-+
-+	if (dma) {
-+		for (i = 0; i < 2; i++) {
-+			desc = &dma->desc[i];
-+			if (desc->chan)
-+				comedi_isadma_disable(desc->chan);
-+		}
- 	}
- 
- 	return 0;
-@@ -934,13 +937,14 @@ static void das1800_ai_setup_dma(struct comedi_device *dev,
- {
- 	struct das1800_private *devpriv = dev->private;
- 	struct comedi_isadma *dma = devpriv->dma;
--	struct comedi_isadma_desc *desc = &dma->desc[0];
-+	struct comedi_isadma_desc *desc;
- 	unsigned int bytes;
- 
- 	if ((devpriv->irq_dma_bits & DMA_ENABLED) == 0)
- 		return;
- 
- 	dma->cur_dma = 0;
-+	desc = &dma->desc[0];
- 
- 	/* determine a dma transfer size to fill buffer in 0.3 sec */
- 	bytes = das1800_ai_transfer_size(dev, s, desc->maxsize, 300000000);
-diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
-index 5e8130a7d670..0e9f77924e26 100644
---- a/drivers/staging/comedi/drivers/ni_mio_common.c
-+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
-@@ -246,24 +246,24 @@ static void ni_writel(struct comedi_device *dev, uint32_t data, int reg)
- {
- 	if (dev->mmio)
- 		writel(data, dev->mmio + reg);
--
--	outl(data, dev->iobase + reg);
-+	else
-+		outl(data, dev->iobase + reg);
- }
- 
- static void ni_writew(struct comedi_device *dev, uint16_t data, int reg)
- {
- 	if (dev->mmio)
- 		writew(data, dev->mmio + reg);
--
--	outw(data, dev->iobase + reg);
-+	else
-+		outw(data, dev->iobase + reg);
- }
- 
- static void ni_writeb(struct comedi_device *dev, uint8_t data, int reg)
- {
- 	if (dev->mmio)
- 		writeb(data, dev->mmio + reg);
--
--	outb(data, dev->iobase + reg);
-+	else
-+		outb(data, dev->iobase + reg);
- }
- 
- static uint32_t ni_readl(struct comedi_device *dev, int reg)
-diff --git a/drivers/staging/comedi/drivers/ni_tiocmd.c b/drivers/staging/comedi/drivers/ni_tiocmd.c
-index 437f723bb34d..823e47910004 100644
---- a/drivers/staging/comedi/drivers/ni_tiocmd.c
-+++ b/drivers/staging/comedi/drivers/ni_tiocmd.c
-@@ -92,7 +92,7 @@ static int ni_tio_input_inttrig(struct comedi_device *dev,
- 	unsigned long flags;
- 	int ret = 0;
- 
--	if (trig_num != cmd->start_src)
-+	if (trig_num != cmd->start_arg)
- 		return -EINVAL;
- 
- 	spin_lock_irqsave(&counter->lock, flags);
-diff --git a/drivers/staging/rdma/hfi1/TODO b/drivers/staging/rdma/hfi1/TODO
-index 05de0dad8762..4c6f1d7d2eaf 100644
---- a/drivers/staging/rdma/hfi1/TODO
-+++ b/drivers/staging/rdma/hfi1/TODO
-@@ -3,4 +3,4 @@ July, 2015
- - Remove unneeded file entries in sysfs
- - Remove software processing of IB protocol and place in library for use
-   by qib, ipath (if still present), hfi1, and eventually soft-roce
--
-+- Replace incorrect uAPI
-diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c
-index d57d549052c8..29ae777556d2 100644
---- a/drivers/staging/rdma/hfi1/file_ops.c
-+++ b/drivers/staging/rdma/hfi1/file_ops.c
-@@ -52,6 +52,8 @@
- #include <linux/vmalloc.h>
- #include <linux/io.h>
- 
-+#include <rdma/ib.h>
-+
- #include "hfi.h"
- #include "pio.h"
- #include "device.h"
-@@ -194,6 +196,10 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
- 	int uctxt_required = 1;
- 	int must_be_root = 0;
- 
-+	/* FIXME: This interface cannot continue out of staging */
-+	if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
-+		return -EACCES;
-+
- 	if (count < sizeof(cmd)) {
- 		ret = -EINVAL;
- 		goto bail;
-diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/wilc_spi.c
-index 86de50c9f7f5..b3d6541b3896 100644
---- a/drivers/staging/wilc1000/wilc_spi.c
-+++ b/drivers/staging/wilc1000/wilc_spi.c
-@@ -120,8 +120,6 @@ static u8 crc7(u8 crc, const u8 *buffer, u32 len)
- 
- #define USE_SPI_DMA     0
- 
--static const struct wilc1000_ops wilc1000_spi_ops;
--
- static int wilc_bus_probe(struct spi_device *spi)
- {
- 	int ret, gpio;
-diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
-index 867bc6d0a68a..43d8b42c0f22 100644
---- a/drivers/target/target_core_transport.c
-+++ b/drivers/target/target_core_transport.c
-@@ -2596,8 +2596,6 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
- 
- 	list_for_each_entry_safe(se_cmd, tmp_cmd,
- 				&se_sess->sess_wait_list, se_cmd_list) {
--		list_del_init(&se_cmd->se_cmd_list);
--
- 		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
- 			" %d\n", se_cmd, se_cmd->t_state,
- 			se_cmd->se_tfo->get_cmd_state(se_cmd));
-diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
-index b58e3fb9b311..433085a97626 100644
---- a/drivers/thermal/rockchip_thermal.c
-+++ b/drivers/thermal/rockchip_thermal.c
-@@ -693,15 +693,14 @@ static int rockchip_configure_from_dt(struct device *dev,
- 			 thermal->chip->tshut_temp);
- 		thermal->tshut_temp = thermal->chip->tshut_temp;
- 	} else {
-+		if (shut_temp > INT_MAX) {
-+			dev_err(dev, "Invalid tshut temperature specified: %d\n",
-+				shut_temp);
-+			return -ERANGE;
-+		}
- 		thermal->tshut_temp = shut_temp;
- 	}
- 
--	if (thermal->tshut_temp > INT_MAX) {
--		dev_err(dev, "Invalid tshut temperature specified: %d\n",
--			thermal->tshut_temp);
--		return -ERANGE;
--	}
--
- 	if (of_property_read_u32(np, "rockchip,hw-tshut-mode", &tshut_mode)) {
- 		dev_warn(dev,
- 			 "Missing tshut mode property, using default (%s)\n",
-diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
-index a0a8fd1235e2..d4b54653ecf8 100644
---- a/drivers/thermal/thermal_core.c
-+++ b/drivers/thermal/thermal_core.c
-@@ -454,6 +454,10 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip)
- {
- 	enum thermal_trip_type type;
- 
-+	/* Ignore disabled trip points */
-+	if (test_bit(trip, &tz->trips_disabled))
-+		return;
-+
- 	tz->ops->get_trip_type(tz, trip, &type);
- 
- 	if (type == THERMAL_TRIP_CRITICAL || type == THERMAL_TRIP_HOT)
-@@ -1800,6 +1804,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
- {
- 	struct thermal_zone_device *tz;
- 	enum thermal_trip_type trip_type;
-+	int trip_temp;
- 	int result;
- 	int count;
- 	int passive = 0;
-@@ -1871,9 +1876,15 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
- 		goto unregister;
- 
- 	for (count = 0; count < trips; count++) {
--		tz->ops->get_trip_type(tz, count, &trip_type);
-+		if (tz->ops->get_trip_type(tz, count, &trip_type))
-+			set_bit(count, &tz->trips_disabled);
- 		if (trip_type == THERMAL_TRIP_PASSIVE)
- 			passive = 1;
-+		if (tz->ops->get_trip_temp(tz, count, &trip_temp))
-+			set_bit(count, &tz->trips_disabled);
-+		/* Check for bogus trip points */
-+		if (trip_temp == 0)
-+			set_bit(count, &tz->trips_disabled);
- 	}
- 
- 	if (!passive) {
-diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
-index 0dde34e3a7c5..545c60c826a1 100644
---- a/drivers/thunderbolt/eeprom.c
-+++ b/drivers/thunderbolt/eeprom.c
-@@ -444,6 +444,7 @@ int tb_drom_read(struct tb_switch *sw)
- 	return tb_drom_parse_entries(sw);
- err:
- 	kfree(sw->drom);
-+	sw->drom = NULL;
- 	return -EIO;
- 
- }
-diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
-index c3fe026d3168..9aff37186246 100644
---- a/drivers/tty/n_gsm.c
-+++ b/drivers/tty/n_gsm.c
-@@ -2045,7 +2045,9 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm)
- 		}
- 	}
- 	spin_unlock(&gsm_mux_lock);
--	WARN_ON(i == MAX_MUX);
-+	/* open failed before registering => nothing to do */
-+	if (i == MAX_MUX)
-+		return;
- 
- 	/* In theory disconnecting DLCI 0 is sufficient but for some
- 	   modems this is apparently not the case. */
-diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
-index bbc4ce66c2c1..644ddb841d9f 100644
---- a/drivers/tty/n_hdlc.c
-+++ b/drivers/tty/n_hdlc.c
-@@ -600,7 +600,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
- 	add_wait_queue(&tty->read_wait, &wait);
- 
- 	for (;;) {
--		if (test_bit(TTY_OTHER_DONE, &tty->flags)) {
-+		if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
- 			ret = -EIO;
- 			break;
- 		}
-@@ -828,7 +828,7 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
- 		/* set bits for operations that won't block */
- 		if (n_hdlc->rx_buf_list.head)
- 			mask |= POLLIN | POLLRDNORM;	/* readable */
--		if (test_bit(TTY_OTHER_DONE, &tty->flags))
-+		if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
- 			mask |= POLLHUP;
- 		if (tty_hung_up_p(filp))
- 			mask |= POLLHUP;
-diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
-index b280abaad91b..c12def71ed37 100644
---- a/drivers/tty/n_tty.c
-+++ b/drivers/tty/n_tty.c
-@@ -1963,18 +1963,6 @@ static inline int input_available_p(struct tty_struct *tty, int poll)
- 		return ldata->commit_head - ldata->read_tail >= amt;
- }
- 
--static inline int check_other_done(struct tty_struct *tty)
--{
--	int done = test_bit(TTY_OTHER_DONE, &tty->flags);
--	if (done) {
--		/* paired with cmpxchg() in check_other_closed(); ensures
--		 * read buffer head index is not stale
--		 */
--		smp_mb__after_atomic();
--	}
--	return done;
--}
--
- /**
-  *	copy_from_read_buf	-	copy read data directly
-  *	@tty: terminal device
-@@ -2170,7 +2158,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
- 	struct n_tty_data *ldata = tty->disc_data;
- 	unsigned char __user *b = buf;
- 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
--	int c, done;
-+	int c;
- 	int minimum, time;
- 	ssize_t retval = 0;
- 	long timeout;
-@@ -2238,32 +2226,35 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
- 		    ((minimum - (b - buf)) >= 1))
- 			ldata->minimum_to_wake = (minimum - (b - buf));
- 
--		done = check_other_done(tty);
--
- 		if (!input_available_p(tty, 0)) {
--			if (done) {
--				retval = -EIO;
--				break;
--			}
--			if (tty_hung_up_p(file))
--				break;
--			if (!timeout)
--				break;
--			if (file->f_flags & O_NONBLOCK) {
--				retval = -EAGAIN;
--				break;
--			}
--			if (signal_pending(current)) {
--				retval = -ERESTARTSYS;
--				break;
--			}
- 			up_read(&tty->termios_rwsem);
-+			tty_buffer_flush_work(tty->port);
-+			down_read(&tty->termios_rwsem);
-+			if (!input_available_p(tty, 0)) {
-+				if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
-+					retval = -EIO;
-+					break;
-+				}
-+				if (tty_hung_up_p(file))
-+					break;
-+				if (!timeout)
-+					break;
-+				if (file->f_flags & O_NONBLOCK) {
-+					retval = -EAGAIN;
-+					break;
-+				}
-+				if (signal_pending(current)) {
-+					retval = -ERESTARTSYS;
-+					break;
-+				}
-+				up_read(&tty->termios_rwsem);
- 
--			timeout = wait_woken(&wait, TASK_INTERRUPTIBLE,
--					     timeout);
-+				timeout = wait_woken(&wait, TASK_INTERRUPTIBLE,
-+						timeout);
- 
--			down_read(&tty->termios_rwsem);
--			continue;
-+				down_read(&tty->termios_rwsem);
-+				continue;
-+			}
- 		}
- 
- 		if (ldata->icanon && !L_EXTPROC(tty)) {
-@@ -2445,12 +2436,17 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
- 
- 	poll_wait(file, &tty->read_wait, wait);
- 	poll_wait(file, &tty->write_wait, wait);
--	if (check_other_done(tty))
--		mask |= POLLHUP;
- 	if (input_available_p(tty, 1))
- 		mask |= POLLIN | POLLRDNORM;
-+	else {
-+		tty_buffer_flush_work(tty->port);
-+		if (input_available_p(tty, 1))
-+			mask |= POLLIN | POLLRDNORM;
-+	}
- 	if (tty->packet && tty->link->ctrl_status)
- 		mask |= POLLPRI | POLLIN | POLLRDNORM;
-+	if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
-+		mask |= POLLHUP;
- 	if (tty_hung_up_p(file))
- 		mask |= POLLHUP;
- 	if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) {
-diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
-index 2348fa613707..6427a39bd360 100644
---- a/drivers/tty/pty.c
-+++ b/drivers/tty/pty.c
-@@ -59,7 +59,7 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
- 	if (!tty->link)
- 		return;
- 	set_bit(TTY_OTHER_CLOSED, &tty->link->flags);
--	tty_flip_buffer_push(tty->link->port);
-+	wake_up_interruptible(&tty->link->read_wait);
- 	wake_up_interruptible(&tty->link->write_wait);
- 	if (tty->driver->subtype == PTY_TYPE_MASTER) {
- 		set_bit(TTY_OTHER_CLOSED, &tty->flags);
-@@ -247,9 +247,7 @@ static int pty_open(struct tty_struct *tty, struct file *filp)
- 		goto out;
- 
- 	clear_bit(TTY_IO_ERROR, &tty->flags);
--	/* TTY_OTHER_CLOSED must be cleared before TTY_OTHER_DONE */
- 	clear_bit(TTY_OTHER_CLOSED, &tty->link->flags);
--	clear_bit(TTY_OTHER_DONE, &tty->link->flags);
- 	set_bit(TTY_THROTTLED, &tty->flags);
- 	return 0;
- 
-diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c
-index 88531a36b69c..ed489880e62b 100644
---- a/drivers/tty/serial/8250/8250_mid.c
-+++ b/drivers/tty/serial/8250/8250_mid.c
-@@ -14,6 +14,7 @@
- #include <linux/pci.h>
- 
- #include <linux/dma/hsu.h>
-+#include <linux/8250_pci.h>
- 
- #include "8250.h"
- 
-@@ -24,6 +25,7 @@
- #define PCI_DEVICE_ID_INTEL_DNV_UART	0x19d8
- 
- /* Intel MID Specific registers */
-+#define INTEL_MID_UART_DNV_FISR		0x08
- #define INTEL_MID_UART_PS		0x30
- #define INTEL_MID_UART_MUL		0x34
- #define INTEL_MID_UART_DIV		0x38
-@@ -31,6 +33,7 @@
- struct mid8250;
- 
- struct mid8250_board {
-+	unsigned int flags;
- 	unsigned long freq;
- 	unsigned int base_baud;
- 	int (*setup)(struct mid8250 *, struct uart_port *p);
-@@ -88,16 +91,16 @@ static int tng_setup(struct mid8250 *mid, struct uart_port *p)
- static int dnv_handle_irq(struct uart_port *p)
- {
- 	struct mid8250 *mid = p->private_data;
--	int ret;
--
--	ret = hsu_dma_irq(&mid->dma_chip, 0);
--	ret |= hsu_dma_irq(&mid->dma_chip, 1);
--
--	/* For now, letting the HW generate separate interrupt for the UART */
--	if (ret)
--		return ret;
--
--	return serial8250_handle_irq(p, serial_port_in(p, UART_IIR));
-+	unsigned int fisr = serial_port_in(p, INTEL_MID_UART_DNV_FISR);
-+	int ret = IRQ_NONE;
-+
-+	if (fisr & BIT(2))
-+		ret |= hsu_dma_irq(&mid->dma_chip, 1);
-+	if (fisr & BIT(1))
-+		ret |= hsu_dma_irq(&mid->dma_chip, 0);
-+	if (fisr & BIT(0))
-+		ret |= serial8250_handle_irq(p, serial_port_in(p, UART_IIR));
-+	return ret;
- }
- 
- #define DNV_DMA_CHAN_OFFSET 0x80
-@@ -106,12 +109,13 @@ static int dnv_setup(struct mid8250 *mid, struct uart_port *p)
- {
- 	struct hsu_dma_chip *chip = &mid->dma_chip;
- 	struct pci_dev *pdev = to_pci_dev(p->dev);
-+	unsigned int bar = FL_GET_BASE(mid->board->flags);
- 	int ret;
- 
- 	chip->dev = &pdev->dev;
- 	chip->irq = pdev->irq;
- 	chip->regs = p->membase;
--	chip->length = pci_resource_len(pdev, 0);
-+	chip->length = pci_resource_len(pdev, bar);
- 	chip->offset = DNV_DMA_CHAN_OFFSET;
- 
- 	/* Falling back to PIO mode if DMA probing fails */
-@@ -217,6 +221,7 @@ static int mid8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
- {
- 	struct uart_8250_port uart;
- 	struct mid8250 *mid;
-+	unsigned int bar;
- 	int ret;
- 
- 	ret = pcim_enable_device(pdev);
-@@ -230,6 +235,7 @@ static int mid8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
- 		return -ENOMEM;
- 
- 	mid->board = (struct mid8250_board *)id->driver_data;
-+	bar = FL_GET_BASE(mid->board->flags);
- 
- 	memset(&uart, 0, sizeof(struct uart_8250_port));
- 
-@@ -242,8 +248,8 @@ static int mid8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
- 	uart.port.flags = UPF_SHARE_IRQ | UPF_FIXED_PORT | UPF_FIXED_TYPE;
- 	uart.port.set_termios = mid8250_set_termios;
- 
--	uart.port.mapbase = pci_resource_start(pdev, 0);
--	uart.port.membase = pcim_iomap(pdev, 0, 0);
-+	uart.port.mapbase = pci_resource_start(pdev, bar);
-+	uart.port.membase = pcim_iomap(pdev, bar, 0);
- 	if (!uart.port.membase)
- 		return -ENOMEM;
- 
-@@ -282,18 +288,21 @@ static void mid8250_remove(struct pci_dev *pdev)
- }
- 
- static const struct mid8250_board pnw_board = {
-+	.flags = FL_BASE0,
- 	.freq = 50000000,
- 	.base_baud = 115200,
- 	.setup = pnw_setup,
- };
- 
- static const struct mid8250_board tng_board = {
-+	.flags = FL_BASE0,
- 	.freq = 38400000,
- 	.base_baud = 1843200,
- 	.setup = tng_setup,
- };
- 
- static const struct mid8250_board dnv_board = {
-+	.flags = FL_BASE1,
- 	.freq = 133333333,
- 	.base_baud = 115200,
- 	.setup = dnv_setup,
-diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
-index 7cd6f9a90542..c1d4a8fa9be8 100644
---- a/drivers/tty/serial/8250/8250_pci.c
-+++ b/drivers/tty/serial/8250/8250_pci.c
-@@ -1401,6 +1401,9 @@ byt_set_termios(struct uart_port *p, struct ktermios *termios,
- 	unsigned long m, n;
- 	u32 reg;
- 
-+	/* Gracefully handle the B0 case: fall back to B9600 */
-+	fuart = fuart ? fuart : 9600 * 16;
-+
- 	/* Get Fuart closer to Fref */
- 	fuart *= rounddown_pow_of_two(fref / fuart);
- 
-diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
-index 8d262bce97e4..720b9465b12e 100644
---- a/drivers/tty/serial/8250/8250_port.c
-+++ b/drivers/tty/serial/8250/8250_port.c
-@@ -731,22 +731,16 @@ static int size_fifo(struct uart_8250_port *up)
-  */
- static unsigned int autoconfig_read_divisor_id(struct uart_8250_port *p)
- {
--	unsigned char old_dll, old_dlm, old_lcr;
--	unsigned int id;
-+	unsigned char old_lcr;
-+	unsigned int id, old_dl;
- 
- 	old_lcr = serial_in(p, UART_LCR);
- 	serial_out(p, UART_LCR, UART_LCR_CONF_MODE_A);
-+	old_dl = serial_dl_read(p);
-+	serial_dl_write(p, 0);
-+	id = serial_dl_read(p);
-+	serial_dl_write(p, old_dl);
- 
--	old_dll = serial_in(p, UART_DLL);
--	old_dlm = serial_in(p, UART_DLM);
--
--	serial_out(p, UART_DLL, 0);
--	serial_out(p, UART_DLM, 0);
--
--	id = serial_in(p, UART_DLL) | serial_in(p, UART_DLM) << 8;
--
--	serial_out(p, UART_DLL, old_dll);
--	serial_out(p, UART_DLM, old_dlm);
- 	serial_out(p, UART_LCR, old_lcr);
- 
- 	return id;
-diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
-index 1c0884d8ef32..172a8ccb0b63 100644
---- a/drivers/tty/serial/atmel_serial.c
-+++ b/drivers/tty/serial/atmel_serial.c
-@@ -273,6 +273,13 @@ static bool atmel_use_dma_rx(struct uart_port *port)
- 	return atmel_port->use_dma_rx;
- }
- 
-+static bool atmel_use_fifo(struct uart_port *port)
-+{
-+	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
-+
-+	return atmel_port->fifo_size;
-+}
-+
- static unsigned int atmel_get_lines_status(struct uart_port *port)
- {
- 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
-@@ -2082,7 +2089,12 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
- 		mode |= ATMEL_US_USMODE_RS485;
- 	} else if (termios->c_cflag & CRTSCTS) {
- 		/* RS232 with hardware handshake (RTS/CTS) */
--		mode |= ATMEL_US_USMODE_HWHS;
-+		if (atmel_use_dma_rx(port) && !atmel_use_fifo(port)) {
-+			dev_info(port->dev, "not enabling hardware flow control because DMA is used");
-+			termios->c_cflag &= ~CRTSCTS;
-+		} else {
-+			mode |= ATMEL_US_USMODE_HWHS;
-+		}
- 	} else {
- 		/* RS232 without hadware handshake */
- 		mode |= ATMEL_US_USMODE_NORMAL;
-diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
-index d72cd736bdc6..8320173af846 100644
---- a/drivers/tty/serial/samsung.c
-+++ b/drivers/tty/serial/samsung.c
-@@ -1263,6 +1263,8 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
- 	/* check to see if we need  to change clock source */
- 
- 	if (ourport->baudclk != clk) {
-+		clk_prepare_enable(clk);
-+
- 		s3c24xx_serial_setsource(port, clk_sel);
- 
- 		if (!IS_ERR(ourport->baudclk)) {
-@@ -1270,8 +1272,6 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
- 			ourport->baudclk = ERR_PTR(-EINVAL);
- 		}
- 
--		clk_prepare_enable(clk);
--
- 		ourport->baudclk = clk;
- 		ourport->baudclk_rate = clk ? clk_get_rate(clk) : 0;
- 	}
-diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
-index 3cd31e0d4bd9..fb31eecb708d 100644
---- a/drivers/tty/tty_buffer.c
-+++ b/drivers/tty/tty_buffer.c
-@@ -37,29 +37,6 @@
- 
- #define TTY_BUFFER_PAGE	(((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)
- 
--/*
-- * If all tty flip buffers have been processed by flush_to_ldisc() or
-- * dropped by tty_buffer_flush(), check if the linked pty has been closed.
-- * If so, wake the reader/poll to process
-- */
--static inline void check_other_closed(struct tty_struct *tty)
--{
--	unsigned long flags, old;
--
--	/* transition from TTY_OTHER_CLOSED => TTY_OTHER_DONE must be atomic */
--	for (flags = ACCESS_ONCE(tty->flags);
--	     test_bit(TTY_OTHER_CLOSED, &flags);
--	     ) {
--		old = flags;
--		__set_bit(TTY_OTHER_DONE, &flags);
--		flags = cmpxchg(&tty->flags, old, flags);
--		if (old == flags) {
--			wake_up_interruptible(&tty->read_wait);
--			break;
--		}
--	}
--}
--
- /**
-  *	tty_buffer_lock_exclusive	-	gain exclusive access to buffer
-  *	tty_buffer_unlock_exclusive	-	release exclusive access
-@@ -254,8 +231,6 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
- 	if (ld && ld->ops->flush_buffer)
- 		ld->ops->flush_buffer(tty);
- 
--	check_other_closed(tty);
--
- 	atomic_dec(&buf->priority);
- 	mutex_unlock(&buf->lock);
- }
-@@ -505,10 +480,8 @@ static void flush_to_ldisc(struct work_struct *work)
- 		 */
- 		count = smp_load_acquire(&head->commit) - head->read;
- 		if (!count) {
--			if (next == NULL) {
--				check_other_closed(tty);
-+			if (next == NULL)
- 				break;
--			}
- 			buf->head = next;
- 			tty_buffer_free(port, head);
- 			continue;
-@@ -597,3 +570,8 @@ bool tty_buffer_cancel_work(struct tty_port *port)
- {
- 	return cancel_work_sync(&port->buf.work);
- }
-+
-+void tty_buffer_flush_work(struct tty_port *port)
-+{
-+	flush_work(&port->buf.work);
-+}
-diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
-index bd51bdd0a7bf..2b5382ea4842 100644
---- a/drivers/tty/vt/vt.c
-+++ b/drivers/tty/vt/vt.c
-@@ -3583,9 +3583,10 @@ static int do_register_con_driver(const struct consw *csw, int first, int last)
- 		goto err;
- 
- 	desc = csw->con_startup();
--
--	if (!desc)
-+	if (!desc) {
-+		retval = -ENODEV;
- 		goto err;
-+	}
- 
- 	retval = -EINVAL;
- 
-diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
-index fa4e23930614..d37fdcc3143c 100644
---- a/drivers/usb/class/cdc-acm.c
-+++ b/drivers/usb/class/cdc-acm.c
-@@ -1114,6 +1114,9 @@ static int acm_probe(struct usb_interface *intf,
- 	if (quirks == NO_UNION_NORMAL) {
- 		data_interface = usb_ifnum_to_if(usb_dev, 1);
- 		control_interface = usb_ifnum_to_if(usb_dev, 0);
-+		/* we would crash */
-+		if (!data_interface || !control_interface)
-+			return -ENODEV;
- 		goto skip_normal_probe;
- 	}
- 
-diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
-index 56593a9a8726..dadd1e8dfe09 100644
---- a/drivers/usb/core/driver.c
-+++ b/drivers/usb/core/driver.c
-@@ -284,7 +284,7 @@ static int usb_probe_interface(struct device *dev)
- 	struct usb_device *udev = interface_to_usbdev(intf);
- 	const struct usb_device_id *id;
- 	int error = -ENODEV;
--	int lpm_disable_error;
-+	int lpm_disable_error = -ENODEV;
- 
- 	dev_dbg(dev, "%s\n", __func__);
- 
-@@ -336,12 +336,14 @@ static int usb_probe_interface(struct device *dev)
- 	 * setting during probe, that should also be fine.  usb_set_interface()
- 	 * will attempt to disable LPM, and fail if it can't disable it.
- 	 */
--	lpm_disable_error = usb_unlocked_disable_lpm(udev);
--	if (lpm_disable_error && driver->disable_hub_initiated_lpm) {
--		dev_err(&intf->dev, "%s Failed to disable LPM for driver %s\n.",
--				__func__, driver->name);
--		error = lpm_disable_error;
--		goto err;
-+	if (driver->disable_hub_initiated_lpm) {
-+		lpm_disable_error = usb_unlocked_disable_lpm(udev);
-+		if (lpm_disable_error) {
-+			dev_err(&intf->dev, "%s Failed to disable LPM for driver %s\n.",
-+					__func__, driver->name);
-+			error = lpm_disable_error;
-+			goto err;
-+		}
- 	}
- 
- 	/* Carry out a deferred switch to altsetting 0 */
-@@ -391,7 +393,8 @@ static int usb_unbind_interface(struct device *dev)
- 	struct usb_interface *intf = to_usb_interface(dev);
- 	struct usb_host_endpoint *ep, **eps = NULL;
- 	struct usb_device *udev;
--	int i, j, error, r, lpm_disable_error;
-+	int i, j, error, r;
-+	int lpm_disable_error = -ENODEV;
- 
- 	intf->condition = USB_INTERFACE_UNBINDING;
- 
-@@ -399,12 +402,13 @@ static int usb_unbind_interface(struct device *dev)
- 	udev = interface_to_usbdev(intf);
- 	error = usb_autoresume_device(udev);
- 
--	/* Hub-initiated LPM policy may change, so attempt to disable LPM until
-+	/* If hub-initiated LPM policy may change, attempt to disable LPM until
- 	 * the driver is unbound.  If LPM isn't disabled, that's fine because it
- 	 * wouldn't be enabled unless all the bound interfaces supported
- 	 * hub-initiated LPM.
- 	 */
--	lpm_disable_error = usb_unlocked_disable_lpm(udev);
-+	if (driver->disable_hub_initiated_lpm)
-+		lpm_disable_error = usb_unlocked_disable_lpm(udev);
- 
- 	/*
- 	 * Terminate all URBs for this interface unless the driver
-@@ -502,11 +506,15 @@ static int usb_unbind_interface(struct device *dev)
- int usb_driver_claim_interface(struct usb_driver *driver,
- 				struct usb_interface *iface, void *priv)
- {
--	struct device *dev = &iface->dev;
-+	struct device *dev;
- 	struct usb_device *udev;
- 	int retval = 0;
--	int lpm_disable_error;
-+	int lpm_disable_error = -ENODEV;
- 
-+	if (!iface)
-+		return -ENODEV;
-+
-+	dev = &iface->dev;
- 	if (dev->driver)
- 		return -EBUSY;
- 
-@@ -522,12 +530,14 @@ int usb_driver_claim_interface(struct usb_driver *driver,
- 
- 	iface->condition = USB_INTERFACE_BOUND;
- 
--	/* Disable LPM until this driver is bound. */
--	lpm_disable_error = usb_unlocked_disable_lpm(udev);
--	if (lpm_disable_error && driver->disable_hub_initiated_lpm) {
--		dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n.",
--				__func__, driver->name);
--		return -ENOMEM;
-+	/* See the comment about disabling LPM in usb_probe_interface(). */
-+	if (driver->disable_hub_initiated_lpm) {
-+		lpm_disable_error = usb_unlocked_disable_lpm(udev);
-+		if (lpm_disable_error) {
-+			dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n.",
-+					__func__, driver->name);
-+			return -ENOMEM;
-+		}
- 	}
- 
- 	/* Claimed interfaces are initially inactive (suspended) and
-diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
-index 9eb1cff28bd4..b8b580e5ae6e 100644
---- a/drivers/usb/core/hcd-pci.c
-+++ b/drivers/usb/core/hcd-pci.c
-@@ -74,6 +74,15 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd,
- 		if (companion->bus != pdev->bus ||
- 				PCI_SLOT(companion->devfn) != slot)
- 			continue;
-+
-+		/*
-+		 * Companion device should be either UHCI,OHCI or EHCI host
-+		 * controller, otherwise skip.
-+		 */
-+		if (companion->class != CL_UHCI && companion->class != CL_OHCI &&
-+				companion->class != CL_EHCI)
-+			continue;
-+
- 		companion_hcd = pci_get_drvdata(companion);
- 		if (!companion_hcd || !companion_hcd->self.root_hub)
- 			continue;
-diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
-index 51b436918f78..84f65743f29a 100644
---- a/drivers/usb/core/hub.c
-+++ b/drivers/usb/core/hub.c
-@@ -4292,7 +4292,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
- {
- 	struct usb_device	*hdev = hub->hdev;
- 	struct usb_hcd		*hcd = bus_to_hcd(hdev->bus);
--	int			i, j, retval;
-+	int			retries, operations, retval, i;
- 	unsigned		delay = HUB_SHORT_RESET_TIME;
- 	enum usb_device_speed	oldspeed = udev->speed;
- 	const char		*speed;
-@@ -4394,7 +4394,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
- 	 * first 8 bytes of the device descriptor to get the ep0 maxpacket
- 	 * value.
- 	 */
--	for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) {
-+	for (retries = 0; retries < GET_DESCRIPTOR_TRIES; (++retries, msleep(100))) {
- 		bool did_new_scheme = false;
- 
- 		if (use_new_scheme(udev, retry_counter)) {
-@@ -4421,7 +4421,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
- 			 * 255 is for WUSB devices, we actually need to use
- 			 * 512 (WUSB1.0[4.8.1]).
- 			 */
--			for (j = 0; j < 3; ++j) {
-+			for (operations = 0; operations < 3; ++operations) {
- 				buf->bMaxPacketSize0 = 0;
- 				r = usb_control_msg(udev, usb_rcvaddr0pipe(),
- 					USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
-@@ -4441,7 +4441,13 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
- 						r = -EPROTO;
- 					break;
- 				}
--				if (r == 0)
-+				/*
-+				 * Some devices time out if they are powered on
-+				 * when already connected. They need a second
-+				 * reset. But only on the first attempt,
-+				 * lest we get into a time out/reset loop
-+				 */
-+				if (r == 0  || (r == -ETIMEDOUT && retries == 0))
- 					break;
- 			}
- 			udev->descriptor.bMaxPacketSize0 =
-@@ -4473,7 +4479,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
- 		 * authorization will assign the final address.
- 		 */
- 		if (udev->wusb == 0) {
--			for (j = 0; j < SET_ADDRESS_TRIES; ++j) {
-+			for (operations = 0; operations < SET_ADDRESS_TRIES; ++operations) {
- 				retval = hub_set_address(udev, devnum);
- 				if (retval >= 0)
- 					break;
-diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
-index 14718a9ffcfb..460c855be0d0 100644
---- a/drivers/usb/core/port.c
-+++ b/drivers/usb/core/port.c
-@@ -249,18 +249,12 @@ static int usb_port_runtime_suspend(struct device *dev)
- 
- 	return retval;
- }
--
--static int usb_port_prepare(struct device *dev)
--{
--	return 1;
--}
- #endif
- 
- static const struct dev_pm_ops usb_port_pm_ops = {
- #ifdef CONFIG_PM
- 	.runtime_suspend =	usb_port_runtime_suspend,
- 	.runtime_resume =	usb_port_runtime_resume,
--	.prepare =		usb_port_prepare,
- #endif
- };
- 
-diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
-index ebb29caa3fe4..77e4c9bc0ab1 100644
---- a/drivers/usb/core/usb.c
-+++ b/drivers/usb/core/usb.c
-@@ -311,13 +311,7 @@ static int usb_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
- 
- static int usb_dev_prepare(struct device *dev)
- {
--	struct usb_device *udev = to_usb_device(dev);
--
--	/* Return 0 if the current wakeup setting is wrong, otherwise 1 */
--	if (udev->do_remote_wakeup != device_may_wakeup(dev))
--		return 0;
--
--	return 1;
-+	return 0;		/* Implement eventually? */
- }
- 
- static void usb_dev_complete(struct device *dev)
-diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
-index cf43e9e18368..97ef75af9632 100644
---- a/drivers/usb/gadget/function/f_fs.c
-+++ b/drivers/usb/gadget/function/f_fs.c
-@@ -646,24 +646,23 @@ static void ffs_user_copy_worker(struct work_struct *work)
- 						   work);
- 	int ret = io_data->req->status ? io_data->req->status :
- 					 io_data->req->actual;
-+	bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
- 
- 	if (io_data->read && ret > 0) {
- 		use_mm(io_data->mm);
- 		ret = copy_to_iter(io_data->buf, ret, &io_data->data);
--		if (iov_iter_count(&io_data->data))
-+		if (ret != io_data->req->actual && iov_iter_count(&io_data->data))
- 			ret = -EFAULT;
- 		unuse_mm(io_data->mm);
- 	}
- 
- 	io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
- 
--	if (io_data->ffs->ffs_eventfd &&
--	    !(io_data->kiocb->ki_flags & IOCB_EVENTFD))
-+	if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
- 		eventfd_signal(io_data->ffs->ffs_eventfd, 1);
- 
- 	usb_ep_free_request(io_data->ep, io_data->req);
- 
--	io_data->kiocb->private = NULL;
- 	if (io_data->read)
- 		kfree(io_data->to_free);
- 	kfree(io_data->buf);
-diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
-index 223ccf89d226..a4f664062e0c 100644
---- a/drivers/usb/gadget/function/f_mass_storage.c
-+++ b/drivers/usb/gadget/function/f_mass_storage.c
-@@ -2977,25 +2977,6 @@ void fsg_common_set_inquiry_string(struct fsg_common *common, const char *vn,
- }
- EXPORT_SYMBOL_GPL(fsg_common_set_inquiry_string);
- 
--int fsg_common_run_thread(struct fsg_common *common)
--{
--	common->state = FSG_STATE_IDLE;
--	/* Tell the thread to start working */
--	common->thread_task =
--		kthread_create(fsg_main_thread, common, "file-storage");
--	if (IS_ERR(common->thread_task)) {
--		common->state = FSG_STATE_TERMINATED;
--		return PTR_ERR(common->thread_task);
--	}
--
--	DBG(common, "I/O thread pid: %d\n", task_pid_nr(common->thread_task));
--
--	wake_up_process(common->thread_task);
--
--	return 0;
--}
--EXPORT_SYMBOL_GPL(fsg_common_run_thread);
--
- static void fsg_common_release(struct kref *ref)
- {
- 	struct fsg_common *common = container_of(ref, struct fsg_common, ref);
-@@ -3005,6 +2986,7 @@ static void fsg_common_release(struct kref *ref)
- 	if (common->state != FSG_STATE_TERMINATED) {
- 		raise_exception(common, FSG_STATE_EXIT);
- 		wait_for_completion(&common->thread_notifier);
-+		common->thread_task = NULL;
- 	}
- 
- 	for (i = 0; i < ARRAY_SIZE(common->luns); ++i) {
-@@ -3050,9 +3032,21 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
- 		if (ret)
- 			return ret;
- 		fsg_common_set_inquiry_string(fsg->common, NULL, NULL);
--		ret = fsg_common_run_thread(fsg->common);
--		if (ret)
-+	}
-+
-+	if (!common->thread_task) {
-+		common->state = FSG_STATE_IDLE;
-+		common->thread_task =
-+			kthread_create(fsg_main_thread, common, "file-storage");
-+		if (IS_ERR(common->thread_task)) {
-+			int ret = PTR_ERR(common->thread_task);
-+			common->thread_task = NULL;
-+			common->state = FSG_STATE_TERMINATED;
- 			return ret;
-+		}
-+		DBG(common, "I/O thread pid: %d\n",
-+		    task_pid_nr(common->thread_task));
-+		wake_up_process(common->thread_task);
- 	}
- 
- 	fsg->gadget = gadget;
-diff --git a/drivers/usb/gadget/function/f_mass_storage.h b/drivers/usb/gadget/function/f_mass_storage.h
-index 445df6775609..b6a9918eaefb 100644
---- a/drivers/usb/gadget/function/f_mass_storage.h
-+++ b/drivers/usb/gadget/function/f_mass_storage.h
-@@ -153,8 +153,6 @@ int fsg_common_create_luns(struct fsg_common *common, struct fsg_config *cfg);
- void fsg_common_set_inquiry_string(struct fsg_common *common, const char *vn,
- 				   const char *pn);
- 
--int fsg_common_run_thread(struct fsg_common *common);
--
- void fsg_config_from_params(struct fsg_config *cfg,
- 			    const struct fsg_module_parameters *params,
- 			    unsigned int fsg_num_buffers);
-diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c
-index c16089efc322..c39de65a448b 100644
---- a/drivers/usb/gadget/legacy/acm_ms.c
-+++ b/drivers/usb/gadget/legacy/acm_ms.c
-@@ -133,10 +133,6 @@ static int acm_ms_do_config(struct usb_configuration *c)
- 	if (status < 0)
- 		goto put_msg;
- 
--	status = fsg_common_run_thread(opts->common);
--	if (status)
--		goto remove_acm;
--
- 	status = usb_add_function(c, f_msg);
- 	if (status)
- 		goto remove_acm;
-diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c
-index e61af53c7d2b..125974f32f50 100644
---- a/drivers/usb/gadget/legacy/mass_storage.c
-+++ b/drivers/usb/gadget/legacy/mass_storage.c
-@@ -132,10 +132,6 @@ static int msg_do_config(struct usb_configuration *c)
- 	if (IS_ERR(f_msg))
- 		return PTR_ERR(f_msg);
- 
--	ret = fsg_common_run_thread(opts->common);
--	if (ret)
--		goto put_func;
--
- 	ret = usb_add_function(c, f_msg);
- 	if (ret)
- 		goto put_func;
-diff --git a/drivers/usb/gadget/legacy/multi.c b/drivers/usb/gadget/legacy/multi.c
-index 229d704a620b..a70a406580ea 100644
---- a/drivers/usb/gadget/legacy/multi.c
-+++ b/drivers/usb/gadget/legacy/multi.c
-@@ -137,7 +137,6 @@ static struct usb_function *f_msg_rndis;
- 
- static int rndis_do_config(struct usb_configuration *c)
- {
--	struct fsg_opts *fsg_opts;
- 	int ret;
- 
- 	if (gadget_is_otg(c->cdev->gadget)) {
-@@ -169,11 +168,6 @@ static int rndis_do_config(struct usb_configuration *c)
- 		goto err_fsg;
- 	}
- 
--	fsg_opts = fsg_opts_from_func_inst(fi_msg);
--	ret = fsg_common_run_thread(fsg_opts->common);
--	if (ret)
--		goto err_run;
--
- 	ret = usb_add_function(c, f_msg_rndis);
- 	if (ret)
- 		goto err_run;
-@@ -225,7 +219,6 @@ static struct usb_function *f_msg_multi;
- 
- static int cdc_do_config(struct usb_configuration *c)
- {
--	struct fsg_opts *fsg_opts;
- 	int ret;
- 
- 	if (gadget_is_otg(c->cdev->gadget)) {
-@@ -258,11 +251,6 @@ static int cdc_do_config(struct usb_configuration *c)
- 		goto err_fsg;
- 	}
- 
--	fsg_opts = fsg_opts_from_func_inst(fi_msg);
--	ret = fsg_common_run_thread(fsg_opts->common);
--	if (ret)
--		goto err_run;
--
- 	ret = usb_add_function(c, f_msg_multi);
- 	if (ret)
- 		goto err_run;
-diff --git a/drivers/usb/gadget/legacy/nokia.c b/drivers/usb/gadget/legacy/nokia.c
-index 09975046c694..b1e535f4022e 100644
---- a/drivers/usb/gadget/legacy/nokia.c
-+++ b/drivers/usb/gadget/legacy/nokia.c
-@@ -152,7 +152,6 @@ static int nokia_bind_config(struct usb_configuration *c)
- 	struct usb_function *f_ecm;
- 	struct usb_function *f_obex2 = NULL;
- 	struct usb_function *f_msg;
--	struct fsg_opts *fsg_opts;
- 	int status = 0;
- 	int obex1_stat = -1;
- 	int obex2_stat = -1;
-@@ -222,12 +221,6 @@ static int nokia_bind_config(struct usb_configuration *c)
- 		goto err_ecm;
- 	}
- 
--	fsg_opts = fsg_opts_from_func_inst(fi_msg);
--
--	status = fsg_common_run_thread(fsg_opts->common);
--	if (status)
--		goto err_msg;
--
- 	status = usb_add_function(c, f_msg);
- 	if (status)
- 		goto err_msg;
-diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
-index b86a6f03592e..e272b3ba1d14 100644
---- a/drivers/usb/gadget/udc/udc-core.c
-+++ b/drivers/usb/gadget/udc/udc-core.c
-@@ -75,7 +75,7 @@ int usb_gadget_map_request(struct usb_gadget *gadget,
- 		mapped = dma_map_sg(dev, req->sg, req->num_sgs,
- 				is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
- 		if (mapped == 0) {
--			dev_err(&gadget->dev, "failed to map SGs\n");
-+			dev_err(dev, "failed to map SGs\n");
- 			return -EFAULT;
- 		}
- 
-diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
-index 5cd080e0a685..743d9a20e248 100644
---- a/drivers/usb/host/xhci-mem.c
-+++ b/drivers/usb/host/xhci-mem.c
-@@ -1873,6 +1873,12 @@ no_bw:
- 	kfree(xhci->rh_bw);
- 	kfree(xhci->ext_caps);
- 
-+	xhci->usb2_ports = NULL;
-+	xhci->usb3_ports = NULL;
-+	xhci->port_array = NULL;
-+	xhci->rh_bw = NULL;
-+	xhci->ext_caps = NULL;
-+
- 	xhci->page_size = 0;
- 	xhci->page_shift = 0;
- 	xhci->bus_state[0].bus_suspended = 0;
-diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
-index f0640b7a1c42..48672fac7ff3 100644
---- a/drivers/usb/host/xhci-pci.c
-+++ b/drivers/usb/host/xhci-pci.c
-@@ -48,6 +48,7 @@
- #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI		0xa12f
- #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI	0x9d2f
- #define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI		0x0aa8
-+#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI		0x1aa8
- 
- static const char hcd_name[] = "xhci_hcd";
- 
-@@ -155,7 +156,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
- 		(pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
- 		 pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
- 		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
--		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) {
-+		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
-+		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI)) {
- 		xhci->quirks |= XHCI_PME_STUCK_QUIRK;
- 	}
- 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
-@@ -302,6 +304,7 @@ static void xhci_pci_remove(struct pci_dev *dev)
- 	struct xhci_hcd *xhci;
- 
- 	xhci = hcd_to_xhci(pci_get_drvdata(dev));
-+	xhci->xhc_state |= XHCI_STATE_REMOVING;
- 	if (xhci->shared_hcd) {
- 		usb_remove_hcd(xhci->shared_hcd);
- 		usb_put_hcd(xhci->shared_hcd);
-diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
-index d39d6bf1d090..d4962208be30 100644
---- a/drivers/usb/host/xhci-plat.c
-+++ b/drivers/usb/host/xhci-plat.c
-@@ -39,12 +39,25 @@ static const struct xhci_driver_overrides xhci_plat_overrides __initconst = {
- 
- static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
- {
-+	struct usb_hcd *hcd = xhci_to_hcd(xhci);
-+
- 	/*
- 	 * As of now platform drivers don't provide MSI support so we ensure
- 	 * here that the generic code does not try to make a pci_dev from our
- 	 * dev struct in order to setup MSI
- 	 */
- 	xhci->quirks |= XHCI_PLAT;
-+
-+	/*
-+	 * On R-Car Gen2 and Gen3, the AC64 bit (bit 0) of HCCPARAMS1 is set
-+	 * to 1. However, these SoCs don't support 64-bit address memory
-+	 * pointers. So, this driver clears the AC64 bit of xhci->hcc_params
-+	 * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
-+	 * xhci_gen_setup().
-+	 */
-+	if (xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2) ||
-+	    xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3))
-+		xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
- }
- 
- /* called during probe() after chip reset completes */
-diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
-index 5a2e2e3936c4..529c3c40f901 100644
---- a/drivers/usb/host/xhci-plat.h
-+++ b/drivers/usb/host/xhci-plat.h
-@@ -14,7 +14,7 @@
- #include "xhci.h"	/* for hcd_to_xhci() */
- 
- enum xhci_plat_type {
--	XHCI_PLAT_TYPE_MARVELL_ARMADA,
-+	XHCI_PLAT_TYPE_MARVELL_ARMADA = 1,
- 	XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2,
- 	XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3,
- };
-diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
-index 3915657e6078..a85a1c993d61 100644
---- a/drivers/usb/host/xhci-ring.c
-+++ b/drivers/usb/host/xhci-ring.c
-@@ -4014,7 +4014,8 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
- 	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
- 	int ret;
- 
--	if (xhci->xhc_state) {
-+	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
-+		(xhci->xhc_state & XHCI_STATE_HALTED)) {
- 		xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
- 		return -ESHUTDOWN;
- 	}
-diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
-index 0c8087d3c313..8e713cca58ed 100644
---- a/drivers/usb/host/xhci.c
-+++ b/drivers/usb/host/xhci.c
-@@ -147,7 +147,8 @@ static int xhci_start(struct xhci_hcd *xhci)
- 				"waited %u microseconds.\n",
- 				XHCI_MAX_HALT_USEC);
- 	if (!ret)
--		xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
-+		/* clear state flags. Including dying, halted or removing */
-+		xhci->xhc_state = 0;
- 
- 	return ret;
- }
-@@ -1108,8 +1109,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
- 		/* Resume root hubs only when have pending events. */
- 		status = readl(&xhci->op_regs->status);
- 		if (status & STS_EINT) {
--			usb_hcd_resume_root_hub(hcd);
- 			usb_hcd_resume_root_hub(xhci->shared_hcd);
-+			usb_hcd_resume_root_hub(hcd);
- 		}
- 	}
- 
-@@ -1124,10 +1125,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
- 
- 	/* Re-enable port polling. */
- 	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
--	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
--	usb_hcd_poll_rh_status(hcd);
- 	set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
- 	usb_hcd_poll_rh_status(xhci->shared_hcd);
-+	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
-+	usb_hcd_poll_rh_status(hcd);
- 
- 	return retval;
- }
-@@ -2770,7 +2771,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
- 	if (ret <= 0)
- 		return ret;
- 	xhci = hcd_to_xhci(hcd);
--	if (xhci->xhc_state & XHCI_STATE_DYING)
-+	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
-+		(xhci->xhc_state & XHCI_STATE_REMOVING))
- 		return -ENODEV;
- 
- 	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
-@@ -3817,7 +3819,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
- 
- 	mutex_lock(&xhci->mutex);
- 
--	if (xhci->xhc_state)	/* dying or halted */
-+	if (xhci->xhc_state)	/* dying, removing or halted */
- 		goto out;
- 
- 	if (!udev->slot_id) {
-@@ -4944,6 +4946,16 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
- 		return retval;
- 	xhci_dbg(xhci, "Reset complete\n");
- 
-+	/*
-+	 * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
-+	 * of HCCPARAMS1 is set to 1. However, the xHCs don't support 64-bit
-+	 * address memory pointers actually. So, this driver clears the AC64
-+	 * bit of xhci->hcc_params to call dma_set_coherent_mask(dev,
-+	 * DMA_BIT_MASK(32)) in this xhci_gen_setup().
-+	 */
-+	if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
-+		xhci->hcc_params &= ~BIT(0);
-+
- 	/* Set dma_mask and coherent_dma_mask to 64-bits,
- 	 * if xHC supports 64-bit addressing */
- 	if (HCC_64BIT_ADDR(xhci->hcc_params) &&
-diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
-index cc651383ce5a..1cdea4a8c895 100644
---- a/drivers/usb/host/xhci.h
-+++ b/drivers/usb/host/xhci.h
-@@ -1596,6 +1596,7 @@ struct xhci_hcd {
-  */
- #define XHCI_STATE_DYING	(1 << 0)
- #define XHCI_STATE_HALTED	(1 << 1)
-+#define XHCI_STATE_REMOVING	(1 << 2)
- 	/* Statistics */
- 	int			error_bitmask;
- 	unsigned int		quirks;
-@@ -1632,6 +1633,7 @@ struct xhci_hcd {
- #define XHCI_PME_STUCK_QUIRK	(1 << 20)
- #define XHCI_MTK_HOST		(1 << 21)
- #define XHCI_SSIC_PORT_UNUSED	(1 << 22)
-+#define XHCI_NO_64BIT_SUPPORT	(1 << 23)
- 	unsigned int		num_active_eps;
- 	unsigned int		limit_active_eps;
- 	/* There are two roothubs to keep track of bus suspend info for */
-diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
-index c6bfd13f6c92..1950e87b4219 100644
---- a/drivers/usb/misc/iowarrior.c
-+++ b/drivers/usb/misc/iowarrior.c
-@@ -787,6 +787,12 @@ static int iowarrior_probe(struct usb_interface *interface,
- 	iface_desc = interface->cur_altsetting;
- 	dev->product_id = le16_to_cpu(udev->descriptor.idProduct);
- 
-+	if (iface_desc->desc.bNumEndpoints < 1) {
-+		dev_err(&interface->dev, "Invalid number of endpoints\n");
-+		retval = -EINVAL;
-+		goto error;
-+	}
-+
- 	/* set up the endpoint information */
- 	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
- 		endpoint = &iface_desc->endpoint[i].desc;
-diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
-index 92fdb6e9faff..c78ff95a43be 100644
---- a/drivers/usb/misc/usbtest.c
-+++ b/drivers/usb/misc/usbtest.c
-@@ -529,6 +529,7 @@ static struct scatterlist *
- alloc_sglist(int nents, int max, int vary, struct usbtest_dev *dev, int pipe)
- {
- 	struct scatterlist	*sg;
-+	unsigned int		n_size = 0;
- 	unsigned		i;
- 	unsigned		size = max;
- 	unsigned		maxpacket =
-@@ -561,7 +562,8 @@ alloc_sglist(int nents, int max, int vary, struct usbtest_dev *dev, int pipe)
- 			break;
- 		case 1:
- 			for (j = 0; j < size; j++)
--				*buf++ = (u8) ((j % maxpacket) % 63);
-+				*buf++ = (u8) (((j + n_size) % maxpacket) % 63);
-+			n_size += size;
- 			break;
- 		}
- 
-diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
-index c0f5c652d272..f1893e08e51a 100644
---- a/drivers/usb/renesas_usbhs/fifo.c
-+++ b/drivers/usb/renesas_usbhs/fifo.c
-@@ -190,7 +190,8 @@ static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type)
- 		goto __usbhs_pkt_handler_end;
- 	}
- 
--	ret = func(pkt, &is_done);
-+	if (likely(func))
-+		ret = func(pkt, &is_done);
- 
- 	if (is_done)
- 		__usbhsf_pkt_del(pkt);
-@@ -889,6 +890,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
- 
- 	pkt->trans = len;
- 
-+	usbhsf_tx_irq_ctrl(pipe, 0);
- 	INIT_WORK(&pkt->work, xfer_work);
- 	schedule_work(&pkt->work);
- 
-diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
-index 657f9672ceba..251053551866 100644
---- a/drivers/usb/renesas_usbhs/mod_gadget.c
-+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
-@@ -158,10 +158,14 @@ static void usbhsg_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
- 	struct usbhs_pipe *pipe = pkt->pipe;
- 	struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe);
- 	struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt);
-+	unsigned long flags;
- 
- 	ureq->req.actual = pkt->actual;
- 
--	usbhsg_queue_pop(uep, ureq, 0);
-+	usbhs_lock(priv, flags);
-+	if (uep)
-+		__usbhsg_queue_pop(uep, ureq, 0);
-+	usbhs_unlock(priv, flags);
- }
- 
- static void usbhsg_queue_push(struct usbhsg_uep *uep,
-diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
-index 73a366de5102..a543cdc0f88f 100644
---- a/drivers/usb/serial/cp210x.c
-+++ b/drivers/usb/serial/cp210x.c
-@@ -109,6 +109,7 @@ static const struct usb_device_id id_table[] = {
- 	{ USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
- 	{ USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
- 	{ USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
-+	{ USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
- 	{ USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
- 	{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
- 	{ USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
-@@ -118,6 +119,7 @@ static const struct usb_device_id id_table[] = {
- 	{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
- 	{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
- 	{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
-+	{ USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
- 	{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
- 	{ USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
- 	{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
-@@ -141,6 +143,8 @@ static const struct usb_device_id id_table[] = {
- 	{ USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */
- 	{ USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */
- 	{ USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
-+	{ USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */
-+	{ USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */
- 	{ USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
- 	{ USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
- 	{ USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
-@@ -165,6 +169,7 @@ static const struct usb_device_id id_table[] = {
- 	{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
- 	{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
- 	{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
-+	{ USB_DEVICE(0x1901, 0x0194) },	/* GE Healthcare Remote Alarm Box */
- 	{ USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
- 	{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
- 	{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
-diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
-index 01bf53392819..244acb1299a9 100644
---- a/drivers/usb/serial/cypress_m8.c
-+++ b/drivers/usb/serial/cypress_m8.c
-@@ -447,6 +447,11 @@ static int cypress_generic_port_probe(struct usb_serial_port *port)
- 	struct usb_serial *serial = port->serial;
- 	struct cypress_private *priv;
- 
-+	if (!port->interrupt_out_urb || !port->interrupt_in_urb) {
-+		dev_err(&port->dev, "required endpoint is missing\n");
-+		return -ENODEV;
-+	}
-+
- 	priv = kzalloc(sizeof(struct cypress_private), GFP_KERNEL);
- 	if (!priv)
- 		return -ENOMEM;
-@@ -606,12 +611,6 @@ static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port)
- 		cypress_set_termios(tty, port, &priv->tmp_termios);
- 
- 	/* setup the port and start reading from the device */
--	if (!port->interrupt_in_urb) {
--		dev_err(&port->dev, "%s - interrupt_in_urb is empty!\n",
--			__func__);
--		return -1;
--	}
--
- 	usb_fill_int_urb(port->interrupt_in_urb, serial->dev,
- 		usb_rcvintpipe(serial->dev, port->interrupt_in_endpointAddress),
- 		port->interrupt_in_urb->transfer_buffer,
-diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
-index 12b0e67473ba..3df7b7ec178e 100644
---- a/drivers/usb/serial/digi_acceleport.c
-+++ b/drivers/usb/serial/digi_acceleport.c
-@@ -1251,8 +1251,27 @@ static int digi_port_init(struct usb_serial_port *port, unsigned port_num)
- 
- static int digi_startup(struct usb_serial *serial)
- {
-+	struct device *dev = &serial->interface->dev;
- 	struct digi_serial *serial_priv;
- 	int ret;
-+	int i;
-+
-+	/* check whether the device has the expected number of endpoints */
-+	if (serial->num_port_pointers < serial->type->num_ports + 1) {
-+		dev_err(dev, "OOB endpoints missing\n");
-+		return -ENODEV;
-+	}
-+
-+	for (i = 0; i < serial->type->num_ports + 1 ; i++) {
-+		if (!serial->port[i]->read_urb) {
-+			dev_err(dev, "bulk-in endpoint missing\n");
-+			return -ENODEV;
-+		}
-+		if (!serial->port[i]->write_urb) {
-+			dev_err(dev, "bulk-out endpoint missing\n");
-+			return -ENODEV;
-+		}
-+	}
- 
- 	serial_priv = kzalloc(sizeof(*serial_priv), GFP_KERNEL);
- 	if (!serial_priv)
-diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
-index 8c660ae401d8..b61f12160d37 100644
---- a/drivers/usb/serial/ftdi_sio.c
-+++ b/drivers/usb/serial/ftdi_sio.c
-@@ -1004,6 +1004,10 @@ static const struct usb_device_id id_table_combined[] = {
- 	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) },
- 	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) },
- 	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) },
-+	/* ICP DAS I-756xU devices */
-+	{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) },
-+	{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
-+	{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
- 	{ }					/* Terminating entry */
- };
- 
-diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
-index a84df2513994..c5d6c1e73e8e 100644
---- a/drivers/usb/serial/ftdi_sio_ids.h
-+++ b/drivers/usb/serial/ftdi_sio_ids.h
-@@ -872,6 +872,14 @@
- #define NOVITUS_BONO_E_PID		0x6010
- 
- /*
-+ * ICPDAS I-756*U devices
-+ */
-+#define ICPDAS_VID			0x1b5c
-+#define ICPDAS_I7560U_PID		0x0103
-+#define ICPDAS_I7561U_PID		0x0104
-+#define ICPDAS_I7563U_PID		0x0105
-+
-+/*
-  * RT Systems programming cables for various ham radios
-  */
- #define RTSYSTEMS_VID		0x2100	/* Vendor ID */
-diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
-index f49327d20ee8..0a935b1e5475 100644
---- a/drivers/usb/serial/io_edgeport.c
-+++ b/drivers/usb/serial/io_edgeport.c
-@@ -2849,14 +2849,16 @@ static int edge_startup(struct usb_serial *serial)
- 				/* not set up yet, so do it now */
- 				edge_serial->interrupt_read_urb =
- 						usb_alloc_urb(0, GFP_KERNEL);
--				if (!edge_serial->interrupt_read_urb)
--					return -ENOMEM;
-+				if (!edge_serial->interrupt_read_urb) {
-+					response = -ENOMEM;
-+					break;
-+				}
- 
- 				edge_serial->interrupt_in_buffer =
- 					kmalloc(buffer_size, GFP_KERNEL);
- 				if (!edge_serial->interrupt_in_buffer) {
--					usb_free_urb(edge_serial->interrupt_read_urb);
--					return -ENOMEM;
-+					response = -ENOMEM;
-+					break;
- 				}
- 				edge_serial->interrupt_in_endpoint =
- 						endpoint->bEndpointAddress;
-@@ -2884,14 +2886,16 @@ static int edge_startup(struct usb_serial *serial)
- 				/* not set up yet, so do it now */
- 				edge_serial->read_urb =
- 						usb_alloc_urb(0, GFP_KERNEL);
--				if (!edge_serial->read_urb)
--					return -ENOMEM;
-+				if (!edge_serial->read_urb) {
-+					response = -ENOMEM;
-+					break;
-+				}
- 
- 				edge_serial->bulk_in_buffer =
- 					kmalloc(buffer_size, GFP_KERNEL);
- 				if (!edge_serial->bulk_in_buffer) {
--					usb_free_urb(edge_serial->read_urb);
--					return -ENOMEM;
-+					response = -ENOMEM;
-+					break;
- 				}
- 				edge_serial->bulk_in_endpoint =
- 						endpoint->bEndpointAddress;
-@@ -2917,9 +2921,22 @@ static int edge_startup(struct usb_serial *serial)
- 			}
- 		}
- 
--		if (!interrupt_in_found || !bulk_in_found || !bulk_out_found) {
--			dev_err(ddev, "Error - the proper endpoints were not found!\n");
--			return -ENODEV;
-+		if (response || !interrupt_in_found || !bulk_in_found ||
-+							!bulk_out_found) {
-+			if (!response) {
-+				dev_err(ddev, "expected endpoints not found\n");
-+				response = -ENODEV;
-+			}
-+
-+			usb_free_urb(edge_serial->interrupt_read_urb);
-+			kfree(edge_serial->interrupt_in_buffer);
-+
-+			usb_free_urb(edge_serial->read_urb);
-+			kfree(edge_serial->bulk_in_buffer);
-+
-+			kfree(edge_serial);
-+
-+			return response;
- 		}
- 
- 		/* start interrupt read for this edgeport this interrupt will
-@@ -2942,16 +2959,9 @@ static void edge_disconnect(struct usb_serial *serial)
- {
- 	struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
- 
--	/* stop reads and writes on all ports */
--	/* free up our endpoint stuff */
- 	if (edge_serial->is_epic) {
- 		usb_kill_urb(edge_serial->interrupt_read_urb);
--		usb_free_urb(edge_serial->interrupt_read_urb);
--		kfree(edge_serial->interrupt_in_buffer);
--
- 		usb_kill_urb(edge_serial->read_urb);
--		usb_free_urb(edge_serial->read_urb);
--		kfree(edge_serial->bulk_in_buffer);
- 	}
- }
- 
-@@ -2964,6 +2974,16 @@ static void edge_release(struct usb_serial *serial)
- {
- 	struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
- 
-+	if (edge_serial->is_epic) {
-+		usb_kill_urb(edge_serial->interrupt_read_urb);
-+		usb_free_urb(edge_serial->interrupt_read_urb);
-+		kfree(edge_serial->interrupt_in_buffer);
-+
-+		usb_kill_urb(edge_serial->read_urb);
-+		usb_free_urb(edge_serial->read_urb);
-+		kfree(edge_serial->bulk_in_buffer);
-+	}
-+
- 	kfree(edge_serial);
- }
- 
-diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
-index e07b15ed5814..7faa901ee47f 100644
---- a/drivers/usb/serial/keyspan.c
-+++ b/drivers/usb/serial/keyspan.c
-@@ -2376,6 +2376,10 @@ static void keyspan_release(struct usb_serial *serial)
- 
- 	s_priv = usb_get_serial_data(serial);
- 
-+	/* Make sure to unlink the URBs submitted in attach. */
-+	usb_kill_urb(s_priv->instat_urb);
-+	usb_kill_urb(s_priv->indat_urb);
-+
- 	usb_free_urb(s_priv->instat_urb);
- 	usb_free_urb(s_priv->indat_urb);
- 	usb_free_urb(s_priv->glocont_urb);
-diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
-index fd707d6a10e2..89726f702202 100644
---- a/drivers/usb/serial/mct_u232.c
-+++ b/drivers/usb/serial/mct_u232.c
-@@ -376,14 +376,21 @@ static void mct_u232_msr_to_state(struct usb_serial_port *port,
- 
- static int mct_u232_port_probe(struct usb_serial_port *port)
- {
-+	struct usb_serial *serial = port->serial;
- 	struct mct_u232_private *priv;
- 
-+	/* check first to simplify error handling */
-+	if (!serial->port[1] || !serial->port[1]->interrupt_in_urb) {
-+		dev_err(&port->dev, "expected endpoint missing\n");
-+		return -ENODEV;
-+	}
-+
- 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- 	if (!priv)
- 		return -ENOMEM;
- 
- 	/* Use second interrupt-in endpoint for reading. */
--	priv->read_urb = port->serial->port[1]->interrupt_in_urb;
-+	priv->read_urb = serial->port[1]->interrupt_in_urb;
- 	priv->read_urb->context = port;
- 
- 	spin_lock_init(&priv->lock);
-diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c
-index 31a8b47f1ac6..c6596cbcc4b6 100644
---- a/drivers/usb/serial/mxuport.c
-+++ b/drivers/usb/serial/mxuport.c
-@@ -1259,6 +1259,15 @@ static int mxuport_attach(struct usb_serial *serial)
- 	return 0;
- }
- 
-+static void mxuport_release(struct usb_serial *serial)
-+{
-+	struct usb_serial_port *port0 = serial->port[0];
-+	struct usb_serial_port *port1 = serial->port[1];
-+
-+	usb_serial_generic_close(port1);
-+	usb_serial_generic_close(port0);
-+}
-+
- static int mxuport_open(struct tty_struct *tty, struct usb_serial_port *port)
- {
- 	struct mxuport_port *mxport = usb_get_serial_port_data(port);
-@@ -1361,6 +1370,7 @@ static struct usb_serial_driver mxuport_device = {
- 	.probe			= mxuport_probe,
- 	.port_probe		= mxuport_port_probe,
- 	.attach			= mxuport_attach,
-+	.release		= mxuport_release,
- 	.calc_num_ports		= mxuport_calc_num_ports,
- 	.open			= mxuport_open,
- 	.close			= mxuport_close,
-diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
-index 348e19834b83..d96d423d00e6 100644
---- a/drivers/usb/serial/option.c
-+++ b/drivers/usb/serial/option.c
-@@ -375,18 +375,22 @@ static void option_instat_callback(struct urb *urb);
- #define HAIER_PRODUCT_CE81B			0x10f8
- #define HAIER_PRODUCT_CE100			0x2009
- 
--/* Cinterion (formerly Siemens) products */
--#define SIEMENS_VENDOR_ID				0x0681
--#define CINTERION_VENDOR_ID				0x1e2d
-+/* Gemalto's Cinterion products (formerly Siemens) */
-+#define SIEMENS_VENDOR_ID			0x0681
-+#define CINTERION_VENDOR_ID			0x1e2d
-+#define CINTERION_PRODUCT_HC25_MDMNET		0x0040
- #define CINTERION_PRODUCT_HC25_MDM		0x0047
--#define CINTERION_PRODUCT_HC25_MDMNET	0x0040
-+#define CINTERION_PRODUCT_HC28_MDMNET		0x004A /* same for HC28J */
- #define CINTERION_PRODUCT_HC28_MDM		0x004C
--#define CINTERION_PRODUCT_HC28_MDMNET	0x004A /* same for HC28J */
- #define CINTERION_PRODUCT_EU3_E			0x0051
- #define CINTERION_PRODUCT_EU3_P			0x0052
- #define CINTERION_PRODUCT_PH8			0x0053
- #define CINTERION_PRODUCT_AHXX			0x0055
- #define CINTERION_PRODUCT_PLXX			0x0060
-+#define CINTERION_PRODUCT_PH8_2RMNET		0x0082
-+#define CINTERION_PRODUCT_PH8_AUDIO		0x0083
-+#define CINTERION_PRODUCT_AHXX_2RMNET		0x0084
-+#define CINTERION_PRODUCT_AHXX_AUDIO		0x0085
- 
- /* Olivetti products */
- #define OLIVETTI_VENDOR_ID			0x0b3c
-@@ -633,6 +637,10 @@ static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = {
- 	.reserved = BIT(1) | BIT(2) | BIT(3),
- };
- 
-+static const struct option_blacklist_info cinterion_rmnet2_blacklist = {
-+	.reserved = BIT(4) | BIT(5),
-+};
-+
- static const struct usb_device_id option_ids[] = {
- 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
- 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
-@@ -1602,7 +1610,79 @@ static const struct usb_device_id option_ids[] = {
- 		.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
- 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
- 		.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
--	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffe9, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff42, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff43, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff44, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff45, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff46, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff47, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff48, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff49, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4a, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4b, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4c, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4d, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4e, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4f, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff50, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff51, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff52, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff53, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff54, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff55, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff56, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff57, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff58, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff59, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5a, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5b, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5c, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5d, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5e, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5f, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff60, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff61, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff62, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff63, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff64, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff65, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff66, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff67, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff68, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff69, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6a, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6b, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6c, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6d, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6e, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6f, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff70, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff71, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff72, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff73, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff74, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff75, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff76, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff77, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff78, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff79, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7a, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7b, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7c, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7d, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7e, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7f, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff80, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff81, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff82, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff83, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff84, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff85, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff86, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff87, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff88, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff89, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8a, 0xff, 0xff, 0xff) },
- 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8b, 0xff, 0xff, 0xff) },
- 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8c, 0xff, 0xff, 0xff) },
- 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8d, 0xff, 0xff, 0xff) },
-@@ -1613,6 +1693,61 @@ static const struct usb_device_id option_ids[] = {
- 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff92, 0xff, 0xff, 0xff) },
- 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff93, 0xff, 0xff, 0xff) },
- 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff94, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff9f, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa0, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa1, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa2, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa3, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa4, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa5, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa6, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa7, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa8, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa9, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffaa, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffab, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffac, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffae, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffaf, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb0, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb1, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb2, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb3, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb4, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb5, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb6, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb7, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb8, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb9, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffba, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbb, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbc, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbd, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbe, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbf, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc0, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc1, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc2, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc3, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc4, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc5, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc6, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc7, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc8, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc9, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffca, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcb, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcc, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcd, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffce, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcf, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd0, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd1, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd2, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd3, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd4, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd5, 0xff, 0xff, 0xff) },
-+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffe9, 0xff, 0xff, 0xff) },
- 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffec, 0xff, 0xff, 0xff) },
- 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffee, 0xff, 0xff, 0xff) },
- 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xfff6, 0xff, 0xff, 0xff) },
-@@ -1712,7 +1847,13 @@ static const struct usb_device_id option_ids[] = {
- 	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) },
- 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
- 		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
--	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, 
-+	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_2RMNET, 0xff),
-+		.driver_info = (kernel_ulong_t)&cinterion_rmnet2_blacklist },
-+	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_AUDIO, 0xff),
-+		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
-+	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) },
-+	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) },
-+	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
- 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
- 	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
- 	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
-@@ -1818,6 +1959,8 @@ static const struct usb_device_id option_ids[] = {
- 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
- 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
- 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
-+	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff),			/* D-Link DWM-221 B1 */
-+	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
- 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
- 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
- 	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },                /* OLICARD300 - MT6225 */
-diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
-index 504f5bff79c0..b18974cbd995 100644
---- a/drivers/usb/serial/quatech2.c
-+++ b/drivers/usb/serial/quatech2.c
-@@ -141,6 +141,7 @@ static void qt2_release(struct usb_serial *serial)
- 
- 	serial_priv = usb_get_serial_data(serial);
- 
-+	usb_kill_urb(serial_priv->read_urb);
- 	usb_free_urb(serial_priv->read_urb);
- 	kfree(serial_priv->read_buffer);
- 	kfree(serial_priv);
-diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
-index 9ff9404f99d7..e4ade8d89eb5 100644
---- a/drivers/usb/storage/uas.c
-+++ b/drivers/usb/storage/uas.c
-@@ -2,7 +2,7 @@
-  * USB Attached SCSI
-  * Note that this is not the same as the USB Mass Storage driver
-  *
-- * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2014
-+ * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2016
-  * Copyright Matthew Wilcox for Intel Corp, 2010
-  * Copyright Sarah Sharp for Intel Corp, 2010
-  *
-@@ -757,6 +757,17 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
- 	return SUCCESS;
- }
- 
-+static int uas_target_alloc(struct scsi_target *starget)
-+{
-+	struct uas_dev_info *devinfo = (struct uas_dev_info *)
-+			dev_to_shost(starget->dev.parent)->hostdata;
-+
-+	if (devinfo->flags & US_FL_NO_REPORT_LUNS)
-+		starget->no_report_luns = 1;
-+
-+	return 0;
-+}
-+
- static int uas_slave_alloc(struct scsi_device *sdev)
- {
- 	struct uas_dev_info *devinfo =
-@@ -800,7 +811,6 @@ static int uas_slave_configure(struct scsi_device *sdev)
- 	if (devinfo->flags & US_FL_BROKEN_FUA)
- 		sdev->broken_fua = 1;
- 
--	scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
- 	return 0;
- }
- 
-@@ -808,11 +818,12 @@ static struct scsi_host_template uas_host_template = {
- 	.module = THIS_MODULE,
- 	.name = "uas",
- 	.queuecommand = uas_queuecommand,
-+	.target_alloc = uas_target_alloc,
- 	.slave_alloc = uas_slave_alloc,
- 	.slave_configure = uas_slave_configure,
- 	.eh_abort_handler = uas_eh_abort_handler,
- 	.eh_bus_reset_handler = uas_eh_bus_reset_handler,
--	.can_queue = 65536,	/* Is there a limit on the _host_ ? */
-+	.can_queue = MAX_CMNDS,
- 	.this_id = -1,
- 	.sg_tablesize = SG_NONE,
- 	.skip_settle_delay = 1,
-@@ -932,6 +943,12 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
- 	if (result)
- 		goto set_alt0;
- 
-+	/*
-+	 * 1 tag is reserved for untagged commands +
-+	 * 1 tag to avoid off by one errors in some bridge firmwares
-+	 */
-+	shost->can_queue = devinfo->qdepth - 2;
-+
- 	usb_set_intfdata(intf, shost);
- 	result = scsi_add_host(shost, &intf->dev);
- 	if (result)
-diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
-index ccc113e83d88..53341a77d89f 100644
---- a/drivers/usb/storage/unusual_uas.h
-+++ b/drivers/usb/storage/unusual_uas.h
-@@ -64,6 +64,13 @@ UNUSUAL_DEV(0x0bc2, 0x3312, 0x0000, 0x9999,
- 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- 		US_FL_NO_ATA_1X),
- 
-+/* Reported-by: David Webb <djw@noc.ac.uk> */
-+UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
-+		"Seagate",
-+		"Expansion Desk",
-+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
-+		US_FL_NO_REPORT_LUNS),
-+
- /* Reported-by: Hans de Goede <hdegoede@redhat.com> */
- UNUSUAL_DEV(0x0bc2, 0x3320, 0x0000, 0x9999,
- 		"Seagate",
-diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
-index 43576ed31ccd..9de988a0f856 100644
---- a/drivers/usb/storage/usb.c
-+++ b/drivers/usb/storage/usb.c
-@@ -482,7 +482,7 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
- 			US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
- 			US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE |
- 			US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES |
--			US_FL_MAX_SECTORS_240);
-+			US_FL_MAX_SECTORS_240 | US_FL_NO_REPORT_LUNS);
- 
- 	p = quirks;
- 	while (*p) {
-@@ -532,6 +532,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
- 		case 'i':
- 			f |= US_FL_IGNORE_DEVICE;
- 			break;
-+		case 'j':
-+			f |= US_FL_NO_REPORT_LUNS;
-+			break;
- 		case 'l':
- 			f |= US_FL_NOT_LOCKABLE;
- 			break;
-diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
-index facaaf003f19..e40da7759a0e 100644
---- a/drivers/usb/usbip/usbip_common.c
-+++ b/drivers/usb/usbip/usbip_common.c
-@@ -741,6 +741,17 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
- 	if (!(size > 0))
- 		return 0;
- 
-+	if (size > urb->transfer_buffer_length) {
-+		/* should not happen, probably malicious packet */
-+		if (ud->side == USBIP_STUB) {
-+			usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
-+			return 0;
-+		} else {
-+			usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
-+			return -EPIPE;
-+		}
-+	}
-+
- 	ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
- 	if (ret != size) {
- 		dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
-diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
-index 8ea45a5cd806..d889ef2048df 100644
---- a/drivers/video/fbdev/Kconfig
-+++ b/drivers/video/fbdev/Kconfig
-@@ -2246,7 +2246,6 @@ config XEN_FBDEV_FRONTEND
- 	select FB_SYS_IMAGEBLIT
- 	select FB_SYS_FOPS
- 	select FB_DEFERRED_IO
--	select INPUT_XEN_KBDDEV_FRONTEND if INPUT_MISC
- 	select XEN_XENBUS_FRONTEND
- 	default y
- 	help
-diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
-index 9362424c2340..f9ef06d0cd48 100644
---- a/drivers/video/fbdev/amba-clcd.c
-+++ b/drivers/video/fbdev/amba-clcd.c
-@@ -440,13 +440,14 @@ static int clcdfb_register(struct clcd_fb *fb)
- 		fb->off_ienb = CLCD_PL111_IENB;
- 		fb->off_cntl = CLCD_PL111_CNTL;
- 	} else {
--#ifdef CONFIG_ARCH_VERSATILE
--		fb->off_ienb = CLCD_PL111_IENB;
--		fb->off_cntl = CLCD_PL111_CNTL;
--#else
--		fb->off_ienb = CLCD_PL110_IENB;
--		fb->off_cntl = CLCD_PL110_CNTL;
--#endif
-+		if (of_machine_is_compatible("arm,versatile-ab") ||
-+		    of_machine_is_compatible("arm,versatile-pb")) {
-+			fb->off_ienb = CLCD_PL111_IENB;
-+			fb->off_cntl = CLCD_PL111_CNTL;
-+		} else {
-+			fb->off_ienb = CLCD_PL110_IENB;
-+			fb->off_cntl = CLCD_PL110_CNTL;
-+		}
- 	}
- 
- 	fb->clk = clk_get(&fb->dev->dev, NULL);
-diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
-index 6b2a06d09f2b..d8d583d32a37 100644
---- a/drivers/video/fbdev/da8xx-fb.c
-+++ b/drivers/video/fbdev/da8xx-fb.c
-@@ -209,8 +209,7 @@ static struct fb_videomode known_lcd_panels[] = {
- 		.lower_margin   = 2,
- 		.hsync_len      = 0,
- 		.vsync_len      = 0,
--		.sync           = FB_SYNC_CLK_INVERT |
--			FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
-+		.sync           = FB_SYNC_CLK_INVERT,
- 	},
- 	/* Sharp LK043T1DG01 */
- 	[1] = {
-@@ -224,7 +223,7 @@ static struct fb_videomode known_lcd_panels[] = {
- 		.lower_margin   = 2,
- 		.hsync_len      = 41,
- 		.vsync_len      = 10,
--		.sync           = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
-+		.sync           = 0,
- 		.flag           = 0,
- 	},
- 	[2] = {
-@@ -239,7 +238,7 @@ static struct fb_videomode known_lcd_panels[] = {
- 		.lower_margin   = 10,
- 		.hsync_len      = 10,
- 		.vsync_len      = 10,
--		.sync           = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
-+		.sync           = 0,
- 		.flag           = 0,
- 	},
- 	[3] = {
-diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
-index 7760fc1a2218..1f413a2f70b6 100644
---- a/drivers/virtio/virtio_pci_modern.c
-+++ b/drivers/virtio/virtio_pci_modern.c
-@@ -17,6 +17,7 @@
-  *
-  */
- 
-+#include <linux/delay.h>
- #define VIRTIO_PCI_NO_LEGACY
- #include "virtio_pci_common.h"
- 
-@@ -271,9 +272,13 @@ static void vp_reset(struct virtio_device *vdev)
- 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- 	/* 0 status means a reset. */
- 	vp_iowrite8(0, &vp_dev->common->device_status);
--	/* Flush out the status write, and flush in device writes,
--	 * including MSI-X interrupts, if any. */
--	vp_ioread8(&vp_dev->common->device_status);
-+	/* After writing 0 to device_status, the driver MUST wait for a read of
-+	 * device_status to return 0 before reinitializing the device.
-+	 * This will flush out the status write, and flush in device writes,
-+	 * including MSI-X interrupts, if any.
-+	 */
-+	while (vp_ioread8(&vp_dev->common->device_status))
-+		msleep(1);
- 	/* Flush pending VQ/configuration callbacks. */
- 	vp_synchronize_vectors(vdev);
- }
-diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
-index 71e78ef4b736..3a75f3b53452 100644
---- a/drivers/watchdog/rc32434_wdt.c
-+++ b/drivers/watchdog/rc32434_wdt.c
-@@ -237,7 +237,7 @@ static long rc32434_wdt_ioctl(struct file *file, unsigned int cmd,
- 			return -EINVAL;
- 		/* Fall through */
- 	case WDIOC_GETTIMEOUT:
--		return copy_to_user(argp, &timeout, sizeof(int));
-+		return copy_to_user(argp, &timeout, sizeof(int)) ? -EFAULT : 0;
- 	default:
- 		return -ENOTTY;
- 	}
-diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
-index 6467b91f2245..028618c5eeba 100644
---- a/drivers/watchdog/sp5100_tco.c
-+++ b/drivers/watchdog/sp5100_tco.c
-@@ -73,6 +73,13 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started."
- /*
-  * Some TCO specific functions
-  */
-+
-+static bool tco_has_sp5100_reg_layout(struct pci_dev *dev)
-+{
-+	return dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
-+	       dev->revision < 0x40;
-+}
-+
- static void tco_timer_start(void)
- {
- 	u32 val;
-@@ -129,7 +136,7 @@ static void tco_timer_enable(void)
- {
- 	int val;
- 
--	if (sp5100_tco_pci->revision >= 0x40) {
-+	if (!tco_has_sp5100_reg_layout(sp5100_tco_pci)) {
- 		/* For SB800 or later */
- 		/* Set the Watchdog timer resolution to 1 sec */
- 		outb(SB800_PM_WATCHDOG_CONFIG, SB800_IO_PM_INDEX_REG);
-@@ -342,8 +349,7 @@ static unsigned char sp5100_tco_setupdevice(void)
- 	/*
- 	 * Determine type of southbridge chipset.
- 	 */
--	if (sp5100_tco_pci->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
--	    sp5100_tco_pci->revision < 0x40) {
-+	if (tco_has_sp5100_reg_layout(sp5100_tco_pci)) {
- 		dev_name = SP5100_DEVNAME;
- 		index_reg = SP5100_IO_PM_INDEX_REG;
- 		data_reg = SP5100_IO_PM_DATA_REG;
-@@ -388,8 +394,7 @@ static unsigned char sp5100_tco_setupdevice(void)
- 	 * Secondly, Find the watchdog timer MMIO address
- 	 * from SBResource_MMIO register.
- 	 */
--	if (sp5100_tco_pci->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
--	    sp5100_tco_pci->revision < 0x40) {
-+	if (tco_has_sp5100_reg_layout(sp5100_tco_pci)) {
- 		/* Read SBResource_MMIO from PCI config(PCI_Reg: 9Ch) */
- 		pci_read_config_dword(sp5100_tco_pci,
- 				      SP5100_SB_RESOURCE_MMIO_BASE, &val);
-diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
-index 12eab503efd1..364bc44610c1 100644
---- a/drivers/xen/balloon.c
-+++ b/drivers/xen/balloon.c
-@@ -152,6 +152,8 @@ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
- static void balloon_process(struct work_struct *work);
- static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
- 
-+static void release_memory_resource(struct resource *resource);
-+
- /* When ballooning out (allocating memory to return to Xen) we don't really
-    want the kernel to try too hard since that can trigger the oom killer. */
- #define GFP_BALLOON \
-@@ -268,6 +270,20 @@ static struct resource *additional_memory_resource(phys_addr_t size)
- 		return NULL;
- 	}
- 
-+#ifdef CONFIG_SPARSEMEM
-+	{
-+		unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
-+		unsigned long pfn = res->start >> PAGE_SHIFT;
-+
-+		if (pfn > limit) {
-+			pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
-+			       pfn, limit);
-+			release_memory_resource(res);
-+			return NULL;
-+		}
-+	}
-+#endif
-+
- 	return res;
- }
- 
-diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
-index 524c22146429..44367783f07a 100644
---- a/drivers/xen/events/events_base.c
-+++ b/drivers/xen/events/events_base.c
-@@ -484,9 +484,19 @@ static void eoi_pirq(struct irq_data *data)
- 	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
- 	int rc = 0;
- 
--	irq_move_irq(data);
-+	if (!VALID_EVTCHN(evtchn))
-+		return;
- 
--	if (VALID_EVTCHN(evtchn))
-+	if (unlikely(irqd_is_setaffinity_pending(data))) {
-+		int masked = test_and_set_mask(evtchn);
-+
-+		clear_evtchn(evtchn);
-+
-+		irq_move_masked_irq(data);
-+
-+		if (!masked)
-+			unmask_evtchn(evtchn);
-+	} else
- 		clear_evtchn(evtchn);
- 
- 	if (pirq_needs_eoi(data->irq)) {
-@@ -1357,9 +1367,19 @@ static void ack_dynirq(struct irq_data *data)
- {
- 	int evtchn = evtchn_from_irq(data->irq);
- 
--	irq_move_irq(data);
-+	if (!VALID_EVTCHN(evtchn))
-+		return;
- 
--	if (VALID_EVTCHN(evtchn))
-+	if (unlikely(irqd_is_setaffinity_pending(data))) {
-+		int masked = test_and_set_mask(evtchn);
-+
-+		clear_evtchn(evtchn);
-+
-+		irq_move_masked_irq(data);
-+
-+		if (!masked)
-+			unmask_evtchn(evtchn);
-+	} else
- 		clear_evtchn(evtchn);
- }
- 
-diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
-index 38272ad24551..f4edd6df3df2 100644
---- a/drivers/xen/evtchn.c
-+++ b/drivers/xen/evtchn.c
-@@ -316,7 +316,6 @@ static int evtchn_resize_ring(struct per_user_data *u)
- {
- 	unsigned int new_size;
- 	evtchn_port_t *new_ring, *old_ring;
--	unsigned int p, c;
- 
- 	/*
- 	 * Ensure the ring is large enough to capture all possible
-@@ -346,20 +345,17 @@ static int evtchn_resize_ring(struct per_user_data *u)
- 	/*
- 	 * Copy the old ring contents to the new ring.
- 	 *
--	 * If the ring contents crosses the end of the current ring,
--	 * it needs to be copied in two chunks.
-+	 * To take care of wrapping, a full ring, and the new index
-+	 * pointing into the second half, simply copy the old contents
-+	 * twice.
- 	 *
- 	 * +---------+    +------------------+
--	 * |34567  12| -> |       1234567    |
--	 * +-----p-c-+    +------------------+
-+	 * |34567  12| -> |34567  1234567  12|
-+	 * +-----p-c-+    +-------c------p---+
- 	 */
--	p = evtchn_ring_offset(u, u->ring_prod);
--	c = evtchn_ring_offset(u, u->ring_cons);
--	if (p < c) {
--		memcpy(new_ring + c, u->ring + c, (u->ring_size - c) * sizeof(*u->ring));
--		memcpy(new_ring + u->ring_size, u->ring, p * sizeof(*u->ring));
--	} else
--		memcpy(new_ring + c, u->ring + c, (p - c) * sizeof(*u->ring));
-+	memcpy(new_ring, old_ring, u->ring_size * sizeof(*u->ring));
-+	memcpy(new_ring + u->ring_size, old_ring,
-+	       u->ring_size * sizeof(*u->ring));
- 
- 	u->ring = new_ring;
- 	u->ring_size = new_size;
-diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
-index 769e0ff1b4ce..dea6486a7508 100644
---- a/fs/btrfs/ctree.c
-+++ b/fs/btrfs/ctree.c
-@@ -19,6 +19,7 @@
- #include <linux/sched.h>
- #include <linux/slab.h>
- #include <linux/rbtree.h>
-+#include <linux/vmalloc.h>
- #include "ctree.h"
- #include "disk-io.h"
- #include "transaction.h"
-@@ -5361,10 +5362,13 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
- 		goto out;
- 	}
- 
--	tmp_buf = kmalloc(left_root->nodesize, GFP_NOFS);
-+	tmp_buf = kmalloc(left_root->nodesize, GFP_KERNEL | __GFP_NOWARN);
- 	if (!tmp_buf) {
--		ret = -ENOMEM;
--		goto out;
-+		tmp_buf = vmalloc(left_root->nodesize);
-+		if (!tmp_buf) {
-+			ret = -ENOMEM;
-+			goto out;
-+		}
- 	}
- 
- 	left_path->search_commit_root = 1;
-@@ -5565,7 +5569,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
- out:
- 	btrfs_free_path(left_path);
- 	btrfs_free_path(right_path);
--	kfree(tmp_buf);
-+	kvfree(tmp_buf);
- 	return ret;
- }
- 
-diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
-index bfe4a337fb4d..6661ad8b4088 100644
---- a/fs/btrfs/ctree.h
-+++ b/fs/btrfs/ctree.h
-@@ -2252,7 +2252,7 @@ struct btrfs_ioctl_defrag_range_args {
- #define BTRFS_MOUNT_FREE_SPACE_TREE	(1 << 26)
- 
- #define BTRFS_DEFAULT_COMMIT_INTERVAL	(30)
--#define BTRFS_DEFAULT_MAX_INLINE	(8192)
-+#define BTRFS_DEFAULT_MAX_INLINE	(2048)
- 
- #define btrfs_clear_opt(o, opt)		((o) &= ~BTRFS_MOUNT_##opt)
- #define btrfs_set_opt(o, opt)		((o) |= BTRFS_MOUNT_##opt)
-diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
-index cbb7dbfb3fff..218f51a5dbab 100644
---- a/fs/btrfs/dev-replace.c
-+++ b/fs/btrfs/dev-replace.c
-@@ -394,6 +394,8 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
- 	dev_replace->cursor_right = 0;
- 	dev_replace->is_valid = 1;
- 	dev_replace->item_needs_writeback = 1;
-+	atomic64_set(&dev_replace->num_write_errors, 0);
-+	atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0);
- 	args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
- 	btrfs_dev_replace_unlock(dev_replace);
- 
-diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
-index 4545e2e2ad45..ae6e3e36fdf0 100644
---- a/fs/btrfs/disk-io.c
-+++ b/fs/btrfs/disk-io.c
-@@ -303,7 +303,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
- 		err = map_private_extent_buffer(buf, offset, 32,
- 					&kaddr, &map_start, &map_len);
- 		if (err)
--			return 1;
-+			return err;
- 		cur_len = min(len, map_len - (offset - map_start));
- 		crc = btrfs_csum_data(kaddr + offset - map_start,
- 				      crc, cur_len);
-@@ -313,7 +313,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
- 	if (csum_size > sizeof(inline_result)) {
- 		result = kzalloc(csum_size, GFP_NOFS);
- 		if (!result)
--			return 1;
-+			return -ENOMEM;
- 	} else {
- 		result = (char *)&inline_result;
- 	}
-@@ -334,7 +334,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
- 				val, found, btrfs_header_level(buf));
- 			if (result != (char *)&inline_result)
- 				kfree(result);
--			return 1;
-+			return -EUCLEAN;
- 		}
- 	} else {
- 		write_extent_buffer(buf, result, 0, csum_size);
-@@ -513,11 +513,21 @@ static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
- 	eb = (struct extent_buffer *)page->private;
- 	if (page != eb->pages[0])
- 		return 0;
-+
- 	found_start = btrfs_header_bytenr(eb);
--	if (WARN_ON(found_start != start || !PageUptodate(page)))
--		return 0;
--	csum_tree_block(fs_info, eb, 0);
--	return 0;
-+	/*
-+	 * Please do not consolidate these warnings into a single if.
-+	 * It is useful to know what went wrong.
-+	 */
-+	if (WARN_ON(found_start != start))
-+		return -EUCLEAN;
-+	if (WARN_ON(!PageUptodate(page)))
-+		return -EUCLEAN;
-+
-+	ASSERT(memcmp_extent_buffer(eb, fs_info->fsid,
-+			btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);
-+
-+	return csum_tree_block(fs_info, eb, 0);
- }
- 
- static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
-@@ -660,10 +670,8 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
- 				       eb, found_level);
- 
- 	ret = csum_tree_block(root->fs_info, eb, 1);
--	if (ret) {
--		ret = -EIO;
-+	if (ret)
- 		goto err;
--	}
- 
- 	/*
- 	 * If this is a leaf block and it is corrupt, set the corrupt bit so
-@@ -1830,7 +1838,7 @@ static int cleaner_kthread(void *arg)
- 		 */
- 		btrfs_delete_unused_bgs(root->fs_info);
- sleep:
--		if (!try_to_freeze() && !again) {
-+		if (!again) {
- 			set_current_state(TASK_INTERRUPTIBLE);
- 			if (!kthread_should_stop())
- 				schedule();
-diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
-index 098bb8f690c9..5d956b869e03 100644
---- a/fs/btrfs/file.c
-+++ b/fs/btrfs/file.c
-@@ -1883,7 +1883,7 @@ static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
-  */
- int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
- {
--	struct dentry *dentry = file->f_path.dentry;
-+	struct dentry *dentry = file_dentry(file);
- 	struct inode *inode = d_inode(dentry);
- 	struct btrfs_root *root = BTRFS_I(inode)->root;
- 	struct btrfs_trans_handle *trans;
-@@ -1996,10 +1996,11 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
- 	 */
- 	smp_mb();
- 	if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
--	    (BTRFS_I(inode)->last_trans <=
--	     root->fs_info->last_trans_committed &&
--	     (full_sync ||
--	      !btrfs_have_ordered_extents_in_range(inode, start, len)))) {
-+	    (full_sync && BTRFS_I(inode)->last_trans <=
-+	     root->fs_info->last_trans_committed) ||
-+	    (!btrfs_have_ordered_extents_in_range(inode, start, len) &&
-+	     BTRFS_I(inode)->last_trans
-+	     <= root->fs_info->last_trans_committed)) {
- 		/*
- 		 * We'v had everything committed since the last time we were
- 		 * modified so clear this flag in case it was set for whatever
-diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
-index d96f5cf38a2d..f407e487c687 100644
---- a/fs/btrfs/inode.c
-+++ b/fs/btrfs/inode.c
-@@ -4211,11 +4211,20 @@ static int truncate_space_check(struct btrfs_trans_handle *trans,
- {
- 	int ret;
- 
-+	/*
-+	 * This is only used to apply pressure to the enospc system, we don't
-+	 * intend to use this reservation at all.
-+	 */
- 	bytes_deleted = btrfs_csum_bytes_to_leaves(root, bytes_deleted);
-+	bytes_deleted *= root->nodesize;
- 	ret = btrfs_block_rsv_add(root, &root->fs_info->trans_block_rsv,
- 				  bytes_deleted, BTRFS_RESERVE_NO_FLUSH);
--	if (!ret)
-+	if (!ret) {
-+		trace_btrfs_space_reservation(root->fs_info, "transaction",
-+					      trans->transid,
-+					      bytes_deleted, 1);
- 		trans->bytes_reserved += bytes_deleted;
-+	}
- 	return ret;
- 
- }
-@@ -7414,7 +7423,26 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
- 				     cached_state, GFP_NOFS);
- 
- 		if (ordered) {
--			btrfs_start_ordered_extent(inode, ordered, 1);
-+			/*
-+			 * If we are doing a DIO read and the ordered extent we
-+			 * found is for a buffered write, we can not wait for it
-+			 * to complete and retry, because if we do so we can
-+			 * deadlock with concurrent buffered writes on page
-+			 * locks. This happens only if our DIO read covers more
-+			 * than one extent map, if at this point has already
-+			 * created an ordered extent for a previous extent map
-+			 * and locked its range in the inode's io tree, and a
-+			 * concurrent write against that previous extent map's
-+			 * range and this range started (we unlock the ranges
-+			 * in the io tree only when the bios complete and
-+			 * buffered writes always lock pages before attempting
-+			 * to lock range in the io tree).
-+			 */
-+			if (writing ||
-+			    test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
-+				btrfs_start_ordered_extent(inode, ordered, 1);
-+			else
-+				ret = -ENOTBLK;
- 			btrfs_put_ordered_extent(ordered);
- 		} else {
- 			/*
-@@ -7431,9 +7459,11 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
- 			 * that page.
- 			 */
- 			ret = -ENOTBLK;
--			break;
- 		}
- 
-+		if (ret)
-+			break;
-+
- 		cond_resched();
- 	}
- 
-diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
-index 48aee9846329..e3791f268489 100644
---- a/fs/btrfs/ioctl.c
-+++ b/fs/btrfs/ioctl.c
-@@ -59,6 +59,7 @@
- #include "props.h"
- #include "sysfs.h"
- #include "qgroup.h"
-+#include "tree-log.h"
- 
- #ifdef CONFIG_64BIT
- /* If we have a 32-bit userspace and 64-bit kernel, then the UAPI
-@@ -1656,7 +1657,7 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
- 
- 		src_inode = file_inode(src.file);
- 		if (src_inode->i_sb != file_inode(file)->i_sb) {
--			btrfs_info(BTRFS_I(src_inode)->root->fs_info,
-+			btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
- 				   "Snapshot src from another FS");
- 			ret = -EXDEV;
- 		} else if (!inode_owner_or_capable(src_inode)) {
-@@ -2097,8 +2098,6 @@ static noinline int search_ioctl(struct inode *inode,
- 		key.offset = (u64)-1;
- 		root = btrfs_read_fs_root_no_name(info, &key);
- 		if (IS_ERR(root)) {
--			btrfs_err(info, "could not find root %llu",
--			       sk->tree_id);
- 			btrfs_free_path(path);
- 			return -ENOENT;
- 		}
-@@ -2476,6 +2475,8 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
- 	trans->block_rsv = &block_rsv;
- 	trans->bytes_reserved = block_rsv.size;
- 
-+	btrfs_record_snapshot_destroy(trans, dir);
-+
- 	ret = btrfs_unlink_subvol(trans, root, dir,
- 				dest->root_key.objectid,
- 				dentry->d_name.name,
-@@ -3068,6 +3069,9 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
- 		ret = extent_same_check_offsets(src, loff, &len, olen);
- 		if (ret)
- 			goto out_unlock;
-+		ret = extent_same_check_offsets(src, dst_loff, &len, olen);
-+		if (ret)
-+			goto out_unlock;
- 
- 		/*
- 		 * Single inode case wants the same checks, except we
-diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
-index 5279fdae7142..7173360eea7a 100644
---- a/fs/btrfs/qgroup.c
-+++ b/fs/btrfs/qgroup.c
-@@ -1842,8 +1842,10 @@ out:
- }
- 
- /*
-- * copy the acounting information between qgroups. This is necessary when a
-- * snapshot or a subvolume is created
-+ * Copy the acounting information between qgroups. This is necessary
-+ * when a snapshot or a subvolume is created. Throwing an error will
-+ * cause a transaction abort so we take extra care here to only error
-+ * when a readonly fs is a reasonable outcome.
-  */
- int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
- 			 struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
-@@ -1873,15 +1875,15 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
- 		       2 * inherit->num_excl_copies;
- 		for (i = 0; i < nums; ++i) {
- 			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
--			if (!srcgroup) {
--				ret = -EINVAL;
--				goto out;
--			}
- 
--			if ((srcgroup->qgroupid >> 48) <= (objectid >> 48)) {
--				ret = -EINVAL;
--				goto out;
--			}
-+			/*
-+			 * Zero out invalid groups so we can ignore
-+			 * them later.
-+			 */
-+			if (!srcgroup ||
-+			    ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
-+				*i_qgroups = 0ULL;
-+
- 			++i_qgroups;
- 		}
- 	}
-@@ -1916,17 +1918,19 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
- 	 */
- 	if (inherit) {
- 		i_qgroups = (u64 *)(inherit + 1);
--		for (i = 0; i < inherit->num_qgroups; ++i) {
-+		for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
-+			if (*i_qgroups == 0)
-+				continue;
- 			ret = add_qgroup_relation_item(trans, quota_root,
- 						       objectid, *i_qgroups);
--			if (ret)
-+			if (ret && ret != -EEXIST)
- 				goto out;
- 			ret = add_qgroup_relation_item(trans, quota_root,
- 						       *i_qgroups, objectid);
--			if (ret)
-+			if (ret && ret != -EEXIST)
- 				goto out;
--			++i_qgroups;
- 		}
-+		ret = 0;
- 	}
- 
- 
-@@ -1987,17 +1991,22 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
- 
- 	i_qgroups = (u64 *)(inherit + 1);
- 	for (i = 0; i < inherit->num_qgroups; ++i) {
--		ret = add_relation_rb(quota_root->fs_info, objectid,
--				      *i_qgroups);
--		if (ret)
--			goto unlock;
-+		if (*i_qgroups) {
-+			ret = add_relation_rb(quota_root->fs_info, objectid,
-+					      *i_qgroups);
-+			if (ret)
-+				goto unlock;
-+		}
- 		++i_qgroups;
- 	}
- 
--	for (i = 0; i <  inherit->num_ref_copies; ++i) {
-+	for (i = 0; i <  inherit->num_ref_copies; ++i, i_qgroups += 2) {
- 		struct btrfs_qgroup *src;
- 		struct btrfs_qgroup *dst;
- 
-+		if (!i_qgroups[0] || !i_qgroups[1])
-+			continue;
-+
- 		src = find_qgroup_rb(fs_info, i_qgroups[0]);
- 		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
- 
-@@ -2008,12 +2017,14 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
- 
- 		dst->rfer = src->rfer - level_size;
- 		dst->rfer_cmpr = src->rfer_cmpr - level_size;
--		i_qgroups += 2;
- 	}
--	for (i = 0; i <  inherit->num_excl_copies; ++i) {
-+	for (i = 0; i <  inherit->num_excl_copies; ++i, i_qgroups += 2) {
- 		struct btrfs_qgroup *src;
- 		struct btrfs_qgroup *dst;
- 
-+		if (!i_qgroups[0] || !i_qgroups[1])
-+			continue;
-+
- 		src = find_qgroup_rb(fs_info, i_qgroups[0]);
- 		dst = find_qgroup_rb(fs_info, i_qgroups[1]);
- 
-@@ -2024,7 +2035,6 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
- 
- 		dst->excl = src->excl + level_size;
- 		dst->excl_cmpr = src->excl_cmpr + level_size;
--		i_qgroups += 2;
- 	}
- 
- unlock:
-diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
-index 619f92963e27..49b3fb73ffbf 100644
---- a/fs/btrfs/reada.c
-+++ b/fs/btrfs/reada.c
-@@ -265,7 +265,7 @@ static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
- 	spin_unlock(&fs_info->reada_lock);
- 
- 	if (ret == 1) {
--		if (logical >= zone->start && logical < zone->end)
-+		if (logical >= zone->start && logical <= zone->end)
- 			return zone;
- 		spin_lock(&fs_info->reada_lock);
- 		kref_put(&zone->refcnt, reada_zone_release);
-@@ -679,7 +679,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
- 	 */
- 	ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
- 				     dev->reada_next >> PAGE_CACHE_SHIFT, 1);
--	if (ret == 0 || re->logical >= dev->reada_curr_zone->end) {
-+	if (ret == 0 || re->logical > dev->reada_curr_zone->end) {
- 		ret = reada_pick_zone(dev);
- 		if (!ret) {
- 			spin_unlock(&fs_info->reada_lock);
-diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
-index 2bd0011450df..5c806f0d443d 100644
---- a/fs/btrfs/relocation.c
-+++ b/fs/btrfs/relocation.c
-@@ -1850,6 +1850,7 @@ again:
- 			eb = read_tree_block(dest, old_bytenr, old_ptr_gen);
- 			if (IS_ERR(eb)) {
- 				ret = PTR_ERR(eb);
-+				break;
- 			} else if (!extent_buffer_uptodate(eb)) {
- 				ret = -EIO;
- 				free_extent_buffer(eb);
-diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
-index 0e1e61a7ec23..d39f714dabeb 100644
---- a/fs/btrfs/tests/btrfs-tests.c
-+++ b/fs/btrfs/tests/btrfs-tests.c
-@@ -189,12 +189,6 @@ btrfs_alloc_dummy_block_group(unsigned long length)
- 		kfree(cache);
- 		return NULL;
- 	}
--	cache->fs_info = btrfs_alloc_dummy_fs_info();
--	if (!cache->fs_info) {
--		kfree(cache->free_space_ctl);
--		kfree(cache);
--		return NULL;
--	}
- 
- 	cache->key.objectid = 0;
- 	cache->key.offset = length;
-diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c
-index d05fe1ab4808..7cea4462acd5 100644
---- a/fs/btrfs/tests/free-space-tree-tests.c
-+++ b/fs/btrfs/tests/free-space-tree-tests.c
-@@ -485,6 +485,7 @@ static int run_test(test_func_t test_func, int bitmaps)
- 	cache->bitmap_low_thresh = 0;
- 	cache->bitmap_high_thresh = (u32)-1;
- 	cache->needs_free_space = 1;
-+	cache->fs_info = root->fs_info;
- 
- 	btrfs_init_dummy_trans(&trans);
- 
-diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
-index 978c3a810893..58ae0a2ce65c 100644
---- a/fs/btrfs/tree-log.c
-+++ b/fs/btrfs/tree-log.c
-@@ -4414,6 +4414,127 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
- 	return ret;
- }
- 
-+/*
-+ * When we are logging a new inode X, check if it doesn't have a reference that
-+ * matches the reference from some other inode Y created in a past transaction
-+ * and that was renamed in the current transaction. If we don't do this, then at
-+ * log replay time we can lose inode Y (and all its files if it's a directory):
-+ *
-+ * mkdir /mnt/x
-+ * echo "hello world" > /mnt/x/foobar
-+ * sync
-+ * mv /mnt/x /mnt/y
-+ * mkdir /mnt/x                 # or touch /mnt/x
-+ * xfs_io -c fsync /mnt/x
-+ * <power fail>
-+ * mount fs, trigger log replay
-+ *
-+ * After the log replay procedure, we would lose the first directory and all its
-+ * files (file foobar).
-+ * For the case where inode Y is not a directory we simply end up losing it:
-+ *
-+ * echo "123" > /mnt/foo
-+ * sync
-+ * mv /mnt/foo /mnt/bar
-+ * echo "abc" > /mnt/foo
-+ * xfs_io -c fsync /mnt/foo
-+ * <power fail>
-+ *
-+ * We also need this for cases where a snapshot entry is replaced by some other
-+ * entry (file or directory) otherwise we end up with an unreplayable log due to
-+ * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
-+ * if it were a regular entry:
-+ *
-+ * mkdir /mnt/x
-+ * btrfs subvolume snapshot /mnt /mnt/x/snap
-+ * btrfs subvolume delete /mnt/x/snap
-+ * rmdir /mnt/x
-+ * mkdir /mnt/x
-+ * fsync /mnt/x or fsync some new file inside it
-+ * <power fail>
-+ *
-+ * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
-+ * the same transaction.
-+ */
-+static int btrfs_check_ref_name_override(struct extent_buffer *eb,
-+					 const int slot,
-+					 const struct btrfs_key *key,
-+					 struct inode *inode)
-+{
-+	int ret;
-+	struct btrfs_path *search_path;
-+	char *name = NULL;
-+	u32 name_len = 0;
-+	u32 item_size = btrfs_item_size_nr(eb, slot);
-+	u32 cur_offset = 0;
-+	unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
-+
-+	search_path = btrfs_alloc_path();
-+	if (!search_path)
-+		return -ENOMEM;
-+	search_path->search_commit_root = 1;
-+	search_path->skip_locking = 1;
-+
-+	while (cur_offset < item_size) {
-+		u64 parent;
-+		u32 this_name_len;
-+		u32 this_len;
-+		unsigned long name_ptr;
-+		struct btrfs_dir_item *di;
-+
-+		if (key->type == BTRFS_INODE_REF_KEY) {
-+			struct btrfs_inode_ref *iref;
-+
-+			iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
-+			parent = key->offset;
-+			this_name_len = btrfs_inode_ref_name_len(eb, iref);
-+			name_ptr = (unsigned long)(iref + 1);
-+			this_len = sizeof(*iref) + this_name_len;
-+		} else {
-+			struct btrfs_inode_extref *extref;
-+
-+			extref = (struct btrfs_inode_extref *)(ptr +
-+							       cur_offset);
-+			parent = btrfs_inode_extref_parent(eb, extref);
-+			this_name_len = btrfs_inode_extref_name_len(eb, extref);
-+			name_ptr = (unsigned long)&extref->name;
-+			this_len = sizeof(*extref) + this_name_len;
-+		}
-+
-+		if (this_name_len > name_len) {
-+			char *new_name;
-+
-+			new_name = krealloc(name, this_name_len, GFP_NOFS);
-+			if (!new_name) {
-+				ret = -ENOMEM;
-+				goto out;
-+			}
-+			name_len = this_name_len;
-+			name = new_name;
-+		}
-+
-+		read_extent_buffer(eb, name, name_ptr, this_name_len);
-+		di = btrfs_lookup_dir_item(NULL, BTRFS_I(inode)->root,
-+					   search_path, parent,
-+					   name, this_name_len, 0);
-+		if (di && !IS_ERR(di)) {
-+			ret = 1;
-+			goto out;
-+		} else if (IS_ERR(di)) {
-+			ret = PTR_ERR(di);
-+			goto out;
-+		}
-+		btrfs_release_path(search_path);
-+
-+		cur_offset += this_len;
-+	}
-+	ret = 0;
-+out:
-+	btrfs_free_path(search_path);
-+	kfree(name);
-+	return ret;
-+}
-+
- /* log a single inode in the tree log.
-  * At least one parent directory for this inode must exist in the tree
-  * or be logged already.
-@@ -4500,7 +4621,22 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
- 
- 	mutex_lock(&BTRFS_I(inode)->log_mutex);
- 
--	btrfs_get_logged_extents(inode, &logged_list, start, end);
-+	/*
-+	 * Collect ordered extents only if we are logging data. This is to
-+	 * ensure a subsequent request to log this inode in LOG_INODE_ALL mode
-+	 * will process the ordered extents if they still exists at the time,
-+	 * because when we collect them we test and set for the flag
-+	 * BTRFS_ORDERED_LOGGED to prevent multiple log requests to process the
-+	 * same ordered extents. The consequence for the LOG_INODE_ALL log mode
-+	 * not processing the ordered extents is that we end up logging the
-+	 * corresponding file extent items, based on the extent maps in the
-+	 * inode's extent_map_tree's modified_list, without logging the
-+	 * respective checksums (since the may still be only attached to the
-+	 * ordered extents and have not been inserted in the csum tree by
-+	 * btrfs_finish_ordered_io() yet).
-+	 */
-+	if (inode_only == LOG_INODE_ALL)
-+		btrfs_get_logged_extents(inode, &logged_list, start, end);
- 
- 	/*
- 	 * a brute force approach to making sure we get the most uptodate
-@@ -4586,6 +4722,22 @@ again:
- 		if (min_key.type == BTRFS_INODE_ITEM_KEY)
- 			need_log_inode_item = false;
- 
-+		if ((min_key.type == BTRFS_INODE_REF_KEY ||
-+		     min_key.type == BTRFS_INODE_EXTREF_KEY) &&
-+		    BTRFS_I(inode)->generation == trans->transid) {
-+			ret = btrfs_check_ref_name_override(path->nodes[0],
-+							    path->slots[0],
-+							    &min_key, inode);
-+			if (ret < 0) {
-+				err = ret;
-+				goto out_unlock;
-+			} else if (ret > 0) {
-+				err = 1;
-+				btrfs_set_log_full_commit(root->fs_info, trans);
-+				goto out_unlock;
-+			}
-+		}
-+
- 		/* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
- 		if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
- 			if (ins_nr == 0)
-@@ -4772,6 +4924,42 @@ out_unlock:
- }
- 
- /*
-+ * Check if we must fallback to a transaction commit when logging an inode.
-+ * This must be called after logging the inode and is used only in the context
-+ * when fsyncing an inode requires the need to log some other inode - in which
-+ * case we can't lock the i_mutex of each other inode we need to log as that
-+ * can lead to deadlocks with concurrent fsync against other inodes (as we can
-+ * log inodes up or down in the hierarchy) or rename operations for example. So
-+ * we take the log_mutex of the inode after we have logged it and then check for
-+ * its last_unlink_trans value - this is safe because any task setting
-+ * last_unlink_trans must take the log_mutex and it must do this before it does
-+ * the actual unlink operation, so if we do this check before a concurrent task
-+ * sets last_unlink_trans it means we've logged a consistent version/state of
-+ * all the inode items, otherwise we are not sure and must do a transaction
-+ * commit (the concurrent task migth have only updated last_unlink_trans before
-+ * we logged the inode or it might have also done the unlink).
-+ */
-+static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
-+					  struct inode *inode)
-+{
-+	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
-+	bool ret = false;
-+
-+	mutex_lock(&BTRFS_I(inode)->log_mutex);
-+	if (BTRFS_I(inode)->last_unlink_trans > fs_info->last_trans_committed) {
-+		/*
-+		 * Make sure any commits to the log are forced to be full
-+		 * commits.
-+		 */
-+		btrfs_set_log_full_commit(fs_info, trans);
-+		ret = true;
-+	}
-+	mutex_unlock(&BTRFS_I(inode)->log_mutex);
-+
-+	return ret;
-+}
-+
-+/*
-  * follow the dentry parent pointers up the chain and see if any
-  * of the directories in it require a full commit before they can
-  * be logged.  Returns zero if nothing special needs to be done or 1 if
-@@ -4784,7 +4972,6 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
- 					       u64 last_committed)
- {
- 	int ret = 0;
--	struct btrfs_root *root;
- 	struct dentry *old_parent = NULL;
- 	struct inode *orig_inode = inode;
- 
-@@ -4816,14 +5003,7 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
- 			BTRFS_I(inode)->logged_trans = trans->transid;
- 		smp_mb();
- 
--		if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
--			root = BTRFS_I(inode)->root;
--
--			/*
--			 * make sure any commits to the log are forced
--			 * to be full commits
--			 */
--			btrfs_set_log_full_commit(root->fs_info, trans);
-+		if (btrfs_must_commit_transaction(trans, inode)) {
- 			ret = 1;
- 			break;
- 		}
-@@ -4982,6 +5162,9 @@ process_leaf:
- 			btrfs_release_path(path);
- 			ret = btrfs_log_inode(trans, root, di_inode,
- 					      log_mode, 0, LLONG_MAX, ctx);
-+			if (!ret &&
-+			    btrfs_must_commit_transaction(trans, di_inode))
-+				ret = 1;
- 			iput(di_inode);
- 			if (ret)
- 				goto next_dir_inode;
-@@ -5096,6 +5279,9 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
- 
- 			ret = btrfs_log_inode(trans, root, dir_inode,
- 					      LOG_INODE_ALL, 0, LLONG_MAX, ctx);
-+			if (!ret &&
-+			    btrfs_must_commit_transaction(trans, dir_inode))
-+				ret = 1;
- 			iput(dir_inode);
- 			if (ret)
- 				goto out;
-@@ -5447,6 +5633,9 @@ error:
-  * They revolve around files there were unlinked from the directory, and
-  * this function updates the parent directory so that a full commit is
-  * properly done if it is fsync'd later after the unlinks are done.
-+ *
-+ * Must be called before the unlink operations (updates to the subvolume tree,
-+ * inodes, etc) are done.
-  */
- void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
- 			     struct inode *dir, struct inode *inode,
-@@ -5462,8 +5651,11 @@ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
- 	 * into the file.  When the file is logged we check it and
- 	 * don't log the parents if the file is fully on disk.
- 	 */
--	if (S_ISREG(inode->i_mode))
-+	if (S_ISREG(inode->i_mode)) {
-+		mutex_lock(&BTRFS_I(inode)->log_mutex);
- 		BTRFS_I(inode)->last_unlink_trans = trans->transid;
-+		mutex_unlock(&BTRFS_I(inode)->log_mutex);
-+	}
- 
- 	/*
- 	 * if this directory was already logged any new
-@@ -5494,7 +5686,29 @@ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
- 	return;
- 
- record:
-+	mutex_lock(&BTRFS_I(dir)->log_mutex);
- 	BTRFS_I(dir)->last_unlink_trans = trans->transid;
-+	mutex_unlock(&BTRFS_I(dir)->log_mutex);
-+}
-+
-+/*
-+ * Make sure that if someone attempts to fsync the parent directory of a deleted
-+ * snapshot, it ends up triggering a transaction commit. This is to guarantee
-+ * that after replaying the log tree of the parent directory's root we will not
-+ * see the snapshot anymore and at log replay time we will not see any log tree
-+ * corresponding to the deleted snapshot's root, which could lead to replaying
-+ * it after replaying the log tree of the parent directory (which would replay
-+ * the snapshot delete operation).
-+ *
-+ * Must be called before the actual snapshot destroy operation (updates to the
-+ * parent root and tree of tree roots trees, etc) are done.
-+ */
-+void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
-+				   struct inode *dir)
-+{
-+	mutex_lock(&BTRFS_I(dir)->log_mutex);
-+	BTRFS_I(dir)->last_unlink_trans = trans->transid;
-+	mutex_unlock(&BTRFS_I(dir)->log_mutex);
- }
- 
- /*
-diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
-index 6916a781ea02..a9f1b75d080d 100644
---- a/fs/btrfs/tree-log.h
-+++ b/fs/btrfs/tree-log.h
-@@ -79,6 +79,8 @@ int btrfs_pin_log_trans(struct btrfs_root *root);
- void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
- 			     struct inode *dir, struct inode *inode,
- 			     int for_rename);
-+void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
-+				   struct inode *dir);
- int btrfs_log_new_name(struct btrfs_trans_handle *trans,
- 			struct inode *inode, struct inode *old_dir,
- 			struct dentry *parent);
-diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
-index 59727e32ed0f..af0ec2d5ad0e 100644
---- a/fs/cifs/sess.c
-+++ b/fs/cifs/sess.c
-@@ -400,19 +400,27 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
- 	sec_blob->LmChallengeResponse.MaximumLength = 0;
- 
- 	sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
--	rc = setup_ntlmv2_rsp(ses, nls_cp);
--	if (rc) {
--		cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
--		goto setup_ntlmv2_ret;
-+	if (ses->user_name != NULL) {
-+		rc = setup_ntlmv2_rsp(ses, nls_cp);
-+		if (rc) {
-+			cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
-+			goto setup_ntlmv2_ret;
-+		}
-+		memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
-+				ses->auth_key.len - CIFS_SESS_KEY_SIZE);
-+		tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
-+
-+		sec_blob->NtChallengeResponse.Length =
-+				cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
-+		sec_blob->NtChallengeResponse.MaximumLength =
-+				cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
-+	} else {
-+		/*
-+		 * don't send an NT Response for anonymous access
-+		 */
-+		sec_blob->NtChallengeResponse.Length = 0;
-+		sec_blob->NtChallengeResponse.MaximumLength = 0;
- 	}
--	memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
--			ses->auth_key.len - CIFS_SESS_KEY_SIZE);
--	tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
--
--	sec_blob->NtChallengeResponse.Length =
--			cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
--	sec_blob->NtChallengeResponse.MaximumLength =
--			cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
- 
- 	if (ses->domainName == NULL) {
- 		sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
-@@ -670,20 +678,24 @@ sess_auth_lanman(struct sess_data *sess_data)
- 
- 	pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE;
- 
--	/* no capabilities flags in old lanman negotiation */
--	pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
--
--	/* Calculate hash with password and copy into bcc_ptr.
--	 * Encryption Key (stored as in cryptkey) gets used if the
--	 * security mode bit in Negottiate Protocol response states
--	 * to use challenge/response method (i.e. Password bit is 1).
--	 */
--	rc = calc_lanman_hash(ses->password, ses->server->cryptkey,
--			      ses->server->sec_mode & SECMODE_PW_ENCRYPT ?
--			      true : false, lnm_session_key);
--
--	memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE);
--	bcc_ptr += CIFS_AUTH_RESP_SIZE;
-+	if (ses->user_name != NULL) {
-+		/* no capabilities flags in old lanman negotiation */
-+		pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
-+
-+		/* Calculate hash with password and copy into bcc_ptr.
-+		 * Encryption Key (stored as in cryptkey) gets used if the
-+		 * security mode bit in Negottiate Protocol response states
-+		 * to use challenge/response method (i.e. Password bit is 1).
-+		 */
-+		rc = calc_lanman_hash(ses->password, ses->server->cryptkey,
-+				      ses->server->sec_mode & SECMODE_PW_ENCRYPT ?
-+				      true : false, lnm_session_key);
-+
-+		memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE);
-+		bcc_ptr += CIFS_AUTH_RESP_SIZE;
-+	} else {
-+		pSMB->old_req.PasswordLength = 0;
-+	}
- 
- 	/*
- 	 * can not sign if LANMAN negotiated so no need
-@@ -769,26 +781,31 @@ sess_auth_ntlm(struct sess_data *sess_data)
- 	capabilities = cifs_ssetup_hdr(ses, pSMB);
- 
- 	pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
--	pSMB->req_no_secext.CaseInsensitivePasswordLength =
--			cpu_to_le16(CIFS_AUTH_RESP_SIZE);
--	pSMB->req_no_secext.CaseSensitivePasswordLength =
--			cpu_to_le16(CIFS_AUTH_RESP_SIZE);
--
--	/* calculate ntlm response and session key */
--	rc = setup_ntlm_response(ses, sess_data->nls_cp);
--	if (rc) {
--		cifs_dbg(VFS, "Error %d during NTLM authentication\n",
--				 rc);
--		goto out;
--	}
-+	if (ses->user_name != NULL) {
-+		pSMB->req_no_secext.CaseInsensitivePasswordLength =
-+				cpu_to_le16(CIFS_AUTH_RESP_SIZE);
-+		pSMB->req_no_secext.CaseSensitivePasswordLength =
-+				cpu_to_le16(CIFS_AUTH_RESP_SIZE);
-+
-+		/* calculate ntlm response and session key */
-+		rc = setup_ntlm_response(ses, sess_data->nls_cp);
-+		if (rc) {
-+			cifs_dbg(VFS, "Error %d during NTLM authentication\n",
-+					 rc);
-+			goto out;
-+		}
- 
--	/* copy ntlm response */
--	memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
--			CIFS_AUTH_RESP_SIZE);
--	bcc_ptr += CIFS_AUTH_RESP_SIZE;
--	memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
--			CIFS_AUTH_RESP_SIZE);
--	bcc_ptr += CIFS_AUTH_RESP_SIZE;
-+		/* copy ntlm response */
-+		memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
-+				CIFS_AUTH_RESP_SIZE);
-+		bcc_ptr += CIFS_AUTH_RESP_SIZE;
-+		memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
-+				CIFS_AUTH_RESP_SIZE);
-+		bcc_ptr += CIFS_AUTH_RESP_SIZE;
-+	} else {
-+		pSMB->req_no_secext.CaseInsensitivePasswordLength = 0;
-+		pSMB->req_no_secext.CaseSensitivePasswordLength = 0;
-+	}
- 
- 	if (ses->capabilities & CAP_UNICODE) {
- 		/* unicode strings must be word aligned */
-@@ -878,22 +895,26 @@ sess_auth_ntlmv2(struct sess_data *sess_data)
- 	/* LM2 password would be here if we supported it */
- 	pSMB->req_no_secext.CaseInsensitivePasswordLength = 0;
- 
--	/* calculate nlmv2 response and session key */
--	rc = setup_ntlmv2_rsp(ses, sess_data->nls_cp);
--	if (rc) {
--		cifs_dbg(VFS, "Error %d during NTLMv2 authentication\n", rc);
--		goto out;
--	}
-+	if (ses->user_name != NULL) {
-+		/* calculate nlmv2 response and session key */
-+		rc = setup_ntlmv2_rsp(ses, sess_data->nls_cp);
-+		if (rc) {
-+			cifs_dbg(VFS, "Error %d during NTLMv2 authentication\n", rc);
-+			goto out;
-+		}
- 
--	memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
--			ses->auth_key.len - CIFS_SESS_KEY_SIZE);
--	bcc_ptr += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
-+		memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
-+				ses->auth_key.len - CIFS_SESS_KEY_SIZE);
-+		bcc_ptr += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
- 
--	/* set case sensitive password length after tilen may get
--	 * assigned, tilen is 0 otherwise.
--	 */
--	pSMB->req_no_secext.CaseSensitivePasswordLength =
--		cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
-+		/* set case sensitive password length after tilen may get
-+		 * assigned, tilen is 0 otherwise.
-+		 */
-+		pSMB->req_no_secext.CaseSensitivePasswordLength =
-+			cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
-+	} else {
-+		pSMB->req_no_secext.CaseSensitivePasswordLength = 0;
-+	}
- 
- 	if (ses->capabilities & CAP_UNICODE) {
- 		if (sess_data->iov[0].iov_len % 2) {
-diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
-index bc0bb9c34f72..0ffa18094335 100644
---- a/fs/cifs/smb2glob.h
-+++ b/fs/cifs/smb2glob.h
-@@ -44,6 +44,7 @@
- #define SMB2_OP_DELETE 7
- #define SMB2_OP_HARDLINK 8
- #define SMB2_OP_SET_EOF 9
-+#define SMB2_OP_RMDIR 10
- 
- /* Used when constructing chained read requests. */
- #define CHAINED_REQUEST 1
-diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
-index 899bbc86f73e..4f0231e685a9 100644
---- a/fs/cifs/smb2inode.c
-+++ b/fs/cifs/smb2inode.c
-@@ -80,6 +80,10 @@ smb2_open_op_close(const unsigned int xid, struct cifs_tcon *tcon,
- 		 * SMB2_open() call.
- 		 */
- 		break;
-+	case SMB2_OP_RMDIR:
-+		tmprc = SMB2_rmdir(xid, tcon, fid.persistent_fid,
-+				   fid.volatile_fid);
-+		break;
- 	case SMB2_OP_RENAME:
- 		tmprc = SMB2_rename(xid, tcon, fid.persistent_fid,
- 				    fid.volatile_fid, (__le16 *)data);
-@@ -191,8 +195,8 @@ smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
- 	   struct cifs_sb_info *cifs_sb)
- {
- 	return smb2_open_op_close(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
--				  CREATE_NOT_FILE | CREATE_DELETE_ON_CLOSE,
--				  NULL, SMB2_OP_DELETE);
-+				  CREATE_NOT_FILE,
-+				  NULL, SMB2_OP_RMDIR);
- }
- 
- int
-diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
-index 42e1f440eb1e..8f38e33d365b 100644
---- a/fs/cifs/smb2pdu.c
-+++ b/fs/cifs/smb2pdu.c
-@@ -2575,6 +2575,22 @@ SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
- }
- 
- int
-+SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon,
-+		  u64 persistent_fid, u64 volatile_fid)
-+{
-+	__u8 delete_pending = 1;
-+	void *data;
-+	unsigned int size;
-+
-+	data = &delete_pending;
-+	size = 1; /* sizeof __u8 */
-+
-+	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
-+			current->tgid, FILE_DISPOSITION_INFORMATION, 1, &data,
-+			&size);
-+}
-+
-+int
- SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
- 		  u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
- {
-diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
-index 4f07dc93608d..eb2cde2f64ba 100644
---- a/fs/cifs/smb2proto.h
-+++ b/fs/cifs/smb2proto.h
-@@ -141,6 +141,8 @@ extern int SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
- extern int SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
- 		       u64 persistent_fid, u64 volatile_fid,
- 		       __le16 *target_file);
-+extern int SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon,
-+		      u64 persistent_fid, u64 volatile_fid);
- extern int SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
- 			     u64 persistent_fid, u64 volatile_fid,
- 			     __le16 *target_file);
-diff --git a/fs/coredump.c b/fs/coredump.c
-index 9ea87e9fdccf..47c32c3bfa1d 100644
---- a/fs/coredump.c
-+++ b/fs/coredump.c
-@@ -32,6 +32,9 @@
- #include <linux/pipe_fs_i.h>
- #include <linux/oom.h>
- #include <linux/compat.h>
-+#include <linux/sched.h>
-+#include <linux/fs.h>
-+#include <linux/path.h>
- #include <linux/timekeeping.h>
- 
- #include <asm/uaccess.h>
-@@ -649,6 +652,8 @@ void do_coredump(const siginfo_t *siginfo)
- 		}
- 	} else {
- 		struct inode *inode;
-+		int open_flags = O_CREAT | O_RDWR | O_NOFOLLOW |
-+				 O_LARGEFILE | O_EXCL;
- 
- 		if (cprm.limit < binfmt->min_coredump)
- 			goto fail_unlock;
-@@ -687,10 +692,27 @@ void do_coredump(const siginfo_t *siginfo)
- 		 * what matters is that at least one of the two processes
- 		 * writes its coredump successfully, not which one.
- 		 */
--		cprm.file = filp_open(cn.corename,
--				 O_CREAT | 2 | O_NOFOLLOW |
--				 O_LARGEFILE | O_EXCL,
--				 0600);
-+		if (need_suid_safe) {
-+			/*
-+			 * Using user namespaces, normal user tasks can change
-+			 * their current->fs->root to point to arbitrary
-+			 * directories. Since the intention of the "only dump
-+			 * with a fully qualified path" rule is to control where
-+			 * coredumps may be placed using root privileges,
-+			 * current->fs->root must not be used. Instead, use the
-+			 * root directory of init_task.
-+			 */
-+			struct path root;
-+
-+			task_lock(&init_task);
-+			get_fs_root(init_task.fs, &root);
-+			task_unlock(&init_task);
-+			cprm.file = file_open_root(root.dentry, root.mnt,
-+				cn.corename, open_flags, 0600);
-+			path_put(&root);
-+		} else {
-+			cprm.file = filp_open(cn.corename, open_flags, 0600);
-+		}
- 		if (IS_ERR(cprm.file))
- 			goto fail_unlock;
- 
-diff --git a/fs/dcache.c b/fs/dcache.c
-index 2398f9f94337..7566b2689609 100644
---- a/fs/dcache.c
-+++ b/fs/dcache.c
-@@ -1667,7 +1667,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
- 				DCACHE_OP_REVALIDATE	|
- 				DCACHE_OP_WEAK_REVALIDATE	|
- 				DCACHE_OP_DELETE	|
--				DCACHE_OP_SELECT_INODE));
-+				DCACHE_OP_SELECT_INODE	|
-+				DCACHE_OP_REAL));
- 	dentry->d_op = op;
- 	if (!op)
- 		return;
-@@ -1685,6 +1686,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
- 		dentry->d_flags |= DCACHE_OP_PRUNE;
- 	if (op->d_select_inode)
- 		dentry->d_flags |= DCACHE_OP_SELECT_INODE;
-+	if (op->d_real)
-+		dentry->d_flags |= DCACHE_OP_REAL;
- 
- }
- EXPORT_SYMBOL(d_set_d_op);
-diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
-index bece948b363d..8580831ed237 100644
---- a/fs/debugfs/inode.c
-+++ b/fs/debugfs/inode.c
-@@ -457,7 +457,7 @@ struct dentry *debugfs_create_automount(const char *name,
- 	if (unlikely(!inode))
- 		return failed_creating(dentry);
- 
--	inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
-+	make_empty_dir_inode(inode);
- 	inode->i_flags |= S_AUTOMOUNT;
- 	inode->i_private = data;
- 	dentry->d_fsdata = (void *)f;
-diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
-index 38f7562489bb..25634c353191 100644
---- a/fs/ext4/crypto.c
-+++ b/fs/ext4/crypto.c
-@@ -34,6 +34,7 @@
- #include <linux/random.h>
- #include <linux/scatterlist.h>
- #include <linux/spinlock_types.h>
-+#include <linux/namei.h>
- 
- #include "ext4_extents.h"
- #include "xattr.h"
-@@ -475,13 +476,19 @@ uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
-  */
- static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
- {
--	struct inode *dir = d_inode(dentry->d_parent);
--	struct ext4_crypt_info *ci = EXT4_I(dir)->i_crypt_info;
-+	struct dentry *dir;
-+	struct ext4_crypt_info *ci;
- 	int dir_has_key, cached_with_key;
- 
--	if (!ext4_encrypted_inode(dir))
--		return 0;
-+	if (flags & LOOKUP_RCU)
-+		return -ECHILD;
- 
-+	dir = dget_parent(dentry);
-+	if (!ext4_encrypted_inode(d_inode(dir))) {
-+		dput(dir);
-+		return 0;
-+	}
-+	ci = EXT4_I(d_inode(dir))->i_crypt_info;
- 	if (ci && ci->ci_keyring_key &&
- 	    (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
- 					  (1 << KEY_FLAG_REVOKED) |
-@@ -491,6 +498,7 @@ static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
- 	/* this should eventually be an flag in d_flags */
- 	cached_with_key = dentry->d_fsdata != NULL;
- 	dir_has_key = (ci != NULL);
-+	dput(dir);
- 
- 	/*
- 	 * If the dentry was cached without the key, and it is a
-diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
-index 157b458a69d4..b213449a5d1b 100644
---- a/fs/ext4/ext4.h
-+++ b/fs/ext4/ext4.h
-@@ -900,6 +900,29 @@ do {									       \
- #include "extents_status.h"
- 
- /*
-+ * Lock subclasses for i_data_sem in the ext4_inode_info structure.
-+ *
-+ * These are needed to avoid lockdep false positives when we need to
-+ * allocate blocks to the quota inode during ext4_map_blocks(), while
-+ * holding i_data_sem for a normal (non-quota) inode.  Since we don't
-+ * do quota tracking for the quota inode, this avoids deadlock (as
-+ * well as infinite recursion, since it isn't turtles all the way
-+ * down...)
-+ *
-+ *  I_DATA_SEM_NORMAL - Used for most inodes
-+ *  I_DATA_SEM_OTHER  - Used by move_inode.c for the second normal inode
-+ *			  where the second inode has larger inode number
-+ *			  than the first
-+ *  I_DATA_SEM_QUOTA  - Used for quota inodes only
-+ */
-+enum {
-+	I_DATA_SEM_NORMAL = 0,
-+	I_DATA_SEM_OTHER,
-+	I_DATA_SEM_QUOTA,
-+};
-+
-+
-+/*
-  * fourth extended file system inode data in memory
-  */
- struct ext4_inode_info {
-diff --git a/fs/ext4/file.c b/fs/ext4/file.c
-index 4cd318f31cbe..38847f38b34a 100644
---- a/fs/ext4/file.c
-+++ b/fs/ext4/file.c
-@@ -335,7 +335,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
- 	struct super_block *sb = inode->i_sb;
- 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- 	struct vfsmount *mnt = filp->f_path.mnt;
--	struct inode *dir = filp->f_path.dentry->d_parent->d_inode;
-+	struct dentry *dir;
- 	struct path path;
- 	char buf[64], *cp;
- 	int ret;
-@@ -379,14 +379,18 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
- 		if (ext4_encryption_info(inode) == NULL)
- 			return -ENOKEY;
- 	}
--	if (ext4_encrypted_inode(dir) &&
--	    !ext4_is_child_context_consistent_with_parent(dir, inode)) {
-+
-+	dir = dget_parent(file_dentry(filp));
-+	if (ext4_encrypted_inode(d_inode(dir)) &&
-+	    !ext4_is_child_context_consistent_with_parent(d_inode(dir), inode)) {
- 		ext4_warning(inode->i_sb,
- 			     "Inconsistent encryption contexts: %lu/%lu\n",
--			     (unsigned long) dir->i_ino,
-+			     (unsigned long) d_inode(dir)->i_ino,
- 			     (unsigned long) inode->i_ino);
-+		dput(dir);
- 		return -EPERM;
- 	}
-+	dput(dir);
- 	/*
- 	 * Set up the jbd2_inode if we are opening the inode for
- 	 * writing and the journal is present
-diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
-index aee960b1af34..e6218cbc8332 100644
---- a/fs/ext4/inode.c
-+++ b/fs/ext4/inode.c
-@@ -5261,6 +5261,8 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
- 	might_sleep();
- 	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
- 	err = ext4_reserve_inode_write(handle, inode, &iloc);
-+	if (err)
-+		return err;
- 	if (ext4_handle_valid(handle) &&
- 	    EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
- 	    !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
-@@ -5291,9 +5293,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
- 			}
- 		}
- 	}
--	if (!err)
--		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
--	return err;
-+	return ext4_mark_iloc_dirty(handle, inode, &iloc);
- }
- 
- /*
-diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
-index 4098acc701c3..796ff0eafd3c 100644
---- a/fs/ext4/move_extent.c
-+++ b/fs/ext4/move_extent.c
-@@ -60,10 +60,10 @@ ext4_double_down_write_data_sem(struct inode *first, struct inode *second)
- {
- 	if (first < second) {
- 		down_write(&EXT4_I(first)->i_data_sem);
--		down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING);
-+		down_write_nested(&EXT4_I(second)->i_data_sem, I_DATA_SEM_OTHER);
- 	} else {
- 		down_write(&EXT4_I(second)->i_data_sem);
--		down_write_nested(&EXT4_I(first)->i_data_sem, SINGLE_DEPTH_NESTING);
-+		down_write_nested(&EXT4_I(first)->i_data_sem, I_DATA_SEM_OTHER);
- 
- 	}
- }
-@@ -484,6 +484,13 @@ mext_check_arguments(struct inode *orig_inode,
- 		return -EBUSY;
- 	}
- 
-+	if (IS_NOQUOTA(orig_inode) || IS_NOQUOTA(donor_inode)) {
-+		ext4_debug("ext4 move extent: The argument files should "
-+			"not be quota files [ino:orig %lu, donor %lu]\n",
-+			orig_inode->i_ino, donor_inode->i_ino);
-+		return -EBUSY;
-+	}
-+
- 	/* Ext4 move extent supports only extent based file */
- 	if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
- 		ext4_debug("ext4 move extent: orig file is not extents "
-diff --git a/fs/ext4/super.c b/fs/ext4/super.c
-index 3ed01ec011d7..a76ca677fd1a 100644
---- a/fs/ext4/super.c
-+++ b/fs/ext4/super.c
-@@ -1324,9 +1324,9 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
- 		return -1;
- 	}
- 	if (ext4_has_feature_quota(sb)) {
--		ext4_msg(sb, KERN_ERR, "Cannot set journaled quota options "
--			 "when QUOTA feature is enabled");
--		return -1;
-+		ext4_msg(sb, KERN_INFO, "Journaled quota options "
-+			 "ignored when QUOTA feature is enabled");
-+		return 1;
- 	}
- 	qname = match_strdup(args);
- 	if (!qname) {
-@@ -1689,10 +1689,10 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
- 			return -1;
- 		}
- 		if (ext4_has_feature_quota(sb)) {
--			ext4_msg(sb, KERN_ERR,
--				 "Cannot set journaled quota options "
-+			ext4_msg(sb, KERN_INFO,
-+				 "Quota format mount options ignored "
- 				 "when QUOTA feature is enabled");
--			return -1;
-+			return 1;
- 		}
- 		sbi->s_jquota_fmt = m->mount_opt;
- #endif
-@@ -1753,11 +1753,11 @@ static int parse_options(char *options, struct super_block *sb,
- #ifdef CONFIG_QUOTA
- 	if (ext4_has_feature_quota(sb) &&
- 	    (test_opt(sb, USRQUOTA) || test_opt(sb, GRPQUOTA))) {
--		ext4_msg(sb, KERN_ERR, "Cannot set quota options when QUOTA "
--			 "feature is enabled");
--		return 0;
--	}
--	if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
-+		ext4_msg(sb, KERN_INFO, "Quota feature enabled, usrquota and grpquota "
-+			 "mount options ignored.");
-+		clear_opt(sb, USRQUOTA);
-+		clear_opt(sb, GRPQUOTA);
-+	} else if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
- 		if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
- 			clear_opt(sb, USRQUOTA);
- 
-@@ -5021,6 +5021,20 @@ static int ext4_quota_on_mount(struct super_block *sb, int type)
- 					EXT4_SB(sb)->s_jquota_fmt, type);
- }
- 
-+static void lockdep_set_quota_inode(struct inode *inode, int subclass)
-+{
-+	struct ext4_inode_info *ei = EXT4_I(inode);
-+
-+	/* The first argument of lockdep_set_subclass has to be
-+	 * *exactly* the same as the argument to init_rwsem() --- in
-+	 * this case, in init_once() --- or lockdep gets unhappy
-+	 * because the name of the lock is set using the
-+	 * stringification of the argument to init_rwsem().
-+	 */
-+	(void) ei;	/* shut up clang warning if !CONFIG_LOCKDEP */
-+	lockdep_set_subclass(&ei->i_data_sem, subclass);
-+}
-+
- /*
-  * Standard function to be called on quota_on
-  */
-@@ -5060,8 +5074,12 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
- 		if (err)
- 			return err;
- 	}
--
--	return dquot_quota_on(sb, type, format_id, path);
-+	lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
-+	err = dquot_quota_on(sb, type, format_id, path);
-+	if (err)
-+		lockdep_set_quota_inode(path->dentry->d_inode,
-+					     I_DATA_SEM_NORMAL);
-+	return err;
- }
- 
- static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
-@@ -5088,8 +5106,11 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
- 
- 	/* Don't account quota for quota files to avoid recursion */
- 	qf_inode->i_flags |= S_NOQUOTA;
-+	lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
- 	err = dquot_enable(qf_inode, type, format_id, flags);
- 	iput(qf_inode);
-+	if (err)
-+		lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
- 
- 	return err;
- }
-diff --git a/fs/f2fs/crypto_policy.c b/fs/f2fs/crypto_policy.c
-index d4a96af513c2..596f02490f27 100644
---- a/fs/f2fs/crypto_policy.c
-+++ b/fs/f2fs/crypto_policy.c
-@@ -192,7 +192,8 @@ int f2fs_inherit_context(struct inode *parent, struct inode *child,
- 		return res;
- 
- 	ci = F2FS_I(parent)->i_crypt_info;
--	BUG_ON(ci == NULL);
-+	if (ci == NULL)
-+		return -ENOKEY;
- 
- 	ctx.format = F2FS_ENCRYPTION_CONTEXT_FORMAT_V1;
- 
-diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
-index 5c06db17e41f..44802599fa67 100644
---- a/fs/f2fs/data.c
-+++ b/fs/f2fs/data.c
-@@ -67,7 +67,6 @@ static void f2fs_write_end_io(struct bio *bio)
- 		f2fs_restore_and_release_control_page(&page);
- 
- 		if (unlikely(bio->bi_error)) {
--			set_page_dirty(page);
- 			set_bit(AS_EIO, &page->mapping->flags);
- 			f2fs_stop_checkpoint(sbi);
- 		}
-@@ -504,7 +503,7 @@ static int __allocate_data_blocks(struct inode *inode, loff_t offset,
- 	struct dnode_of_data dn;
- 	u64 start = F2FS_BYTES_TO_BLK(offset);
- 	u64 len = F2FS_BYTES_TO_BLK(count);
--	bool allocated;
-+	bool allocated = false;
- 	u64 end_offset;
- 	int err = 0;
- 
-@@ -546,7 +545,7 @@ static int __allocate_data_blocks(struct inode *inode, loff_t offset,
- 		f2fs_put_dnode(&dn);
- 		f2fs_unlock_op(sbi);
- 
--		f2fs_balance_fs(sbi, dn.node_changed);
-+		f2fs_balance_fs(sbi, allocated);
- 	}
- 	return err;
- 
-@@ -556,7 +555,7 @@ sync_out:
- 	f2fs_put_dnode(&dn);
- out:
- 	f2fs_unlock_op(sbi);
--	f2fs_balance_fs(sbi, dn.node_changed);
-+	f2fs_balance_fs(sbi, allocated);
- 	return err;
- }
- 
-@@ -650,14 +649,14 @@ get_next:
- 	if (dn.ofs_in_node >= end_offset) {
- 		if (allocated)
- 			sync_inode_page(&dn);
--		allocated = false;
- 		f2fs_put_dnode(&dn);
- 
- 		if (create) {
- 			f2fs_unlock_op(sbi);
--			f2fs_balance_fs(sbi, dn.node_changed);
-+			f2fs_balance_fs(sbi, allocated);
- 			f2fs_lock_op(sbi);
- 		}
-+		allocated = false;
- 
- 		set_new_dnode(&dn, inode, NULL, NULL, 0);
- 		err = get_dnode_of_data(&dn, pgofs, mode);
-@@ -715,7 +714,7 @@ put_out:
- unlock_out:
- 	if (create) {
- 		f2fs_unlock_op(sbi);
--		f2fs_balance_fs(sbi, dn.node_changed);
-+		f2fs_balance_fs(sbi, allocated);
- 	}
- out:
- 	trace_f2fs_map_blocks(inode, map, err);
-diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
-index faa7495e2d7e..30e6b6563494 100644
---- a/fs/f2fs/dir.c
-+++ b/fs/f2fs/dir.c
-@@ -892,11 +892,19 @@ out:
- 	return err;
- }
- 
-+static int f2fs_dir_open(struct inode *inode, struct file *filp)
-+{
-+	if (f2fs_encrypted_inode(inode))
-+		return f2fs_get_encryption_info(inode) ? -EACCES : 0;
-+	return 0;
-+}
-+
- const struct file_operations f2fs_dir_operations = {
- 	.llseek		= generic_file_llseek,
- 	.read		= generic_read_dir,
- 	.iterate	= f2fs_readdir,
- 	.fsync		= f2fs_sync_file,
-+	.open		= f2fs_dir_open,
- 	.unlocked_ioctl	= f2fs_ioctl,
- #ifdef CONFIG_COMPAT
- 	.compat_ioctl   = f2fs_compat_ioctl,
-diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
-index ea272be62677..5a322bc00ac4 100644
---- a/fs/f2fs/file.c
-+++ b/fs/f2fs/file.c
-@@ -425,6 +425,8 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
- 		err = f2fs_get_encryption_info(inode);
- 		if (err)
- 			return 0;
-+		if (!f2fs_encrypted_inode(inode))
-+			return -ENOKEY;
- 	}
- 
- 	/* we don't need to use inline_data strictly */
-@@ -444,7 +446,9 @@ static int f2fs_file_open(struct inode *inode, struct file *filp)
- 	if (!ret && f2fs_encrypted_inode(inode)) {
- 		ret = f2fs_get_encryption_info(inode);
- 		if (ret)
--			ret = -EACCES;
-+			return -EACCES;
-+		if (!f2fs_encrypted_inode(inode))
-+			return -ENOKEY;
- 	}
- 	return ret;
- }
-diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
-index 6f944e5eb76e..7e9e38769660 100644
---- a/fs/f2fs/namei.c
-+++ b/fs/f2fs/namei.c
-@@ -980,12 +980,6 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
- 	}
- 	memcpy(cstr.name, sd->encrypted_path, cstr.len);
- 
--	/* this is broken symlink case */
--	if (unlikely(cstr.name[0] == 0)) {
--		res = -ENOENT;
--		goto errout;
--	}
--
- 	if ((cstr.len + sizeof(struct f2fs_encrypted_symlink_data) - 1) >
- 								max_size) {
- 		/* Symlink data on the disk is corrupted */
-@@ -1002,6 +996,12 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
- 
- 	kfree(cstr.name);
- 
-+	/* this is broken symlink case */
-+	if (unlikely(pstr.name[0] == 0)) {
-+		res = -ENOENT;
-+		goto errout;
-+	}
-+
- 	paddr = pstr.name;
- 
- 	/* Null-terminate the name */
-diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
-index 6134832baaaf..013a62b2f8ca 100644
---- a/fs/f2fs/super.c
-+++ b/fs/f2fs/super.c
-@@ -926,9 +926,25 @@ static loff_t max_file_blocks(void)
- 	return result;
- }
- 
-+static int __f2fs_commit_super(struct buffer_head *bh,
-+			struct f2fs_super_block *super)
-+{
-+	lock_buffer(bh);
-+	if (super)
-+		memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
-+	set_buffer_uptodate(bh);
-+	set_buffer_dirty(bh);
-+	unlock_buffer(bh);
-+
-+	/* it's rare case, we can do fua all the time */
-+	return __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
-+}
-+
- static inline bool sanity_check_area_boundary(struct super_block *sb,
--					struct f2fs_super_block *raw_super)
-+					struct buffer_head *bh)
- {
-+	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
-+					(bh->b_data + F2FS_SUPER_OFFSET);
- 	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
- 	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
- 	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
-@@ -942,6 +958,10 @@ static inline bool sanity_check_area_boundary(struct super_block *sb,
- 	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
- 	u32 segment_count = le32_to_cpu(raw_super->segment_count);
- 	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
-+	u64 main_end_blkaddr = main_blkaddr +
-+				(segment_count_main << log_blocks_per_seg);
-+	u64 seg_end_blkaddr = segment0_blkaddr +
-+				(segment_count << log_blocks_per_seg);
- 
- 	if (segment0_blkaddr != cp_blkaddr) {
- 		f2fs_msg(sb, KERN_INFO,
-@@ -986,22 +1006,45 @@ static inline bool sanity_check_area_boundary(struct super_block *sb,
- 		return true;
- 	}
- 
--	if (main_blkaddr + (segment_count_main << log_blocks_per_seg) !=
--		segment0_blkaddr + (segment_count << log_blocks_per_seg)) {
-+	if (main_end_blkaddr > seg_end_blkaddr) {
- 		f2fs_msg(sb, KERN_INFO,
--			"Wrong MAIN_AREA boundary, start(%u) end(%u) blocks(%u)",
-+			"Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
- 			main_blkaddr,
--			segment0_blkaddr + (segment_count << log_blocks_per_seg),
-+			segment0_blkaddr +
-+				(segment_count << log_blocks_per_seg),
- 			segment_count_main << log_blocks_per_seg);
- 		return true;
-+	} else if (main_end_blkaddr < seg_end_blkaddr) {
-+		int err = 0;
-+		char *res;
-+
-+		/* fix in-memory information all the time */
-+		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
-+				segment0_blkaddr) >> log_blocks_per_seg);
-+
-+		if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
-+			res = "internally";
-+		} else {
-+			err = __f2fs_commit_super(bh, NULL);
-+			res = err ? "failed" : "done";
-+		}
-+		f2fs_msg(sb, KERN_INFO,
-+			"Fix alignment : %s, start(%u) end(%u) block(%u)",
-+			res, main_blkaddr,
-+			segment0_blkaddr +
-+				(segment_count << log_blocks_per_seg),
-+			segment_count_main << log_blocks_per_seg);
-+		if (err)
-+			return true;
- 	}
--
- 	return false;
- }
- 
- static int sanity_check_raw_super(struct super_block *sb,
--			struct f2fs_super_block *raw_super)
-+				struct buffer_head *bh)
- {
-+	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
-+					(bh->b_data + F2FS_SUPER_OFFSET);
- 	unsigned int blocksize;
- 
- 	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
-@@ -1068,7 +1111,7 @@ static int sanity_check_raw_super(struct super_block *sb,
- 	}
- 
- 	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
--	if (sanity_check_area_boundary(sb, raw_super))
-+	if (sanity_check_area_boundary(sb, bh))
- 		return 1;
- 
- 	return 0;
-@@ -1134,103 +1177,87 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
- 
- /*
-  * Read f2fs raw super block.
-- * Because we have two copies of super block, so read the first one at first,
-- * if the first one is invalid, move to read the second one.
-+ * Because we have two copies of super block, so read both of them
-+ * to get the first valid one. If any one of them is broken, we pass
-+ * them recovery flag back to the caller.
-  */
- static int read_raw_super_block(struct super_block *sb,
- 			struct f2fs_super_block **raw_super,
- 			int *valid_super_block, int *recovery)
- {
--	int block = 0;
-+	int block;
- 	struct buffer_head *bh;
--	struct f2fs_super_block *super, *buf;
-+	struct f2fs_super_block *super;
- 	int err = 0;
- 
- 	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
- 	if (!super)
- 		return -ENOMEM;
--retry:
--	bh = sb_bread(sb, block);
--	if (!bh) {
--		*recovery = 1;
--		f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
-+
-+	for (block = 0; block < 2; block++) {
-+		bh = sb_bread(sb, block);
-+		if (!bh) {
-+			f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
- 				block + 1);
--		err = -EIO;
--		goto next;
--	}
-+			err = -EIO;
-+			continue;
-+		}
- 
--	buf = (struct f2fs_super_block *)(bh->b_data + F2FS_SUPER_OFFSET);
-+		/* sanity checking of raw super */
-+		if (sanity_check_raw_super(sb, bh)) {
-+			f2fs_msg(sb, KERN_ERR,
-+				"Can't find valid F2FS filesystem in %dth superblock",
-+				block + 1);
-+			err = -EINVAL;
-+			brelse(bh);
-+			continue;
-+		}
- 
--	/* sanity checking of raw super */
--	if (sanity_check_raw_super(sb, buf)) {
-+		if (!*raw_super) {
-+			memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
-+							sizeof(*super));
-+			*valid_super_block = block;
-+			*raw_super = super;
-+		}
- 		brelse(bh);
--		*recovery = 1;
--		f2fs_msg(sb, KERN_ERR,
--			"Can't find valid F2FS filesystem in %dth superblock",
--								block + 1);
--		err = -EINVAL;
--		goto next;
- 	}
- 
--	if (!*raw_super) {
--		memcpy(super, buf, sizeof(*super));
--		*valid_super_block = block;
--		*raw_super = super;
--	}
--	brelse(bh);
--
--next:
--	/* check the validity of the second superblock */
--	if (block == 0) {
--		block++;
--		goto retry;
--	}
-+	/* Fail to read any one of the superblocks*/
-+	if (err < 0)
-+		*recovery = 1;
- 
- 	/* No valid superblock */
--	if (!*raw_super) {
-+	if (!*raw_super)
- 		kfree(super);
--		return err;
--	}
-+	else
-+		err = 0;
- 
--	return 0;
-+	return err;
- }
- 
--static int __f2fs_commit_super(struct f2fs_sb_info *sbi, int block)
-+int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
- {
--	struct f2fs_super_block *super = F2FS_RAW_SUPER(sbi);
- 	struct buffer_head *bh;
- 	int err;
- 
--	bh = sb_getblk(sbi->sb, block);
-+	/* write back-up superblock first */
-+	bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0: 1);
- 	if (!bh)
- 		return -EIO;
--
--	lock_buffer(bh);
--	memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
--	set_buffer_uptodate(bh);
--	set_buffer_dirty(bh);
--	unlock_buffer(bh);
--
--	/* it's rare case, we can do fua all the time */
--	err = __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
-+	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
- 	brelse(bh);
- 
--	return err;
--}
--
--int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
--{
--	int err;
--
--	/* write back-up superblock first */
--	err = __f2fs_commit_super(sbi, sbi->valid_super_block ? 0 : 1);
--
- 	/* if we are in recovery path, skip writing valid superblock */
- 	if (recover || err)
- 		return err;
- 
- 	/* write current valid superblock */
--	return __f2fs_commit_super(sbi, sbi->valid_super_block);
-+	bh = sb_getblk(sbi->sb, sbi->valid_super_block);
-+	if (!bh)
-+		return -EIO;
-+	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
-+	brelse(bh);
-+	return err;
- }
- 
- static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
-diff --git a/fs/fhandle.c b/fs/fhandle.c
-index d59712dfa3e7..ca3c3dd01789 100644
---- a/fs/fhandle.c
-+++ b/fs/fhandle.c
-@@ -228,7 +228,7 @@ long do_handle_open(int mountdirfd,
- 		path_put(&path);
- 		return fd;
- 	}
--	file = file_open_root(path.dentry, path.mnt, "", open_flag);
-+	file = file_open_root(path.dentry, path.mnt, "", open_flag, 0);
- 	if (IS_ERR(file)) {
- 		put_unused_fd(fd);
- 		retval =  PTR_ERR(file);
-diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
-index 5c46ed9f3e14..fee81e8768c9 100644
---- a/fs/fs-writeback.c
-+++ b/fs/fs-writeback.c
-@@ -281,13 +281,15 @@ locked_inode_to_wb_and_lock_list(struct inode *inode)
- 		wb_get(wb);
- 		spin_unlock(&inode->i_lock);
- 		spin_lock(&wb->list_lock);
--		wb_put(wb);		/* not gonna deref it anymore */
- 
- 		/* i_wb may have changed inbetween, can't use inode_to_wb() */
--		if (likely(wb == inode->i_wb))
--			return wb;	/* @inode already has ref */
-+		if (likely(wb == inode->i_wb)) {
-+			wb_put(wb);	/* @inode already has ref */
-+			return wb;
-+		}
- 
- 		spin_unlock(&wb->list_lock);
-+		wb_put(wb);
- 		cpu_relax();
- 		spin_lock(&inode->i_lock);
- 	}
-@@ -1337,10 +1339,10 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
-  * we go e.g. from filesystem. Flusher thread uses __writeback_single_inode()
-  * and does more profound writeback list handling in writeback_sb_inodes().
-  */
--static int
--writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
--		       struct writeback_control *wbc)
-+static int writeback_single_inode(struct inode *inode,
-+				  struct writeback_control *wbc)
- {
-+	struct bdi_writeback *wb;
- 	int ret = 0;
- 
- 	spin_lock(&inode->i_lock);
-@@ -1378,7 +1380,8 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
- 	ret = __writeback_single_inode(inode, wbc);
- 
- 	wbc_detach_inode(wbc);
--	spin_lock(&wb->list_lock);
-+
-+	wb = inode_to_wb_and_lock_list(inode);
- 	spin_lock(&inode->i_lock);
- 	/*
- 	 * If inode is clean, remove it from writeback lists. Otherwise don't
-@@ -1453,6 +1456,7 @@ static long writeback_sb_inodes(struct super_block *sb,
- 
- 	while (!list_empty(&wb->b_io)) {
- 		struct inode *inode = wb_inode(wb->b_io.prev);
-+		struct bdi_writeback *tmp_wb;
- 
- 		if (inode->i_sb != sb) {
- 			if (work->sb) {
-@@ -1543,15 +1547,23 @@ static long writeback_sb_inodes(struct super_block *sb,
- 			cond_resched();
- 		}
- 
--
--		spin_lock(&wb->list_lock);
-+		/*
-+		 * Requeue @inode if still dirty.  Be careful as @inode may
-+		 * have been switched to another wb in the meantime.
-+		 */
-+		tmp_wb = inode_to_wb_and_lock_list(inode);
- 		spin_lock(&inode->i_lock);
- 		if (!(inode->i_state & I_DIRTY_ALL))
- 			wrote++;
--		requeue_inode(inode, wb, &wbc);
-+		requeue_inode(inode, tmp_wb, &wbc);
- 		inode_sync_complete(inode);
- 		spin_unlock(&inode->i_lock);
- 
-+		if (unlikely(tmp_wb != wb)) {
-+			spin_unlock(&tmp_wb->list_lock);
-+			spin_lock(&wb->list_lock);
-+		}
-+
- 		/*
- 		 * bail out to wb_writeback() often enough to check
- 		 * background threshold and other termination conditions.
-@@ -2338,7 +2350,6 @@ EXPORT_SYMBOL(sync_inodes_sb);
-  */
- int write_inode_now(struct inode *inode, int sync)
- {
--	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
- 	struct writeback_control wbc = {
- 		.nr_to_write = LONG_MAX,
- 		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
-@@ -2350,7 +2361,7 @@ int write_inode_now(struct inode *inode, int sync)
- 		wbc.nr_to_write = 0;
- 
- 	might_sleep();
--	return writeback_single_inode(inode, wb, &wbc);
-+	return writeback_single_inode(inode, &wbc);
- }
- EXPORT_SYMBOL(write_inode_now);
- 
-@@ -2367,7 +2378,7 @@ EXPORT_SYMBOL(write_inode_now);
-  */
- int sync_inode(struct inode *inode, struct writeback_control *wbc)
- {
--	return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
-+	return writeback_single_inode(inode, wbc);
- }
- EXPORT_SYMBOL(sync_inode);
- 
-diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
-index 8e3ee1936c7e..c5b6b7165489 100644
---- a/fs/fuse/cuse.c
-+++ b/fs/fuse/cuse.c
-@@ -90,7 +90,7 @@ static struct list_head *cuse_conntbl_head(dev_t devt)
- 
- static ssize_t cuse_read_iter(struct kiocb *kiocb, struct iov_iter *to)
- {
--	struct fuse_io_priv io = { .async = 0, .file = kiocb->ki_filp };
-+	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(kiocb->ki_filp);
- 	loff_t pos = 0;
- 
- 	return fuse_direct_io(&io, to, &pos, FUSE_DIO_CUSE);
-@@ -98,7 +98,7 @@ static ssize_t cuse_read_iter(struct kiocb *kiocb, struct iov_iter *to)
- 
- static ssize_t cuse_write_iter(struct kiocb *kiocb, struct iov_iter *from)
- {
--	struct fuse_io_priv io = { .async = 0, .file = kiocb->ki_filp };
-+	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(kiocb->ki_filp);
- 	loff_t pos = 0;
- 	/*
- 	 * No locking or generic_write_checks(), the server is
-diff --git a/fs/fuse/file.c b/fs/fuse/file.c
-index b03d253ece15..416108b42412 100644
---- a/fs/fuse/file.c
-+++ b/fs/fuse/file.c
-@@ -528,6 +528,11 @@ static void fuse_release_user_pages(struct fuse_req *req, int write)
- 	}
- }
- 
-+static void fuse_io_release(struct kref *kref)
-+{
-+	kfree(container_of(kref, struct fuse_io_priv, refcnt));
-+}
-+
- static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
- {
- 	if (io->err)
-@@ -585,8 +590,9 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
- 		}
- 
- 		io->iocb->ki_complete(io->iocb, res, 0);
--		kfree(io);
- 	}
-+
-+	kref_put(&io->refcnt, fuse_io_release);
- }
- 
- static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
-@@ -613,6 +619,7 @@ static size_t fuse_async_req_send(struct fuse_conn *fc, struct fuse_req *req,
- 		size_t num_bytes, struct fuse_io_priv *io)
- {
- 	spin_lock(&io->lock);
-+	kref_get(&io->refcnt);
- 	io->size += num_bytes;
- 	io->reqs++;
- 	spin_unlock(&io->lock);
-@@ -691,7 +698,7 @@ static void fuse_short_read(struct fuse_req *req, struct inode *inode,
- 
- static int fuse_do_readpage(struct file *file, struct page *page)
- {
--	struct fuse_io_priv io = { .async = 0, .file = file };
-+	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
- 	struct inode *inode = page->mapping->host;
- 	struct fuse_conn *fc = get_fuse_conn(inode);
- 	struct fuse_req *req;
-@@ -984,7 +991,7 @@ static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
- 	size_t res;
- 	unsigned offset;
- 	unsigned i;
--	struct fuse_io_priv io = { .async = 0, .file = file };
-+	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
- 
- 	for (i = 0; i < req->num_pages; i++)
- 		fuse_wait_on_page_writeback(inode, req->pages[i]->index);
-@@ -1398,7 +1405,7 @@ static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
- 
- static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
- {
--	struct fuse_io_priv io = { .async = 0, .file = iocb->ki_filp };
-+	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb->ki_filp);
- 	return __fuse_direct_read(&io, to, &iocb->ki_pos);
- }
- 
-@@ -1406,7 +1413,7 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
- {
- 	struct file *file = iocb->ki_filp;
- 	struct inode *inode = file_inode(file);
--	struct fuse_io_priv io = { .async = 0, .file = file };
-+	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
- 	ssize_t res;
- 
- 	if (is_bad_inode(inode))
-@@ -2843,6 +2850,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
- 	loff_t i_size;
- 	size_t count = iov_iter_count(iter);
- 	struct fuse_io_priv *io;
-+	bool is_sync = is_sync_kiocb(iocb);
- 
- 	pos = offset;
- 	inode = file->f_mapping->host;
-@@ -2863,6 +2871,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
- 	if (!io)
- 		return -ENOMEM;
- 	spin_lock_init(&io->lock);
-+	kref_init(&io->refcnt);
- 	io->reqs = 1;
- 	io->bytes = -1;
- 	io->size = 0;
-@@ -2882,12 +2891,18 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
- 	 * to wait on real async I/O requests, so we must submit this request
- 	 * synchronously.
- 	 */
--	if (!is_sync_kiocb(iocb) && (offset + count > i_size) &&
-+	if (!is_sync && (offset + count > i_size) &&
- 	    iov_iter_rw(iter) == WRITE)
- 		io->async = false;
- 
--	if (io->async && is_sync_kiocb(iocb))
-+	if (io->async && is_sync) {
-+		/*
-+		 * Additional reference to keep io around after
-+		 * calling fuse_aio_complete()
-+		 */
-+		kref_get(&io->refcnt);
- 		io->done = &wait;
-+	}
- 
- 	if (iov_iter_rw(iter) == WRITE) {
- 		ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
-@@ -2900,14 +2915,14 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
- 		fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
- 
- 		/* we have a non-extending, async request, so return */
--		if (!is_sync_kiocb(iocb))
-+		if (!is_sync)
- 			return -EIOCBQUEUED;
- 
- 		wait_for_completion(&wait);
- 		ret = fuse_get_res_by_io(io);
- 	}
- 
--	kfree(io);
-+	kref_put(&io->refcnt, fuse_io_release);
- 
- 	if (iov_iter_rw(iter) == WRITE) {
- 		if (ret > 0)
-diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
-index ce394b5fe6b4..eddbe02c4028 100644
---- a/fs/fuse/fuse_i.h
-+++ b/fs/fuse/fuse_i.h
-@@ -22,6 +22,7 @@
- #include <linux/rbtree.h>
- #include <linux/poll.h>
- #include <linux/workqueue.h>
-+#include <linux/kref.h>
- 
- /** Max number of pages that can be used in a single read request */
- #define FUSE_MAX_PAGES_PER_REQ 32
-@@ -243,6 +244,7 @@ struct fuse_args {
- 
- /** The request IO state (for asynchronous processing) */
- struct fuse_io_priv {
-+	struct kref refcnt;
- 	int async;
- 	spinlock_t lock;
- 	unsigned reqs;
-@@ -256,6 +258,13 @@ struct fuse_io_priv {
- 	struct completion *done;
- };
- 
-+#define FUSE_IO_PRIV_SYNC(f) \
-+{					\
-+	.refcnt = { ATOMIC_INIT(1) },	\
-+	.async = 0,			\
-+	.file = f,			\
-+}
-+
- /**
-  * Request flags
-  *
-diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
-index 458cf463047b..82067ca22f2b 100644
---- a/fs/hpfs/super.c
-+++ b/fs/hpfs/super.c
-@@ -15,6 +15,7 @@
- #include <linux/sched.h>
- #include <linux/bitmap.h>
- #include <linux/slab.h>
-+#include <linux/seq_file.h>
- 
- /* Mark the filesystem dirty, so that chkdsk checks it when os/2 booted */
- 
-@@ -453,10 +454,6 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
- 	int lowercase, eas, chk, errs, chkdsk, timeshift;
- 	int o;
- 	struct hpfs_sb_info *sbi = hpfs_sb(s);
--	char *new_opts = kstrdup(data, GFP_KERNEL);
--
--	if (!new_opts)
--		return -ENOMEM;
- 
- 	sync_filesystem(s);
- 
-@@ -493,17 +490,44 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
- 
- 	if (!(*flags & MS_RDONLY)) mark_dirty(s, 1);
- 
--	replace_mount_options(s, new_opts);
--
- 	hpfs_unlock(s);
- 	return 0;
- 
- out_err:
- 	hpfs_unlock(s);
--	kfree(new_opts);
- 	return -EINVAL;
- }
- 
-+static int hpfs_show_options(struct seq_file *seq, struct dentry *root)
-+{
-+	struct hpfs_sb_info *sbi = hpfs_sb(root->d_sb);
-+
-+	seq_printf(seq, ",uid=%u", from_kuid_munged(&init_user_ns, sbi->sb_uid));
-+	seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, sbi->sb_gid));
-+	seq_printf(seq, ",umask=%03o", (~sbi->sb_mode & 0777));
-+	if (sbi->sb_lowercase)
-+		seq_printf(seq, ",case=lower");
-+	if (!sbi->sb_chk)
-+		seq_printf(seq, ",check=none");
-+	if (sbi->sb_chk == 2)
-+		seq_printf(seq, ",check=strict");
-+	if (!sbi->sb_err)
-+		seq_printf(seq, ",errors=continue");
-+	if (sbi->sb_err == 2)
-+		seq_printf(seq, ",errors=panic");
-+	if (!sbi->sb_chkdsk)
-+		seq_printf(seq, ",chkdsk=no");
-+	if (sbi->sb_chkdsk == 2)
-+		seq_printf(seq, ",chkdsk=always");
-+	if (!sbi->sb_eas)
-+		seq_printf(seq, ",eas=no");
-+	if (sbi->sb_eas == 1)
-+		seq_printf(seq, ",eas=ro");
-+	if (sbi->sb_timeshift)
-+		seq_printf(seq, ",timeshift=%d", sbi->sb_timeshift);
-+	return 0;
-+}
-+
- /* Super operations */
- 
- static const struct super_operations hpfs_sops =
-@@ -514,7 +538,7 @@ static const struct super_operations hpfs_sops =
- 	.put_super	= hpfs_put_super,
- 	.statfs		= hpfs_statfs,
- 	.remount_fs	= hpfs_remount_fs,
--	.show_options	= generic_show_options,
-+	.show_options	= hpfs_show_options,
- };
- 
- static int hpfs_fill_super(struct super_block *s, void *options, int silent)
-@@ -537,8 +561,6 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
- 
- 	int o;
- 
--	save_mount_options(s, options);
--
- 	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
- 	if (!sbi) {
- 		return -ENOMEM;
-diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
-index 5384ceb35b1c..98b3eb7d8eaf 100644
---- a/fs/isofs/rock.c
-+++ b/fs/isofs/rock.c
-@@ -203,6 +203,8 @@ int get_rock_ridge_filename(struct iso_directory_record *de,
- 	int retnamlen = 0;
- 	int truncate = 0;
- 	int ret = 0;
-+	char *p;
-+	int len;
- 
- 	if (!ISOFS_SB(inode->i_sb)->s_rock)
- 		return 0;
-@@ -267,12 +269,17 @@ repeat:
- 					rr->u.NM.flags);
- 				break;
- 			}
--			if ((strlen(retname) + rr->len - 5) >= 254) {
-+			len = rr->len - 5;
-+			if (retnamlen + len >= 254) {
- 				truncate = 1;
- 				break;
- 			}
--			strncat(retname, rr->u.NM.name, rr->len - 5);
--			retnamlen += rr->len - 5;
-+			p = memchr(rr->u.NM.name, '\0', len);
-+			if (unlikely(p))
-+				len = p - rr->u.NM.name;
-+			memcpy(retname + retnamlen, rr->u.NM.name, len);
-+			retnamlen += len;
-+			retname[retnamlen] = '\0';
- 			break;
- 		case SIG('R', 'E'):
- 			kfree(rs.buffer);
-diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
-index 81e622681c82..624a57a9c4aa 100644
---- a/fs/jbd2/journal.c
-+++ b/fs/jbd2/journal.c
-@@ -1408,11 +1408,12 @@ out:
- /**
-  * jbd2_mark_journal_empty() - Mark on disk journal as empty.
-  * @journal: The journal to update.
-+ * @write_op: With which operation should we write the journal sb
-  *
-  * Update a journal's dynamic superblock fields to show that journal is empty.
-  * Write updated superblock to disk waiting for IO to complete.
-  */
--static void jbd2_mark_journal_empty(journal_t *journal)
-+static void jbd2_mark_journal_empty(journal_t *journal, int write_op)
- {
- 	journal_superblock_t *sb = journal->j_superblock;
- 
-@@ -1430,7 +1431,7 @@ static void jbd2_mark_journal_empty(journal_t *journal)
- 	sb->s_start    = cpu_to_be32(0);
- 	read_unlock(&journal->j_state_lock);
- 
--	jbd2_write_superblock(journal, WRITE_FUA);
-+	jbd2_write_superblock(journal, write_op);
- 
- 	/* Log is no longer empty */
- 	write_lock(&journal->j_state_lock);
-@@ -1716,7 +1717,13 @@ int jbd2_journal_destroy(journal_t *journal)
- 	if (journal->j_sb_buffer) {
- 		if (!is_journal_aborted(journal)) {
- 			mutex_lock(&journal->j_checkpoint_mutex);
--			jbd2_mark_journal_empty(journal);
-+
-+			write_lock(&journal->j_state_lock);
-+			journal->j_tail_sequence =
-+				++journal->j_transaction_sequence;
-+			write_unlock(&journal->j_state_lock);
-+
-+			jbd2_mark_journal_empty(journal, WRITE_FLUSH_FUA);
- 			mutex_unlock(&journal->j_checkpoint_mutex);
- 		} else
- 			err = -EIO;
-@@ -1975,7 +1982,7 @@ int jbd2_journal_flush(journal_t *journal)
- 	 * the magic code for a fully-recovered superblock.  Any future
- 	 * commits of data to the journal will restore the current
- 	 * s_start value. */
--	jbd2_mark_journal_empty(journal);
-+	jbd2_mark_journal_empty(journal, WRITE_FUA);
- 	mutex_unlock(&journal->j_checkpoint_mutex);
- 	write_lock(&journal->j_state_lock);
- 	J_ASSERT(!journal->j_running_transaction);
-@@ -2021,7 +2028,7 @@ int jbd2_journal_wipe(journal_t *journal, int write)
- 	if (write) {
- 		/* Lock to make assertions happy... */
- 		mutex_lock(&journal->j_checkpoint_mutex);
--		jbd2_mark_journal_empty(journal);
-+		jbd2_mark_journal_empty(journal, WRITE_FUA);
- 		mutex_unlock(&journal->j_checkpoint_mutex);
- 	}
- 
-diff --git a/fs/namei.c b/fs/namei.c
-index 9c590e0f66e9..7824bfb89ada 100644
---- a/fs/namei.c
-+++ b/fs/namei.c
-@@ -2968,22 +2968,10 @@ no_open:
- 		dentry = lookup_real(dir, dentry, nd->flags);
- 		if (IS_ERR(dentry))
- 			return PTR_ERR(dentry);
--
--		if (create_error) {
--			int open_flag = op->open_flag;
--
--			error = create_error;
--			if ((open_flag & O_EXCL)) {
--				if (!dentry->d_inode)
--					goto out;
--			} else if (!dentry->d_inode) {
--				goto out;
--			} else if ((open_flag & O_TRUNC) &&
--				   d_is_reg(dentry)) {
--				goto out;
--			}
--			/* will fail later, go on to get the right error */
--		}
-+	}
-+	if (create_error && !dentry->d_inode) {
-+		error = create_error;
-+		goto out;
- 	}
- looked_up:
- 	path->dentry = dentry;
-@@ -4258,7 +4246,11 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
- 	bool new_is_dir = false;
- 	unsigned max_links = new_dir->i_sb->s_max_links;
- 
--	if (source == target)
-+	/*
-+	 * Check source == target.
-+	 * On overlayfs need to look at underlying inodes.
-+	 */
-+	if (vfs_select_inode(old_dentry, 0) == vfs_select_inode(new_dentry, 0))
- 		return 0;
- 
- 	error = may_delete(old_dir, old_dentry, is_dir);
-diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
-index 9cce67043f92..7ded17764754 100644
---- a/fs/nfs/dir.c
-+++ b/fs/nfs/dir.c
-@@ -377,7 +377,7 @@ int nfs_readdir_xdr_filler(struct page **pages, nfs_readdir_descriptor_t *desc,
-  again:
- 	timestamp = jiffies;
- 	gencount = nfs_inc_attr_generation_counter();
--	error = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred, entry->cookie, pages,
-+	error = NFS_PROTO(inode)->readdir(file_dentry(file), cred, entry->cookie, pages,
- 					  NFS_SERVER(inode)->dtsize, desc->plus);
- 	if (error < 0) {
- 		/* We requested READDIRPLUS, but the server doesn't grok it */
-@@ -560,7 +560,7 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
- 		count++;
- 
- 		if (desc->plus != 0)
--			nfs_prime_dcache(desc->file->f_path.dentry, entry);
-+			nfs_prime_dcache(file_dentry(desc->file), entry);
- 
- 		status = nfs_readdir_add_to_array(entry, page);
- 		if (status != 0)
-@@ -864,7 +864,7 @@ static bool nfs_dir_mapping_need_revalidate(struct inode *dir)
-  */
- static int nfs_readdir(struct file *file, struct dir_context *ctx)
- {
--	struct dentry	*dentry = file->f_path.dentry;
-+	struct dentry	*dentry = file_dentry(file);
- 	struct inode	*inode = d_inode(dentry);
- 	nfs_readdir_descriptor_t my_desc,
- 			*desc = &my_desc;
-diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
-index 86faecf8f328..847b678af4f0 100644
---- a/fs/nfs/inode.c
-+++ b/fs/nfs/inode.c
-@@ -940,7 +940,7 @@ int nfs_open(struct inode *inode, struct file *filp)
- {
- 	struct nfs_open_context *ctx;
- 
--	ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
-+	ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode);
- 	if (IS_ERR(ctx))
- 		return PTR_ERR(ctx);
- 	nfs_file_set_open_context(filp, ctx);
-diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
-index 57ca1c8039c1..2a9ff14cfb3b 100644
---- a/fs/nfs/nfs4file.c
-+++ b/fs/nfs/nfs4file.c
-@@ -26,7 +26,7 @@ static int
- nfs4_file_open(struct inode *inode, struct file *filp)
- {
- 	struct nfs_open_context *ctx;
--	struct dentry *dentry = filp->f_path.dentry;
-+	struct dentry *dentry = file_dentry(filp);
- 	struct dentry *parent = NULL;
- 	struct inode *dir;
- 	unsigned openflags = filp->f_flags;
-@@ -57,7 +57,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
- 	parent = dget_parent(dentry);
- 	dir = d_inode(parent);
- 
--	ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
-+	ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode);
- 	err = PTR_ERR(ctx);
- 	if (IS_ERR(ctx))
- 		goto out;
-diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
-index 4cba7865f496..f8082c7cde8b 100644
---- a/fs/nfsd/nfs4proc.c
-+++ b/fs/nfsd/nfs4proc.c
-@@ -878,6 +878,7 @@ nfsd4_secinfo(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
- 				    &exp, &dentry);
- 	if (err)
- 		return err;
-+	fh_unlock(&cstate->current_fh);
- 	if (d_really_is_negative(dentry)) {
- 		exp_put(exp);
- 		err = nfserr_noent;
-diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
-index d6ef0955a979..1600ec470ce7 100644
---- a/fs/nfsd/nfs4xdr.c
-+++ b/fs/nfsd/nfs4xdr.c
-@@ -1072,8 +1072,9 @@ nfsd4_decode_rename(struct nfsd4_compoundargs *argp, struct nfsd4_rename *rename
- 
- 	READ_BUF(4);
- 	rename->rn_snamelen = be32_to_cpup(p++);
--	READ_BUF(rename->rn_snamelen + 4);
-+	READ_BUF(rename->rn_snamelen);
- 	SAVEMEM(rename->rn_sname, rename->rn_snamelen);
-+	READ_BUF(4);
- 	rename->rn_tnamelen = be32_to_cpup(p++);
- 	READ_BUF(rename->rn_tnamelen);
- 	SAVEMEM(rename->rn_tname, rename->rn_tnamelen);
-@@ -1155,13 +1156,14 @@ nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclient
- 	READ_BUF(8);
- 	setclientid->se_callback_prog = be32_to_cpup(p++);
- 	setclientid->se_callback_netid_len = be32_to_cpup(p++);
--
--	READ_BUF(setclientid->se_callback_netid_len + 4);
-+	READ_BUF(setclientid->se_callback_netid_len);
- 	SAVEMEM(setclientid->se_callback_netid_val, setclientid->se_callback_netid_len);
-+	READ_BUF(4);
- 	setclientid->se_callback_addr_len = be32_to_cpup(p++);
- 
--	READ_BUF(setclientid->se_callback_addr_len + 4);
-+	READ_BUF(setclientid->se_callback_addr_len);
- 	SAVEMEM(setclientid->se_callback_addr_val, setclientid->se_callback_addr_len);
-+	READ_BUF(4);
- 	setclientid->se_callback_ident = be32_to_cpup(p++);
- 
- 	DECODE_TAIL;
-@@ -1835,8 +1837,9 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
- 
- 	READ_BUF(4);
- 	argp->taglen = be32_to_cpup(p++);
--	READ_BUF(argp->taglen + 8);
-+	READ_BUF(argp->taglen);
- 	SAVEMEM(argp->tag, argp->taglen);
-+	READ_BUF(8);
- 	argp->minorversion = be32_to_cpup(p++);
- 	argp->opcnt = be32_to_cpup(p++);
- 	max_reply += 4 + (XDR_QUADLEN(argp->taglen) << 2);
-diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
-index 0cdf497c91ef..2162434728c0 100644
---- a/fs/ocfs2/acl.c
-+++ b/fs/ocfs2/acl.c
-@@ -322,3 +322,90 @@ struct posix_acl *ocfs2_iop_get_acl(struct inode *inode, int type)
- 	brelse(di_bh);
- 	return acl;
- }
-+
-+int ocfs2_acl_chmod(struct inode *inode, struct buffer_head *bh)
-+{
-+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
-+	struct posix_acl *acl;
-+	int ret;
-+
-+	if (S_ISLNK(inode->i_mode))
-+		return -EOPNOTSUPP;
-+
-+	if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
-+		return 0;
-+
-+	acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh);
-+	if (IS_ERR(acl) || !acl)
-+		return PTR_ERR(acl);
-+	ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
-+	if (ret)
-+		return ret;
-+	ret = ocfs2_set_acl(NULL, inode, NULL, ACL_TYPE_ACCESS,
-+			    acl, NULL, NULL);
-+	posix_acl_release(acl);
-+	return ret;
-+}
-+
-+/*
-+ * Initialize the ACLs of a new inode. If parent directory has default ACL,
-+ * then clone to new inode. Called from ocfs2_mknod.
-+ */
-+int ocfs2_init_acl(handle_t *handle,
-+		   struct inode *inode,
-+		   struct inode *dir,
-+		   struct buffer_head *di_bh,
-+		   struct buffer_head *dir_bh,
-+		   struct ocfs2_alloc_context *meta_ac,
-+		   struct ocfs2_alloc_context *data_ac)
-+{
-+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
-+	struct posix_acl *acl = NULL;
-+	int ret = 0, ret2;
-+	umode_t mode;
-+
-+	if (!S_ISLNK(inode->i_mode)) {
-+		if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
-+			acl = ocfs2_get_acl_nolock(dir, ACL_TYPE_DEFAULT,
-+						   dir_bh);
-+			if (IS_ERR(acl))
-+				return PTR_ERR(acl);
-+		}
-+		if (!acl) {
-+			mode = inode->i_mode & ~current_umask();
-+			ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
-+			if (ret) {
-+				mlog_errno(ret);
-+				goto cleanup;
-+			}
-+		}
-+	}
-+	if ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) && acl) {
-+		if (S_ISDIR(inode->i_mode)) {
-+			ret = ocfs2_set_acl(handle, inode, di_bh,
-+					    ACL_TYPE_DEFAULT, acl,
-+					    meta_ac, data_ac);
-+			if (ret)
-+				goto cleanup;
-+		}
-+		mode = inode->i_mode;
-+		ret = __posix_acl_create(&acl, GFP_NOFS, &mode);
-+		if (ret < 0)
-+			return ret;
-+
-+		ret2 = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
-+		if (ret2) {
-+			mlog_errno(ret2);
-+			ret = ret2;
-+			goto cleanup;
-+		}
-+		if (ret > 0) {
-+			ret = ocfs2_set_acl(handle, inode,
-+					    di_bh, ACL_TYPE_ACCESS,
-+					    acl, meta_ac, data_ac);
-+		}
-+	}
-+cleanup:
-+	posix_acl_release(acl);
-+	return ret;
-+}
-diff --git a/fs/ocfs2/acl.h b/fs/ocfs2/acl.h
-index 3fce68d08625..2783a75b3999 100644
---- a/fs/ocfs2/acl.h
-+++ b/fs/ocfs2/acl.h
-@@ -35,5 +35,10 @@ int ocfs2_set_acl(handle_t *handle,
- 			 struct posix_acl *acl,
- 			 struct ocfs2_alloc_context *meta_ac,
- 			 struct ocfs2_alloc_context *data_ac);
-+extern int ocfs2_acl_chmod(struct inode *, struct buffer_head *);
-+extern int ocfs2_init_acl(handle_t *, struct inode *, struct inode *,
-+			  struct buffer_head *, struct buffer_head *,
-+			  struct ocfs2_alloc_context *,
-+			  struct ocfs2_alloc_context *);
- 
- #endif /* OCFS2_ACL_H */
-diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
-index a76b9ea7722e..a2370e2c7295 100644
---- a/fs/ocfs2/cluster/heartbeat.c
-+++ b/fs/ocfs2/cluster/heartbeat.c
-@@ -1445,8 +1445,8 @@ static void o2hb_region_release(struct config_item *item)
- 	debugfs_remove(reg->hr_debug_dir);
- 	kfree(reg->hr_db_livenodes);
- 	kfree(reg->hr_db_regnum);
--	kfree(reg->hr_debug_elapsed_time);
--	kfree(reg->hr_debug_pinned);
-+	kfree(reg->hr_db_elapsed_time);
-+	kfree(reg->hr_db_pinned);
- 
- 	spin_lock(&o2hb_live_lock);
- 	list_del(&reg->hr_all_item);
-diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
-index e36d63ff1783..f90931335c6b 100644
---- a/fs/ocfs2/dlm/dlmconvert.c
-+++ b/fs/ocfs2/dlm/dlmconvert.c
-@@ -262,6 +262,7 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
- 				  struct dlm_lock *lock, int flags, int type)
- {
- 	enum dlm_status status;
-+	u8 old_owner = res->owner;
- 
- 	mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
- 	     lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);
-@@ -287,6 +288,19 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
- 		status = DLM_DENIED;
- 		goto bail;
- 	}
-+
-+	if (lock->ml.type == type && lock->ml.convert_type == LKM_IVMODE) {
-+		mlog(0, "last convert request returned DLM_RECOVERING, but "
-+		     "owner has already queued and sent ast to me. res %.*s, "
-+		     "(cookie=%u:%llu, type=%d, conv=%d)\n",
-+		     res->lockname.len, res->lockname.name,
-+		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
-+		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
-+		     lock->ml.type, lock->ml.convert_type);
-+		status = DLM_NORMAL;
-+		goto bail;
-+	}
-+
- 	res->state |= DLM_LOCK_RES_IN_PROGRESS;
- 	/* move lock to local convert queue */
- 	/* do not alter lock refcount.  switching lists. */
-@@ -316,11 +330,19 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
- 	spin_lock(&res->spinlock);
- 	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
- 	lock->convert_pending = 0;
--	/* if it failed, move it back to granted queue */
-+	/* if it failed, move it back to granted queue.
-+	 * if master returns DLM_NORMAL and then down before sending ast,
-+	 * it may have already been moved to granted queue, reset to
-+	 * DLM_RECOVERING and retry convert */
- 	if (status != DLM_NORMAL) {
- 		if (status != DLM_NOTQUEUED)
- 			dlm_error(status);
- 		dlm_revert_pending_convert(res, lock);
-+	} else if ((res->state & DLM_LOCK_RES_RECOVERING) ||
-+			(old_owner != res->owner)) {
-+		mlog(0, "res %.*s is in recovering or has been recovered.\n",
-+				res->lockname.len, res->lockname.name);
-+		status = DLM_RECOVERING;
- 	}
- bail:
- 	spin_unlock(&res->spinlock);
-diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
-index b94a425f0175..23d0ab881f6e 100644
---- a/fs/ocfs2/dlm/dlmrecovery.c
-+++ b/fs/ocfs2/dlm/dlmrecovery.c
-@@ -2071,7 +2071,6 @@ void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
- 			dlm_lock_get(lock);
- 			if (lock->convert_pending) {
- 				/* move converting lock back to granted */
--				BUG_ON(i != DLM_CONVERTING_LIST);
- 				mlog(0, "node died with convert pending "
- 				     "on %.*s. move back to granted list.\n",
- 				     res->lockname.len, res->lockname.name);
-diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
-index 7cb38fdca229..3d60fda1cb09 100644
---- a/fs/ocfs2/file.c
-+++ b/fs/ocfs2/file.c
-@@ -1268,20 +1268,20 @@ bail_unlock_rw:
- 	if (size_change)
- 		ocfs2_rw_unlock(inode, 1);
- bail:
--	brelse(bh);
- 
- 	/* Release quota pointers in case we acquired them */
- 	for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++)
- 		dqput(transfer_to[qtype]);
- 
- 	if (!status && attr->ia_valid & ATTR_MODE) {
--		status = posix_acl_chmod(inode, inode->i_mode);
-+		status = ocfs2_acl_chmod(inode, bh);
- 		if (status < 0)
- 			mlog_errno(status);
- 	}
- 	if (inode_locked)
- 		ocfs2_inode_unlock(inode, 1);
- 
-+	brelse(bh);
- 	return status;
- }
- 
-diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
-index 6b3e87189a64..a8f1225e6d9b 100644
---- a/fs/ocfs2/namei.c
-+++ b/fs/ocfs2/namei.c
-@@ -259,7 +259,6 @@ static int ocfs2_mknod(struct inode *dir,
- 	struct ocfs2_dir_lookup_result lookup = { NULL, };
- 	sigset_t oldset;
- 	int did_block_signals = 0;
--	struct posix_acl *default_acl = NULL, *acl = NULL;
- 	struct ocfs2_dentry_lock *dl = NULL;
- 
- 	trace_ocfs2_mknod(dir, dentry, dentry->d_name.len, dentry->d_name.name,
-@@ -367,12 +366,6 @@ static int ocfs2_mknod(struct inode *dir,
- 		goto leave;
- 	}
- 
--	status = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
--	if (status) {
--		mlog_errno(status);
--		goto leave;
--	}
--
- 	handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
- 							    S_ISDIR(mode),
- 							    xattr_credits));
-@@ -421,16 +414,8 @@ static int ocfs2_mknod(struct inode *dir,
- 		inc_nlink(dir);
- 	}
- 
--	if (default_acl) {
--		status = ocfs2_set_acl(handle, inode, new_fe_bh,
--				       ACL_TYPE_DEFAULT, default_acl,
--				       meta_ac, data_ac);
--	}
--	if (!status && acl) {
--		status = ocfs2_set_acl(handle, inode, new_fe_bh,
--				       ACL_TYPE_ACCESS, acl,
--				       meta_ac, data_ac);
--	}
-+	status = ocfs2_init_acl(handle, inode, dir, new_fe_bh, parent_fe_bh,
-+			 meta_ac, data_ac);
- 
- 	if (status < 0) {
- 		mlog_errno(status);
-@@ -472,10 +457,6 @@ static int ocfs2_mknod(struct inode *dir,
- 	d_instantiate(dentry, inode);
- 	status = 0;
- leave:
--	if (default_acl)
--		posix_acl_release(default_acl);
--	if (acl)
--		posix_acl_release(acl);
- 	if (status < 0 && did_quota_inode)
- 		dquot_free_inode(inode);
- 	if (handle)
-diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
-index 3eff031aaf26..9919964c5b3b 100644
---- a/fs/ocfs2/refcounttree.c
-+++ b/fs/ocfs2/refcounttree.c
-@@ -4248,20 +4248,12 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
- 	struct inode *inode = d_inode(old_dentry);
- 	struct buffer_head *old_bh = NULL;
- 	struct inode *new_orphan_inode = NULL;
--	struct posix_acl *default_acl, *acl;
--	umode_t mode;
- 
- 	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
- 		return -EOPNOTSUPP;
- 
--	mode = inode->i_mode;
--	error = posix_acl_create(dir, &mode, &default_acl, &acl);
--	if (error) {
--		mlog_errno(error);
--		return error;
--	}
- 
--	error = ocfs2_create_inode_in_orphan(dir, mode,
-+	error = ocfs2_create_inode_in_orphan(dir, inode->i_mode,
- 					     &new_orphan_inode);
- 	if (error) {
- 		mlog_errno(error);
-@@ -4300,16 +4292,11 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
- 	/* If the security isn't preserved, we need to re-initialize them. */
- 	if (!preserve) {
- 		error = ocfs2_init_security_and_acl(dir, new_orphan_inode,
--						    &new_dentry->d_name,
--						    default_acl, acl);
-+						    &new_dentry->d_name);
- 		if (error)
- 			mlog_errno(error);
- 	}
- out:
--	if (default_acl)
--		posix_acl_release(default_acl);
--	if (acl)
--		posix_acl_release(acl);
- 	if (!error) {
- 		error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode,
- 						       new_dentry);
-diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
-index 7d3d979f57d9..f19b7381a998 100644
---- a/fs/ocfs2/xattr.c
-+++ b/fs/ocfs2/xattr.c
-@@ -7216,12 +7216,10 @@ out:
-  */
- int ocfs2_init_security_and_acl(struct inode *dir,
- 				struct inode *inode,
--				const struct qstr *qstr,
--				struct posix_acl *default_acl,
--				struct posix_acl *acl)
-+				const struct qstr *qstr)
- {
--	struct buffer_head *dir_bh = NULL;
- 	int ret = 0;
-+	struct buffer_head *dir_bh = NULL;
- 
- 	ret = ocfs2_init_security_get(inode, dir, qstr, NULL);
- 	if (ret) {
-@@ -7234,11 +7232,9 @@ int ocfs2_init_security_and_acl(struct inode *dir,
- 		mlog_errno(ret);
- 		goto leave;
- 	}
--
--	if (!ret && default_acl)
--		ret = ocfs2_iop_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
--	if (!ret && acl)
--		ret = ocfs2_iop_set_acl(inode, acl, ACL_TYPE_ACCESS);
-+	ret = ocfs2_init_acl(NULL, inode, dir, NULL, dir_bh, NULL, NULL);
-+	if (ret)
-+		mlog_errno(ret);
- 
- 	ocfs2_inode_unlock(dir, 0);
- 	brelse(dir_bh);
-diff --git a/fs/ocfs2/xattr.h b/fs/ocfs2/xattr.h
-index f10d5b93c366..1633cc15ea1f 100644
---- a/fs/ocfs2/xattr.h
-+++ b/fs/ocfs2/xattr.h
-@@ -94,7 +94,5 @@ int ocfs2_reflink_xattrs(struct inode *old_inode,
- 			 bool preserve_security);
- int ocfs2_init_security_and_acl(struct inode *dir,
- 				struct inode *inode,
--				const struct qstr *qstr,
--				struct posix_acl *default_acl,
--				struct posix_acl *acl);
-+				const struct qstr *qstr);
- #endif /* OCFS2_XATTR_H */
-diff --git a/fs/open.c b/fs/open.c
-index 55bdc75e2172..081d3d6df74b 100644
---- a/fs/open.c
-+++ b/fs/open.c
-@@ -840,16 +840,12 @@ EXPORT_SYMBOL(file_path);
- int vfs_open(const struct path *path, struct file *file,
- 	     const struct cred *cred)
- {
--	struct dentry *dentry = path->dentry;
--	struct inode *inode = dentry->d_inode;
-+	struct inode *inode = vfs_select_inode(path->dentry, file->f_flags);
- 
--	file->f_path = *path;
--	if (dentry->d_flags & DCACHE_OP_SELECT_INODE) {
--		inode = dentry->d_op->d_select_inode(dentry, file->f_flags);
--		if (IS_ERR(inode))
--			return PTR_ERR(inode);
--	}
-+	if (IS_ERR(inode))
-+		return PTR_ERR(inode);
- 
-+	file->f_path = *path;
- 	return do_dentry_open(file, inode, NULL, cred);
- }
- 
-@@ -992,14 +988,12 @@ struct file *filp_open(const char *filename, int flags, umode_t mode)
- EXPORT_SYMBOL(filp_open);
- 
- struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt,
--			    const char *filename, int flags)
-+			    const char *filename, int flags, umode_t mode)
- {
- 	struct open_flags op;
--	int err = build_open_flags(flags, 0, &op);
-+	int err = build_open_flags(flags, mode, &op);
- 	if (err)
- 		return ERR_PTR(err);
--	if (flags & O_CREAT)
--		return ERR_PTR(-EINVAL);
- 	return do_file_open_root(dentry, mnt, filename, &op);
- }
- EXPORT_SYMBOL(file_open_root);
-diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
-index 619ad4b016d2..4399ea804447 100644
---- a/fs/overlayfs/super.c
-+++ b/fs/overlayfs/super.c
-@@ -295,6 +295,37 @@ static void ovl_dentry_release(struct dentry *dentry)
- 	}
- }
- 
-+static struct dentry *ovl_d_real(struct dentry *dentry, struct inode *inode)
-+{
-+	struct dentry *real;
-+
-+	if (d_is_dir(dentry)) {
-+		if (!inode || inode == d_inode(dentry))
-+			return dentry;
-+		goto bug;
-+	}
-+
-+	real = ovl_dentry_upper(dentry);
-+	if (real && (!inode || inode == d_inode(real)))
-+		return real;
-+
-+	real = ovl_dentry_lower(dentry);
-+	if (!real)
-+		goto bug;
-+
-+	if (!inode || inode == d_inode(real))
-+		return real;
-+
-+	/* Handle recursion */
-+	if (real->d_flags & DCACHE_OP_REAL)
-+		return real->d_op->d_real(real, inode);
-+
-+bug:
-+	WARN(1, "ovl_d_real(%pd4, %s:%lu\n): real dentry not found\n", dentry,
-+	     inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0);
-+	return dentry;
-+}
-+
- static int ovl_dentry_revalidate(struct dentry *dentry, unsigned int flags)
- {
- 	struct ovl_entry *oe = dentry->d_fsdata;
-@@ -339,11 +370,13 @@ static int ovl_dentry_weak_revalidate(struct dentry *dentry, unsigned int flags)
- static const struct dentry_operations ovl_dentry_operations = {
- 	.d_release = ovl_dentry_release,
- 	.d_select_inode = ovl_d_select_inode,
-+	.d_real = ovl_d_real,
- };
- 
- static const struct dentry_operations ovl_reval_dentry_operations = {
- 	.d_release = ovl_dentry_release,
- 	.d_select_inode = ovl_d_select_inode,
-+	.d_real = ovl_d_real,
- 	.d_revalidate = ovl_dentry_revalidate,
- 	.d_weak_revalidate = ovl_dentry_weak_revalidate,
- };
-diff --git a/fs/pnode.c b/fs/pnode.c
-index c524fdddc7fb..99899705b105 100644
---- a/fs/pnode.c
-+++ b/fs/pnode.c
-@@ -198,7 +198,7 @@ static struct mount *next_group(struct mount *m, struct mount *origin)
- 
- /* all accesses are serialized by namespace_sem */
- static struct user_namespace *user_ns;
--static struct mount *last_dest, *last_source, *dest_master;
-+static struct mount *last_dest, *first_source, *last_source, *dest_master;
- static struct mountpoint *mp;
- static struct hlist_head *list;
- 
-@@ -221,20 +221,22 @@ static int propagate_one(struct mount *m)
- 		type = CL_MAKE_SHARED;
- 	} else {
- 		struct mount *n, *p;
-+		bool done;
- 		for (n = m; ; n = p) {
- 			p = n->mnt_master;
--			if (p == dest_master || IS_MNT_MARKED(p)) {
--				while (last_dest->mnt_master != p) {
--					last_source = last_source->mnt_master;
--					last_dest = last_source->mnt_parent;
--				}
--				if (!peers(n, last_dest)) {
--					last_source = last_source->mnt_master;
--					last_dest = last_source->mnt_parent;
--				}
-+			if (p == dest_master || IS_MNT_MARKED(p))
- 				break;
--			}
- 		}
-+		do {
-+			struct mount *parent = last_source->mnt_parent;
-+			if (last_source == first_source)
-+				break;
-+			done = parent->mnt_master == p;
-+			if (done && peers(n, parent))
-+				break;
-+			last_source = last_source->mnt_master;
-+		} while (!done);
-+
- 		type = CL_SLAVE;
- 		/* beginning of peer group among the slaves? */
- 		if (IS_MNT_SHARED(m))
-@@ -286,6 +288,7 @@ int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
- 	 */
- 	user_ns = current->nsproxy->mnt_ns->user_ns;
- 	last_dest = dest_mnt;
-+	first_source = source_mnt;
- 	last_source = source_mnt;
- 	mp = dest_mp;
- 	list = tree_list;
-diff --git a/fs/proc/base.c b/fs/proc/base.c
-index 4f764c2ac1a5..45f2162e55b2 100644
---- a/fs/proc/base.c
-+++ b/fs/proc/base.c
-@@ -955,7 +955,8 @@ static ssize_t environ_read(struct file *file, char __user *buf,
- 	struct mm_struct *mm = file->private_data;
- 	unsigned long env_start, env_end;
- 
--	if (!mm)
-+	/* Ensure the process spawned far enough to have an environment. */
-+	if (!mm || !mm->env_end)
- 		return 0;
- 
- 	page = (char *)__get_free_page(GFP_TEMPORARY);
-diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
-index fa95ab2d3674..9d2f3e0a6360 100644
---- a/fs/proc/task_mmu.c
-+++ b/fs/proc/task_mmu.c
-@@ -1504,6 +1504,32 @@ static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
- 	return page;
- }
- 
-+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-+static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
-+					      struct vm_area_struct *vma,
-+					      unsigned long addr)
-+{
-+	struct page *page;
-+	int nid;
-+
-+	if (!pmd_present(pmd))
-+		return NULL;
-+
-+	page = vm_normal_page_pmd(vma, addr, pmd);
-+	if (!page)
-+		return NULL;
-+
-+	if (PageReserved(page))
-+		return NULL;
-+
-+	nid = page_to_nid(page);
-+	if (!node_isset(nid, node_states[N_MEMORY]))
-+		return NULL;
-+
-+	return page;
-+}
-+#endif
-+
- static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
- 		unsigned long end, struct mm_walk *walk)
- {
-@@ -1513,14 +1539,14 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
- 	pte_t *orig_pte;
- 	pte_t *pte;
- 
-+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- 	ptl = pmd_trans_huge_lock(pmd, vma);
- 	if (ptl) {
--		pte_t huge_pte = *(pte_t *)pmd;
- 		struct page *page;
- 
--		page = can_gather_numa_stats(huge_pte, vma, addr);
-+		page = can_gather_numa_stats_pmd(*pmd, vma, addr);
- 		if (page)
--			gather_stats(page, md, pte_dirty(huge_pte),
-+			gather_stats(page, md, pmd_dirty(*pmd),
- 				     HPAGE_PMD_SIZE/PAGE_SIZE);
- 		spin_unlock(ptl);
- 		return 0;
-@@ -1528,6 +1554,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
- 
- 	if (pmd_trans_unstable(pmd))
- 		return 0;
-+#endif
- 	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
- 	do {
- 		struct page *page = can_gather_numa_stats(*pte, vma, addr);
-diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
-index 2256e7e23e67..3f1190d18991 100644
---- a/fs/proc_namespace.c
-+++ b/fs/proc_namespace.c
-@@ -199,6 +199,8 @@ static int show_vfsstat(struct seq_file *m, struct vfsmount *mnt)
- 	if (sb->s_op->show_devname) {
- 		seq_puts(m, "device ");
- 		err = sb->s_op->show_devname(m, mnt_path.dentry);
-+		if (err)
-+			goto out;
- 	} else {
- 		if (r->mnt_devname) {
- 			seq_puts(m, "device ");
-diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
-index 3c3b81bb6dfe..850d17fa0aa3 100644
---- a/fs/quota/dquot.c
-+++ b/fs/quota/dquot.c
-@@ -1398,7 +1398,7 @@ static int dquot_active(const struct inode *inode)
- static int __dquot_initialize(struct inode *inode, int type)
- {
- 	int cnt, init_needed = 0;
--	struct dquot **dquots, *got[MAXQUOTAS];
-+	struct dquot **dquots, *got[MAXQUOTAS] = {};
- 	struct super_block *sb = inode->i_sb;
- 	qsize_t rsv;
- 	int ret = 0;
-@@ -1415,7 +1415,6 @@ static int __dquot_initialize(struct inode *inode, int type)
- 		int rc;
- 		struct dquot *dquot;
- 
--		got[cnt] = NULL;
- 		if (type != -1 && cnt != type)
- 			continue;
- 		/*
-diff --git a/fs/splice.c b/fs/splice.c
-index 82bc0d64fc38..19e0b103d253 100644
---- a/fs/splice.c
-+++ b/fs/splice.c
-@@ -185,6 +185,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
- 	unsigned int spd_pages = spd->nr_pages;
- 	int ret, do_wakeup, page_nr;
- 
-+	if (!spd_pages)
-+		return 0;
-+
- 	ret = 0;
- 	do_wakeup = 0;
- 	page_nr = 0;
-diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
-index 0ef7c2ed3f8a..4fa14820e2e2 100644
---- a/fs/xfs/xfs_attr_list.c
-+++ b/fs/xfs/xfs_attr_list.c
-@@ -202,8 +202,10 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
- 					sbp->namelen,
- 					sbp->valuelen,
- 					&sbp->name[sbp->namelen]);
--		if (error)
-+		if (error) {
-+			kmem_free(sbuf);
- 			return error;
-+		}
- 		if (context->seen_enough)
- 			break;
- 		cursor->offset++;
-@@ -454,14 +456,13 @@ xfs_attr3_leaf_list_int(
- 				args.rmtblkcnt = xfs_attr3_rmt_blocks(
- 							args.dp->i_mount, valuelen);
- 				retval = xfs_attr_rmtval_get(&args);
--				if (retval)
--					return retval;
--				retval = context->put_listent(context,
--						entry->flags,
--						name_rmt->name,
--						(int)name_rmt->namelen,
--						valuelen,
--						args.value);
-+				if (!retval)
-+					retval = context->put_listent(context,
-+							entry->flags,
-+							name_rmt->name,
-+							(int)name_rmt->namelen,
-+							valuelen,
-+							args.value);
- 				kmem_free(args.value);
- 			} else {
- 				retval = context->put_listent(context,
-diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
-index c30266e94806..8ef0ccbf8167 100644
---- a/include/asm-generic/bitops/lock.h
-+++ b/include/asm-generic/bitops/lock.h
-@@ -29,16 +29,16 @@ do {					\
-  * @nr: the bit to set
-  * @addr: the address to start counting from
-  *
-- * This operation is like clear_bit_unlock, however it is not atomic.
-- * It does provide release barrier semantics so it can be used to unlock
-- * a bit lock, however it would only be used if no other CPU can modify
-- * any bits in the memory until the lock is released (a good example is
-- * if the bit lock itself protects access to the other bits in the word).
-+ * A weaker form of clear_bit_unlock() as used by __bit_lock_unlock(). If all
-+ * the bits in the word are protected by this lock some archs can use weaker
-+ * ops to safely unlock.
-+ *
-+ * See for example x86's implementation.
-  */
- #define __clear_bit_unlock(nr, addr)	\
- do {					\
--	smp_mb();			\
--	__clear_bit(nr, addr);		\
-+	smp_mb__before_atomic();	\
-+	clear_bit(nr, addr);		\
- } while (0)
- 
- #endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
-diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
-index e56272c919b5..bf2d34c9d804 100644
---- a/include/asm-generic/futex.h
-+++ b/include/asm-generic/futex.h
-@@ -108,11 +108,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- 	u32 val;
- 
- 	preempt_disable();
--	if (unlikely(get_user(val, uaddr) != 0))
-+	if (unlikely(get_user(val, uaddr) != 0)) {
-+		preempt_enable();
- 		return -EFAULT;
-+	}
- 
--	if (val == oldval && unlikely(put_user(newval, uaddr) != 0))
-+	if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
-+		preempt_enable();
- 		return -EFAULT;
-+	}
- 
- 	*uval = val;
- 	preempt_enable();
-diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
-index 39e1cb201b8e..332da3ad8eb5 100644
---- a/include/asm-generic/qspinlock.h
-+++ b/include/asm-generic/qspinlock.h
-@@ -28,7 +28,30 @@
-  */
- static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
- {
--	return atomic_read(&lock->val);
-+	/*
-+	 * queued_spin_lock_slowpath() can ACQUIRE the lock before
-+	 * issuing the unordered store that sets _Q_LOCKED_VAL.
-+	 *
-+	 * See both smp_cond_acquire() sites for more detail.
-+	 *
-+	 * This however means that in code like:
-+	 *
-+	 *   spin_lock(A)		spin_lock(B)
-+	 *   spin_unlock_wait(B)	spin_is_locked(A)
-+	 *   do_something()		do_something()
-+	 *
-+	 * Both CPUs can end up running do_something() because the store
-+	 * setting _Q_LOCKED_VAL will pass through the loads in
-+	 * spin_unlock_wait() and/or spin_is_locked().
-+	 *
-+	 * Avoid this by issuing a full memory barrier between the spin_lock()
-+	 * and the loads in spin_unlock_wait() and spin_is_locked().
-+	 *
-+	 * Note that regular mutual exclusion doesn't care about this
-+	 * delayed store.
-+	 */
-+	smp_mb();
-+	return atomic_read(&lock->val) & _Q_LOCKED_MASK;
- }
- 
- /**
-@@ -108,6 +131,8 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
-  */
- static inline void queued_spin_unlock_wait(struct qspinlock *lock)
- {
-+	/* See queued_spin_is_locked() */
-+	smp_mb();
- 	while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
- 		cpu_relax();
- }
-diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h
-index 3d1a3af5cf59..a2508a8f9a9c 100644
---- a/include/asm-generic/siginfo.h
-+++ b/include/asm-generic/siginfo.h
-@@ -17,21 +17,6 @@
- struct siginfo;
- void do_schedule_next_timer(struct siginfo *info);
- 
--#ifndef HAVE_ARCH_COPY_SIGINFO
--
--#include <linux/string.h>
--
--static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
--{
--	if (from->si_code < 0)
--		memcpy(to, from, sizeof(*to));
--	else
--		/* _sigchld is currently the largest know union member */
--		memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
--}
--
--#endif
--
- extern int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from);
- 
- #endif
-diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
-index 461a0558bca4..cebecff536a3 100644
---- a/include/drm/drm_cache.h
-+++ b/include/drm/drm_cache.h
-@@ -39,6 +39,8 @@ static inline bool drm_arch_can_wc_memory(void)
- {
- #if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
- 	return false;
-+#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
-+	return false;
- #else
- 	return true;
- #endif
-diff --git a/include/keys/trusted-type.h b/include/keys/trusted-type.h
-index 42cf2d991bf4..4ea7e55f20b0 100644
---- a/include/keys/trusted-type.h
-+++ b/include/keys/trusted-type.h
-@@ -38,7 +38,7 @@ struct trusted_key_options {
- 	unsigned char pcrinfo[MAX_PCRINFO_SIZE];
- 	int pcrlock;
- 	uint32_t hash;
--	uint32_t digest_len;
-+	uint32_t policydigest_len;
- 	unsigned char policydigest[MAX_DIGEST_SIZE];
- 	uint32_t policyhandle;
- };
-diff --git a/include/linux/bpf.h b/include/linux/bpf.h
-index 83d1926c61e4..67bc2da5d233 100644
---- a/include/linux/bpf.h
-+++ b/include/linux/bpf.h
-@@ -165,12 +165,13 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl);
- void bpf_register_map_type(struct bpf_map_type_list *tl);
- 
- struct bpf_prog *bpf_prog_get(u32 ufd);
-+struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog);
- void bpf_prog_put(struct bpf_prog *prog);
- void bpf_prog_put_rcu(struct bpf_prog *prog);
- 
- struct bpf_map *bpf_map_get_with_uref(u32 ufd);
- struct bpf_map *__bpf_map_get(struct fd f);
--void bpf_map_inc(struct bpf_map *map, bool uref);
-+struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref);
- void bpf_map_put_with_uref(struct bpf_map *map);
- void bpf_map_put(struct bpf_map *map);
- 
-diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
-index 735f9f8c4e43..5261751f6bd4 100644
---- a/include/linux/can/dev.h
-+++ b/include/linux/can/dev.h
-@@ -40,8 +40,11 @@ struct can_priv {
- 	struct can_clock clock;
- 
- 	enum can_state state;
--	u32 ctrlmode;
--	u32 ctrlmode_supported;
-+
-+	/* CAN controller features - see include/uapi/linux/can/netlink.h */
-+	u32 ctrlmode;		/* current options setting */
-+	u32 ctrlmode_supported;	/* options that can be modified by netlink */
-+	u32 ctrlmode_static;	/* static enabled options for driver/hardware */
- 
- 	int restart_ms;
- 	struct timer_list restart_timer;
-@@ -108,6 +111,21 @@ static inline bool can_is_canfd_skb(const struct sk_buff *skb)
- 	return skb->len == CANFD_MTU;
- }
- 
-+/* helper to define static CAN controller features at device creation time */
-+static inline void can_set_static_ctrlmode(struct net_device *dev,
-+					   u32 static_mode)
-+{
-+	struct can_priv *priv = netdev_priv(dev);
-+
-+	/* alloc_candev() succeeded => netdev_priv() is valid at this point */
-+	priv->ctrlmode = static_mode;
-+	priv->ctrlmode_static = static_mode;
-+
-+	/* override MTU which was set by default in can_setup()? */
-+	if (static_mode & CAN_CTRLMODE_FD)
-+		dev->mtu = CANFD_MTU;
-+}
-+
- /* get data length from can_dlc with sanitized can_dlc */
- u8 can_dlc2len(u8 can_dlc);
- 
-diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
-index 789471dba6fb..7fc7cb7872e3 100644
---- a/include/linux/cgroup-defs.h
-+++ b/include/linux/cgroup-defs.h
-@@ -210,6 +210,9 @@ struct css_set {
- 	/* all css_task_iters currently walking this cset */
- 	struct list_head task_iters;
- 
-+	/* dead and being drained, ignore for migration */
-+	bool dead;
-+
- 	/* For RCU-protected deletion */
- 	struct rcu_head rcu_head;
- };
-@@ -439,6 +442,7 @@ struct cgroup_subsys {
- 	int (*can_attach)(struct cgroup_taskset *tset);
- 	void (*cancel_attach)(struct cgroup_taskset *tset);
- 	void (*attach)(struct cgroup_taskset *tset);
-+	void (*post_attach)(void);
- 	int (*can_fork)(struct task_struct *task);
- 	void (*cancel_fork)(struct task_struct *task);
- 	void (*fork)(struct task_struct *task);
-diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
-index 1143e38555a4..408a60dca353 100644
---- a/include/linux/clk-provider.h
-+++ b/include/linux/clk-provider.h
-@@ -385,6 +385,7 @@ struct clk_divider {
- #define CLK_DIVIDER_MAX_AT_ZERO		BIT(6)
- 
- extern const struct clk_ops clk_divider_ops;
-+extern const struct clk_ops clk_divider_ro_ops;
- 
- unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
- 		unsigned int val, const struct clk_div_table *table,
-diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
-index 22ab246feed3..eeae401a2412 100644
---- a/include/linux/compiler-gcc.h
-+++ b/include/linux/compiler-gcc.h
-@@ -199,7 +199,7 @@
- #define unreachable() __builtin_unreachable()
- 
- /* Mark a function definition as prohibited from being cloned. */
--#define __noclone	__attribute__((__noclone__))
-+#define __noclone	__attribute__((__noclone__, __optimize__("no-tracer")))
- 
- #endif /* GCC_VERSION >= 40500 */
- 
-diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
-index fea160ee5803..85a868ccb493 100644
---- a/include/linux/cpuset.h
-+++ b/include/linux/cpuset.h
-@@ -137,8 +137,6 @@ static inline void set_mems_allowed(nodemask_t nodemask)
- 	task_unlock(current);
- }
- 
--extern void cpuset_post_attach_flush(void);
--
- #else /* !CONFIG_CPUSETS */
- 
- static inline bool cpusets_enabled(void) { return false; }
-@@ -245,10 +243,6 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
- 	return false;
- }
- 
--static inline void cpuset_post_attach_flush(void)
--{
--}
--
- #endif /* !CONFIG_CPUSETS */
- 
- #endif /* _LINUX_CPUSET_H */
-diff --git a/include/linux/dcache.h b/include/linux/dcache.h
-index c4b5f4b3f8f8..96c1a2da92d7 100644
---- a/include/linux/dcache.h
-+++ b/include/linux/dcache.h
-@@ -161,6 +161,7 @@ struct dentry_operations {
- 	struct vfsmount *(*d_automount)(struct path *);
- 	int (*d_manage)(struct dentry *, bool);
- 	struct inode *(*d_select_inode)(struct dentry *, unsigned);
-+	struct dentry *(*d_real)(struct dentry *, struct inode *);
- } ____cacheline_aligned;
- 
- /*
-@@ -227,6 +228,7 @@ struct dentry_operations {
- #define DCACHE_MAY_FREE			0x00800000
- #define DCACHE_FALLTHRU			0x01000000 /* Fall through to lower layer */
- #define DCACHE_OP_SELECT_INODE		0x02000000 /* Unioned entry: dcache op selects inode */
-+#define DCACHE_OP_REAL			0x08000000
- 
- extern seqlock_t rename_lock;
- 
-@@ -582,4 +584,24 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper)
- 	return upper;
- }
- 
-+static inline struct dentry *d_real(struct dentry *dentry)
-+{
-+	if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
-+		return dentry->d_op->d_real(dentry, NULL);
-+	else
-+		return dentry;
-+}
-+
-+static inline struct inode *vfs_select_inode(struct dentry *dentry,
-+					     unsigned open_flags)
-+{
-+	struct inode *inode = d_inode(dentry);
-+
-+	if (inode && unlikely(dentry->d_flags & DCACHE_OP_SELECT_INODE))
-+		inode = dentry->d_op->d_select_inode(dentry, open_flags);
-+
-+	return inode;
-+}
-+
-+
- #endif	/* __LINUX_DCACHE_H */
-diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
-index ec1c61c87d89..899ab9f8549e 100644
---- a/include/linux/device-mapper.h
-+++ b/include/linux/device-mapper.h
-@@ -124,6 +124,8 @@ struct dm_dev {
- 	char name[16];
- };
- 
-+dev_t dm_get_dev_t(const char *path);
-+
- /*
-  * Constructors should call these functions to ensure destination devices
-  * are opened/closed correctly.
-diff --git a/include/linux/filter.h b/include/linux/filter.h
-index 43aa1f8855c7..a51a5361695f 100644
---- a/include/linux/filter.h
-+++ b/include/linux/filter.h
-@@ -465,10 +465,14 @@ int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
- void bpf_prog_destroy(struct bpf_prog *fp);
- 
- int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
-+int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk,
-+		       bool locked);
- int sk_attach_bpf(u32 ufd, struct sock *sk);
- int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
- int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
- int sk_detach_filter(struct sock *sk);
-+int __sk_detach_filter(struct sock *sk, bool locked);
-+
- int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
- 		  unsigned int len);
- 
-diff --git a/include/linux/fs.h b/include/linux/fs.h
-index ae681002100a..83c77b093ce2 100644
---- a/include/linux/fs.h
-+++ b/include/linux/fs.h
-@@ -1234,6 +1234,16 @@ static inline struct inode *file_inode(const struct file *f)
- 	return f->f_inode;
- }
- 
-+static inline struct dentry *file_dentry(const struct file *file)
-+{
-+	struct dentry *dentry = file->f_path.dentry;
-+
-+	if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
-+		return dentry->d_op->d_real(dentry, file_inode(file));
-+	else
-+		return dentry;
-+}
-+
- static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
- {
- 	return locks_lock_inode_wait(file_inode(filp), fl);
-@@ -2259,7 +2269,7 @@ extern long do_sys_open(int dfd, const char __user *filename, int flags,
- extern struct file *file_open_name(struct filename *, int, umode_t);
- extern struct file *filp_open(const char *, int, umode_t);
- extern struct file *file_open_root(struct dentry *, struct vfsmount *,
--				   const char *, int);
-+				   const char *, int, umode_t);
- extern struct file * dentry_open(const struct path *, int, const struct cred *);
- extern int filp_close(struct file *, fl_owner_t id);
- 
-diff --git a/include/linux/hash.h b/include/linux/hash.h
-index 1afde47e1528..79c52fa81cac 100644
---- a/include/linux/hash.h
-+++ b/include/linux/hash.h
-@@ -32,12 +32,28 @@
- #error Wordsize not 32 or 64
- #endif
- 
-+/*
-+ * The above primes are actively bad for hashing, since they are
-+ * too sparse. The 32-bit one is mostly ok, the 64-bit one causes
-+ * real problems. Besides, the "prime" part is pointless for the
-+ * multiplicative hash.
-+ *
-+ * Although a random odd number will do, it turns out that the golden
-+ * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
-+ * properties.
-+ *
-+ * These are the negative, (1 - phi) = (phi^2) = (3 - sqrt(5))/2.
-+ * (See Knuth vol 3, section 6.4, exercise 9.)
-+ */
-+#define GOLDEN_RATIO_32 0x61C88647
-+#define GOLDEN_RATIO_64 0x61C8864680B583EBull
-+
- static __always_inline u64 hash_64(u64 val, unsigned int bits)
- {
- 	u64 hash = val;
- 
--#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
--	hash = hash * GOLDEN_RATIO_PRIME_64;
-+#if BITS_PER_LONG == 64
-+	hash = hash * GOLDEN_RATIO_64;
- #else
- 	/*  Sigh, gcc can't optimise this alone like it does for 32 bits. */
- 	u64 n = hash;
-diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
-index a338a688ee4a..dcb89e3515db 100644
---- a/include/linux/if_bridge.h
-+++ b/include/linux/if_bridge.h
-@@ -46,10 +46,6 @@ struct br_ip_list {
- #define BR_LEARNING_SYNC	BIT(9)
- #define BR_PROXYARP_WIFI	BIT(10)
- 
--/* values as per ieee8021QBridgeFdbAgingTime */
--#define BR_MIN_AGEING_TIME	(10 * HZ)
--#define BR_MAX_AGEING_TIME	(1000000 * HZ)
--
- #define BR_DEFAULT_AGEING_TIME	(300 * HZ)
- 
- extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
-diff --git a/include/linux/kernel.h b/include/linux/kernel.h
-index f31638c6e873..95452f72349a 100644
---- a/include/linux/kernel.h
-+++ b/include/linux/kernel.h
-@@ -635,7 +635,7 @@ do {							\
- 
- #define do_trace_printk(fmt, args...)					\
- do {									\
--	static const char *trace_printk_fmt				\
-+	static const char *trace_printk_fmt __used			\
- 		__attribute__((section("__trace_printk_fmt"))) =	\
- 		__builtin_constant_p(fmt) ? fmt : NULL;			\
- 									\
-@@ -679,7 +679,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
-  */
- 
- #define trace_puts(str) ({						\
--	static const char *trace_printk_fmt				\
-+	static const char *trace_printk_fmt __used			\
- 		__attribute__((section("__trace_printk_fmt"))) =	\
- 		__builtin_constant_p(str) ? str : NULL;			\
- 									\
-@@ -701,7 +701,7 @@ extern void trace_dump_stack(int skip);
- #define ftrace_vprintk(fmt, vargs)					\
- do {									\
- 	if (__builtin_constant_p(fmt)) {				\
--		static const char *trace_printk_fmt			\
-+		static const char *trace_printk_fmt __used		\
- 		  __attribute__((section("__trace_printk_fmt"))) =	\
- 			__builtin_constant_p(fmt) ? fmt : NULL;		\
- 									\
-diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h
-index b288965e8101..2c14eeca46f0 100644
---- a/include/linux/mfd/samsung/s2mps11.h
-+++ b/include/linux/mfd/samsung/s2mps11.h
-@@ -173,10 +173,12 @@ enum s2mps11_regulators {
- 
- #define S2MPS11_LDO_VSEL_MASK	0x3F
- #define S2MPS11_BUCK_VSEL_MASK	0xFF
-+#define S2MPS11_BUCK9_VSEL_MASK	0x1F
- #define S2MPS11_ENABLE_MASK	(0x03 << S2MPS11_ENABLE_SHIFT)
- #define S2MPS11_ENABLE_SHIFT	0x06
- #define S2MPS11_LDO_N_VOLTAGES	(S2MPS11_LDO_VSEL_MASK + 1)
- #define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1)
-+#define S2MPS11_BUCK9_N_VOLTAGES (S2MPS11_BUCK9_VSEL_MASK + 1)
- #define S2MPS11_RAMP_DELAY	25000		/* uV/us */
- 
- #define S2MPS11_CTRL1_PWRHOLD_MASK	BIT(4)
-diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
-index 987764afa65c..f8b83792939b 100644
---- a/include/linux/mlx5/device.h
-+++ b/include/linux/mlx5/device.h
-@@ -363,6 +363,17 @@ enum {
- 	MLX5_CAP_OFF_CMDIF_CSUM		= 46,
- };
- 
-+enum {
-+	/*
-+	 * Max wqe size for rdma read is 512 bytes, so this
-+	 * limits our max_sge_rd as the wqe needs to fit:
-+	 * - ctrl segment (16 bytes)
-+	 * - rdma segment (16 bytes)
-+	 * - scatter elements (16 bytes each)
-+	 */
-+	MLX5_MAX_SGE_RD	= (512 - 16 - 16) / 16
-+};
-+
- struct mlx5_inbox_hdr {
- 	__be16		opcode;
- 	u8		rsvd[4];
-diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
-index 1e3006dcf35d..46dd88e7071b 100644
---- a/include/linux/mlx5/driver.h
-+++ b/include/linux/mlx5/driver.h
-@@ -813,9 +813,9 @@ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
- int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
- 				 enum mlx5_port_status *status);
- 
--int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
--void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
--void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
-+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
-+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);
-+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
- 			      u8 port);
- 
- int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
-diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
-index 123771003e68..a3f3c71124d3 100644
---- a/include/linux/mlx5/vport.h
-+++ b/include/linux/mlx5/vport.h
-@@ -45,6 +45,8 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
- 				     u16 vport, u8 *addr);
- int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
- 				      u16 vport, u8 *addr);
-+int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
-+int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
- int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
- 					   u64 *system_image_guid);
- int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
-diff --git a/include/linux/mm.h b/include/linux/mm.h
-index 516e14944339..e4e59f9d82f1 100644
---- a/include/linux/mm.h
-+++ b/include/linux/mm.h
-@@ -456,11 +456,20 @@ static inline int page_mapcount(struct page *page)
- 
- #ifdef CONFIG_TRANSPARENT_HUGEPAGE
- int total_mapcount(struct page *page);
-+int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
- #else
- static inline int total_mapcount(struct page *page)
- {
- 	return page_mapcount(page);
- }
-+static inline int page_trans_huge_mapcount(struct page *page,
-+					   int *total_mapcount)
-+{
-+	int mapcount = page_mapcount(page);
-+	if (total_mapcount)
-+		*total_mapcount = mapcount;
-+	return mapcount;
-+}
- #endif
- 
- static inline int page_count(struct page *page)
-@@ -1010,6 +1019,8 @@ static inline bool page_mapped(struct page *page)
- 	page = compound_head(page);
- 	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
- 		return true;
-+	if (PageHuge(page))
-+		return false;
- 	for (i = 0; i < hpage_nr_pages(page); i++) {
- 		if (atomic_read(&page[i]._mapcount) >= 0)
- 			return true;
-@@ -1117,6 +1128,8 @@ struct zap_details {
- 
- struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
- 		pte_t pte);
-+struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
-+				pmd_t pmd);
- 
- int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
- 		unsigned long size);
-diff --git a/include/linux/net.h b/include/linux/net.h
-index 0b4ac7da583a..25ef630f1bd6 100644
---- a/include/linux/net.h
-+++ b/include/linux/net.h
-@@ -245,7 +245,15 @@ do {								\
- 	net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
- #define net_info_ratelimited(fmt, ...)				\
- 	net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
--#if defined(DEBUG)
-+#if defined(CONFIG_DYNAMIC_DEBUG)
-+#define net_dbg_ratelimited(fmt, ...)					\
-+do {									\
-+	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);			\
-+	if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) &&	\
-+	    net_ratelimit())						\
-+		__dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__);	\
-+} while (0)
-+#elif defined(DEBUG)
- #define net_dbg_ratelimited(fmt, ...)				\
- 	net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
- #else
-diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
-index 5440b7b705eb..6d1d8f4f759b 100644
---- a/include/linux/netdevice.h
-+++ b/include/linux/netdevice.h
-@@ -267,6 +267,7 @@ struct header_ops {
- 	void	(*cache_update)(struct hh_cache *hh,
- 				const struct net_device *dev,
- 				const unsigned char *haddr);
-+	bool	(*validate)(const char *ll_header, unsigned int len);
- };
- 
- /* These flag bits are private to the generic network queueing
-@@ -1420,8 +1421,7 @@ enum netdev_priv_flags {
-  *	@dma:		DMA channel
-  *	@mtu:		Interface MTU value
-  *	@type:		Interface hardware type
-- *	@hard_header_len: Hardware header length, which means that this is the
-- *			  minimum size of a packet.
-+ *	@hard_header_len: Maximum hardware header length.
-  *
-  *	@needed_headroom: Extra headroom the hardware may need, but not in all
-  *			  cases can this be guaranteed
-@@ -2627,6 +2627,24 @@ static inline int dev_parse_header(const struct sk_buff *skb,
- 	return dev->header_ops->parse(skb, haddr);
- }
- 
-+/* ll_header must have at least hard_header_len allocated */
-+static inline bool dev_validate_header(const struct net_device *dev,
-+				       char *ll_header, int len)
-+{
-+	if (likely(len >= dev->hard_header_len))
-+		return true;
-+
-+	if (capable(CAP_SYS_RAWIO)) {
-+		memset(ll_header + len, 0, dev->hard_header_len - len);
-+		return true;
-+	}
-+
-+	if (dev->header_ops && dev->header_ops->validate)
-+		return dev->header_ops->validate(ll_header, len);
-+
-+	return false;
-+}
-+
- typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
- int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
- static inline int unregister_gifconf(unsigned int family)
-diff --git a/include/linux/pci.h b/include/linux/pci.h
-index 27716254dcc5..60042ab5d7bd 100644
---- a/include/linux/pci.h
-+++ b/include/linux/pci.h
-@@ -359,6 +359,7 @@ struct pci_dev {
- 	unsigned int	io_window_1k:1;	/* Intel P2P bridge 1K I/O windows */
- 	unsigned int	irq_managed:1;
- 	unsigned int	has_secondary_link:1;
-+	unsigned int	non_compliant_bars:1;	/* broken BARs; ignore them */
- 	pci_dev_flags_t dev_flags;
- 	atomic_t	enable_cnt;	/* pci_enable_device has been called */
- 
-diff --git a/include/linux/platform_data/mmp_dma.h b/include/linux/platform_data/mmp_dma.h
-index 2a330ec9e2af..d1397c8ed94e 100644
---- a/include/linux/platform_data/mmp_dma.h
-+++ b/include/linux/platform_data/mmp_dma.h
-@@ -14,6 +14,7 @@
- 
- struct mmp_dma_platdata {
- 	int dma_channels;
-+	int nb_requestors;
- };
- 
- #endif /* MMP_DMA_H */
-diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
-index 1c33dd7da4a7..4ae95f7e8597 100644
---- a/include/linux/rculist_nulls.h
-+++ b/include/linux/rculist_nulls.h
-@@ -98,6 +98,45 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
- 	if (!is_a_nulls(first))
- 		first->pprev = &n->next;
- }
-+
-+/**
-+ * hlist_nulls_add_tail_rcu
-+ * @n: the element to add to the hash list.
-+ * @h: the list to add to.
-+ *
-+ * Description:
-+ * Adds the specified element to the end of the specified hlist_nulls,
-+ * while permitting racing traversals.  NOTE: tail insertion requires
-+ * list traversal.
-+ *
-+ * The caller must take whatever precautions are necessary
-+ * (such as holding appropriate locks) to avoid racing
-+ * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
-+ * or hlist_nulls_del_rcu(), running on this same list.
-+ * However, it is perfectly legal to run concurrently with
-+ * the _rcu list-traversal primitives, such as
-+ * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
-+ * problems on Alpha CPUs.  Regardless of the type of CPU, the
-+ * list-traversal primitive must be guarded by rcu_read_lock().
-+ */
-+static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
-+					struct hlist_nulls_head *h)
-+{
-+	struct hlist_nulls_node *i, *last = NULL;
-+
-+	for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i);
-+	     i = hlist_nulls_next_rcu(i))
-+		last = i;
-+
-+	if (last) {
-+		n->next = last->next;
-+		n->pprev = &last->next;
-+		rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
-+	} else {
-+		hlist_nulls_add_head_rcu(n, h);
-+	}
-+}
-+
- /**
-  * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
-  * @tpos:	the type * to use as a loop cursor.
-diff --git a/include/linux/signal.h b/include/linux/signal.h
-index 92557bbce7e7..d80259afb9e5 100644
---- a/include/linux/signal.h
-+++ b/include/linux/signal.h
-@@ -28,6 +28,21 @@ struct sigpending {
- 	sigset_t signal;
- };
- 
-+#ifndef HAVE_ARCH_COPY_SIGINFO
-+
-+#include <linux/string.h>
-+
-+static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
-+{
-+	if (from->si_code < 0)
-+		memcpy(to, from, sizeof(*to));
-+	else
-+		/* _sigchld is currently the largest know union member */
-+		memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
-+}
-+
-+#endif
-+
- /*
-  * Define some primitives to manipulate sigset_t.
-  */
-diff --git a/include/linux/swap.h b/include/linux/swap.h
-index d18b65c53dbb..b974a2106dd7 100644
---- a/include/linux/swap.h
-+++ b/include/linux/swap.h
-@@ -418,7 +418,7 @@ extern sector_t swapdev_block(int, pgoff_t);
- extern int page_swapcount(struct page *);
- extern int swp_swapcount(swp_entry_t entry);
- extern struct swap_info_struct *page_swap_info(struct page *);
--extern int reuse_swap_page(struct page *);
-+extern bool reuse_swap_page(struct page *, int *);
- extern int try_to_free_swap(struct page *);
- struct backing_dev_info;
- 
-@@ -513,8 +513,8 @@ static inline int swp_swapcount(swp_entry_t entry)
- 	return 0;
- }
- 
--#define reuse_swap_page(page) \
--	(!PageTransCompound(page) && page_mapcount(page) == 1)
-+#define reuse_swap_page(page, total_mapcount) \
-+	(page_trans_huge_mapcount(page, total_mapcount) == 1)
- 
- static inline int try_to_free_swap(struct page *page)
- {
-@@ -533,6 +533,10 @@ static inline swp_entry_t get_swap_page(void)
- #ifdef CONFIG_MEMCG
- static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
- {
-+	/* Cgroup2 doesn't have per-cgroup swappiness */
-+	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
-+		return vm_swappiness;
-+
- 	/* root ? */
- 	if (mem_cgroup_disabled() || !memcg->css.parent)
- 		return vm_swappiness;
-diff --git a/include/linux/thermal.h b/include/linux/thermal.h
-index e13a1ace50e9..4a849f19e6c9 100644
---- a/include/linux/thermal.h
-+++ b/include/linux/thermal.h
-@@ -156,6 +156,7 @@ struct thermal_attr {
-  * @trip_hyst_attrs:	attributes for trip points for sysfs: trip hysteresis
-  * @devdata:	private pointer for device private data
-  * @trips:	number of trip points the thermal zone supports
-+ * @trips_disabled;	bitmap for disabled trips
-  * @passive_delay:	number of milliseconds to wait between polls when
-  *			performing passive cooling.
-  * @polling_delay:	number of milliseconds to wait between polls when
-@@ -191,6 +192,7 @@ struct thermal_zone_device {
- 	struct thermal_attr *trip_hyst_attrs;
- 	void *devdata;
- 	int trips;
-+	unsigned long trips_disabled;	/* bitmap for disabled trips */
- 	int passive_delay;
- 	int polling_delay;
- 	int temperature;
-diff --git a/include/linux/tty.h b/include/linux/tty.h
-index d9fb4b043f56..e5b996d887ce 100644
---- a/include/linux/tty.h
-+++ b/include/linux/tty.h
-@@ -338,7 +338,6 @@ struct tty_file_private {
- #define TTY_EXCLUSIVE 		3	/* Exclusive open mode */
- #define TTY_DEBUG 		4	/* Debugging */
- #define TTY_DO_WRITE_WAKEUP 	5	/* Call write_wakeup after queuing new */
--#define TTY_OTHER_DONE		6	/* Closed pty has completed input processing */
- #define TTY_LDISC_OPEN	 	11	/* Line discipline is open */
- #define TTY_PTY_LOCK 		16	/* pty private */
- #define TTY_NO_WRITE_SPLIT 	17	/* Preserve write boundaries to driver */
-@@ -464,6 +463,7 @@ extern void tty_buffer_init(struct tty_port *port);
- extern void tty_buffer_set_lock_subclass(struct tty_port *port);
- extern bool tty_buffer_restart_work(struct tty_port *port);
- extern bool tty_buffer_cancel_work(struct tty_port *port);
-+extern void tty_buffer_flush_work(struct tty_port *port);
- extern speed_t tty_termios_baud_rate(struct ktermios *termios);
- extern speed_t tty_termios_input_baud_rate(struct ktermios *termios);
- extern void tty_termios_encode_baud_rate(struct ktermios *termios,
-@@ -589,7 +589,7 @@ static inline int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
- 		count = ld->ops->receive_buf2(ld->tty, p, f, count);
- 	else {
- 		count = min_t(int, count, ld->tty->receive_room);
--		if (count)
-+		if (count && ld->ops->receive_buf)
- 			ld->ops->receive_buf(ld->tty, p, f, count);
- 	}
- 	return count;
-diff --git a/include/linux/usb.h b/include/linux/usb.h
-index 89533ba38691..f3dbc217ff41 100644
---- a/include/linux/usb.h
-+++ b/include/linux/usb.h
-@@ -1066,7 +1066,7 @@ struct usbdrv_wrap {
-  *	for interfaces bound to this driver.
-  * @soft_unbind: if set to 1, the USB core will not kill URBs and disable
-  *	endpoints before calling the driver's disconnect method.
-- * @disable_hub_initiated_lpm: if set to 0, the USB core will not allow hubs
-+ * @disable_hub_initiated_lpm: if set to 1, the USB core will not allow hubs
-  *	to initiate lower power link state transitions when an idle timeout
-  *	occurs.  Device-initiated USB 3.0 link PM will still be allowed.
-  *
-diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
-index 7f5f78bd15ad..245f57dbbb61 100644
---- a/include/linux/usb_usual.h
-+++ b/include/linux/usb_usual.h
-@@ -79,6 +79,8 @@
- 		/* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */	\
- 	US_FLAG(MAX_SECTORS_240,	0x08000000)		\
- 		/* Sets max_sectors to 240 */			\
-+	US_FLAG(NO_REPORT_LUNS,	0x10000000)			\
-+		/* Cannot handle REPORT_LUNS */			\
- 
- #define US_FLAG(name, value)	US_FL_##name = value ,
- enum { US_DO_ALL_FLAGS };
-diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
-index 8a0f55b6c2ba..88e3ab496e8f 100644
---- a/include/media/videobuf2-core.h
-+++ b/include/media/videobuf2-core.h
-@@ -375,6 +375,9 @@ struct vb2_ops {
- /**
-  * struct vb2_ops - driver-specific callbacks
-  *
-+ * @verify_planes_array: Verify that a given user space structure contains
-+ *			enough planes for the buffer. This is called
-+ *			for each dequeued buffer.
-  * @fill_user_buffer:	given a vb2_buffer fill in the userspace structure.
-  *			For V4L2 this is a struct v4l2_buffer.
-  * @fill_vb2_buffer:	given a userspace structure, fill in the vb2_buffer.
-@@ -384,6 +387,7 @@ struct vb2_ops {
-  *			the vb2_buffer struct.
-  */
- struct vb2_buf_ops {
-+	int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb);
- 	void (*fill_user_buffer)(struct vb2_buffer *vb, void *pb);
- 	int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb,
- 				struct vb2_plane *planes);
-@@ -400,6 +404,9 @@ struct vb2_buf_ops {
-  * @fileio_read_once:		report EOF after reading the first buffer
-  * @fileio_write_immediately:	queue buffer after each write() call
-  * @allow_zero_bytesused:	allow bytesused == 0 to be passed to the driver
-+ * @quirk_poll_must_check_waiting_for_buffers: Return POLLERR at poll when QBUF
-+ *              has not been called. This is a vb1 idiom that has been adopted
-+ *              also by vb2.
-  * @lock:	pointer to a mutex that protects the vb2_queue struct. The
-  *		driver can set this to a mutex to let the v4l2 core serialize
-  *		the queuing ioctls. If the driver wants to handle locking
-@@ -463,6 +470,7 @@ struct vb2_queue {
- 	unsigned			fileio_read_once:1;
- 	unsigned			fileio_write_immediately:1;
- 	unsigned			allow_zero_bytesused:1;
-+	unsigned		   quirk_poll_must_check_waiting_for_buffers:1;
- 
- 	struct mutex			*lock;
- 	void				*owner;
-diff --git a/include/net/bonding.h b/include/net/bonding.h
-index ee6c52053aa3..791800ddd6d9 100644
---- a/include/net/bonding.h
-+++ b/include/net/bonding.h
-@@ -215,6 +215,7 @@ struct bonding {
- 	 * ALB mode (6) - to sync the use and modifications of its hash table
- 	 */
- 	spinlock_t mode_lock;
-+	spinlock_t stats_lock;
- 	u8	 send_peer_notif;
- 	u8       igmp_retrans;
- #ifdef CONFIG_PROC_FS
-diff --git a/include/net/codel.h b/include/net/codel.h
-index 267e70210061..d168aca115cc 100644
---- a/include/net/codel.h
-+++ b/include/net/codel.h
-@@ -162,12 +162,14 @@ struct codel_vars {
-  * struct codel_stats - contains codel shared variables and stats
-  * @maxpacket:	largest packet we've seen so far
-  * @drop_count:	temp count of dropped packets in dequeue()
-+ * @drop_len:	bytes of dropped packets in dequeue()
-  * ecn_mark:	number of packets we ECN marked instead of dropping
-  * ce_mark:	number of packets CE marked because sojourn time was above ce_threshold
-  */
- struct codel_stats {
- 	u32		maxpacket;
- 	u32		drop_count;
-+	u32		drop_len;
- 	u32		ecn_mark;
- 	u32		ce_mark;
- };
-@@ -308,6 +310,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
- 								  vars->rec_inv_sqrt);
- 					goto end;
- 				}
-+				stats->drop_len += qdisc_pkt_len(skb);
- 				qdisc_drop(skb, sch);
- 				stats->drop_count++;
- 				skb = dequeue_func(vars, sch);
-@@ -330,6 +333,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
- 		if (params->ecn && INET_ECN_set_ce(skb)) {
- 			stats->ecn_mark++;
- 		} else {
-+			stats->drop_len += qdisc_pkt_len(skb);
- 			qdisc_drop(skb, sch);
- 			stats->drop_count++;
- 
-diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
-index 0816c872b689..a6cc576fd467 100644
---- a/include/net/ip_vs.h
-+++ b/include/net/ip_vs.h
-@@ -1588,6 +1588,23 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
- }
- #endif /* CONFIG_IP_VS_NFCT */
- 
-+/* Really using conntrack? */
-+static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp,
-+					     struct sk_buff *skb)
-+{
-+#ifdef CONFIG_IP_VS_NFCT
-+	enum ip_conntrack_info ctinfo;
-+	struct nf_conn *ct;
-+
-+	if (!(cp->flags & IP_VS_CONN_F_NFCT))
-+		return false;
-+	ct = nf_ct_get(skb, &ctinfo);
-+	if (ct && !nf_ct_is_untracked(ct))
-+		return true;
-+#endif
-+	return false;
-+}
-+
- static inline int
- ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
- {
-diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
-index 636a362a0e03..e5bba897d206 100644
---- a/include/net/sch_generic.h
-+++ b/include/net/sch_generic.h
-@@ -396,7 +396,8 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
- 			      struct Qdisc *qdisc);
- void qdisc_reset(struct Qdisc *qdisc);
- void qdisc_destroy(struct Qdisc *qdisc);
--void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
-+void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
-+			       unsigned int len);
- struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
- 			  const struct Qdisc_ops *ops);
- struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
-@@ -707,6 +708,23 @@ static inline void qdisc_reset_queue(struct Qdisc *sch)
- 	sch->qstats.backlog = 0;
- }
- 
-+static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
-+					  struct Qdisc **pold)
-+{
-+	struct Qdisc *old;
-+
-+	sch_tree_lock(sch);
-+	old = *pold;
-+	*pold = new;
-+	if (old != NULL) {
-+		qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
-+		qdisc_reset(old);
-+	}
-+	sch_tree_unlock(sch);
-+
-+	return old;
-+}
-+
- static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
- 					      struct sk_buff_head *list)
- {
-diff --git a/include/net/sock.h b/include/net/sock.h
-index f5ea148853e2..3c688ca3456d 100644
---- a/include/net/sock.h
-+++ b/include/net/sock.h
-@@ -630,7 +630,11 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
- 
- static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
- {
--	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
-+	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
-+	    sk->sk_family == AF_INET6)
-+		hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
-+	else
-+		hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
- }
- 
- static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
-diff --git a/include/rdma/ib.h b/include/rdma/ib.h
-index cf8f9e700e48..a6b93706b0fc 100644
---- a/include/rdma/ib.h
-+++ b/include/rdma/ib.h
-@@ -34,6 +34,7 @@
- #define _RDMA_IB_H
- 
- #include <linux/types.h>
-+#include <linux/sched.h>
- 
- struct ib_addr {
- 	union {
-@@ -86,4 +87,19 @@ struct sockaddr_ib {
- 	__u64			sib_scope_id;
- };
- 
-+/*
-+ * The IB interfaces that use write() as bi-directional ioctl() are
-+ * fundamentally unsafe, since there are lots of ways to trigger "write()"
-+ * calls from various contexts with elevated privileges. That includes the
-+ * traditional suid executable error message writes, but also various kernel
-+ * interfaces that can write to file descriptors.
-+ *
-+ * This function provides protection for the legacy API by restricting the
-+ * calling context.
-+ */
-+static inline bool ib_safe_file_access(struct file *filp)
-+{
-+	return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
-+}
-+
- #endif /* _RDMA_IB_H */
-diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
-index f63a16760ae9..a5d31f794cac 100644
---- a/include/scsi/scsi_device.h
-+++ b/include/scsi/scsi_device.h
-@@ -240,6 +240,7 @@ scmd_printk(const char *, const struct scsi_cmnd *, const char *, ...);
- enum scsi_target_state {
- 	STARGET_CREATED = 1,
- 	STARGET_RUNNING,
-+	STARGET_REMOVE,
- 	STARGET_DEL,
- };
- 
-@@ -513,6 +514,31 @@ static inline int scsi_device_tpgs(struct scsi_device *sdev)
- 	return sdev->inquiry ? (sdev->inquiry[5] >> 4) & 0x3 : 0;
- }
- 
-+/**
-+ * scsi_device_supports_vpd - test if a device supports VPD pages
-+ * @sdev: the &struct scsi_device to test
-+ *
-+ * If the 'try_vpd_pages' flag is set it takes precedence.
-+ * Otherwise we will assume VPD pages are supported if the
-+ * SCSI level is at least SPC-3 and 'skip_vpd_pages' is not set.
-+ */
-+static inline int scsi_device_supports_vpd(struct scsi_device *sdev)
-+{
-+	/* Attempt VPD inquiry if the device blacklist explicitly calls
-+	 * for it.
-+	 */
-+	if (sdev->try_vpd_pages)
-+		return 1;
-+	/*
-+	 * Although VPD inquiries can go to SCSI-2 type devices,
-+	 * some USB ones crash on receiving them, and the pages
-+	 * we currently ask for are for SPC-3 and beyond
-+	 */
-+	if (sdev->scsi_level > SCSI_SPC_2 && !sdev->skip_vpd_pages)
-+		return 1;
-+	return 0;
-+}
-+
- #define MODULE_ALIAS_SCSI_DEVICE(type) \
- 	MODULE_ALIAS("scsi:t-" __stringify(type) "*")
- #define SCSI_DEVICE_MODALIAS_FMT "scsi:t-0x%02x"
-diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h
-index fa341fcb5829..f5842bcd9c94 100644
---- a/include/sound/hda_i915.h
-+++ b/include/sound/hda_i915.h
-@@ -9,7 +9,7 @@
- #ifdef CONFIG_SND_HDA_I915
- int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
- int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
--int snd_hdac_get_display_clk(struct hdac_bus *bus);
-+void snd_hdac_i915_set_bclk(struct hdac_bus *bus);
- int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, int rate);
- int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid,
- 			   bool *audio_enabled, char *buffer, int max_bytes);
-@@ -25,9 +25,8 @@ static inline int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
- {
- 	return 0;
- }
--static inline int snd_hdac_get_display_clk(struct hdac_bus *bus)
-+static inline void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
- {
--	return 0;
- }
- static inline int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid,
- 					   int rate)
-diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
-index c21c38ce7450..93e63c56f48f 100644
---- a/include/sound/hdaudio.h
-+++ b/include/sound/hdaudio.h
-@@ -168,11 +168,13 @@ int snd_hdac_power_up(struct hdac_device *codec);
- int snd_hdac_power_down(struct hdac_device *codec);
- int snd_hdac_power_up_pm(struct hdac_device *codec);
- int snd_hdac_power_down_pm(struct hdac_device *codec);
-+int snd_hdac_keep_power_up(struct hdac_device *codec);
- #else
- static inline int snd_hdac_power_up(struct hdac_device *codec) { return 0; }
- static inline int snd_hdac_power_down(struct hdac_device *codec) { return 0; }
- static inline int snd_hdac_power_up_pm(struct hdac_device *codec) { return 0; }
- static inline int snd_hdac_power_down_pm(struct hdac_device *codec) { return 0; }
-+static inline int snd_hdac_keep_power_up(struct hdac_device *codec) { return 0; }
- #endif
- 
- /*
-diff --git a/include/uapi/linux/if.h b/include/uapi/linux/if.h
-index 9cf2394f0bcf..752f5dc040a5 100644
---- a/include/uapi/linux/if.h
-+++ b/include/uapi/linux/if.h
-@@ -19,14 +19,20 @@
- #ifndef _LINUX_IF_H
- #define _LINUX_IF_H
- 
-+#include <linux/libc-compat.h>          /* for compatibility with glibc */
- #include <linux/types.h>		/* for "__kernel_caddr_t" et al	*/
- #include <linux/socket.h>		/* for "struct sockaddr" et al	*/
- #include <linux/compiler.h>		/* for "__user" et al           */
- 
-+#if __UAPI_DEF_IF_IFNAMSIZ
- #define	IFNAMSIZ	16
-+#endif /* __UAPI_DEF_IF_IFNAMSIZ */
- #define	IFALIASZ	256
- #include <linux/hdlc/ioctl.h>
- 
-+/* For glibc compatibility. An empty enum does not compile. */
-+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && \
-+    __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0
- /**
-  * enum net_device_flags - &struct net_device flags
-  *
-@@ -68,6 +74,8 @@
-  * @IFF_ECHO: echo sent packets. Volatile.
-  */
- enum net_device_flags {
-+/* for compatibility with glibc net/if.h */
-+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS
- 	IFF_UP				= 1<<0,  /* sysfs */
- 	IFF_BROADCAST			= 1<<1,  /* volatile */
- 	IFF_DEBUG			= 1<<2,  /* sysfs */
-@@ -84,11 +92,17 @@ enum net_device_flags {
- 	IFF_PORTSEL			= 1<<13, /* sysfs */
- 	IFF_AUTOMEDIA			= 1<<14, /* sysfs */
- 	IFF_DYNAMIC			= 1<<15, /* sysfs */
-+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS */
-+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
- 	IFF_LOWER_UP			= 1<<16, /* volatile */
- 	IFF_DORMANT			= 1<<17, /* volatile */
- 	IFF_ECHO			= 1<<18, /* volatile */
-+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
- };
-+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0 */
- 
-+/* for compatibility with glibc net/if.h */
-+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS
- #define IFF_UP				IFF_UP
- #define IFF_BROADCAST			IFF_BROADCAST
- #define IFF_DEBUG			IFF_DEBUG
-@@ -105,9 +119,13 @@ enum net_device_flags {
- #define IFF_PORTSEL			IFF_PORTSEL
- #define IFF_AUTOMEDIA			IFF_AUTOMEDIA
- #define IFF_DYNAMIC			IFF_DYNAMIC
-+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS */
-+
-+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
- #define IFF_LOWER_UP			IFF_LOWER_UP
- #define IFF_DORMANT			IFF_DORMANT
- #define IFF_ECHO			IFF_ECHO
-+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
- 
- #define IFF_VOLATILE	(IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|IFF_ECHO|\
- 		IFF_MASTER|IFF_SLAVE|IFF_RUNNING|IFF_LOWER_UP|IFF_DORMANT)
-@@ -166,6 +184,8 @@ enum {
-  *	being very small might be worth keeping for clean configuration.
-  */
- 
-+/* for compatibility with glibc net/if.h */
-+#if __UAPI_DEF_IF_IFMAP
- struct ifmap {
- 	unsigned long mem_start;
- 	unsigned long mem_end;
-@@ -175,6 +195,7 @@ struct ifmap {
- 	unsigned char port;
- 	/* 3 bytes spare */
- };
-+#endif /* __UAPI_DEF_IF_IFMAP */
- 
- struct if_settings {
- 	unsigned int type;	/* Type of physical device or protocol */
-@@ -200,6 +221,8 @@ struct if_settings {
-  * remainder may be interface specific.
-  */
- 
-+/* for compatibility with glibc net/if.h */
-+#if __UAPI_DEF_IF_IFREQ
- struct ifreq {
- #define IFHWADDRLEN	6
- 	union
-@@ -223,6 +246,7 @@ struct ifreq {
- 		struct	if_settings ifru_settings;
- 	} ifr_ifru;
- };
-+#endif /* __UAPI_DEF_IF_IFREQ */
- 
- #define ifr_name	ifr_ifrn.ifrn_name	/* interface name 	*/
- #define ifr_hwaddr	ifr_ifru.ifru_hwaddr	/* MAC address 		*/
-@@ -249,6 +273,8 @@ struct ifreq {
-  * must know all networks accessible).
-  */
- 
-+/* for compatibility with glibc net/if.h */
-+#if __UAPI_DEF_IF_IFCONF
- struct ifconf  {
- 	int	ifc_len;			/* size of buffer	*/
- 	union {
-@@ -256,6 +282,8 @@ struct ifconf  {
- 		struct ifreq __user *ifcu_req;
- 	} ifc_ifcu;
- };
-+#endif /* __UAPI_DEF_IF_IFCONF */
-+
- #define	ifc_buf	ifc_ifcu.ifcu_buf		/* buffer address	*/
- #define	ifc_req	ifc_ifcu.ifcu_req		/* array of structures	*/
- 
-diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
-index 7d024ceb075d..d5e38c73377c 100644
---- a/include/uapi/linux/libc-compat.h
-+++ b/include/uapi/linux/libc-compat.h
-@@ -51,6 +51,40 @@
- /* We have included glibc headers... */
- #if defined(__GLIBC__)
- 
-+/* Coordinate with glibc net/if.h header. */
-+#if defined(_NET_IF_H)
-+
-+/* GLIBC headers included first so don't define anything
-+ * that would already be defined. */
-+
-+#define __UAPI_DEF_IF_IFCONF 0
-+#define __UAPI_DEF_IF_IFMAP 0
-+#define __UAPI_DEF_IF_IFNAMSIZ 0
-+#define __UAPI_DEF_IF_IFREQ 0
-+/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
-+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 0
-+/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
-+#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
-+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
-+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
-+
-+#else /* _NET_IF_H */
-+
-+/* Linux headers included first, and we must define everything
-+ * we need. The expectation is that glibc will check the
-+ * __UAPI_DEF_* defines and adjust appropriately. */
-+
-+#define __UAPI_DEF_IF_IFCONF 1
-+#define __UAPI_DEF_IF_IFMAP 1
-+#define __UAPI_DEF_IF_IFNAMSIZ 1
-+#define __UAPI_DEF_IF_IFREQ 1
-+/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
-+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
-+/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
-+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
-+
-+#endif /* _NET_IF_H */
-+
- /* Coordinate with glibc netinet/in.h header. */
- #if defined(_NETINET_IN_H)
- 
-@@ -117,6 +151,16 @@
-  * that we need. */
- #else /* !defined(__GLIBC__) */
- 
-+/* Definitions for if.h */
-+#define __UAPI_DEF_IF_IFCONF 1
-+#define __UAPI_DEF_IF_IFMAP 1
-+#define __UAPI_DEF_IF_IFNAMSIZ 1
-+#define __UAPI_DEF_IF_IFREQ 1
-+/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
-+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
-+/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
-+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
-+
- /* Definitions for in.h */
- #define __UAPI_DEF_IN_ADDR		1
- #define __UAPI_DEF_IN_IPPROTO		1
-diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
-index c039f1d68a09..086168e18ca8 100644
---- a/include/uapi/linux/v4l2-dv-timings.h
-+++ b/include/uapi/linux/v4l2-dv-timings.h
-@@ -183,7 +183,8 @@
- 
- #define V4L2_DV_BT_CEA_3840X2160P24 { \
- 	.type = V4L2_DV_BT_656_1120, \
--	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
-+	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
-+		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
- 		297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \
- 		V4L2_DV_BT_STD_CEA861, \
- 		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
-@@ -191,14 +192,16 @@
- 
- #define V4L2_DV_BT_CEA_3840X2160P25 { \
- 	.type = V4L2_DV_BT_656_1120, \
--	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
-+	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
-+		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
- 		297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
- 		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
- }
- 
- #define V4L2_DV_BT_CEA_3840X2160P30 { \
- 	.type = V4L2_DV_BT_656_1120, \
--	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
-+	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
-+		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
- 		297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
- 		V4L2_DV_BT_STD_CEA861, \
- 		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
-@@ -206,14 +209,16 @@
- 
- #define V4L2_DV_BT_CEA_3840X2160P50 { \
- 	.type = V4L2_DV_BT_656_1120, \
--	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
-+	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
-+		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
- 		594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
- 		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
- }
- 
- #define V4L2_DV_BT_CEA_3840X2160P60 { \
- 	.type = V4L2_DV_BT_656_1120, \
--	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
-+	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
-+		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
- 		594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
- 		V4L2_DV_BT_STD_CEA861, \
- 		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
-@@ -221,7 +226,8 @@
- 
- #define V4L2_DV_BT_CEA_4096X2160P24 { \
- 	.type = V4L2_DV_BT_656_1120, \
--	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
-+	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
-+		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
- 		297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \
- 		V4L2_DV_BT_STD_CEA861, \
- 		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
-@@ -229,14 +235,16 @@
- 
- #define V4L2_DV_BT_CEA_4096X2160P25 { \
- 	.type = V4L2_DV_BT_656_1120, \
--	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
-+	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
-+		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
- 		297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
- 		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
- }
- 
- #define V4L2_DV_BT_CEA_4096X2160P30 { \
- 	.type = V4L2_DV_BT_656_1120, \
--	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
-+	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
-+		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
- 		297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
- 		V4L2_DV_BT_STD_CEA861, \
- 		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
-@@ -244,14 +252,16 @@
- 
- #define V4L2_DV_BT_CEA_4096X2160P50 { \
- 	.type = V4L2_DV_BT_656_1120, \
--	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
-+	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
-+		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
- 		594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
- 		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
- }
- 
- #define V4L2_DV_BT_CEA_4096X2160P60 { \
- 	.type = V4L2_DV_BT_656_1120, \
--	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
-+	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
-+		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
- 		594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
- 		V4L2_DV_BT_STD_CEA861, \
- 		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
-diff --git a/include/xen/page.h b/include/xen/page.h
-index 96294ac93755..9dc46cb8a0fd 100644
---- a/include/xen/page.h
-+++ b/include/xen/page.h
-@@ -15,9 +15,9 @@
-  */
- 
- #define xen_pfn_to_page(xen_pfn)	\
--	((pfn_to_page(((unsigned long)(xen_pfn) << XEN_PAGE_SHIFT) >> PAGE_SHIFT)))
-+	(pfn_to_page((unsigned long)(xen_pfn) >> (PAGE_SHIFT - XEN_PAGE_SHIFT)))
- #define page_to_xen_pfn(page)		\
--	(((page_to_pfn(page)) << PAGE_SHIFT) >> XEN_PAGE_SHIFT)
-+	((page_to_pfn(page)) << (PAGE_SHIFT - XEN_PAGE_SHIFT))
- 
- #define XEN_PFN_PER_PAGE	(PAGE_SIZE / XEN_PAGE_SIZE)
- 
-diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
-index 4504ca66118d..50da680c479f 100644
---- a/kernel/bpf/helpers.c
-+++ b/kernel/bpf/helpers.c
-@@ -166,7 +166,7 @@ static u64 bpf_get_current_comm(u64 r1, u64 size, u64 r3, u64 r4, u64 r5)
- 	if (!task)
- 		return -EINVAL;
- 
--	memcpy(buf, task->comm, min_t(size_t, size, sizeof(task->comm)));
-+	strlcpy(buf, task->comm, min_t(size_t, size, sizeof(task->comm)));
- 	return 0;
- }
- 
-diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
-index f2ece3c174a5..8f94ca1860cf 100644
---- a/kernel/bpf/inode.c
-+++ b/kernel/bpf/inode.c
-@@ -31,10 +31,10 @@ static void *bpf_any_get(void *raw, enum bpf_type type)
- {
- 	switch (type) {
- 	case BPF_TYPE_PROG:
--		atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt);
-+		raw = bpf_prog_inc(raw);
- 		break;
- 	case BPF_TYPE_MAP:
--		bpf_map_inc(raw, true);
-+		raw = bpf_map_inc(raw, true);
- 		break;
- 	default:
- 		WARN_ON_ONCE(1);
-@@ -297,7 +297,8 @@ static void *bpf_obj_do_get(const struct filename *pathname,
- 		goto out;
- 
- 	raw = bpf_any_get(inode->i_private, *type);
--	touch_atime(&path);
-+	if (!IS_ERR(raw))
-+		touch_atime(&path);
- 
- 	path_put(&path);
- 	return raw;
-diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
-index 637397059f76..aa5f39772ac4 100644
---- a/kernel/bpf/syscall.c
-+++ b/kernel/bpf/syscall.c
-@@ -201,11 +201,18 @@ struct bpf_map *__bpf_map_get(struct fd f)
- 	return f.file->private_data;
- }
- 
--void bpf_map_inc(struct bpf_map *map, bool uref)
-+/* prog's and map's refcnt limit */
-+#define BPF_MAX_REFCNT 32768
-+
-+struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
- {
--	atomic_inc(&map->refcnt);
-+	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
-+		atomic_dec(&map->refcnt);
-+		return ERR_PTR(-EBUSY);
-+	}
- 	if (uref)
- 		atomic_inc(&map->usercnt);
-+	return map;
- }
- 
- struct bpf_map *bpf_map_get_with_uref(u32 ufd)
-@@ -217,7 +224,7 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd)
- 	if (IS_ERR(map))
- 		return map;
- 
--	bpf_map_inc(map, true);
-+	map = bpf_map_inc(map, true);
- 	fdput(f);
- 
- 	return map;
-@@ -600,6 +607,15 @@ static struct bpf_prog *__bpf_prog_get(struct fd f)
- 	return f.file->private_data;
- }
- 
-+struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
-+{
-+	if (atomic_inc_return(&prog->aux->refcnt) > BPF_MAX_REFCNT) {
-+		atomic_dec(&prog->aux->refcnt);
-+		return ERR_PTR(-EBUSY);
-+	}
-+	return prog;
-+}
-+
- /* called by sockets/tracing/seccomp before attaching program to an event
-  * pairs with bpf_prog_put()
-  */
-@@ -612,7 +628,7 @@ struct bpf_prog *bpf_prog_get(u32 ufd)
- 	if (IS_ERR(prog))
- 		return prog;
- 
--	atomic_inc(&prog->aux->refcnt);
-+	prog = bpf_prog_inc(prog);
- 	fdput(f);
- 
- 	return prog;
-diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
-index 2e7f7ab739e4..2cbfba78d3db 100644
---- a/kernel/bpf/verifier.c
-+++ b/kernel/bpf/verifier.c
-@@ -239,15 +239,6 @@ static const char * const reg_type_str[] = {
- 	[CONST_IMM]		= "imm",
- };
- 
--static const struct {
--	int map_type;
--	int func_id;
--} func_limit[] = {
--	{BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call},
--	{BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read},
--	{BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_output},
--};
--
- static void print_verifier_state(struct verifier_env *env)
- {
- 	enum bpf_reg_type t;
-@@ -898,24 +889,44 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
- 
- static int check_map_func_compatibility(struct bpf_map *map, int func_id)
- {
--	bool bool_map, bool_func;
--	int i;
--
- 	if (!map)
- 		return 0;
- 
--	for (i = 0; i < ARRAY_SIZE(func_limit); i++) {
--		bool_map = (map->map_type == func_limit[i].map_type);
--		bool_func = (func_id == func_limit[i].func_id);
--		/* only when map & func pair match it can continue.
--		 * don't allow any other map type to be passed into
--		 * the special func;
--		 */
--		if (bool_func && bool_map != bool_func)
--			return -EINVAL;
-+	/* We need a two way check, first is from map perspective ... */
-+	switch (map->map_type) {
-+	case BPF_MAP_TYPE_PROG_ARRAY:
-+		if (func_id != BPF_FUNC_tail_call)
-+			goto error;
-+		break;
-+	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
-+		if (func_id != BPF_FUNC_perf_event_read &&
-+		    func_id != BPF_FUNC_perf_event_output)
-+			goto error;
-+		break;
-+	default:
-+		break;
-+	}
-+
-+	/* ... and second from the function itself. */
-+	switch (func_id) {
-+	case BPF_FUNC_tail_call:
-+		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
-+			goto error;
-+		break;
-+	case BPF_FUNC_perf_event_read:
-+	case BPF_FUNC_perf_event_output:
-+		if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
-+			goto error;
-+		break;
-+	default:
-+		break;
- 	}
- 
- 	return 0;
-+error:
-+	verbose("cannot pass map_type %d into func %d\n",
-+		map->map_type, func_id);
-+	return -EINVAL;
- }
- 
- static int check_call(struct verifier_env *env, int func_id)
-@@ -1348,6 +1359,7 @@ static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
- 	}
- 
- 	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
-+	    BPF_SIZE(insn->code) == BPF_DW ||
- 	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
- 		verbose("BPF_LD_ABS uses reserved fields\n");
- 		return -EINVAL;
-@@ -2003,7 +2015,6 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
- 			if (IS_ERR(map)) {
- 				verbose("fd %d is not pointing to valid bpf_map\n",
- 					insn->imm);
--				fdput(f);
- 				return PTR_ERR(map);
- 			}
- 
-@@ -2023,15 +2034,18 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
- 				return -E2BIG;
- 			}
- 
--			/* remember this map */
--			env->used_maps[env->used_map_cnt++] = map;
--
- 			/* hold the map. If the program is rejected by verifier,
- 			 * the map will be released by release_maps() or it
- 			 * will be used by the valid program until it's unloaded
- 			 * and all maps are released in free_bpf_prog_info()
- 			 */
--			bpf_map_inc(map, false);
-+			map = bpf_map_inc(map, false);
-+			if (IS_ERR(map)) {
-+				fdput(f);
-+				return PTR_ERR(map);
-+			}
-+			env->used_maps[env->used_map_cnt++] = map;
-+
- 			fdput(f);
- next_insn:
- 			insn++;
-diff --git a/kernel/cgroup.c b/kernel/cgroup.c
-index d27904c193da..355cd5f2b416 100644
---- a/kernel/cgroup.c
-+++ b/kernel/cgroup.c
-@@ -2474,6 +2474,14 @@ static void cgroup_migrate_add_src(struct css_set *src_cset,
- 	lockdep_assert_held(&cgroup_mutex);
- 	lockdep_assert_held(&css_set_lock);
- 
-+	/*
-+	 * If ->dead, @src_set is associated with one or more dead cgroups
-+	 * and doesn't contain any migratable tasks.  Ignore it early so
-+	 * that the rest of migration path doesn't get confused by it.
-+	 */
-+	if (src_cset->dead)
-+		return;
-+
- 	src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);
- 
- 	if (!list_empty(&src_cset->mg_preload_node))
-@@ -2689,9 +2697,10 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
- 				    size_t nbytes, loff_t off, bool threadgroup)
- {
- 	struct task_struct *tsk;
-+	struct cgroup_subsys *ss;
- 	struct cgroup *cgrp;
- 	pid_t pid;
--	int ret;
-+	int ssid, ret;
- 
- 	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
- 		return -EINVAL;
-@@ -2739,8 +2748,10 @@ out_unlock_rcu:
- 	rcu_read_unlock();
- out_unlock_threadgroup:
- 	percpu_up_write(&cgroup_threadgroup_rwsem);
-+	for_each_subsys(ss, ssid)
-+		if (ss->post_attach)
-+			ss->post_attach();
- 	cgroup_kn_unlock(of->kn);
--	cpuset_post_attach_flush();
- 	return ret ?: nbytes;
- }
- 
-@@ -5114,6 +5125,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
- 	__releases(&cgroup_mutex) __acquires(&cgroup_mutex)
- {
- 	struct cgroup_subsys_state *css;
-+	struct cgrp_cset_link *link;
- 	int ssid;
- 
- 	lockdep_assert_held(&cgroup_mutex);
-@@ -5134,11 +5146,18 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
- 		return -EBUSY;
- 
- 	/*
--	 * Mark @cgrp dead.  This prevents further task migration and child
--	 * creation by disabling cgroup_lock_live_group().
-+	 * Mark @cgrp and the associated csets dead.  The former prevents
-+	 * further task migration and child creation by disabling
-+	 * cgroup_lock_live_group().  The latter makes the csets ignored by
-+	 * the migration path.
- 	 */
- 	cgrp->self.flags &= ~CSS_ONLINE;
- 
-+	spin_lock_bh(&css_set_lock);
-+	list_for_each_entry(link, &cgrp->cset_links, cset_link)
-+		link->cset->dead = true;
-+	spin_unlock_bh(&css_set_lock);
-+
- 	/* initiate massacre of all css's */
- 	for_each_css(css, ssid, cgrp)
- 		kill_css(css);
-diff --git a/kernel/cpuset.c b/kernel/cpuset.c
-index 41989ab4db57..df16d0c9349f 100644
---- a/kernel/cpuset.c
-+++ b/kernel/cpuset.c
-@@ -58,7 +58,6 @@
- #include <asm/uaccess.h>
- #include <linux/atomic.h>
- #include <linux/mutex.h>
--#include <linux/workqueue.h>
- #include <linux/cgroup.h>
- #include <linux/wait.h>
- 
-@@ -1016,7 +1015,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
- 	}
- }
- 
--void cpuset_post_attach_flush(void)
-+static void cpuset_post_attach(void)
- {
- 	flush_workqueue(cpuset_migrate_mm_wq);
- }
-@@ -2087,6 +2086,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
- 	.can_attach	= cpuset_can_attach,
- 	.cancel_attach	= cpuset_cancel_attach,
- 	.attach		= cpuset_attach,
-+	.post_attach	= cpuset_post_attach,
- 	.bind		= cpuset_bind,
- 	.legacy_cftypes	= files,
- 	.early_init	= 1,
-diff --git a/kernel/events/core.c b/kernel/events/core.c
-index 614614821f00..477fb6b8ca20 100644
---- a/kernel/events/core.c
-+++ b/kernel/events/core.c
-@@ -1090,6 +1090,7 @@ static void put_ctx(struct perf_event_context *ctx)
-  * function.
-  *
-  * Lock order:
-+ *    cred_guard_mutex
-  *	task_struct::perf_event_mutex
-  *	  perf_event_context::mutex
-  *	    perf_event::child_mutex;
-@@ -2402,14 +2403,24 @@ static void ctx_sched_out(struct perf_event_context *ctx,
- 			cpuctx->task_ctx = NULL;
- 	}
- 
--	is_active ^= ctx->is_active; /* changed bits */
--
-+	/*
-+	 * Always update time if it was set; not only when it changes.
-+	 * Otherwise we can 'forget' to update time for any but the last
-+	 * context we sched out. For example:
-+	 *
-+	 *   ctx_sched_out(.event_type = EVENT_FLEXIBLE)
-+	 *   ctx_sched_out(.event_type = EVENT_PINNED)
-+	 *
-+	 * would only update time for the pinned events.
-+	 */
- 	if (is_active & EVENT_TIME) {
- 		/* update (and stop) ctx time */
- 		update_context_time(ctx);
- 		update_cgrp_time_from_cpuctx(cpuctx);
- 	}
- 
-+	is_active ^= ctx->is_active; /* changed bits */
-+
- 	if (!ctx->nr_active || !(is_active & EVENT_ALL))
- 		return;
- 
-@@ -3405,7 +3416,6 @@ static struct task_struct *
- find_lively_task_by_vpid(pid_t vpid)
- {
- 	struct task_struct *task;
--	int err;
- 
- 	rcu_read_lock();
- 	if (!vpid)
-@@ -3419,16 +3429,7 @@ find_lively_task_by_vpid(pid_t vpid)
- 	if (!task)
- 		return ERR_PTR(-ESRCH);
- 
--	/* Reuse ptrace permission checks for now. */
--	err = -EACCES;
--	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
--		goto errout;
--
- 	return task;
--errout:
--	put_task_struct(task);
--	return ERR_PTR(err);
--
- }
- 
- /*
-@@ -8001,6 +8002,9 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
- 		}
- 	}
- 
-+	/* symmetric to unaccount_event() in _free_event() */
-+	account_event(event);
-+
- 	return event;
- 
- err_per_task:
-@@ -8347,6 +8351,24 @@ SYSCALL_DEFINE5(perf_event_open,
- 
- 	get_online_cpus();
- 
-+	if (task) {
-+		err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
-+		if (err)
-+			goto err_cpus;
-+
-+		/*
-+		 * Reuse ptrace permission checks for now.
-+		 *
-+		 * We must hold cred_guard_mutex across this and any potential
-+		 * perf_install_in_context() call for this new event to
-+		 * serialize against exec() altering our credentials (and the
-+		 * perf_event_exit_task() that could imply).
-+		 */
-+		err = -EACCES;
-+		if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
-+			goto err_cred;
-+	}
-+
- 	if (flags & PERF_FLAG_PID_CGROUP)
- 		cgroup_fd = pid;
- 
-@@ -8354,7 +8376,7 @@ SYSCALL_DEFINE5(perf_event_open,
- 				 NULL, NULL, cgroup_fd);
- 	if (IS_ERR(event)) {
- 		err = PTR_ERR(event);
--		goto err_cpus;
-+		goto err_cred;
- 	}
- 
- 	if (is_sampling_event(event)) {
-@@ -8364,8 +8386,6 @@ SYSCALL_DEFINE5(perf_event_open,
- 		}
- 	}
- 
--	account_event(event);
--
- 	/*
- 	 * Special case software events and allow them to be part of
- 	 * any hardware group.
-@@ -8415,11 +8435,6 @@ SYSCALL_DEFINE5(perf_event_open,
- 		goto err_context;
- 	}
- 
--	if (task) {
--		put_task_struct(task);
--		task = NULL;
--	}
--
- 	/*
- 	 * Look up the group leader (we will attach this event to it):
- 	 */
-@@ -8478,6 +8493,7 @@ SYSCALL_DEFINE5(perf_event_open,
- 					f_flags);
- 	if (IS_ERR(event_file)) {
- 		err = PTR_ERR(event_file);
-+		event_file = NULL;
- 		goto err_context;
- 	}
- 
-@@ -8516,6 +8532,11 @@ SYSCALL_DEFINE5(perf_event_open,
- 
- 	WARN_ON_ONCE(ctx->parent_ctx);
- 
-+	/*
-+	 * This is the point on no return; we cannot fail hereafter. This is
-+	 * where we start modifying current state.
-+	 */
-+
- 	if (move_group) {
- 		/*
- 		 * See perf_event_ctx_lock() for comments on the details
-@@ -8587,6 +8608,11 @@ SYSCALL_DEFINE5(perf_event_open,
- 		mutex_unlock(&gctx->mutex);
- 	mutex_unlock(&ctx->mutex);
- 
-+	if (task) {
-+		mutex_unlock(&task->signal->cred_guard_mutex);
-+		put_task_struct(task);
-+	}
-+
- 	put_online_cpus();
- 
- 	mutex_lock(&current->perf_event_mutex);
-@@ -8619,6 +8645,9 @@ err_alloc:
- 	 */
- 	if (!event_file)
- 		free_event(event);
-+err_cred:
-+	if (task)
-+		mutex_unlock(&task->signal->cred_guard_mutex);
- err_cpus:
- 	put_online_cpus();
- err_task:
-@@ -8662,8 +8691,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
- 	/* Mark owner so we could distinguish it from user events. */
- 	event->owner = TASK_TOMBSTONE;
- 
--	account_event(event);
--
- 	ctx = find_get_context(event->pmu, task, event);
- 	if (IS_ERR(ctx)) {
- 		err = PTR_ERR(ctx);
-@@ -8905,6 +8932,9 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
- 
- /*
-  * When a child task exits, feed back event values to parent events.
-+ *
-+ * Can be called with cred_guard_mutex held when called from
-+ * install_exec_creds().
-  */
- void perf_event_exit_task(struct task_struct *child)
- {
-diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
-index 1faad2cfdb9e..287201a5d12f 100644
---- a/kernel/events/ring_buffer.c
-+++ b/kernel/events/ring_buffer.c
-@@ -347,6 +347,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
- 			 bool truncated)
- {
- 	struct ring_buffer *rb = handle->rb;
-+	bool wakeup = truncated;
- 	unsigned long aux_head;
- 	u64 flags = 0;
- 
-@@ -375,9 +376,16 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
- 	aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);
- 
- 	if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
--		perf_output_wakeup(handle);
-+		wakeup = true;
- 		local_add(rb->aux_watermark, &rb->aux_wakeup);
- 	}
-+
-+	if (wakeup) {
-+		if (truncated)
-+			handle->event->pending_disable = 1;
-+		perf_output_wakeup(handle);
-+	}
-+
- 	handle->event = NULL;
- 
- 	local_set(&rb->aux_nest, 0);
-diff --git a/kernel/futex.c b/kernel/futex.c
-index 5d6ce6413ef1..11b502159f3a 100644
---- a/kernel/futex.c
-+++ b/kernel/futex.c
-@@ -1212,10 +1212,20 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
- 	if (unlikely(should_fail_futex(true)))
- 		ret = -EFAULT;
- 
--	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
-+	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
- 		ret = -EFAULT;
--	else if (curval != uval)
--		ret = -EINVAL;
-+	} else if (curval != uval) {
-+		/*
-+		 * If a unconditional UNLOCK_PI operation (user space did not
-+		 * try the TID->0 transition) raced with a waiter setting the
-+		 * FUTEX_WAITERS flag between get_user() and locking the hash
-+		 * bucket lock, retry the operation.
-+		 */
-+		if ((FUTEX_TID_MASK & curval) == uval)
-+			ret = -EAGAIN;
-+		else
-+			ret = -EINVAL;
-+	}
- 	if (ret) {
- 		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
- 		return ret;
-@@ -1442,8 +1452,8 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
- 	if (likely(&hb1->chain != &hb2->chain)) {
- 		plist_del(&q->list, &hb1->chain);
- 		hb_waiters_dec(hb1);
--		plist_add(&q->list, &hb2->chain);
- 		hb_waiters_inc(hb2);
-+		plist_add(&q->list, &hb2->chain);
- 		q->lock_ptr = &hb2->lock;
- 	}
- 	get_futex_key_refs(key2);
-@@ -2536,6 +2546,15 @@ retry:
- 		if (ret == -EFAULT)
- 			goto pi_faulted;
- 		/*
-+		 * A unconditional UNLOCK_PI op raced against a waiter
-+		 * setting the FUTEX_WAITERS bit. Try again.
-+		 */
-+		if (ret == -EAGAIN) {
-+			spin_unlock(&hb->lock);
-+			put_futex_key(&key);
-+			goto retry;
-+		}
-+		/*
- 		 * wake_futex_pi has detected invalid state. Tell user
- 		 * space.
- 		 */
-diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
-index 5b9102a47ea5..c835270f0c2f 100644
---- a/kernel/locking/mcs_spinlock.h
-+++ b/kernel/locking/mcs_spinlock.h
-@@ -67,7 +67,13 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
- 	node->locked = 0;
- 	node->next   = NULL;
- 
--	prev = xchg_acquire(lock, node);
-+	/*
-+	 * We rely on the full barrier with global transitivity implied by the
-+	 * below xchg() to order the initialization stores above against any
-+	 * observation of @node. And to provide the ACQUIRE ordering associated
-+	 * with a LOCK primitive.
-+	 */
-+	prev = xchg(lock, node);
- 	if (likely(prev == NULL)) {
- 		/*
- 		 * Lock acquired, don't need to set node->locked to 1. Threads
-diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
-index b7342a24f559..b7dd5718836e 100644
---- a/kernel/power/hibernate.c
-+++ b/kernel/power/hibernate.c
-@@ -339,6 +339,7 @@ int hibernation_snapshot(int platform_mode)
- 	pm_message_t msg;
- 	int error;
- 
-+	pm_suspend_clear_flags();
- 	error = platform_begin(platform_mode);
- 	if (error)
- 		goto Close;
-diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 41f6b2215aa8..1c1d2a00ad95 100644
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -5630,6 +5630,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
- 
- 	case CPU_UP_PREPARE:
- 		rq->calc_load_update = calc_load_update;
-+		account_reset_rq(rq);
- 		break;
- 
- 	case CPU_ONLINE:
-@@ -7801,7 +7802,7 @@ void set_curr_task(int cpu, struct task_struct *p)
- /* task_group_lock serializes the addition/removal of task groups */
- static DEFINE_SPINLOCK(task_group_lock);
- 
--static void free_sched_group(struct task_group *tg)
-+static void sched_free_group(struct task_group *tg)
- {
- 	free_fair_sched_group(tg);
- 	free_rt_sched_group(tg);
-@@ -7827,7 +7828,7 @@ struct task_group *sched_create_group(struct task_group *parent)
- 	return tg;
- 
- err:
--	free_sched_group(tg);
-+	sched_free_group(tg);
- 	return ERR_PTR(-ENOMEM);
- }
- 
-@@ -7847,17 +7848,16 @@ void sched_online_group(struct task_group *tg, struct task_group *parent)
- }
- 
- /* rcu callback to free various structures associated with a task group */
--static void free_sched_group_rcu(struct rcu_head *rhp)
-+static void sched_free_group_rcu(struct rcu_head *rhp)
- {
- 	/* now it should be safe to free those cfs_rqs */
--	free_sched_group(container_of(rhp, struct task_group, rcu));
-+	sched_free_group(container_of(rhp, struct task_group, rcu));
- }
- 
--/* Destroy runqueue etc associated with a task group */
- void sched_destroy_group(struct task_group *tg)
- {
- 	/* wait for possible concurrent references to cfs_rqs complete */
--	call_rcu(&tg->rcu, free_sched_group_rcu);
-+	call_rcu(&tg->rcu, sched_free_group_rcu);
- }
- 
- void sched_offline_group(struct task_group *tg)
-@@ -8318,31 +8318,26 @@ cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
- 	if (IS_ERR(tg))
- 		return ERR_PTR(-ENOMEM);
- 
-+	sched_online_group(tg, parent);
-+
- 	return &tg->css;
- }
- 
--static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
-+static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
- {
- 	struct task_group *tg = css_tg(css);
--	struct task_group *parent = css_tg(css->parent);
- 
--	if (parent)
--		sched_online_group(tg, parent);
--	return 0;
-+	sched_offline_group(tg);
- }
- 
- static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
- {
- 	struct task_group *tg = css_tg(css);
- 
--	sched_destroy_group(tg);
--}
--
--static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
--{
--	struct task_group *tg = css_tg(css);
--
--	sched_offline_group(tg);
-+	/*
-+	 * Relies on the RCU grace period between css_released() and this.
-+	 */
-+	sched_free_group(tg);
- }
- 
- static void cpu_cgroup_fork(struct task_struct *task)
-@@ -8702,9 +8697,8 @@ static struct cftype cpu_files[] = {
- 
- struct cgroup_subsys cpu_cgrp_subsys = {
- 	.css_alloc	= cpu_cgroup_css_alloc,
-+	.css_released	= cpu_cgroup_css_released,
- 	.css_free	= cpu_cgroup_css_free,
--	.css_online	= cpu_cgroup_css_online,
--	.css_offline	= cpu_cgroup_css_offline,
- 	.fork		= cpu_cgroup_fork,
- 	.can_attach	= cpu_cgroup_can_attach,
- 	.attach		= cpu_cgroup_attach,
-diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
-index b2ab2ffb1adc..ab2b5fb9821d 100644
---- a/kernel/sched/cputime.c
-+++ b/kernel/sched/cputime.c
-@@ -262,21 +262,21 @@ static __always_inline bool steal_account_process_tick(void)
- #ifdef CONFIG_PARAVIRT
- 	if (static_key_false(&paravirt_steal_enabled)) {
- 		u64 steal;
--		cputime_t steal_ct;
-+		unsigned long steal_jiffies;
- 
- 		steal = paravirt_steal_clock(smp_processor_id());
- 		steal -= this_rq()->prev_steal_time;
- 
- 		/*
--		 * cputime_t may be less precise than nsecs (eg: if it's
--		 * based on jiffies). Lets cast the result to cputime
-+		 * steal is in nsecs but our caller is expecting steal
-+		 * time in jiffies. Lets cast the result to jiffies
- 		 * granularity and account the rest on the next rounds.
- 		 */
--		steal_ct = nsecs_to_cputime(steal);
--		this_rq()->prev_steal_time += cputime_to_nsecs(steal_ct);
-+		steal_jiffies = nsecs_to_jiffies(steal);
-+		this_rq()->prev_steal_time += jiffies_to_nsecs(steal_jiffies);
- 
--		account_steal_time(steal_ct);
--		return steal_ct;
-+		account_steal_time(jiffies_to_cputime(steal_jiffies));
-+		return steal_jiffies;
- 	}
- #endif
- 	return false;
-diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index 56b7d4b83947..adff850e5d42 100644
---- a/kernel/sched/fair.c
-+++ b/kernel/sched/fair.c
-@@ -4459,9 +4459,17 @@ static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
- 
- 		/* scale is effectively 1 << i now, and >> i divides by scale */
- 
--		old_load = this_rq->cpu_load[i] - tickless_load;
-+		old_load = this_rq->cpu_load[i];
- 		old_load = decay_load_missed(old_load, pending_updates - 1, i);
--		old_load += tickless_load;
-+		if (tickless_load) {
-+			old_load -= decay_load_missed(tickless_load, pending_updates - 1, i);
-+			/*
-+			 * old_load can never be a negative value because a
-+			 * decayed tickless_load cannot be greater than the
-+			 * original tickless_load.
-+			 */
-+			old_load += tickless_load;
-+		}
- 		new_load = this_load;
- 		/*
- 		 * Round up the averaging division if load is increasing. This
-diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
-index ef7159012cf3..b0b93fd33af9 100644
---- a/kernel/sched/loadavg.c
-+++ b/kernel/sched/loadavg.c
-@@ -99,10 +99,13 @@ long calc_load_fold_active(struct rq *this_rq)
- static unsigned long
- calc_load(unsigned long load, unsigned long exp, unsigned long active)
- {
--	load *= exp;
--	load += active * (FIXED_1 - exp);
--	load += 1UL << (FSHIFT - 1);
--	return load >> FSHIFT;
-+	unsigned long newload;
-+
-+	newload = load * exp + active * (FIXED_1 - exp);
-+	if (active >= load)
-+		newload += FIXED_1-1;
-+
-+	return newload / FIXED_1;
- }
- 
- #ifdef CONFIG_NO_HZ_COMMON
-diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index 10f16374df7f..ff87d887ff62 100644
---- a/kernel/sched/sched.h
-+++ b/kernel/sched/sched.h
-@@ -1738,3 +1738,16 @@ static inline u64 irq_time_read(int cpu)
- }
- #endif /* CONFIG_64BIT */
- #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
-+
-+static inline void account_reset_rq(struct rq *rq)
-+{
-+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-+	rq->prev_irq_time = 0;
-+#endif
-+#ifdef CONFIG_PARAVIRT
-+	rq->prev_steal_time = 0;
-+#endif
-+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
-+	rq->prev_steal_time_rq = 0;
-+#endif
-+}
-diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
-index 7e7746a42a62..10a1d7dc9313 100644
---- a/kernel/sysctl_binary.c
-+++ b/kernel/sysctl_binary.c
-@@ -1321,7 +1321,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
- 	}
- 
- 	mnt = task_active_pid_ns(current)->proc_mnt;
--	file = file_open_root(mnt->mnt_root, mnt, pathname, flags);
-+	file = file_open_root(mnt->mnt_root, mnt, pathname, flags, 0);
- 	result = PTR_ERR(file);
- 	if (IS_ERR(file))
- 		goto out_putname;
-diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
-index 95181e36891a..9c143739b8d7 100644
---- a/kernel/trace/ring_buffer.c
-+++ b/kernel/trace/ring_buffer.c
-@@ -437,7 +437,7 @@ struct ring_buffer_per_cpu {
- 	raw_spinlock_t			reader_lock;	/* serialize readers */
- 	arch_spinlock_t			lock;
- 	struct lock_class_key		lock_key;
--	unsigned int			nr_pages;
-+	unsigned long			nr_pages;
- 	unsigned int			current_context;
- 	struct list_head		*pages;
- 	struct buffer_page		*head_page;	/* read from head */
-@@ -458,7 +458,7 @@ struct ring_buffer_per_cpu {
- 	u64				write_stamp;
- 	u64				read_stamp;
- 	/* ring buffer pages to update, > 0 to add, < 0 to remove */
--	int				nr_pages_to_update;
-+	long				nr_pages_to_update;
- 	struct list_head		new_pages; /* new pages to add */
- 	struct work_struct		update_pages_work;
- 	struct completion		update_done;
-@@ -1128,10 +1128,10 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
- 	return 0;
- }
- 
--static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
-+static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
- {
--	int i;
- 	struct buffer_page *bpage, *tmp;
-+	long i;
- 
- 	for (i = 0; i < nr_pages; i++) {
- 		struct page *page;
-@@ -1168,7 +1168,7 @@ free_pages:
- }
- 
- static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
--			     unsigned nr_pages)
-+			     unsigned long nr_pages)
- {
- 	LIST_HEAD(pages);
- 
-@@ -1193,7 +1193,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
- }
- 
- static struct ring_buffer_per_cpu *
--rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
-+rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
- {
- 	struct ring_buffer_per_cpu *cpu_buffer;
- 	struct buffer_page *bpage;
-@@ -1293,8 +1293,9 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
- 					struct lock_class_key *key)
- {
- 	struct ring_buffer *buffer;
-+	long nr_pages;
- 	int bsize;
--	int cpu, nr_pages;
-+	int cpu;
- 
- 	/* keep it in its own cache line */
- 	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
-@@ -1420,12 +1421,12 @@ static inline unsigned long rb_page_write(struct buffer_page *bpage)
- }
- 
- static int
--rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
-+rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
- {
- 	struct list_head *tail_page, *to_remove, *next_page;
- 	struct buffer_page *to_remove_page, *tmp_iter_page;
- 	struct buffer_page *last_page, *first_page;
--	unsigned int nr_removed;
-+	unsigned long nr_removed;
- 	unsigned long head_bit;
- 	int page_entries;
- 
-@@ -1642,7 +1643,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
- 			int cpu_id)
- {
- 	struct ring_buffer_per_cpu *cpu_buffer;
--	unsigned nr_pages;
-+	unsigned long nr_pages;
- 	int cpu, err = 0;
- 
- 	/*
-@@ -1656,14 +1657,13 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
- 	    !cpumask_test_cpu(cpu_id, buffer->cpumask))
- 		return size;
- 
--	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
--	size *= BUF_PAGE_SIZE;
-+	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
- 
- 	/* we need a minimum of two pages */
--	if (size < BUF_PAGE_SIZE * 2)
--		size = BUF_PAGE_SIZE * 2;
-+	if (nr_pages < 2)
-+		nr_pages = 2;
- 
--	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
-+	size = nr_pages * BUF_PAGE_SIZE;
- 
- 	/*
- 	 * Don't succeed if resizing is disabled, as a reader might be
-@@ -4640,8 +4640,9 @@ static int rb_cpu_notify(struct notifier_block *self,
- 	struct ring_buffer *buffer =
- 		container_of(self, struct ring_buffer, cpu_notify);
- 	long cpu = (long)hcpu;
--	int cpu_i, nr_pages_same;
--	unsigned int nr_pages;
-+	long nr_pages_same;
-+	int cpu_i;
-+	unsigned long nr_pages;
- 
- 	switch (action) {
- 	case CPU_UP_PREPARE:
-diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
-index d9293402ee68..8305cbb2d5a2 100644
---- a/kernel/trace/trace.c
-+++ b/kernel/trace/trace.c
-@@ -4949,7 +4949,10 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
- 
- 	spd.nr_pages = i;
- 
--	ret = splice_to_pipe(pipe, &spd);
-+	if (i)
-+		ret = splice_to_pipe(pipe, &spd);
-+	else
-+		ret = 0;
- out:
- 	splice_shrink_spd(&spd);
- 	return ret;
-diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
-index 05ddc0820771..6f965864cc02 100644
---- a/kernel/trace/trace_events.c
-+++ b/kernel/trace/trace_events.c
-@@ -2095,8 +2095,13 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file)
- 	trace_create_file("filter", 0644, file->dir, file,
- 			  &ftrace_event_filter_fops);
- 
--	trace_create_file("trigger", 0644, file->dir, file,
--			  &event_trigger_fops);
-+	/*
-+	 * Only event directories that can be enabled should have
-+	 * triggers.
-+	 */
-+	if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
-+		trace_create_file("trigger", 0644, file->dir, file,
-+				  &event_trigger_fops);
- 
- 	trace_create_file("format", 0444, file->dir, call,
- 			  &ftrace_event_format_fops);
-diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
-index e4e56589ec1d..be3222b7d72e 100644
---- a/kernel/trace/trace_irqsoff.c
-+++ b/kernel/trace/trace_irqsoff.c
-@@ -109,8 +109,12 @@ static int func_prolog_dec(struct trace_array *tr,
- 		return 0;
- 
- 	local_save_flags(*flags);
--	/* slight chance to get a false positive on tracing_cpu */
--	if (!irqs_disabled_flags(*flags))
-+	/*
-+	 * Slight chance to get a false positive on tracing_cpu,
-+	 * although I'm starting to think there isn't a chance.
-+	 * Leave this for now just to be paranoid.
-+	 */
-+	if (!irqs_disabled_flags(*flags) && !preempt_count())
- 		return 0;
- 
- 	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
-diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
-index 060df67dbdd1..f96f0383f6c6 100644
---- a/kernel/trace/trace_printk.c
-+++ b/kernel/trace/trace_printk.c
-@@ -296,6 +296,9 @@ static int t_show(struct seq_file *m, void *v)
- 	const char *str = *fmt;
- 	int i;
- 
-+	if (!*fmt)
-+		return 0;
-+
- 	seq_printf(m, "0x%lx : \"", *(unsigned long *)fmt);
- 
- 	/*
-diff --git a/kernel/watchdog.c b/kernel/watchdog.c
-index b3ace6ebbba3..9acb29f280ec 100644
---- a/kernel/watchdog.c
-+++ b/kernel/watchdog.c
-@@ -923,6 +923,9 @@ static int proc_watchdog_common(int which, struct ctl_table *table, int write,
- 		 * both lockup detectors are disabled if proc_watchdog_update()
- 		 * returns an error.
- 		 */
-+		if (old == new)
-+			goto out;
-+
- 		err = proc_watchdog_update();
- 	}
- out:
-@@ -967,7 +970,7 @@ int proc_soft_watchdog(struct ctl_table *table, int write,
- int proc_watchdog_thresh(struct ctl_table *table, int write,
- 			 void __user *buffer, size_t *lenp, loff_t *ppos)
- {
--	int err, old;
-+	int err, old, new;
- 
- 	get_online_cpus();
- 	mutex_lock(&watchdog_proc_mutex);
-@@ -987,6 +990,10 @@ int proc_watchdog_thresh(struct ctl_table *table, int write,
- 	/*
- 	 * Update the sample period. Restore on failure.
- 	 */
-+	new = ACCESS_ONCE(watchdog_thresh);
-+	if (old == new)
-+		goto out;
-+
- 	set_sample_period();
- 	err = proc_watchdog_update();
- 	if (err) {
-diff --git a/kernel/workqueue.c b/kernel/workqueue.c
-index 7ff5dc7d2ac5..f48c80e4ba75 100644
---- a/kernel/workqueue.c
-+++ b/kernel/workqueue.c
-@@ -667,6 +667,35 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
- 	 */
- 	smp_wmb();
- 	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
-+	/*
-+	 * The following mb guarantees that previous clear of a PENDING bit
-+	 * will not be reordered with any speculative LOADS or STORES from
-+	 * work->current_func, which is executed afterwards.  This possible
-+	 * reordering can lead to a missed execution on attempt to qeueue
-+	 * the same @work.  E.g. consider this case:
-+	 *
-+	 *   CPU#0                         CPU#1
-+	 *   ----------------------------  --------------------------------
-+	 *
-+	 * 1  STORE event_indicated
-+	 * 2  queue_work_on() {
-+	 * 3    test_and_set_bit(PENDING)
-+	 * 4 }                             set_..._and_clear_pending() {
-+	 * 5                                 set_work_data() # clear bit
-+	 * 6                                 smp_mb()
-+	 * 7                               work->current_func() {
-+	 * 8				      LOAD event_indicated
-+	 *				   }
-+	 *
-+	 * Without an explicit full barrier speculative LOAD on line 8 can
-+	 * be executed before CPU#0 does STORE on line 1.  If that happens,
-+	 * CPU#0 observes the PENDING bit is still set and new execution of
-+	 * a @work is not queued in a hope, that CPU#1 will eventually
-+	 * finish the queued @work.  Meanwhile CPU#1 does not see
-+	 * event_indicated is set, because speculative LOAD was executed
-+	 * before actual STORE.
-+	 */
-+	smp_mb();
- }
- 
- static void clear_work_data(struct work_struct *work)
-@@ -4527,6 +4556,17 @@ static void rebind_workers(struct worker_pool *pool)
- 						  pool->attrs->cpumask) < 0);
- 
- 	spin_lock_irq(&pool->lock);
-+
-+	/*
-+	 * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
-+	 * w/o preceding DOWN_PREPARE.  Work around it.  CPU hotplug is
-+	 * being reworked and this can go away in time.
-+	 */
-+	if (!(pool->flags & POOL_DISASSOCIATED)) {
-+		spin_unlock_irq(&pool->lock);
-+		return;
-+	}
-+
- 	pool->flags &= ~POOL_DISASSOCIATED;
- 
- 	for_each_pool_worker(worker, pool) {
-diff --git a/lib/assoc_array.c b/lib/assoc_array.c
-index 03dd576e6773..59fd7c0b119c 100644
---- a/lib/assoc_array.c
-+++ b/lib/assoc_array.c
-@@ -524,7 +524,9 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
- 			free_slot = i;
- 			continue;
+ 	for (;;) {
+-		if (test_bit(TTY_OTHER_DONE, &tty->flags)) {
++		if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
+ 			ret = -EIO;
+ 			break;
  		}
--		if (ops->compare_object(assoc_array_ptr_to_leaf(ptr), index_key)) {
-+		if (assoc_array_ptr_is_leaf(ptr) &&
-+		    ops->compare_object(assoc_array_ptr_to_leaf(ptr),
-+					index_key)) {
- 			pr_devel("replace in slot %d\n", i);
- 			edit->leaf_p = &node->slots[i];
- 			edit->dead_leaf = node->slots[i];
-diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
-index abcecdc2d0f2..0710a62ad2f6 100644
---- a/lib/lz4/lz4defs.h
-+++ b/lib/lz4/lz4defs.h
-@@ -11,8 +11,7 @@
- /*
-  * Detects 64 bits mode
-  */
--#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) \
--	|| defined(__ppc64__) || defined(__LP64__))
-+#if defined(CONFIG_64BIT)
- #define LZ4_ARCH64 1
- #else
- #define LZ4_ARCH64 0
-@@ -35,6 +34,10 @@ typedef struct _U64_S { u64 v; } U64_S;
- 
- #define PUT4(s, d) (A32(d) = A32(s))
- #define PUT8(s, d) (A64(d) = A64(s))
-+
-+#define LZ4_READ_LITTLEENDIAN_16(d, s, p)	\
-+	(d = s - A16(p))
-+
- #define LZ4_WRITE_LITTLEENDIAN_16(p, v)	\
- 	do {	\
- 		A16(p) = v; \
-@@ -51,10 +54,13 @@ typedef struct _U64_S { u64 v; } U64_S;
- #define PUT8(s, d) \
- 	put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)
- 
--#define LZ4_WRITE_LITTLEENDIAN_16(p, v)	\
--	do {	\
--		put_unaligned(v, (u16 *)(p)); \
--		p += 2; \
-+#define LZ4_READ_LITTLEENDIAN_16(d, s, p)	\
-+	(d = s - get_unaligned_le16(p))
-+
-+#define LZ4_WRITE_LITTLEENDIAN_16(p, v)			\
-+	do {						\
-+		put_unaligned_le16(v, (u16 *)(p));	\
-+		p += 2;					\
- 	} while (0)
- #endif
- 
-@@ -140,9 +146,6 @@ typedef struct _U64_S { u64 v; } U64_S;
- 
- #endif
- 
--#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
--	(d = s - get_unaligned_le16(p))
--
- #define LZ4_WILDCOPY(s, d, e)		\
- 	do {				\
- 		LZ4_COPYPACKET(s, d);	\
-diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
-index ec533a6c77b5..eb15e7dc7b65 100644
---- a/lib/mpi/mpicoder.c
-+++ b/lib/mpi/mpicoder.c
-@@ -128,6 +128,23 @@ leave:
+@@ -828,7 +828,7 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
+ 		/* set bits for operations that won't block */
+ 		if (n_hdlc->rx_buf_list.head)
+ 			mask |= POLLIN | POLLRDNORM;	/* readable */
+-		if (test_bit(TTY_OTHER_DONE, &tty->flags))
++		if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
+ 			mask |= POLLHUP;
+ 		if (tty_hung_up_p(filp))
+ 			mask |= POLLHUP;
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index b280abaad91b..c12def71ed37 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -1963,18 +1963,6 @@ static inline int input_available_p(struct tty_struct *tty, int poll)
+ 		return ldata->commit_head - ldata->read_tail >= amt;
  }
- EXPORT_SYMBOL_GPL(mpi_read_from_buffer);
  
-+static int count_lzeros(MPI a)
-+{
-+	mpi_limb_t alimb;
-+	int i, lzeros = 0;
-+
-+	for (i = a->nlimbs - 1; i >= 0; i--) {
-+		alimb = a->d[i];
-+		if (alimb == 0) {
-+			lzeros += sizeof(mpi_limb_t);
-+		} else {
-+			lzeros += count_leading_zeros(alimb) / 8;
-+			break;
-+		}
-+	}
-+	return lzeros;
-+}
-+
+-static inline int check_other_done(struct tty_struct *tty)
+-{
+-	int done = test_bit(TTY_OTHER_DONE, &tty->flags);
+-	if (done) {
+-		/* paired with cmpxchg() in check_other_closed(); ensures
+-		 * read buffer head index is not stale
+-		 */
+-		smp_mb__after_atomic();
+-	}
+-	return done;
+-}
+-
  /**
-  * mpi_read_buffer() - read MPI to a bufer provided by user (msb first)
-  *
-@@ -148,7 +165,7 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
- 	uint8_t *p;
- 	mpi_limb_t alimb;
- 	unsigned int n = mpi_get_size(a);
--	int i, lzeros = 0;
-+	int i, lzeros;
- 
- 	if (!buf || !nbytes)
- 		return -EINVAL;
-@@ -156,14 +173,7 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
- 	if (sign)
- 		*sign = a->sign;
+  *	copy_from_read_buf	-	copy read data directly
+  *	@tty: terminal device
+@@ -2170,7 +2158,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
+ 	struct n_tty_data *ldata = tty->disc_data;
+ 	unsigned char __user *b = buf;
+ 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
+-	int c, done;
++	int c;
+ 	int minimum, time;
+ 	ssize_t retval = 0;
+ 	long timeout;
+@@ -2238,32 +2226,35 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
+ 		    ((minimum - (b - buf)) >= 1))
+ 			ldata->minimum_to_wake = (minimum - (b - buf));
  
--	p = (void *)&a->d[a->nlimbs] - 1;
+-		done = check_other_done(tty);
 -
--	for (i = a->nlimbs * sizeof(alimb) - 1; i >= 0; i--, p--) {
--		if (!*p)
--			lzeros++;
--		else
--			break;
--	}
-+	lzeros = count_lzeros(a);
- 
- 	if (buf_len < n - lzeros) {
- 		*nbytes = n - lzeros;
-@@ -351,7 +361,7 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
- 	u8 *p, *p2;
- 	mpi_limb_t alimb, alimb2;
- 	unsigned int n = mpi_get_size(a);
--	int i, x, y = 0, lzeros = 0, buf_len;
-+	int i, x, y = 0, lzeros, buf_len;
- 
- 	if (!nbytes)
- 		return -EINVAL;
-@@ -359,14 +369,7 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
- 	if (sign)
- 		*sign = a->sign;
+ 		if (!input_available_p(tty, 0)) {
+-			if (done) {
+-				retval = -EIO;
+-				break;
+-			}
+-			if (tty_hung_up_p(file))
+-				break;
+-			if (!timeout)
+-				break;
+-			if (file->f_flags & O_NONBLOCK) {
+-				retval = -EAGAIN;
+-				break;
+-			}
+-			if (signal_pending(current)) {
+-				retval = -ERESTARTSYS;
+-				break;
+-			}
+ 			up_read(&tty->termios_rwsem);
++			tty_buffer_flush_work(tty->port);
++			down_read(&tty->termios_rwsem);
++			if (!input_available_p(tty, 0)) {
++				if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
++					retval = -EIO;
++					break;
++				}
++				if (tty_hung_up_p(file))
++					break;
++				if (!timeout)
++					break;
++				if (file->f_flags & O_NONBLOCK) {
++					retval = -EAGAIN;
++					break;
++				}
++				if (signal_pending(current)) {
++					retval = -ERESTARTSYS;
++					break;
++				}
++				up_read(&tty->termios_rwsem);
  
--	p = (void *)&a->d[a->nlimbs] - 1;
--
--	for (i = a->nlimbs * sizeof(alimb) - 1; i >= 0; i--, p--) {
--		if (!*p)
--			lzeros++;
--		else
--			break;
--	}
-+	lzeros = count_lzeros(a);
- 
- 	if (*nbytes < n - lzeros) {
- 		*nbytes = n - lzeros;
-diff --git a/mm/compaction.c b/mm/compaction.c
-index 585de54dbe8c..29fb26970fba 100644
---- a/mm/compaction.c
-+++ b/mm/compaction.c
-@@ -880,16 +880,8 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
- 		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
- 							ISOLATE_UNEVICTABLE);
- 
--		/*
--		 * In case of fatal failure, release everything that might
--		 * have been isolated in the previous iteration, and signal
--		 * the failure back to caller.
--		 */
--		if (!pfn) {
--			putback_movable_pages(&cc->migratepages);
--			cc->nr_migratepages = 0;
-+		if (!pfn)
- 			break;
--		}
+-			timeout = wait_woken(&wait, TASK_INTERRUPTIBLE,
+-					     timeout);
++				timeout = wait_woken(&wait, TASK_INTERRUPTIBLE,
++						timeout);
  
- 		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
- 			break;
-diff --git a/mm/huge_memory.c b/mm/huge_memory.c
-index e10a4fee88d2..a82a87b3f9c6 100644
---- a/mm/huge_memory.c
-+++ b/mm/huge_memory.c
-@@ -1257,15 +1257,9 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
- 	VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
- 	/*
- 	 * We can only reuse the page if nobody else maps the huge page or it's
--	 * part. We can do it by checking page_mapcount() on each sub-page, but
--	 * it's expensive.
--	 * The cheaper way is to check page_count() to be equal 1: every
--	 * mapcount takes page reference reference, so this way we can
--	 * guarantee, that the PMD is the only mapping.
--	 * This can give false negative if somebody pinned the page, but that's
--	 * fine.
-+	 * part.
- 	 */
--	if (page_mapcount(page) == 1 && page_count(page) == 1) {
-+	if (page_trans_huge_mapcount(page, NULL) == 1) {
- 		pmd_t entry;
- 		entry = pmd_mkyoung(orig_pmd);
- 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-@@ -1919,10 +1913,9 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
- 		 * page fault if needed.
- 		 */
- 		return 0;
--	if (vma->vm_ops)
-+	if (vma->vm_ops || (vm_flags & VM_NO_THP))
- 		/* khugepaged not yet working on file or special mappings */
- 		return 0;
--	VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
- 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
- 	hend = vma->vm_end & HPAGE_PMD_MASK;
- 	if (hstart < hend)
-@@ -2039,7 +2032,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
- 		if (pte_write(pteval)) {
- 			writable = true;
- 		} else {
--			if (PageSwapCache(page) && !reuse_swap_page(page)) {
-+			if (PageSwapCache(page) &&
-+			    !reuse_swap_page(page, NULL)) {
- 				unlock_page(page);
- 				result = SCAN_SWAP_CACHE_PAGE;
- 				goto out;
-@@ -2310,8 +2304,7 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
- 		return false;
- 	if (is_vma_temporary_stack(vma))
- 		return false;
--	VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
--	return true;
-+	return !(vma->vm_flags & VM_NO_THP);
- }
+-			down_read(&tty->termios_rwsem);
+-			continue;
++				down_read(&tty->termios_rwsem);
++				continue;
++			}
+ 		}
  
- static void collapse_huge_page(struct mm_struct *mm,
-@@ -3340,6 +3333,64 @@ int total_mapcount(struct page *page)
- }
+ 		if (ldata->icanon && !L_EXTPROC(tty)) {
+@@ -2445,12 +2436,17 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
  
- /*
-+ * This calculates accurately how many mappings a transparent hugepage
-+ * has (unlike page_mapcount() which isn't fully accurate). This full
-+ * accuracy is primarily needed to know if copy-on-write faults can
-+ * reuse the page and change the mapping to read-write instead of
-+ * copying them. At the same time this returns the total_mapcount too.
-+ *
-+ * The function returns the highest mapcount any one of the subpages
-+ * has. If the return value is one, even if different processes are
-+ * mapping different subpages of the transparent hugepage, they can
-+ * all reuse it, because each process is reusing a different subpage.
-+ *
-+ * The total_mapcount is instead counting all virtual mappings of the
-+ * subpages. If the total_mapcount is equal to "one", it tells the
-+ * caller all mappings belong to the same "mm" and in turn the
-+ * anon_vma of the transparent hugepage can become the vma->anon_vma
-+ * local one as no other process may be mapping any of the subpages.
-+ *
-+ * It would be more accurate to replace page_mapcount() with
-+ * page_trans_huge_mapcount(), however we only use
-+ * page_trans_huge_mapcount() in the copy-on-write faults where we
-+ * need full accuracy to avoid breaking page pinning, because
-+ * page_trans_huge_mapcount() is slower than page_mapcount().
-+ */
-+int page_trans_huge_mapcount(struct page *page, int *total_mapcount)
-+{
-+	int i, ret, _total_mapcount, mapcount;
-+
-+	/* hugetlbfs shouldn't call it */
-+	VM_BUG_ON_PAGE(PageHuge(page), page);
-+
-+	if (likely(!PageTransCompound(page))) {
-+		mapcount = atomic_read(&page->_mapcount) + 1;
-+		if (total_mapcount)
-+			*total_mapcount = mapcount;
-+		return mapcount;
-+	}
-+
-+	page = compound_head(page);
-+
-+	_total_mapcount = ret = 0;
-+	for (i = 0; i < HPAGE_PMD_NR; i++) {
-+		mapcount = atomic_read(&page[i]._mapcount) + 1;
-+		ret = max(ret, mapcount);
-+		_total_mapcount += mapcount;
-+	}
-+	if (PageDoubleMap(page)) {
-+		ret -= 1;
-+		_total_mapcount -= HPAGE_PMD_NR;
+ 	poll_wait(file, &tty->read_wait, wait);
+ 	poll_wait(file, &tty->write_wait, wait);
+-	if (check_other_done(tty))
+-		mask |= POLLHUP;
+ 	if (input_available_p(tty, 1))
+ 		mask |= POLLIN | POLLRDNORM;
++	else {
++		tty_buffer_flush_work(tty->port);
++		if (input_available_p(tty, 1))
++			mask |= POLLIN | POLLRDNORM;
 +	}
-+	mapcount = compound_mapcount(page);
-+	ret += mapcount;
-+	_total_mapcount += mapcount;
-+	if (total_mapcount)
-+		*total_mapcount = _total_mapcount;
-+	return ret;
-+}
-+
-+/*
-  * This function splits huge page into normal pages. @page can point to any
-  * subpage of huge page to split. Split doesn't change the position of @page.
-  *
-diff --git a/mm/memcontrol.c b/mm/memcontrol.c
-index d06cae2de783..a65ad1d59232 100644
---- a/mm/memcontrol.c
-+++ b/mm/memcontrol.c
-@@ -207,6 +207,7 @@ static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
- /* "mc" and its members are protected by cgroup_mutex */
- static struct move_charge_struct {
- 	spinlock_t	  lock; /* for from, to */
-+	struct mm_struct  *mm;
- 	struct mem_cgroup *from;
- 	struct mem_cgroup *to;
- 	unsigned long flags;
-@@ -1262,7 +1263,7 @@ static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
- 	return limit;
- }
+ 	if (tty->packet && tty->link->ctrl_status)
+ 		mask |= POLLPRI | POLLIN | POLLRDNORM;
++	if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
++		mask |= POLLHUP;
+ 	if (tty_hung_up_p(file))
+ 		mask |= POLLHUP;
+ 	if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) {
+diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
+index 2348fa613707..6427a39bd360 100644
+--- a/drivers/tty/pty.c
++++ b/drivers/tty/pty.c
+@@ -59,7 +59,7 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
+ 	if (!tty->link)
+ 		return;
+ 	set_bit(TTY_OTHER_CLOSED, &tty->link->flags);
+-	tty_flip_buffer_push(tty->link->port);
++	wake_up_interruptible(&tty->link->read_wait);
+ 	wake_up_interruptible(&tty->link->write_wait);
+ 	if (tty->driver->subtype == PTY_TYPE_MASTER) {
+ 		set_bit(TTY_OTHER_CLOSED, &tty->flags);
+@@ -247,9 +247,7 @@ static int pty_open(struct tty_struct *tty, struct file *filp)
+ 		goto out;
  
--static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
-+static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
- 				     int order)
- {
- 	struct oom_control oc = {
-@@ -1340,6 +1341,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
- 	}
- unlock:
- 	mutex_unlock(&oom_lock);
-+	return chosen;
- }
+ 	clear_bit(TTY_IO_ERROR, &tty->flags);
+-	/* TTY_OTHER_CLOSED must be cleared before TTY_OTHER_DONE */
+ 	clear_bit(TTY_OTHER_CLOSED, &tty->link->flags);
+-	clear_bit(TTY_OTHER_DONE, &tty->link->flags);
+ 	set_bit(TTY_THROTTLED, &tty->flags);
+ 	return 0;
  
- #if MAX_NUMNODES > 1
-@@ -4729,6 +4731,8 @@ static void __mem_cgroup_clear_mc(void)
+diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c
+index 88531a36b69c..ed489880e62b 100644
+--- a/drivers/tty/serial/8250/8250_mid.c
++++ b/drivers/tty/serial/8250/8250_mid.c
+@@ -14,6 +14,7 @@
+ #include <linux/pci.h>
  
- static void mem_cgroup_clear_mc(void)
- {
-+	struct mm_struct *mm = mc.mm;
-+
- 	/*
- 	 * we must clear moving_task before waking up waiters at the end of
- 	 * task migration.
-@@ -4738,7 +4742,10 @@ static void mem_cgroup_clear_mc(void)
- 	spin_lock(&mc.lock);
- 	mc.from = NULL;
- 	mc.to = NULL;
-+	mc.mm = NULL;
- 	spin_unlock(&mc.lock);
-+
-+	mmput(mm);
- }
+ #include <linux/dma/hsu.h>
++#include <linux/8250_pci.h>
  
- static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
-@@ -4795,6 +4802,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
- 		VM_BUG_ON(mc.moved_swap);
- 
- 		spin_lock(&mc.lock);
-+		mc.mm = mm;
- 		mc.from = from;
- 		mc.to = memcg;
- 		mc.flags = move_flags;
-@@ -4804,8 +4812,9 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
- 		ret = mem_cgroup_precharge_mc(mm);
- 		if (ret)
- 			mem_cgroup_clear_mc();
-+	} else {
-+		mmput(mm);
- 	}
--	mmput(mm);
- 	return ret;
- }
+ #include "8250.h"
  
-@@ -4914,11 +4923,11 @@ put:			/* get_mctgt_type() gets the page */
- 	return ret;
- }
+@@ -24,6 +25,7 @@
+ #define PCI_DEVICE_ID_INTEL_DNV_UART	0x19d8
  
--static void mem_cgroup_move_charge(struct mm_struct *mm)
-+static void mem_cgroup_move_charge(void)
- {
- 	struct mm_walk mem_cgroup_move_charge_walk = {
- 		.pmd_entry = mem_cgroup_move_charge_pte_range,
--		.mm = mm,
-+		.mm = mc.mm,
- 	};
- 
- 	lru_add_drain_all();
-@@ -4930,7 +4939,7 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
- 	atomic_inc(&mc.from->moving_account);
- 	synchronize_rcu();
- retry:
--	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
-+	if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
- 		/*
- 		 * Someone who are holding the mmap_sem might be waiting in
- 		 * waitq. So we cancel all extra charges, wake up all waiters,
-@@ -4947,23 +4956,16 @@ retry:
- 	 * additional charge, the page walk just aborts.
- 	 */
- 	walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
--	up_read(&mm->mmap_sem);
-+	up_read(&mc.mm->mmap_sem);
- 	atomic_dec(&mc.from->moving_account);
- }
+ /* Intel MID Specific registers */
++#define INTEL_MID_UART_DNV_FISR		0x08
+ #define INTEL_MID_UART_PS		0x30
+ #define INTEL_MID_UART_MUL		0x34
+ #define INTEL_MID_UART_DIV		0x38
+@@ -31,6 +33,7 @@
+ struct mid8250;
  
--static void mem_cgroup_move_task(struct cgroup_taskset *tset)
-+static void mem_cgroup_move_task(void)
+ struct mid8250_board {
++	unsigned int flags;
+ 	unsigned long freq;
+ 	unsigned int base_baud;
+ 	int (*setup)(struct mid8250 *, struct uart_port *p);
+@@ -88,16 +91,16 @@ static int tng_setup(struct mid8250 *mid, struct uart_port *p)
+ static int dnv_handle_irq(struct uart_port *p)
  {
--	struct cgroup_subsys_state *css;
--	struct task_struct *p = cgroup_taskset_first(tset, &css);
--	struct mm_struct *mm = get_task_mm(p);
+ 	struct mid8250 *mid = p->private_data;
+-	int ret;
 -
--	if (mm) {
--		if (mc.to)
--			mem_cgroup_move_charge(mm);
--		mmput(mm);
--	}
--	if (mc.to)
-+	if (mc.to) {
-+		mem_cgroup_move_charge();
- 		mem_cgroup_clear_mc();
-+	}
- }
- #else	/* !CONFIG_MMU */
- static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
-@@ -4973,7 +4975,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
- static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
- {
+-	ret = hsu_dma_irq(&mid->dma_chip, 0);
+-	ret |= hsu_dma_irq(&mid->dma_chip, 1);
+-
+-	/* For now, letting the HW generate separate interrupt for the UART */
+-	if (ret)
+-		return ret;
+-
+-	return serial8250_handle_irq(p, serial_port_in(p, UART_IIR));
++	unsigned int fisr = serial_port_in(p, INTEL_MID_UART_DNV_FISR);
++	int ret = IRQ_NONE;
++
++	if (fisr & BIT(2))
++		ret |= hsu_dma_irq(&mid->dma_chip, 1);
++	if (fisr & BIT(1))
++		ret |= hsu_dma_irq(&mid->dma_chip, 0);
++	if (fisr & BIT(0))
++		ret |= serial8250_handle_irq(p, serial_port_in(p, UART_IIR));
++	return ret;
  }
--static void mem_cgroup_move_task(struct cgroup_taskset *tset)
-+static void mem_cgroup_move_task(void)
+ 
+ #define DNV_DMA_CHAN_OFFSET 0x80
+@@ -106,12 +109,13 @@ static int dnv_setup(struct mid8250 *mid, struct uart_port *p)
  {
- }
- #endif
-@@ -5051,6 +5053,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
- 				 char *buf, size_t nbytes, loff_t off)
+ 	struct hsu_dma_chip *chip = &mid->dma_chip;
+ 	struct pci_dev *pdev = to_pci_dev(p->dev);
++	unsigned int bar = FL_GET_BASE(mid->board->flags);
+ 	int ret;
+ 
+ 	chip->dev = &pdev->dev;
+ 	chip->irq = pdev->irq;
+ 	chip->regs = p->membase;
+-	chip->length = pci_resource_len(pdev, 0);
++	chip->length = pci_resource_len(pdev, bar);
+ 	chip->offset = DNV_DMA_CHAN_OFFSET;
+ 
+ 	/* Falling back to PIO mode if DMA probing fails */
+@@ -217,6 +221,7 @@ static int mid8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  {
- 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
-+	unsigned long nr_pages;
- 	unsigned long high;
- 	int err;
+ 	struct uart_8250_port uart;
+ 	struct mid8250 *mid;
++	unsigned int bar;
+ 	int ret;
  
-@@ -5061,6 +5064,11 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
+ 	ret = pcim_enable_device(pdev);
+@@ -230,6 +235,7 @@ static int mid8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 		return -ENOMEM;
  
- 	memcg->high = high;
+ 	mid->board = (struct mid8250_board *)id->driver_data;
++	bar = FL_GET_BASE(mid->board->flags);
  
-+	nr_pages = page_counter_read(&memcg->memory);
-+	if (nr_pages > high)
-+		try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
-+					     GFP_KERNEL, true);
-+
- 	memcg_wb_domain_size_changed(memcg);
- 	return nbytes;
- }
-@@ -5082,6 +5090,8 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
- 				char *buf, size_t nbytes, loff_t off)
- {
- 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
-+	unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES;
-+	bool drained = false;
- 	unsigned long max;
- 	int err;
+ 	memset(&uart, 0, sizeof(struct uart_8250_port));
  
-@@ -5090,9 +5100,36 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
- 	if (err)
- 		return err;
+@@ -242,8 +248,8 @@ static int mid8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	uart.port.flags = UPF_SHARE_IRQ | UPF_FIXED_PORT | UPF_FIXED_TYPE;
+ 	uart.port.set_termios = mid8250_set_termios;
  
--	err = mem_cgroup_resize_limit(memcg, max);
--	if (err)
--		return err;
-+	xchg(&memcg->memory.limit, max);
-+
-+	for (;;) {
-+		unsigned long nr_pages = page_counter_read(&memcg->memory);
-+
-+		if (nr_pages <= max)
-+			break;
-+
-+		if (signal_pending(current)) {
-+			err = -EINTR;
-+			break;
-+		}
-+
-+		if (!drained) {
-+			drain_all_stock(memcg);
-+			drained = true;
-+			continue;
-+		}
-+
-+		if (nr_reclaims) {
-+			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
-+							  GFP_KERNEL, true))
-+				nr_reclaims--;
-+			continue;
-+		}
-+
-+		mem_cgroup_events(memcg, MEMCG_OOM, 1);
-+		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
-+			break;
-+	}
+-	uart.port.mapbase = pci_resource_start(pdev, 0);
+-	uart.port.membase = pcim_iomap(pdev, 0, 0);
++	uart.port.mapbase = pci_resource_start(pdev, bar);
++	uart.port.membase = pcim_iomap(pdev, bar, 0);
+ 	if (!uart.port.membase)
+ 		return -ENOMEM;
  
- 	memcg_wb_domain_size_changed(memcg);
- 	return nbytes;
-@@ -5210,7 +5247,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
- 	.css_reset = mem_cgroup_css_reset,
- 	.can_attach = mem_cgroup_can_attach,
- 	.cancel_attach = mem_cgroup_cancel_attach,
--	.attach = mem_cgroup_move_task,
-+	.post_attach = mem_cgroup_move_task,
- 	.bind = mem_cgroup_bind,
- 	.dfl_cftypes = memory_files,
- 	.legacy_cftypes = mem_cgroup_legacy_files,
-diff --git a/mm/memory.c b/mm/memory.c
-index 8132787ae4d5..c1aa0e4b4096 100644
---- a/mm/memory.c
-+++ b/mm/memory.c
-@@ -792,6 +792,46 @@ out:
- 	return pfn_to_page(pfn);
+@@ -282,18 +288,21 @@ static void mid8250_remove(struct pci_dev *pdev)
  }
  
-+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-+struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
-+				pmd_t pmd)
-+{
-+	unsigned long pfn = pmd_pfn(pmd);
-+
-+	/*
-+	 * There is no pmd_special() but there may be special pmds, e.g.
-+	 * in a direct-access (dax) mapping, so let's just replicate the
-+	 * !HAVE_PTE_SPECIAL case from vm_normal_page() here.
-+	 */
-+	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
-+		if (vma->vm_flags & VM_MIXEDMAP) {
-+			if (!pfn_valid(pfn))
-+				return NULL;
-+			goto out;
-+		} else {
-+			unsigned long off;
-+			off = (addr - vma->vm_start) >> PAGE_SHIFT;
-+			if (pfn == vma->vm_pgoff + off)
-+				return NULL;
-+			if (!is_cow_mapping(vma->vm_flags))
-+				return NULL;
-+		}
-+	}
-+
-+	if (is_zero_pfn(pfn))
-+		return NULL;
-+	if (unlikely(pfn > highest_memmap_pfn))
-+		return NULL;
-+
-+	/*
-+	 * NOTE! We still have PageReserved() pages in the page tables.
-+	 * eg. VDSO mappings can cause them to exist.
-+	 */
-+out:
-+	return pfn_to_page(pfn);
-+}
-+#endif
-+
- /*
-  * copy one vm_area from one task to the other. Assumes the page tables
-  * already present in the new task to be cleared in the whole range
-@@ -2317,6 +2357,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
- 	 * not dirty accountable.
- 	 */
- 	if (PageAnon(old_page) && !PageKsm(old_page)) {
-+		int total_mapcount;
- 		if (!trylock_page(old_page)) {
- 			page_cache_get(old_page);
- 			pte_unmap_unlock(page_table, ptl);
-@@ -2331,13 +2372,18 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
- 			}
- 			page_cache_release(old_page);
- 		}
--		if (reuse_swap_page(old_page)) {
--			/*
--			 * The page is all ours.  Move it to our anon_vma so
--			 * the rmap code will not search our parent or siblings.
--			 * Protected against the rmap code by the page lock.
--			 */
--			page_move_anon_rmap(old_page, vma, address);
-+		if (reuse_swap_page(old_page, &total_mapcount)) {
-+			if (total_mapcount == 1) {
-+				/*
-+				 * The page is all ours. Move it to
-+				 * our anon_vma so the rmap code will
-+				 * not search our parent or siblings.
-+				 * Protected against the rmap code by
-+				 * the page lock.
-+				 */
-+				page_move_anon_rmap(compound_head(old_page),
-+						    vma, address);
-+			}
- 			unlock_page(old_page);
- 			return wp_page_reuse(mm, vma, address, page_table, ptl,
- 					     orig_pte, old_page, 0, 0);
-@@ -2562,7 +2608,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
- 	inc_mm_counter_fast(mm, MM_ANONPAGES);
- 	dec_mm_counter_fast(mm, MM_SWAPENTS);
- 	pte = mk_pte(page, vma->vm_page_prot);
--	if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
-+	if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
- 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
- 		flags &= ~FAULT_FLAG_WRITE;
- 		ret |= VM_FAULT_WRITE;
-diff --git a/mm/migrate.c b/mm/migrate.c
-index 3ad0fea5c438..625741faa068 100644
---- a/mm/migrate.c
-+++ b/mm/migrate.c
-@@ -967,7 +967,13 @@ out:
- 		dec_zone_page_state(page, NR_ISOLATED_ANON +
- 				page_is_file_cache(page));
- 		/* Soft-offlined page shouldn't go through lru cache list */
--		if (reason == MR_MEMORY_FAILURE) {
-+		if (reason == MR_MEMORY_FAILURE && rc == MIGRATEPAGE_SUCCESS) {
-+			/*
-+			 * With this release, we free successfully migrated
-+			 * page and set PG_HWPoison on just freed page
-+			 * intentionally. Although it's rather weird, it's how
-+			 * HWPoison flag works at the moment.
-+			 */
- 			put_page(page);
- 			if (!test_set_page_hwpoison(page))
- 				num_poisoned_pages_inc();
-diff --git a/mm/page-writeback.c b/mm/page-writeback.c
-index 6fe7d15bd1f7..62bbf350ddf7 100644
---- a/mm/page-writeback.c
-+++ b/mm/page-writeback.c
-@@ -1909,7 +1909,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
- 	if (gdtc->dirty > gdtc->bg_thresh)
- 		return true;
- 
--	if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(gdtc))
-+	if (wb_stat(wb, WB_RECLAIMABLE) >
-+	    wb_calc_thresh(gdtc->wb, gdtc->bg_thresh))
- 		return true;
+ static const struct mid8250_board pnw_board = {
++	.flags = FL_BASE0,
+ 	.freq = 50000000,
+ 	.base_baud = 115200,
+ 	.setup = pnw_setup,
+ };
  
- 	if (mdtc) {
-@@ -1923,7 +1924,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
- 		if (mdtc->dirty > mdtc->bg_thresh)
- 			return true;
+ static const struct mid8250_board tng_board = {
++	.flags = FL_BASE0,
+ 	.freq = 38400000,
+ 	.base_baud = 1843200,
+ 	.setup = tng_setup,
+ };
  
--		if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(mdtc))
-+		if (wb_stat(wb, WB_RECLAIMABLE) >
-+		    wb_calc_thresh(mdtc->wb, mdtc->bg_thresh))
- 			return true;
- 	}
+ static const struct mid8250_board dnv_board = {
++	.flags = FL_BASE1,
+ 	.freq = 133333333,
+ 	.base_baud = 115200,
+ 	.setup = dnv_setup,
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index 7cd6f9a90542..c1d4a8fa9be8 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -1401,6 +1401,9 @@ byt_set_termios(struct uart_port *p, struct ktermios *termios,
+ 	unsigned long m, n;
+ 	u32 reg;
  
-diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index 838ca8bb64f7..629ce645cffd 100644
---- a/mm/page_alloc.c
-+++ b/mm/page_alloc.c
-@@ -660,34 +660,28 @@ static inline void __free_one_page(struct page *page,
- 	unsigned long combined_idx;
- 	unsigned long uninitialized_var(buddy_idx);
- 	struct page *buddy;
--	unsigned int max_order = MAX_ORDER;
-+	unsigned int max_order;
++	/* Gracefully handle the B0 case: fall back to B9600 */
++	fuart = fuart ? fuart : 9600 * 16;
 +
-+	max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
- 
- 	VM_BUG_ON(!zone_is_initialized(zone));
- 	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
- 
- 	VM_BUG_ON(migratetype == -1);
--	if (is_migrate_isolate(migratetype)) {
--		/*
--		 * We restrict max order of merging to prevent merge
--		 * between freepages on isolate pageblock and normal
--		 * pageblock. Without this, pageblock isolation
--		 * could cause incorrect freepage accounting.
--		 */
--		max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
--	} else {
-+	if (likely(!is_migrate_isolate(migratetype)))
- 		__mod_zone_freepage_state(zone, 1 << order, migratetype);
--	}
- 
--	page_idx = pfn & ((1 << max_order) - 1);
-+	page_idx = pfn & ((1 << MAX_ORDER) - 1);
+ 	/* Get Fuart closer to Fref */
+ 	fuart *= rounddown_pow_of_two(fref / fuart);
  
- 	VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
- 	VM_BUG_ON_PAGE(bad_range(zone, page), page);
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index 1c0884d8ef32..172a8ccb0b63 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -273,6 +273,13 @@ static bool atmel_use_dma_rx(struct uart_port *port)
+ 	return atmel_port->use_dma_rx;
+ }
  
-+continue_merging:
- 	while (order < max_order - 1) {
- 		buddy_idx = __find_buddy_index(page_idx, order);
- 		buddy = page + (buddy_idx - page_idx);
- 		if (!page_is_buddy(page, buddy, order))
--			break;
-+			goto done_merging;
- 		/*
- 		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
- 		 * merge with it and move up one order.
-@@ -704,6 +698,32 @@ static inline void __free_one_page(struct page *page,
- 		page_idx = combined_idx;
- 		order++;
- 	}
-+	if (max_order < MAX_ORDER) {
-+		/* If we are here, it means order is >= pageblock_order.
-+		 * We want to prevent merge between freepages on isolate
-+		 * pageblock and normal pageblock. Without this, pageblock
-+		 * isolation could cause incorrect freepage or CMA accounting.
-+		 *
-+		 * We don't want to hit this code for the more frequent
-+		 * low-order merging.
-+		 */
-+		if (unlikely(has_isolate_pageblock(zone))) {
-+			int buddy_mt;
++static bool atmel_use_fifo(struct uart_port *port)
++{
++	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
 +
-+			buddy_idx = __find_buddy_index(page_idx, order);
-+			buddy = page + (buddy_idx - page_idx);
-+			buddy_mt = get_pageblock_migratetype(buddy);
++	return atmel_port->fifo_size;
++}
 +
-+			if (migratetype != buddy_mt
-+					&& (is_migrate_isolate(migratetype) ||
-+						is_migrate_isolate(buddy_mt)))
-+				goto done_merging;
+ static unsigned int atmel_get_lines_status(struct uart_port *port)
+ {
+ 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+@@ -2082,7 +2089,12 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
+ 		mode |= ATMEL_US_USMODE_RS485;
+ 	} else if (termios->c_cflag & CRTSCTS) {
+ 		/* RS232 with hardware handshake (RTS/CTS) */
+-		mode |= ATMEL_US_USMODE_HWHS;
++		if (atmel_use_dma_rx(port) && !atmel_use_fifo(port)) {
++			dev_info(port->dev, "not enabling hardware flow control because DMA is used");
++			termios->c_cflag &= ~CRTSCTS;
++		} else {
++			mode |= ATMEL_US_USMODE_HWHS;
 +		}
-+		max_order++;
-+		goto continue_merging;
-+	}
+ 	} else {
+ 		/* RS232 without hadware handshake */
+ 		mode |= ATMEL_US_USMODE_NORMAL;
+diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
+index d72cd736bdc6..8320173af846 100644
+--- a/drivers/tty/serial/samsung.c
++++ b/drivers/tty/serial/samsung.c
+@@ -1263,6 +1263,8 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
+ 	/* check to see if we need  to change clock source */
+ 
+ 	if (ourport->baudclk != clk) {
++		clk_prepare_enable(clk);
 +
-+done_merging:
- 	set_page_order(page, order);
+ 		s3c24xx_serial_setsource(port, clk_sel);
  
- 	/*
-@@ -6194,7 +6214,7 @@ int __meminit init_per_zone_wmark_min(void)
- 	setup_per_zone_inactive_ratio();
- 	return 0;
- }
--module_init(init_per_zone_wmark_min)
-+core_initcall(init_per_zone_wmark_min)
+ 		if (!IS_ERR(ourport->baudclk)) {
+@@ -1270,8 +1272,6 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
+ 			ourport->baudclk = ERR_PTR(-EINVAL);
+ 		}
  
- /*
-  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
-diff --git a/mm/page_isolation.c b/mm/page_isolation.c
-index 92c4c36501e7..31555b689eb9 100644
---- a/mm/page_isolation.c
-+++ b/mm/page_isolation.c
-@@ -289,11 +289,11 @@ struct page *alloc_migrate_target(struct page *page, unsigned long private,
- 	 * now as a simple work-around, we use the next node for destination.
- 	 */
- 	if (PageHuge(page)) {
--		nodemask_t src = nodemask_of_node(page_to_nid(page));
--		nodemask_t dst;
--		nodes_complement(dst, src);
-+		int node = next_online_node(page_to_nid(page));
-+		if (node == MAX_NUMNODES)
-+			node = first_online_node;
- 		return alloc_huge_page_node(page_hstate(compound_head(page)),
--					    next_node(page_to_nid(page), dst));
-+					    node);
+-		clk_prepare_enable(clk);
+-
+ 		ourport->baudclk = clk;
+ 		ourport->baudclk_rate = clk ? clk_get_rate(clk) : 0;
  	}
+diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
+index 3cd31e0d4bd9..fb31eecb708d 100644
+--- a/drivers/tty/tty_buffer.c
++++ b/drivers/tty/tty_buffer.c
+@@ -37,29 +37,6 @@
  
- 	if (PageHighMem(page))
-diff --git a/mm/slub.c b/mm/slub.c
-index d8fbd4a6ed59..2a722e141958 100644
---- a/mm/slub.c
-+++ b/mm/slub.c
-@@ -2815,6 +2815,7 @@ struct detached_freelist {
- 	void *tail;
- 	void *freelist;
- 	int cnt;
-+	struct kmem_cache *s;
- };
- 
- /*
-@@ -2829,8 +2830,9 @@ struct detached_freelist {
-  * synchronization primitive.  Look ahead in the array is limited due
-  * to performance reasons.
-  */
--static int build_detached_freelist(struct kmem_cache *s, size_t size,
--				   void **p, struct detached_freelist *df)
-+static inline
-+int build_detached_freelist(struct kmem_cache *s, size_t size,
-+			    void **p, struct detached_freelist *df)
- {
- 	size_t first_skipped_index = 0;
- 	int lookahead = 3;
-@@ -2846,8 +2848,11 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
- 	if (!object)
- 		return 0;
+ #define TTY_BUFFER_PAGE	(((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)
  
-+	/* Support for memcg, compiler can optimize this out */
-+	df->s = cache_from_obj(s, object);
-+
- 	/* Start new detached freelist */
--	set_freepointer(s, object, NULL);
-+	set_freepointer(df->s, object, NULL);
- 	df->page = virt_to_head_page(object);
- 	df->tail = object;
- 	df->freelist = object;
-@@ -2862,7 +2867,7 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
- 		/* df->page is always set at this point */
- 		if (df->page == virt_to_head_page(object)) {
- 			/* Opportunity build freelist */
--			set_freepointer(s, object, df->freelist);
-+			set_freepointer(df->s, object, df->freelist);
- 			df->freelist = object;
- 			df->cnt++;
- 			p[size] = NULL; /* mark object processed */
-@@ -2881,25 +2886,20 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
- 	return first_skipped_index;
- }
+-/*
+- * If all tty flip buffers have been processed by flush_to_ldisc() or
+- * dropped by tty_buffer_flush(), check if the linked pty has been closed.
+- * If so, wake the reader/poll to process
+- */
+-static inline void check_other_closed(struct tty_struct *tty)
+-{
+-	unsigned long flags, old;
+-
+-	/* transition from TTY_OTHER_CLOSED => TTY_OTHER_DONE must be atomic */
+-	for (flags = ACCESS_ONCE(tty->flags);
+-	     test_bit(TTY_OTHER_CLOSED, &flags);
+-	     ) {
+-		old = flags;
+-		__set_bit(TTY_OTHER_DONE, &flags);
+-		flags = cmpxchg(&tty->flags, old, flags);
+-		if (old == flags) {
+-			wake_up_interruptible(&tty->read_wait);
+-			break;
+-		}
+-	}
+-}
+-
+ /**
+  *	tty_buffer_lock_exclusive	-	gain exclusive access to buffer
+  *	tty_buffer_unlock_exclusive	-	release exclusive access
+@@ -254,8 +231,6 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
+ 	if (ld && ld->ops->flush_buffer)
+ 		ld->ops->flush_buffer(tty);
  
+-	check_other_closed(tty);
 -
- /* Note that interrupts must be enabled when calling this function. */
--void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
-+void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
+ 	atomic_dec(&buf->priority);
+ 	mutex_unlock(&buf->lock);
+ }
+@@ -505,10 +480,8 @@ static void flush_to_ldisc(struct work_struct *work)
+ 		 */
+ 		count = smp_load_acquire(&head->commit) - head->read;
+ 		if (!count) {
+-			if (next == NULL) {
+-				check_other_closed(tty);
++			if (next == NULL)
+ 				break;
+-			}
+ 			buf->head = next;
+ 			tty_buffer_free(port, head);
+ 			continue;
+@@ -597,3 +570,8 @@ bool tty_buffer_cancel_work(struct tty_port *port)
  {
- 	if (WARN_ON(!size))
- 		return;
+ 	return cancel_work_sync(&port->buf.work);
+ }
++
++void tty_buffer_flush_work(struct tty_port *port)
++{
++	flush_work(&port->buf.work);
++}
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index bd51bdd0a7bf..2b5382ea4842 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -3583,9 +3583,10 @@ static int do_register_con_driver(const struct consw *csw, int first, int last)
+ 		goto err;
  
- 	do {
- 		struct detached_freelist df;
--		struct kmem_cache *s;
+ 	desc = csw->con_startup();
 -
--		/* Support for memcg */
--		s = cache_from_obj(orig_s, p[size - 1]);
- 
- 		size = build_detached_freelist(s, size, p, &df);
- 		if (unlikely(!df.page))
- 			continue;
+-	if (!desc)
++	if (!desc) {
++		retval = -ENODEV;
+ 		goto err;
++	}
  
--		slab_free(s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
-+		slab_free(df.s, df.page, df.freelist, df.tail, df.cnt,_RET_IP_);
- 	} while (likely(size));
- }
- EXPORT_SYMBOL(kmem_cache_free_bulk);
-diff --git a/mm/swapfile.c b/mm/swapfile.c
-index d2c37365e2d6..954fd8f72b79 100644
---- a/mm/swapfile.c
-+++ b/mm/swapfile.c
-@@ -916,18 +916,19 @@ out:
-  * to it.  And as a side-effect, free up its swap: because the old content
-  * on disk will never be read, and seeking back there to write new content
-  * later would only waste time away from clustering.
-+ *
-+ * NOTE: total_mapcount should not be relied upon by the caller if
-+ * reuse_swap_page() returns false, but it may be always overwritten
-+ * (see the other implementation for CONFIG_SWAP=n).
-  */
--int reuse_swap_page(struct page *page)
-+bool reuse_swap_page(struct page *page, int *total_mapcount)
- {
- 	int count;
- 
- 	VM_BUG_ON_PAGE(!PageLocked(page), page);
- 	if (unlikely(PageKsm(page)))
--		return 0;
--	/* The page is part of THP and cannot be reused */
--	if (PageTransCompound(page))
--		return 0;
--	count = page_mapcount(page);
-+		return false;
-+	count = page_trans_huge_mapcount(page, total_mapcount);
- 	if (count <= 1 && PageSwapCache(page)) {
- 		count += page_swapcount(page);
- 		if (count == 1 && !PageWriteback(page)) {
-diff --git a/mm/vmscan.c b/mm/vmscan.c
-index 71b1c29948db..c712b016e0ab 100644
---- a/mm/vmscan.c
-+++ b/mm/vmscan.c
-@@ -2561,7 +2561,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
- 		sc->gfp_mask |= __GFP_HIGHMEM;
- 
- 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
--					requested_highidx, sc->nodemask) {
-+					gfp_zone(sc->gfp_mask), sc->nodemask) {
- 		enum zone_type classzone_idx;
- 
- 		if (!populated_zone(zone))
-diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
-index 2d7c4c11fc63..336149ffd07d 100644
---- a/mm/zsmalloc.c
-+++ b/mm/zsmalloc.c
-@@ -1732,10 +1732,13 @@ static struct page *isolate_source_page(struct size_class *class)
- static unsigned long zs_can_compact(struct size_class *class)
- {
- 	unsigned long obj_wasted;
-+	unsigned long obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
-+	unsigned long obj_used = zs_stat_get(class, OBJ_USED);
+ 	retval = -EINVAL;
  
--	obj_wasted = zs_stat_get(class, OBJ_ALLOCATED) -
--		zs_stat_get(class, OBJ_USED);
-+	if (obj_allocated <= obj_used)
-+		return 0;
+diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
+index 2057d91d8336..dadd1e8dfe09 100644
+--- a/drivers/usb/core/driver.c
++++ b/drivers/usb/core/driver.c
+@@ -284,7 +284,7 @@ static int usb_probe_interface(struct device *dev)
+ 	struct usb_device *udev = interface_to_usbdev(intf);
+ 	const struct usb_device_id *id;
+ 	int error = -ENODEV;
+-	int lpm_disable_error;
++	int lpm_disable_error = -ENODEV;
  
-+	obj_wasted = obj_allocated - obj_used;
- 	obj_wasted /= get_maxobj_per_zspage(class->size,
- 			class->pages_per_zspage);
- 
-diff --git a/mm/zswap.c b/mm/zswap.c
-index bf14508afd64..340261946fda 100644
---- a/mm/zswap.c
-+++ b/mm/zswap.c
-@@ -170,6 +170,8 @@ static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
- static LIST_HEAD(zswap_pools);
- /* protects zswap_pools list modification */
- static DEFINE_SPINLOCK(zswap_pools_lock);
-+/* pool counter to provide unique names to zpool */
-+static atomic_t zswap_pools_count = ATOMIC_INIT(0);
- 
- /* used by param callback function */
- static bool zswap_init_started;
-@@ -565,6 +567,7 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
- static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
- {
- 	struct zswap_pool *pool;
-+	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
- 	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
+ 	dev_dbg(dev, "%s\n", __func__);
  
- 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
-@@ -573,7 +576,10 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
- 		return NULL;
+@@ -336,12 +336,14 @@ static int usb_probe_interface(struct device *dev)
+ 	 * setting during probe, that should also be fine.  usb_set_interface()
+ 	 * will attempt to disable LPM, and fail if it can't disable it.
+ 	 */
+-	lpm_disable_error = usb_unlocked_disable_lpm(udev);
+-	if (lpm_disable_error && driver->disable_hub_initiated_lpm) {
+-		dev_err(&intf->dev, "%s Failed to disable LPM for driver %s\n.",
+-				__func__, driver->name);
+-		error = lpm_disable_error;
+-		goto err;
++	if (driver->disable_hub_initiated_lpm) {
++		lpm_disable_error = usb_unlocked_disable_lpm(udev);
++		if (lpm_disable_error) {
++			dev_err(&intf->dev, "%s Failed to disable LPM for driver %s\n.",
++					__func__, driver->name);
++			error = lpm_disable_error;
++			goto err;
++		}
  	}
  
--	pool->zpool = zpool_create_pool(type, "zswap", gfp, &zswap_zpool_ops);
-+	/* unique name for each pool specifically required by zsmalloc */
-+	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
-+
-+	pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
- 	if (!pool->zpool) {
- 		pr_err("%s zpool not available\n", type);
- 		goto error;
-diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
-index b563a3f5f2a8..2fa3be965101 100644
---- a/net/ax25/ax25_ip.c
-+++ b/net/ax25/ax25_ip.c
-@@ -228,8 +228,23 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
- }
- #endif
+ 	/* Carry out a deferred switch to altsetting 0 */
+@@ -391,7 +393,8 @@ static int usb_unbind_interface(struct device *dev)
+ 	struct usb_interface *intf = to_usb_interface(dev);
+ 	struct usb_host_endpoint *ep, **eps = NULL;
+ 	struct usb_device *udev;
+-	int i, j, error, r, lpm_disable_error;
++	int i, j, error, r;
++	int lpm_disable_error = -ENODEV;
  
-+static bool ax25_validate_header(const char *header, unsigned int len)
-+{
-+	ax25_digi digi;
-+
-+	if (!len)
-+		return false;
-+
-+	if (header[0])
-+		return true;
-+
-+	return ax25_addr_parse(header + 1, len - 1, NULL, NULL, &digi, NULL,
-+			       NULL);
-+}
-+
- const struct header_ops ax25_header_ops = {
- 	.create = ax25_hard_header,
-+	.validate = ax25_validate_header,
- };
+ 	intf->condition = USB_INTERFACE_UNBINDING;
  
- EXPORT_SYMBOL(ax25_header_ops);
-diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
-index a49c705fb86b..5f19133c5530 100644
---- a/net/batman-adv/distributed-arp-table.c
-+++ b/net/batman-adv/distributed-arp-table.c
-@@ -553,6 +553,7 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
-  * be sent to
-  * @bat_priv: the bat priv with all the soft interface information
-  * @ip_dst: ipv4 to look up in the DHT
-+ * @vid: VLAN identifier
-  *
-  * An originator O is selected if and only if its DHT_ID value is one of three
-  * closest values (from the LEFT, with wrap around if needed) then the hash
-@@ -561,7 +562,8 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
-  * Returns the candidate array of size BATADV_DAT_CANDIDATE_NUM.
-  */
- static struct batadv_dat_candidate *
--batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
-+batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst,
-+			     unsigned short vid)
- {
- 	int select;
- 	batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key;
-@@ -577,7 +579,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
- 		return NULL;
- 
- 	dat.ip = ip_dst;
--	dat.vid = 0;
-+	dat.vid = vid;
- 	ip_key = (batadv_dat_addr_t)batadv_hash_dat(&dat,
- 						    BATADV_DAT_ADDR_MAX);
- 
-@@ -597,6 +599,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
-  * @bat_priv: the bat priv with all the soft interface information
-  * @skb: payload to send
-  * @ip: the DHT key
-+ * @vid: VLAN identifier
-  * @packet_subtype: unicast4addr packet subtype to use
-  *
-  * This function copies the skb with pskb_copy() and is sent as unicast packet
-@@ -607,7 +610,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
-  */
- static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
- 				 struct sk_buff *skb, __be32 ip,
--				 int packet_subtype)
-+				 unsigned short vid, int packet_subtype)
- {
- 	int i;
- 	bool ret = false;
-@@ -616,7 +619,7 @@ static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
- 	struct sk_buff *tmp_skb;
- 	struct batadv_dat_candidate *cand;
- 
--	cand = batadv_dat_select_candidates(bat_priv, ip);
-+	cand = batadv_dat_select_candidates(bat_priv, ip, vid);
- 	if (!cand)
- 		goto out;
+@@ -399,12 +402,13 @@ static int usb_unbind_interface(struct device *dev)
+ 	udev = interface_to_usbdev(intf);
+ 	error = usb_autoresume_device(udev);
  
-@@ -1004,7 +1007,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
- 		ret = true;
- 	} else {
- 		/* Send the request to the DHT */
--		ret = batadv_dat_send_data(bat_priv, skb, ip_dst,
-+		ret = batadv_dat_send_data(bat_priv, skb, ip_dst, vid,
- 					   BATADV_P_DAT_DHT_GET);
- 	}
- out:
-@@ -1132,8 +1135,8 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
- 	/* Send the ARP reply to the candidates for both the IP addresses that
- 	 * the node obtained from the ARP reply
+-	/* Hub-initiated LPM policy may change, so attempt to disable LPM until
++	/* If hub-initiated LPM policy may change, attempt to disable LPM until
+ 	 * the driver is unbound.  If LPM isn't disabled, that's fine because it
+ 	 * wouldn't be enabled unless all the bound interfaces supported
+ 	 * hub-initiated LPM.
  	 */
--	batadv_dat_send_data(bat_priv, skb, ip_src, BATADV_P_DAT_DHT_PUT);
--	batadv_dat_send_data(bat_priv, skb, ip_dst, BATADV_P_DAT_DHT_PUT);
-+	batadv_dat_send_data(bat_priv, skb, ip_src, vid, BATADV_P_DAT_DHT_PUT);
-+	batadv_dat_send_data(bat_priv, skb, ip_dst, vid, BATADV_P_DAT_DHT_PUT);
- }
+-	lpm_disable_error = usb_unlocked_disable_lpm(udev);
++	if (driver->disable_hub_initiated_lpm)
++		lpm_disable_error = usb_unlocked_disable_lpm(udev);
  
- /**
-diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
-index e4f2646d9246..43d15d6c4b6a 100644
---- a/net/batman-adv/routing.c
-+++ b/net/batman-adv/routing.c
-@@ -104,6 +104,15 @@ static void _batadv_update_route(struct batadv_priv *bat_priv,
- 		neigh_node = NULL;
- 
- 	spin_lock_bh(&orig_node->neigh_list_lock);
-+	/* curr_router used earlier may not be the current orig_ifinfo->router
-+	 * anymore because it was dereferenced outside of the neigh_list_lock
-+	 * protected region. After the new best neighbor has replace the current
-+	 * best neighbor the reference counter needs to decrease. Consequently,
-+	 * the code needs to ensure the curr_router variable contains a pointer
-+	 * to the replaced best neighbor.
-+	 */
-+	curr_router = rcu_dereference_protected(orig_ifinfo->router, true);
-+
- 	rcu_assign_pointer(orig_ifinfo->router, neigh_node);
- 	spin_unlock_bh(&orig_node->neigh_list_lock);
- 	batadv_orig_ifinfo_free_ref(orig_ifinfo);
-diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
-index 782fa33ec296..45bfdefa15a5 100644
---- a/net/batman-adv/send.c
-+++ b/net/batman-adv/send.c
-@@ -629,6 +629,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
- 
- 		if (pending) {
- 			hlist_del(&forw_packet->list);
-+			if (!forw_packet->own)
-+				atomic_inc(&bat_priv->bcast_queue_left);
-+
- 			batadv_forw_packet_free(forw_packet);
- 		}
- 	}
-@@ -656,6 +659,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
+ 	/*
+ 	 * Terminate all URBs for this interface unless the driver
+@@ -505,7 +509,7 @@ int usb_driver_claim_interface(struct usb_driver *driver,
+ 	struct device *dev;
+ 	struct usb_device *udev;
+ 	int retval = 0;
+-	int lpm_disable_error;
++	int lpm_disable_error = -ENODEV;
  
- 		if (pending) {
- 			hlist_del(&forw_packet->list);
-+			if (!forw_packet->own)
-+				atomic_inc(&bat_priv->batman_queue_left);
-+
- 			batadv_forw_packet_free(forw_packet);
- 		}
- 	}
-diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
-index ac4d08de5df4..720f1a5b81ac 100644
---- a/net/batman-adv/soft-interface.c
-+++ b/net/batman-adv/soft-interface.c
-@@ -407,11 +407,17 @@ void batadv_interface_rx(struct net_device *soft_iface,
- 	 */
- 	nf_reset(skb);
+ 	if (!iface)
+ 		return -ENODEV;
+@@ -526,12 +530,14 @@ int usb_driver_claim_interface(struct usb_driver *driver,
  
-+	if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
-+		goto dropped;
-+
- 	vid = batadv_get_vid(skb, 0);
- 	ethhdr = eth_hdr(skb);
+ 	iface->condition = USB_INTERFACE_BOUND;
  
- 	switch (ntohs(ethhdr->h_proto)) {
- 	case ETH_P_8021Q:
-+		if (!pskb_may_pull(skb, VLAN_ETH_HLEN))
-+			goto dropped;
-+
- 		vhdr = (struct vlan_ethhdr *)skb->data;
+-	/* Disable LPM until this driver is bound. */
+-	lpm_disable_error = usb_unlocked_disable_lpm(udev);
+-	if (lpm_disable_error && driver->disable_hub_initiated_lpm) {
+-		dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n.",
+-				__func__, driver->name);
+-		return -ENOMEM;
++	/* See the comment about disabling LPM in usb_probe_interface(). */
++	if (driver->disable_hub_initiated_lpm) {
++		lpm_disable_error = usb_unlocked_disable_lpm(udev);
++		if (lpm_disable_error) {
++			dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n.",
++					__func__, driver->name);
++			return -ENOMEM;
++		}
+ 	}
  
- 		if (vhdr->h_vlan_encapsulated_proto != ethertype)
-@@ -423,8 +429,6 @@ void batadv_interface_rx(struct net_device *soft_iface,
+ 	/* Claimed interfaces are initially inactive (suspended) and
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 79d895c2dd71..97ef75af9632 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -651,7 +651,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
+ 	if (io_data->read && ret > 0) {
+ 		use_mm(io_data->mm);
+ 		ret = copy_to_iter(io_data->buf, ret, &io_data->data);
+-		if (iov_iter_count(&io_data->data))
++		if (ret != io_data->req->actual && iov_iter_count(&io_data->data))
+ 			ret = -EFAULT;
+ 		unuse_mm(io_data->mm);
  	}
+diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
+index 223ccf89d226..a4f664062e0c 100644
+--- a/drivers/usb/gadget/function/f_mass_storage.c
++++ b/drivers/usb/gadget/function/f_mass_storage.c
+@@ -2977,25 +2977,6 @@ void fsg_common_set_inquiry_string(struct fsg_common *common, const char *vn,
+ }
+ EXPORT_SYMBOL_GPL(fsg_common_set_inquiry_string);
  
- 	/* skb->dev & skb->pkt_type are set here */
--	if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
--		goto dropped;
- 	skb->protocol = eth_type_trans(skb, soft_iface);
- 
- 	/* should not be necessary anymore as we use skb_pull_rcsum()
-diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
-index 5a5089cb6570..1363b8ffd89c 100644
---- a/net/bluetooth/mgmt.c
-+++ b/net/bluetooth/mgmt.c
-@@ -5979,6 +5979,10 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
- 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
- 				       MGMT_STATUS_INVALID_PARAMS);
- 
-+	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
-+		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
-+				       MGMT_STATUS_INVALID_PARAMS);
-+
- 	flags = __le32_to_cpu(cp->flags);
- 	timeout = __le16_to_cpu(cp->timeout);
- 	duration = __le16_to_cpu(cp->duration);
-diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
-index 263b4de4de57..60a3dbfca8a1 100644
---- a/net/bridge/br_ioctl.c
-+++ b/net/bridge/br_ioctl.c
-@@ -21,18 +21,19 @@
- #include <asm/uaccess.h>
- #include "br_private.h"
- 
--/* called with RTNL */
- static int get_bridge_ifindices(struct net *net, int *indices, int num)
+-int fsg_common_run_thread(struct fsg_common *common)
+-{
+-	common->state = FSG_STATE_IDLE;
+-	/* Tell the thread to start working */
+-	common->thread_task =
+-		kthread_create(fsg_main_thread, common, "file-storage");
+-	if (IS_ERR(common->thread_task)) {
+-		common->state = FSG_STATE_TERMINATED;
+-		return PTR_ERR(common->thread_task);
+-	}
+-
+-	DBG(common, "I/O thread pid: %d\n", task_pid_nr(common->thread_task));
+-
+-	wake_up_process(common->thread_task);
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(fsg_common_run_thread);
+-
+ static void fsg_common_release(struct kref *ref)
  {
- 	struct net_device *dev;
- 	int i = 0;
- 
--	for_each_netdev(net, dev) {
-+	rcu_read_lock();
-+	for_each_netdev_rcu(net, dev) {
- 		if (i >= num)
- 			break;
- 		if (dev->priv_flags & IFF_EBRIDGE)
- 			indices[i++] = dev->ifindex;
+ 	struct fsg_common *common = container_of(ref, struct fsg_common, ref);
+@@ -3005,6 +2986,7 @@ static void fsg_common_release(struct kref *ref)
+ 	if (common->state != FSG_STATE_TERMINATED) {
+ 		raise_exception(common, FSG_STATE_EXIT);
+ 		wait_for_completion(&common->thread_notifier);
++		common->thread_task = NULL;
  	}
-+	rcu_read_unlock();
  
- 	return i;
- }
-diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
-index 03661d97463c..ea9893743a0f 100644
---- a/net/bridge/br_multicast.c
-+++ b/net/bridge/br_multicast.c
-@@ -1270,6 +1270,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
- 	struct br_ip saddr;
- 	unsigned long max_delay;
- 	unsigned long now = jiffies;
-+	unsigned int offset = skb_transport_offset(skb);
- 	__be32 group;
- 	int err = 0;
- 
-@@ -1280,14 +1281,14 @@ static int br_ip4_multicast_query(struct net_bridge *br,
- 
- 	group = ih->group;
- 
--	if (skb->len == sizeof(*ih)) {
-+	if (skb->len == offset + sizeof(*ih)) {
- 		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
- 
- 		if (!max_delay) {
- 			max_delay = 10 * HZ;
- 			group = 0;
- 		}
--	} else if (skb->len >= sizeof(*ih3)) {
-+	} else if (skb->len >= offset + sizeof(*ih3)) {
- 		ih3 = igmpv3_query_hdr(skb);
- 		if (ih3->nsrcs)
- 			goto out;
-@@ -1348,6 +1349,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
- 	struct br_ip saddr;
- 	unsigned long max_delay;
- 	unsigned long now = jiffies;
-+	unsigned int offset = skb_transport_offset(skb);
- 	const struct in6_addr *group = NULL;
- 	bool is_general_query;
- 	int err = 0;
-@@ -1357,8 +1359,8 @@ static int br_ip6_multicast_query(struct net_bridge *br,
- 	    (port && port->state == BR_STATE_DISABLED))
- 		goto out;
+ 	for (i = 0; i < ARRAY_SIZE(common->luns); ++i) {
+@@ -3050,9 +3032,21 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
+ 		if (ret)
+ 			return ret;
+ 		fsg_common_set_inquiry_string(fsg->common, NULL, NULL);
+-		ret = fsg_common_run_thread(fsg->common);
+-		if (ret)
++	}
++
++	if (!common->thread_task) {
++		common->state = FSG_STATE_IDLE;
++		common->thread_task =
++			kthread_create(fsg_main_thread, common, "file-storage");
++		if (IS_ERR(common->thread_task)) {
++			int ret = PTR_ERR(common->thread_task);
++			common->thread_task = NULL;
++			common->state = FSG_STATE_TERMINATED;
+ 			return ret;
++		}
++		DBG(common, "I/O thread pid: %d\n",
++		    task_pid_nr(common->thread_task));
++		wake_up_process(common->thread_task);
+ 	}
  
--	if (skb->len == sizeof(*mld)) {
--		if (!pskb_may_pull(skb, sizeof(*mld))) {
-+	if (skb->len == offset + sizeof(*mld)) {
-+		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
- 			err = -EINVAL;
- 			goto out;
- 		}
-@@ -1367,7 +1369,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
- 		if (max_delay)
- 			group = &mld->mld_mca;
- 	} else {
--		if (!pskb_may_pull(skb, sizeof(*mld2q))) {
-+		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
- 			err = -EINVAL;
- 			goto out;
- 		}
-diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
-index b3cca126b103..e2670c5baafd 100644
---- a/net/bridge/br_stp.c
-+++ b/net/bridge/br_stp.c
-@@ -568,6 +568,14 @@ int br_set_max_age(struct net_bridge *br, unsigned long val)
+ 	fsg->gadget = gadget;
+diff --git a/drivers/usb/gadget/function/f_mass_storage.h b/drivers/usb/gadget/function/f_mass_storage.h
+index 445df6775609..b6a9918eaefb 100644
+--- a/drivers/usb/gadget/function/f_mass_storage.h
++++ b/drivers/usb/gadget/function/f_mass_storage.h
+@@ -153,8 +153,6 @@ int fsg_common_create_luns(struct fsg_common *common, struct fsg_config *cfg);
+ void fsg_common_set_inquiry_string(struct fsg_common *common, const char *vn,
+ 				   const char *pn);
  
- }
+-int fsg_common_run_thread(struct fsg_common *common);
+-
+ void fsg_config_from_params(struct fsg_config *cfg,
+ 			    const struct fsg_module_parameters *params,
+ 			    unsigned int fsg_num_buffers);
+diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c
+index c16089efc322..c39de65a448b 100644
+--- a/drivers/usb/gadget/legacy/acm_ms.c
++++ b/drivers/usb/gadget/legacy/acm_ms.c
+@@ -133,10 +133,6 @@ static int acm_ms_do_config(struct usb_configuration *c)
+ 	if (status < 0)
+ 		goto put_msg;
  
-+/* Set time interval that dynamic forwarding entries live
-+ * For pure software bridge, allow values outside the 802.1
-+ * standard specification for special cases:
-+ *  0 - entry never ages (all permanant)
-+ *  1 - entry disappears (no persistance)
-+ *
-+ * Offloaded switch entries maybe more restrictive
-+ */
- int br_set_ageing_time(struct net_bridge *br, u32 ageing_time)
- {
- 	struct switchdev_attr attr = {
-@@ -579,11 +587,8 @@ int br_set_ageing_time(struct net_bridge *br, u32 ageing_time)
- 	unsigned long t = clock_t_to_jiffies(ageing_time);
- 	int err;
+-	status = fsg_common_run_thread(opts->common);
+-	if (status)
+-		goto remove_acm;
+-
+ 	status = usb_add_function(c, f_msg);
+ 	if (status)
+ 		goto remove_acm;
+diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c
+index e61af53c7d2b..125974f32f50 100644
+--- a/drivers/usb/gadget/legacy/mass_storage.c
++++ b/drivers/usb/gadget/legacy/mass_storage.c
+@@ -132,10 +132,6 @@ static int msg_do_config(struct usb_configuration *c)
+ 	if (IS_ERR(f_msg))
+ 		return PTR_ERR(f_msg);
  
--	if (t < BR_MIN_AGEING_TIME || t > BR_MAX_AGEING_TIME)
--		return -ERANGE;
+-	ret = fsg_common_run_thread(opts->common);
+-	if (ret)
+-		goto put_func;
 -
- 	err = switchdev_port_attr_set(br->dev, &attr);
--	if (err)
-+	if (err && err != -EOPNOTSUPP)
- 		return err;
- 
- 	br->ageing_time = t;
-diff --git a/net/core/filter.c b/net/core/filter.c
-index bba502f7cd57..fb2951c3532d 100644
---- a/net/core/filter.c
-+++ b/net/core/filter.c
-@@ -1147,7 +1147,8 @@ void bpf_prog_destroy(struct bpf_prog *fp)
- }
- EXPORT_SYMBOL_GPL(bpf_prog_destroy);
+ 	ret = usb_add_function(c, f_msg);
+ 	if (ret)
+ 		goto put_func;
+diff --git a/drivers/usb/gadget/legacy/multi.c b/drivers/usb/gadget/legacy/multi.c
+index 229d704a620b..a70a406580ea 100644
+--- a/drivers/usb/gadget/legacy/multi.c
++++ b/drivers/usb/gadget/legacy/multi.c
+@@ -137,7 +137,6 @@ static struct usb_function *f_msg_rndis;
  
--static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
-+static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk,
-+			    bool locked)
+ static int rndis_do_config(struct usb_configuration *c)
  {
- 	struct sk_filter *fp, *old_fp;
+-	struct fsg_opts *fsg_opts;
+ 	int ret;
  
-@@ -1163,10 +1164,8 @@ static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
- 		return -ENOMEM;
+ 	if (gadget_is_otg(c->cdev->gadget)) {
+@@ -169,11 +168,6 @@ static int rndis_do_config(struct usb_configuration *c)
+ 		goto err_fsg;
  	}
  
--	old_fp = rcu_dereference_protected(sk->sk_filter,
--					   sock_owned_by_user(sk));
-+	old_fp = rcu_dereference_protected(sk->sk_filter, locked);
- 	rcu_assign_pointer(sk->sk_filter, fp);
+-	fsg_opts = fsg_opts_from_func_inst(fi_msg);
+-	ret = fsg_common_run_thread(fsg_opts->common);
+-	if (ret)
+-		goto err_run;
 -
- 	if (old_fp)
- 		sk_filter_uncharge(sk, old_fp);
+ 	ret = usb_add_function(c, f_msg_rndis);
+ 	if (ret)
+ 		goto err_run;
+@@ -225,7 +219,6 @@ static struct usb_function *f_msg_multi;
  
-@@ -1245,7 +1244,8 @@ struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
-  * occurs or there is insufficient memory for the filter a negative
-  * errno code is returned. On success the return is zero.
-  */
--int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
-+int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk,
-+		       bool locked)
+ static int cdc_do_config(struct usb_configuration *c)
  {
- 	struct bpf_prog *prog = __get_filter(fprog, sk);
- 	int err;
-@@ -1253,7 +1253,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
- 	if (IS_ERR(prog))
- 		return PTR_ERR(prog);
+-	struct fsg_opts *fsg_opts;
+ 	int ret;
  
--	err = __sk_attach_prog(prog, sk);
-+	err = __sk_attach_prog(prog, sk, locked);
- 	if (err < 0) {
- 		__bpf_prog_release(prog);
- 		return err;
-@@ -1261,7 +1261,12 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
+ 	if (gadget_is_otg(c->cdev->gadget)) {
+@@ -258,11 +251,6 @@ static int cdc_do_config(struct usb_configuration *c)
+ 		goto err_fsg;
+ 	}
  
- 	return 0;
- }
--EXPORT_SYMBOL_GPL(sk_attach_filter);
-+EXPORT_SYMBOL_GPL(__sk_attach_filter);
-+
-+int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
-+{
-+	return __sk_attach_filter(fprog, sk, sock_owned_by_user(sk));
-+}
+-	fsg_opts = fsg_opts_from_func_inst(fi_msg);
+-	ret = fsg_common_run_thread(fsg_opts->common);
+-	if (ret)
+-		goto err_run;
+-
+ 	ret = usb_add_function(c, f_msg_multi);
+ 	if (ret)
+ 		goto err_run;
+diff --git a/drivers/usb/gadget/legacy/nokia.c b/drivers/usb/gadget/legacy/nokia.c
+index 09975046c694..b1e535f4022e 100644
+--- a/drivers/usb/gadget/legacy/nokia.c
++++ b/drivers/usb/gadget/legacy/nokia.c
+@@ -152,7 +152,6 @@ static int nokia_bind_config(struct usb_configuration *c)
+ 	struct usb_function *f_ecm;
+ 	struct usb_function *f_obex2 = NULL;
+ 	struct usb_function *f_msg;
+-	struct fsg_opts *fsg_opts;
+ 	int status = 0;
+ 	int obex1_stat = -1;
+ 	int obex2_stat = -1;
+@@ -222,12 +221,6 @@ static int nokia_bind_config(struct usb_configuration *c)
+ 		goto err_ecm;
+ 	}
  
- int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
- {
-@@ -1307,7 +1312,7 @@ int sk_attach_bpf(u32 ufd, struct sock *sk)
- 	if (IS_ERR(prog))
- 		return PTR_ERR(prog);
- 
--	err = __sk_attach_prog(prog, sk);
-+	err = __sk_attach_prog(prog, sk, sock_owned_by_user(sk));
- 	if (err < 0) {
- 		bpf_prog_put(prog);
- 		return err;
-@@ -2105,7 +2110,7 @@ static int __init register_sk_filter_ops(void)
- }
- late_initcall(register_sk_filter_ops);
+-	fsg_opts = fsg_opts_from_func_inst(fi_msg);
+-
+-	status = fsg_common_run_thread(fsg_opts->common);
+-	if (status)
+-		goto err_msg;
+-
+ 	status = usb_add_function(c, f_msg);
+ 	if (status)
+ 		goto err_msg;
+diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
+index b86a6f03592e..e272b3ba1d14 100644
+--- a/drivers/usb/gadget/udc/udc-core.c
++++ b/drivers/usb/gadget/udc/udc-core.c
+@@ -75,7 +75,7 @@ int usb_gadget_map_request(struct usb_gadget *gadget,
+ 		mapped = dma_map_sg(dev, req->sg, req->num_sgs,
+ 				is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ 		if (mapped == 0) {
+-			dev_err(&gadget->dev, "failed to map SGs\n");
++			dev_err(dev, "failed to map SGs\n");
+ 			return -EFAULT;
+ 		}
  
--int sk_detach_filter(struct sock *sk)
-+int __sk_detach_filter(struct sock *sk, bool locked)
+diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
+index 92fdb6e9faff..c78ff95a43be 100644
+--- a/drivers/usb/misc/usbtest.c
++++ b/drivers/usb/misc/usbtest.c
+@@ -529,6 +529,7 @@ static struct scatterlist *
+ alloc_sglist(int nents, int max, int vary, struct usbtest_dev *dev, int pipe)
  {
- 	int ret = -ENOENT;
- 	struct sk_filter *filter;
-@@ -2113,8 +2118,7 @@ int sk_detach_filter(struct sock *sk)
- 	if (sock_flag(sk, SOCK_FILTER_LOCKED))
- 		return -EPERM;
- 
--	filter = rcu_dereference_protected(sk->sk_filter,
--					   sock_owned_by_user(sk));
-+	filter = rcu_dereference_protected(sk->sk_filter, locked);
- 	if (filter) {
- 		RCU_INIT_POINTER(sk->sk_filter, NULL);
- 		sk_filter_uncharge(sk, filter);
-@@ -2123,7 +2127,12 @@ int sk_detach_filter(struct sock *sk)
+ 	struct scatterlist	*sg;
++	unsigned int		n_size = 0;
+ 	unsigned		i;
+ 	unsigned		size = max;
+ 	unsigned		maxpacket =
+@@ -561,7 +562,8 @@ alloc_sglist(int nents, int max, int vary, struct usbtest_dev *dev, int pipe)
+ 			break;
+ 		case 1:
+ 			for (j = 0; j < size; j++)
+-				*buf++ = (u8) ((j % maxpacket) % 63);
++				*buf++ = (u8) (((j + n_size) % maxpacket) % 63);
++			n_size += size;
+ 			break;
+ 		}
  
- 	return ret;
- }
--EXPORT_SYMBOL_GPL(sk_detach_filter);
-+EXPORT_SYMBOL_GPL(__sk_detach_filter);
-+
-+int sk_detach_filter(struct sock *sk)
-+{
-+	return __sk_detach_filter(sk, sock_owned_by_user(sk));
-+}
+diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
+index f49327d20ee8..0a935b1e5475 100644
+--- a/drivers/usb/serial/io_edgeport.c
++++ b/drivers/usb/serial/io_edgeport.c
+@@ -2849,14 +2849,16 @@ static int edge_startup(struct usb_serial *serial)
+ 				/* not set up yet, so do it now */
+ 				edge_serial->interrupt_read_urb =
+ 						usb_alloc_urb(0, GFP_KERNEL);
+-				if (!edge_serial->interrupt_read_urb)
+-					return -ENOMEM;
++				if (!edge_serial->interrupt_read_urb) {
++					response = -ENOMEM;
++					break;
++				}
  
- int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
- 		  unsigned int len)
-diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
-index 8261d95dd846..482c3717a45e 100644
---- a/net/core/rtnetlink.c
-+++ b/net/core/rtnetlink.c
-@@ -905,6 +905,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
- 	       + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
- 	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
- 	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
-+	       + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
- 	       + nla_total_size(1); /* IFLA_PROTO_DOWN */
+ 				edge_serial->interrupt_in_buffer =
+ 					kmalloc(buffer_size, GFP_KERNEL);
+ 				if (!edge_serial->interrupt_in_buffer) {
+-					usb_free_urb(edge_serial->interrupt_read_urb);
+-					return -ENOMEM;
++					response = -ENOMEM;
++					break;
+ 				}
+ 				edge_serial->interrupt_in_endpoint =
+ 						endpoint->bEndpointAddress;
+@@ -2884,14 +2886,16 @@ static int edge_startup(struct usb_serial *serial)
+ 				/* not set up yet, so do it now */
+ 				edge_serial->read_urb =
+ 						usb_alloc_urb(0, GFP_KERNEL);
+-				if (!edge_serial->read_urb)
+-					return -ENOMEM;
++				if (!edge_serial->read_urb) {
++					response = -ENOMEM;
++					break;
++				}
  
- }
-@@ -1175,14 +1176,16 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
+ 				edge_serial->bulk_in_buffer =
+ 					kmalloc(buffer_size, GFP_KERNEL);
+ 				if (!edge_serial->bulk_in_buffer) {
+-					usb_free_urb(edge_serial->read_urb);
+-					return -ENOMEM;
++					response = -ENOMEM;
++					break;
+ 				}
+ 				edge_serial->bulk_in_endpoint =
+ 						endpoint->bEndpointAddress;
+@@ -2917,9 +2921,22 @@ static int edge_startup(struct usb_serial *serial)
+ 			}
+ 		}
  
- static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
- {
--	struct rtnl_link_ifmap map = {
--		.mem_start   = dev->mem_start,
--		.mem_end     = dev->mem_end,
--		.base_addr   = dev->base_addr,
--		.irq         = dev->irq,
--		.dma         = dev->dma,
--		.port        = dev->if_port,
--	};
-+	struct rtnl_link_ifmap map;
-+
-+	memset(&map, 0, sizeof(map));
-+	map.mem_start   = dev->mem_start;
-+	map.mem_end     = dev->mem_end;
-+	map.base_addr   = dev->base_addr;
-+	map.irq         = dev->irq;
-+	map.dma         = dev->dma;
-+	map.port        = dev->if_port;
+-		if (!interrupt_in_found || !bulk_in_found || !bulk_out_found) {
+-			dev_err(ddev, "Error - the proper endpoints were not found!\n");
+-			return -ENODEV;
++		if (response || !interrupt_in_found || !bulk_in_found ||
++							!bulk_out_found) {
++			if (!response) {
++				dev_err(ddev, "expected endpoints not found\n");
++				response = -ENODEV;
++			}
 +
- 	if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
- 		return -EMSGSIZE;
- 
-diff --git a/net/core/skbuff.c b/net/core/skbuff.c
-index 8616d1147c93..9835d9a8a7a4 100644
---- a/net/core/skbuff.c
-+++ b/net/core/skbuff.c
-@@ -4427,15 +4427,16 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
- 		__skb_push(skb, offset);
- 		err = __vlan_insert_tag(skb, skb->vlan_proto,
- 					skb_vlan_tag_get(skb));
--		if (err)
-+		if (err) {
-+			__skb_pull(skb, offset);
- 			return err;
-+		}
++			usb_free_urb(edge_serial->interrupt_read_urb);
++			kfree(edge_serial->interrupt_in_buffer);
 +
- 		skb->protocol = skb->vlan_proto;
- 		skb->mac_len += VLAN_HLEN;
--		__skb_pull(skb, offset);
- 
--		if (skb->ip_summed == CHECKSUM_COMPLETE)
--			skb->csum = csum_add(skb->csum, csum_partial(skb->data
--					+ (2 * ETH_ALEN), VLAN_HLEN, 0));
-+		skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
-+		__skb_pull(skb, offset);
- 	}
- 	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
- 	return 0;
-diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
-index 902d606324a0..8be8f27bfacc 100644
---- a/net/dccp/ipv4.c
-+++ b/net/dccp/ipv4.c
-@@ -204,8 +204,6 @@ void dccp_req_err(struct sock *sk, u64 seq)
- 	 * ICMPs are not backlogged, hence we cannot get an established
- 	 * socket here.
- 	 */
--	WARN_ON(req->sk);
--
- 	if (!between48(seq, dccp_rsk(req)->dreq_iss, dccp_rsk(req)->dreq_gss)) {
- 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
- 	} else {
-diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
-index 607a14f20d88..b1dc096d22f8 100644
---- a/net/decnet/dn_route.c
-+++ b/net/decnet/dn_route.c
-@@ -1034,10 +1034,13 @@ source_ok:
- 	if (!fld.daddr) {
- 		fld.daddr = fld.saddr;
- 
--		err = -EADDRNOTAVAIL;
- 		if (dev_out)
- 			dev_put(dev_out);
-+		err = -EINVAL;
- 		dev_out = init_net.loopback_dev;
-+		if (!dev_out->dn_ptr)
-+			goto out;
-+		err = -EADDRNOTAVAIL;
- 		dev_hold(dev_out);
- 		if (!fld.daddr) {
- 			fld.daddr =
-@@ -1110,6 +1113,8 @@ source_ok:
- 		if (dev_out == NULL)
- 			goto out;
- 		dn_db = rcu_dereference_raw(dev_out->dn_ptr);
-+		if (!dn_db)
-+			goto e_inval;
- 		/* Possible improvement - check all devices for local addr */
- 		if (dn_dev_islocal(dev_out, fld.daddr)) {
- 			dev_put(dev_out);
-@@ -1151,6 +1156,8 @@ select_source:
- 			dev_put(dev_out);
- 		dev_out = init_net.loopback_dev;
- 		dev_hold(dev_out);
-+		if (!dev_out->dn_ptr)
-+			goto e_inval;
- 		fld.flowidn_oif = dev_out->ifindex;
- 		if (res.fi)
- 			dn_fib_info_put(res.fi);
-diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
-index fa4daba8db55..d8fb47fcad05 100644
---- a/net/dsa/dsa.c
-+++ b/net/dsa/dsa.c
-@@ -935,6 +935,14 @@ static void dsa_remove_dst(struct dsa_switch_tree *dst)
- {
- 	int i;
- 
-+	dst->master_netdev->dsa_ptr = NULL;
++			usb_free_urb(edge_serial->read_urb);
++			kfree(edge_serial->bulk_in_buffer);
 +
-+	/* If we used a tagging format that doesn't have an ethertype
-+	 * field, make sure that all packets from this point get sent
-+	 * without the tag and go through the regular receive path.
-+	 */
-+	wmb();
++			kfree(edge_serial);
 +
- 	for (i = 0; i < dst->pd->nr_chips; i++) {
- 		struct dsa_switch *ds = dst->ds[i];
++			return response;
+ 		}
  
-@@ -988,14 +996,6 @@ static int dsa_suspend(struct device *d)
- 	struct dsa_switch_tree *dst = platform_get_drvdata(pdev);
- 	int i, ret = 0;
+ 		/* start interrupt read for this edgeport this interrupt will
+@@ -2942,16 +2959,9 @@ static void edge_disconnect(struct usb_serial *serial)
+ {
+ 	struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
  
--	dst->master_netdev->dsa_ptr = NULL;
--
--	/* If we used a tagging format that doesn't have an ethertype
--	 * field, make sure that all packets from this point get sent
--	 * without the tag and go through the regular receive path.
--	 */
--	wmb();
+-	/* stop reads and writes on all ports */
+-	/* free up our endpoint stuff */
+ 	if (edge_serial->is_epic) {
+ 		usb_kill_urb(edge_serial->interrupt_read_urb);
+-		usb_free_urb(edge_serial->interrupt_read_urb);
+-		kfree(edge_serial->interrupt_in_buffer);
 -
- 	for (i = 0; i < dst->pd->nr_chips; i++) {
- 		struct dsa_switch *ds = dst->ds[i];
- 
-diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
-index f6303b17546b..0212591b0077 100644
---- a/net/ipv4/devinet.c
-+++ b/net/ipv4/devinet.c
-@@ -334,6 +334,9 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
- 
- 	ASSERT_RTNL();
- 
-+	if (in_dev->dead)
-+		goto no_promotions;
-+
- 	/* 1. Deleting primary ifaddr forces deletion all secondaries
- 	 * unless alias promotion is set
- 	 **/
-@@ -380,6 +383,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
- 			fib_del_ifaddr(ifa, ifa1);
- 	}
- 
-+no_promotions:
- 	/* 2. Unlink it */
- 
- 	*ifap = ifa1->ifa_next;
-diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
-index 473447593060..63566ec54794 100644
---- a/net/ipv4/fib_frontend.c
-+++ b/net/ipv4/fib_frontend.c
-@@ -280,7 +280,6 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
- 	struct in_device *in_dev;
- 	struct fib_result res;
- 	struct rtable *rt;
--	struct flowi4 fl4;
- 	struct net *net;
- 	int scope;
- 
-@@ -296,14 +295,13 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
- 
- 	scope = RT_SCOPE_UNIVERSE;
- 	if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
--		fl4.flowi4_oif = 0;
--		fl4.flowi4_iif = LOOPBACK_IFINDEX;
--		fl4.daddr = ip_hdr(skb)->saddr;
--		fl4.saddr = 0;
--		fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
--		fl4.flowi4_scope = scope;
--		fl4.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0;
--		fl4.flowi4_tun_key.tun_id = 0;
-+		struct flowi4 fl4 = {
-+			.flowi4_iif = LOOPBACK_IFINDEX,
-+			.daddr = ip_hdr(skb)->saddr,
-+			.flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
-+			.flowi4_scope = scope,
-+			.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0,
-+		};
- 		if (!fib_lookup(net, &fl4, &res, 0))
- 			return FIB_RES_PREFSRC(net, res);
- 	} else {
-@@ -906,7 +904,11 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
- 	if (ifa->ifa_flags & IFA_F_SECONDARY) {
- 		prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
- 		if (!prim) {
--			pr_warn("%s: bug: prim == NULL\n", __func__);
-+			/* if the device has been deleted, we don't perform
-+			 * address promotion
-+			 */
-+			if (!in_dev->dead)
-+				pr_warn("%s: bug: prim == NULL\n", __func__);
- 			return;
- 		}
- 		if (iprim && iprim != prim) {
-@@ -922,6 +924,9 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
- 		subnet = 1;
- 	}
- 
-+	if (in_dev->dead)
-+		goto no_promotions;
-+
- 	/* Deletion is more complicated than add.
- 	 * We should take care of not to delete too much :-)
- 	 *
-@@ -997,6 +1002,7 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
- 		}
+ 		usb_kill_urb(edge_serial->read_urb);
+-		usb_free_urb(edge_serial->read_urb);
+-		kfree(edge_serial->bulk_in_buffer);
  	}
- 
-+no_promotions:
- 	if (!(ok & BRD_OK))
- 		fib_magic(RTM_DELROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim);
- 	if (subnet && ifa->ifa_prefixlen < 31) {
-diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
-index d97268e8ff10..2b68418c7198 100644
---- a/net/ipv4/fib_semantics.c
-+++ b/net/ipv4/fib_semantics.c
-@@ -975,6 +975,8 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
- 			val = 65535 - 40;
- 		if (type == RTAX_MTU && val > 65535 - 15)
- 			val = 65535 - 15;
-+		if (type == RTAX_HOPLIMIT && val > 255)
-+			val = 255;
- 		if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
- 			return -EINVAL;
- 		fi->fib_metrics[type - 1] = val;
-diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
-index 41ba68de46d8..d0c1e7766214 100644
---- a/net/ipv4/ip_gre.c
-+++ b/net/ipv4/ip_gre.c
-@@ -179,6 +179,7 @@ static __be16 tnl_flags_to_gre_flags(__be16 tflags)
- 	return flags;
  }
  
-+/* Fills in tpi and returns header length to be pulled. */
- static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
- 			    bool *csum_err)
+@@ -2964,6 +2974,16 @@ static void edge_release(struct usb_serial *serial)
  {
-@@ -238,7 +239,7 @@ static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
- 				return -EINVAL;
- 		}
- 	}
--	return iptunnel_pull_header(skb, hdr_len, tpi->proto);
-+	return hdr_len;
+ 	struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
+ 
++	if (edge_serial->is_epic) {
++		usb_kill_urb(edge_serial->interrupt_read_urb);
++		usb_free_urb(edge_serial->interrupt_read_urb);
++		kfree(edge_serial->interrupt_in_buffer);
++
++		usb_kill_urb(edge_serial->read_urb);
++		usb_free_urb(edge_serial->read_urb);
++		kfree(edge_serial->bulk_in_buffer);
++	}
++
+ 	kfree(edge_serial);
  }
  
- static void ipgre_err(struct sk_buff *skb, u32 info,
-@@ -341,7 +342,7 @@ static void gre_err(struct sk_buff *skb, u32 info)
- 	struct tnl_ptk_info tpi;
- 	bool csum_err = false;
- 
--	if (parse_gre_header(skb, &tpi, &csum_err)) {
-+	if (parse_gre_header(skb, &tpi, &csum_err) < 0) {
- 		if (!csum_err)		/* ignore csum errors. */
- 			return;
- 	}
-@@ -419,6 +420,7 @@ static int gre_rcv(struct sk_buff *skb)
- {
- 	struct tnl_ptk_info tpi;
- 	bool csum_err = false;
-+	int hdr_len;
+diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
+index e07b15ed5814..7faa901ee47f 100644
+--- a/drivers/usb/serial/keyspan.c
++++ b/drivers/usb/serial/keyspan.c
+@@ -2376,6 +2376,10 @@ static void keyspan_release(struct usb_serial *serial)
  
- #ifdef CONFIG_NET_IPGRE_BROADCAST
- 	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
-@@ -428,7 +430,10 @@ static int gre_rcv(struct sk_buff *skb)
- 	}
- #endif
+ 	s_priv = usb_get_serial_data(serial);
  
--	if (parse_gre_header(skb, &tpi, &csum_err) < 0)
-+	hdr_len = parse_gre_header(skb, &tpi, &csum_err);
-+	if (hdr_len < 0)
-+		goto drop;
-+	if (iptunnel_pull_header(skb, hdr_len, tpi.proto) < 0)
- 		goto drop;
- 
- 	if (ipgre_rcv(skb, &tpi) == PACKET_RCVD)
-diff --git a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
-index c6eb42100e9a..ea91058b5f6f 100644
---- a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
-+++ b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
-@@ -108,10 +108,18 @@ static int masq_inet_event(struct notifier_block *this,
- 			   unsigned long event,
- 			   void *ptr)
- {
--	struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev;
-+	struct in_device *idev = ((struct in_ifaddr *)ptr)->ifa_dev;
- 	struct netdev_notifier_info info;
- 
--	netdev_notifier_info_init(&info, dev);
-+	/* The masq_dev_notifier will catch the case of the device going
-+	 * down.  So if the inetdev is dead and being destroyed we have
-+	 * no work to do.  Otherwise this is an individual address removal
-+	 * and we have to perform the flush.
-+	 */
-+	if (idev->dead)
-+		return NOTIFY_DONE;
++	/* Make sure to unlink the URBs submitted in attach. */
++	usb_kill_urb(s_priv->instat_urb);
++	usb_kill_urb(s_priv->indat_urb);
 +
-+	netdev_notifier_info_init(&info, idev->dev);
- 	return masq_device_event(this, event, &info);
+ 	usb_free_urb(s_priv->instat_urb);
+ 	usb_free_urb(s_priv->indat_urb);
+ 	usb_free_urb(s_priv->glocont_urb);
+diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c
+index 31a8b47f1ac6..c6596cbcc4b6 100644
+--- a/drivers/usb/serial/mxuport.c
++++ b/drivers/usb/serial/mxuport.c
+@@ -1259,6 +1259,15 @@ static int mxuport_attach(struct usb_serial *serial)
+ 	return 0;
  }
  
-diff --git a/net/ipv4/route.c b/net/ipv4/route.c
-index 02c62299d717..b050cf980a57 100644
---- a/net/ipv4/route.c
-+++ b/net/ipv4/route.c
-@@ -2045,6 +2045,18 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
- 		 */
- 		if (fi && res->prefixlen < 4)
- 			fi = NULL;
-+	} else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
-+		   (orig_oif != dev_out->ifindex)) {
-+		/* For local routes that require a particular output interface
-+		 * we do not want to cache the result.  Caching the result
-+		 * causes incorrect behaviour when there are multiple source
-+		 * addresses on the interface, the end result being that if the
-+		 * intended recipient is waiting on that interface for the
-+		 * packet he won't receive it because it will be delivered on
-+		 * the loopback interface and the IP_PKTINFO ipi_ifindex will
-+		 * be set to the loopback interface as well.
-+		 */
-+		fi = NULL;
- 	}
- 
- 	fnhe = NULL;
-diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
-index 487ac67059e2..a7b1a905580b 100644
---- a/net/ipv4/tcp_ipv4.c
-+++ b/net/ipv4/tcp_ipv4.c
-@@ -319,8 +319,6 @@ void tcp_req_err(struct sock *sk, u32 seq, bool abort)
- 	/* ICMPs are not backlogged, hence we cannot get
- 	 * an established socket here.
- 	 */
--	WARN_ON(req->sk);
--
- 	if (seq != tcp_rsk(req)->snt_isn) {
- 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
- 	} else if (abort) {
-diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
-index fda379cd600d..b6876f2f4cf2 100644
---- a/net/ipv4/tcp_output.c
-+++ b/net/ipv4/tcp_output.c
-@@ -2625,8 +2625,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
- 	 */
- 	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
- 		     skb_headroom(skb) >= 0xFFFF)) {
--		struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
--						   GFP_ATOMIC);
-+		struct sk_buff *nskb;
++static void mxuport_release(struct usb_serial *serial)
++{
++	struct usb_serial_port *port0 = serial->port[0];
++	struct usb_serial_port *port1 = serial->port[1];
 +
-+		skb_mstamp_get(&skb->skb_mstamp);
-+		nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
- 		err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
- 			     -ENOBUFS;
- 	} else {
-diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
-index 95d2f198017e..56218ff388c7 100644
---- a/net/ipv4/udp.c
-+++ b/net/ipv4/udp.c
-@@ -339,8 +339,13 @@ found:
- 
- 		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
- 		spin_lock(&hslot2->lock);
--		hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
--					 &hslot2->head);
-+		if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
-+			sk->sk_family == AF_INET6)
-+			hlist_nulls_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
-+						 &hslot2->head);
-+		else
-+			hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
-+						 &hslot2->head);
- 		hslot2->count++;
- 		spin_unlock(&hslot2->lock);
- 	}
-@@ -2082,10 +2087,14 @@ void udp_v4_early_demux(struct sk_buff *skb)
- 		if (!in_dev)
- 			return;
- 
--		ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
--				       iph->protocol);
--		if (!ours)
--			return;
-+		/* we are supposed to accept bcast packets */
-+		if (skb->pkt_type == PACKET_MULTICAST) {
-+			ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
-+					       iph->protocol);
-+			if (!ours)
-+				return;
-+		}
++	usb_serial_generic_close(port1);
++	usb_serial_generic_close(port0);
++}
 +
- 		sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
- 						   uh->source, iph->saddr, dif);
- 	} else if (skb->pkt_type == PACKET_HOST) {
-diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
-index 2ae3c4fd8aab..41f18de5dcc2 100644
---- a/net/ipv6/ila/ila_lwt.c
-+++ b/net/ipv6/ila/ila_lwt.c
-@@ -120,8 +120,7 @@ nla_put_failure:
- 
- static int ila_encap_nlsize(struct lwtunnel_state *lwtstate)
+ static int mxuport_open(struct tty_struct *tty, struct usb_serial_port *port)
  {
--	/* No encapsulation overhead */
--	return 0;
-+	return nla_total_size(sizeof(u64)); /* ILA_ATTR_LOCATOR */
- }
+ 	struct mxuport_port *mxport = usb_get_serial_port_data(port);
+@@ -1361,6 +1370,7 @@ static struct usb_serial_driver mxuport_device = {
+ 	.probe			= mxuport_probe,
+ 	.port_probe		= mxuport_port_probe,
+ 	.attach			= mxuport_attach,
++	.release		= mxuport_release,
+ 	.calc_num_ports		= mxuport_calc_num_ports,
+ 	.open			= mxuport_open,
+ 	.close			= mxuport_close,
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index c6f497f16526..d96d423d00e6 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -375,18 +375,22 @@ static void option_instat_callback(struct urb *urb);
+ #define HAIER_PRODUCT_CE81B			0x10f8
+ #define HAIER_PRODUCT_CE100			0x2009
+ 
+-/* Cinterion (formerly Siemens) products */
+-#define SIEMENS_VENDOR_ID				0x0681
+-#define CINTERION_VENDOR_ID				0x1e2d
++/* Gemalto's Cinterion products (formerly Siemens) */
++#define SIEMENS_VENDOR_ID			0x0681
++#define CINTERION_VENDOR_ID			0x1e2d
++#define CINTERION_PRODUCT_HC25_MDMNET		0x0040
+ #define CINTERION_PRODUCT_HC25_MDM		0x0047
+-#define CINTERION_PRODUCT_HC25_MDMNET	0x0040
++#define CINTERION_PRODUCT_HC28_MDMNET		0x004A /* same for HC28J */
+ #define CINTERION_PRODUCT_HC28_MDM		0x004C
+-#define CINTERION_PRODUCT_HC28_MDMNET	0x004A /* same for HC28J */
+ #define CINTERION_PRODUCT_EU3_E			0x0051
+ #define CINTERION_PRODUCT_EU3_P			0x0052
+ #define CINTERION_PRODUCT_PH8			0x0053
+ #define CINTERION_PRODUCT_AHXX			0x0055
+ #define CINTERION_PRODUCT_PLXX			0x0060
++#define CINTERION_PRODUCT_PH8_2RMNET		0x0082
++#define CINTERION_PRODUCT_PH8_AUDIO		0x0083
++#define CINTERION_PRODUCT_AHXX_2RMNET		0x0084
++#define CINTERION_PRODUCT_AHXX_AUDIO		0x0085
  
- static int ila_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
-diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
-index a163102f1803..2a6606c935cc 100644
---- a/net/ipv6/ip6_output.c
-+++ b/net/ipv6/ip6_output.c
-@@ -1091,8 +1091,8 @@ static inline int ip6_ufo_append_data(struct sock *sk,
- 			int getfrag(void *from, char *to, int offset, int len,
- 			int odd, struct sk_buff *skb),
- 			void *from, int length, int hh_len, int fragheaderlen,
--			int transhdrlen, int mtu, unsigned int flags,
--			const struct flowi6 *fl6)
-+			int exthdrlen, int transhdrlen, int mtu,
-+			unsigned int flags, const struct flowi6 *fl6)
+ /* Olivetti products */
+ #define OLIVETTI_VENDOR_ID			0x0b3c
+@@ -633,6 +637,10 @@ static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = {
+ 	.reserved = BIT(1) | BIT(2) | BIT(3),
+ };
  
- {
- 	struct sk_buff *skb;
-@@ -1117,7 +1117,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
- 		skb_put(skb, fragheaderlen + transhdrlen);
- 
- 		/* initialize network header pointer */
--		skb_reset_network_header(skb);
-+		skb_set_network_header(skb, exthdrlen);
- 
- 		/* initialize protocol header pointer */
- 		skb->transport_header = skb->network_header + fragheaderlen;
-@@ -1359,7 +1359,7 @@ emsgsize:
- 	    (rt->dst.dev->features & NETIF_F_UFO) &&
- 	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
- 		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
--					  hh_len, fragheaderlen,
-+					  hh_len, fragheaderlen, exthdrlen,
- 					  transhdrlen, mtu, flags, fl6);
- 		if (err)
- 			goto error;
-diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
-index 6c5dfec7a377..3991b21e24ad 100644
---- a/net/ipv6/ip6_tunnel.c
-+++ b/net/ipv6/ip6_tunnel.c
-@@ -343,12 +343,12 @@ static int ip6_tnl_create2(struct net_device *dev)
- 
- 	t = netdev_priv(dev);
- 
-+	dev->rtnl_link_ops = &ip6_link_ops;
- 	err = register_netdevice(dev);
- 	if (err < 0)
- 		goto out;
++static const struct option_blacklist_info cinterion_rmnet2_blacklist = {
++	.reserved = BIT(4) | BIT(5),
++};
++
+ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
+ 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
+@@ -1602,7 +1610,79 @@ static const struct usb_device_id option_ids[] = {
+ 		.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
+ 		.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffe9, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff42, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff43, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff44, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff45, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff46, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff47, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff48, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff49, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4a, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4b, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4c, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4d, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4e, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4f, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff50, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff51, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff52, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff53, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff54, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff55, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff56, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff57, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff58, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff59, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5a, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5b, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5c, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5d, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5e, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5f, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff60, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff61, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff62, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff63, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff64, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff65, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff66, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff67, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff68, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff69, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6a, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6b, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6c, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6d, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6e, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6f, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff70, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff71, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff72, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff73, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff74, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff75, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff76, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff77, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff78, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff79, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7a, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7b, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7c, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7d, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7e, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7f, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff80, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff81, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff82, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff83, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff84, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff85, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff86, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff87, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff88, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff89, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8a, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8b, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8c, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8d, 0xff, 0xff, 0xff) },
+@@ -1613,6 +1693,61 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff92, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff93, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff94, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff9f, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa0, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa1, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa2, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa3, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa4, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa5, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa6, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa7, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa8, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa9, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffaa, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffab, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffac, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffae, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffaf, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb0, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb1, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb2, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb3, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb4, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb5, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb6, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb7, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb8, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb9, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffba, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbb, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbc, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbd, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbe, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbf, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc0, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc1, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc2, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc3, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc4, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc5, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc6, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc7, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc8, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc9, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffca, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcb, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcc, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcd, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffce, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcf, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd0, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd1, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd2, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd3, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd4, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd5, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffe9, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffec, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffee, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xfff6, 0xff, 0xff, 0xff) },
+@@ -1712,7 +1847,13 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) },
+ 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
+ 		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+-	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, 
++	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_2RMNET, 0xff),
++		.driver_info = (kernel_ulong_t)&cinterion_rmnet2_blacklist },
++	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_AUDIO, 0xff),
++		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) },
++	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) },
++	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
+ 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+ 	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
+ 	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
+diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
+index 504f5bff79c0..b18974cbd995 100644
+--- a/drivers/usb/serial/quatech2.c
++++ b/drivers/usb/serial/quatech2.c
+@@ -141,6 +141,7 @@ static void qt2_release(struct usb_serial *serial)
  
- 	strcpy(t->parms.name, dev->name);
--	dev->rtnl_link_ops = &ip6_link_ops;
- 
- 	dev_hold(dev);
- 	ip6_tnl_link(ip6n, t);
-diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
-index 18f3498a6c80..e2ea31175ef9 100644
---- a/net/ipv6/reassembly.c
-+++ b/net/ipv6/reassembly.c
-@@ -496,10 +496,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
- 	IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
- 
- 	/* Yes, and fold redundant checksum back. 8) */
--	if (head->ip_summed == CHECKSUM_COMPLETE)
--		head->csum = csum_partial(skb_network_header(head),
--					  skb_network_header_len(head),
--					  head->csum);
-+	skb_postpush_rcsum(head, skb_network_header(head),
-+			   skb_network_header_len(head));
+ 	serial_priv = usb_get_serial_data(serial);
  
- 	rcu_read_lock();
- 	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
-diff --git a/net/ipv6/route.c b/net/ipv6/route.c
-index ed446639219c..18e29e2f8877 100644
---- a/net/ipv6/route.c
-+++ b/net/ipv6/route.c
-@@ -1737,6 +1737,8 @@ static int ip6_convert_metrics(struct mx6_config *mxc,
- 		} else {
- 			val = nla_get_u32(nla);
- 		}
-+		if (type == RTAX_HOPLIMIT && val > 255)
-+			val = 255;
- 		if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
- 			goto err;
- 
-diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
-index 422dd014aa2c..6794120f53b8 100644
---- a/net/ipv6/udp.c
-+++ b/net/ipv6/udp.c
-@@ -883,8 +883,8 @@ start_lookup:
- 		flush_stack(stack, count, skb, count - 1);
- 	} else {
- 		if (!inner_flushed)
--			UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
--					 proto == IPPROTO_UDPLITE);
-+			UDP6_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
-+					  proto == IPPROTO_UDPLITE);
- 		consume_skb(skb);
- 	}
- 	return 0;
-diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
-index ec22078b0914..42de4ccd159f 100644
---- a/net/l2tp/l2tp_ip.c
-+++ b/net/l2tp/l2tp_ip.c
-@@ -123,12 +123,11 @@ static int l2tp_ip_recv(struct sk_buff *skb)
- 	struct l2tp_tunnel *tunnel = NULL;
- 	int length;
- 
--	/* Point to L2TP header */
--	optr = ptr = skb->data;
--
- 	if (!pskb_may_pull(skb, 4))
- 		goto discard;
- 
-+	/* Point to L2TP header */
-+	optr = ptr = skb->data;
- 	session_id = ntohl(*((__be32 *) ptr));
- 	ptr += 4;
- 
-@@ -156,6 +155,9 @@ static int l2tp_ip_recv(struct sk_buff *skb)
- 		if (!pskb_may_pull(skb, length))
- 			goto discard;
- 
-+		/* Point to L2TP header */
-+		optr = ptr = skb->data;
-+		ptr += 4;
- 		pr_debug("%s: ip recv\n", tunnel->name);
- 		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
- 	}
-diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
-index a2c8747d2936..9ee4ddb6b397 100644
---- a/net/l2tp/l2tp_ip6.c
-+++ b/net/l2tp/l2tp_ip6.c
-@@ -135,12 +135,11 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
- 	struct l2tp_tunnel *tunnel = NULL;
- 	int length;
- 
--	/* Point to L2TP header */
--	optr = ptr = skb->data;
--
- 	if (!pskb_may_pull(skb, 4))
- 		goto discard;
- 
-+	/* Point to L2TP header */
-+	optr = ptr = skb->data;
- 	session_id = ntohl(*((__be32 *) ptr));
- 	ptr += 4;
- 
-@@ -168,6 +167,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
- 		if (!pskb_may_pull(skb, length))
- 			goto discard;
- 
-+		/* Point to L2TP header */
-+		optr = ptr = skb->data;
-+		ptr += 4;
- 		pr_debug("%s: ip recv\n", tunnel->name);
- 		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
- 	}
-diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
-index 8dab4e569571..bb8edb9ef506 100644
---- a/net/llc/af_llc.c
-+++ b/net/llc/af_llc.c
-@@ -626,6 +626,7 @@ static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb)
- 	if (llc->cmsg_flags & LLC_CMSG_PKTINFO) {
- 		struct llc_pktinfo info;
- 
-+		memset(&info, 0, sizeof(info));
- 		info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex;
- 		llc_pdu_decode_dsap(skb, &info.lpi_sap);
- 		llc_pdu_decode_da(skb, info.lpi_mac);
-diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
-index 978d3bc31df7..1b33d89906e1 100644
---- a/net/mac80211/ibss.c
-+++ b/net/mac80211/ibss.c
-@@ -7,6 +7,7 @@
-  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
-  * Copyright 2009, Johannes Berg <johannes@sipsolutions.net>
-  * Copyright 2013-2014  Intel Mobile Communications GmbH
-+ * Copyright(c) 2016 Intel Deutschland GmbH
-  *
-  * This program is free software; you can redistribute it and/or modify
-  * it under the terms of the GNU General Public License version 2 as
-@@ -1485,14 +1486,21 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
- 
- 		sdata_info(sdata, "Trigger new scan to find an IBSS to join\n");
- 
--		num = ieee80211_ibss_setup_scan_channels(local->hw.wiphy,
--							 &ifibss->chandef,
--							 channels,
--							 ARRAY_SIZE(channels));
- 		scan_width = cfg80211_chandef_to_scan_width(&ifibss->chandef);
--		ieee80211_request_ibss_scan(sdata, ifibss->ssid,
--					    ifibss->ssid_len, channels, num,
--					    scan_width);
++	usb_kill_urb(serial_priv->read_urb);
+ 	usb_free_urb(serial_priv->read_urb);
+ 	kfree(serial_priv->read_buffer);
+ 	kfree(serial_priv);
+diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
+index 6467b91f2245..028618c5eeba 100644
+--- a/drivers/watchdog/sp5100_tco.c
++++ b/drivers/watchdog/sp5100_tco.c
+@@ -73,6 +73,13 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started."
+ /*
+  * Some TCO specific functions
+  */
 +
-+		if (ifibss->fixed_channel) {
-+			num = ieee80211_ibss_setup_scan_channels(local->hw.wiphy,
-+								 &ifibss->chandef,
-+								 channels,
-+								 ARRAY_SIZE(channels));
-+			ieee80211_request_ibss_scan(sdata, ifibss->ssid,
-+						    ifibss->ssid_len, channels,
-+						    num, scan_width);
-+		} else {
-+			ieee80211_request_ibss_scan(sdata, ifibss->ssid,
-+						    ifibss->ssid_len, NULL,
-+						    0, scan_width);
-+		}
- 	} else {
- 		int interval = IEEE80211_SCAN_INTERVAL;
- 
-diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
-index c9e325d2e120..bcb0a1b64556 100644
---- a/net/mac80211/iface.c
-+++ b/net/mac80211/iface.c
-@@ -977,7 +977,10 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
- 	if (sdata->vif.txq) {
- 		struct txq_info *txqi = to_txq_info(sdata->vif.txq);
- 
-+		spin_lock_bh(&txqi->queue.lock);
- 		ieee80211_purge_tx_queue(&local->hw, &txqi->queue);
-+		spin_unlock_bh(&txqi->queue.lock);
++static bool tco_has_sp5100_reg_layout(struct pci_dev *dev)
++{
++	return dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
++	       dev->revision < 0x40;
++}
 +
- 		atomic_set(&sdata->txqs_len[txqi->txq.ac], 0);
- 	}
- 
-@@ -1747,7 +1750,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
- 
- 		ret = dev_alloc_name(ndev, ndev->name);
- 		if (ret < 0) {
--			free_netdev(ndev);
-+			ieee80211_if_free(ndev);
- 			return ret;
- 		}
- 
-@@ -1833,7 +1836,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
- 
- 		ret = register_netdevice(ndev);
- 		if (ret) {
--			free_netdev(ndev);
-+			ieee80211_if_free(ndev);
- 			return ret;
- 		}
- 	}
-diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
-index 60d093f40f1d..261df74eaf38 100644
---- a/net/mac80211/rx.c
-+++ b/net/mac80211/rx.c
-@@ -2249,7 +2249,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
- 	struct ieee80211_local *local = rx->local;
- 	struct ieee80211_sub_if_data *sdata = rx->sdata;
- 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
--	u16 q, hdrlen;
-+	u16 ac, q, hdrlen;
- 
- 	hdr = (struct ieee80211_hdr *) skb->data;
- 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
-@@ -2318,7 +2318,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
- 	    ether_addr_equal(sdata->vif.addr, hdr->addr3))
- 		return RX_CONTINUE;
- 
--	q = ieee80211_select_queue_80211(sdata, skb, hdr);
-+	ac = ieee80211_select_queue_80211(sdata, skb, hdr);
-+	q = sdata->vif.hw_queue[ac];
- 	if (ieee80211_queue_stopped(&local->hw, q)) {
- 		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
- 		return RX_DROP_MONITOR;
-diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
-index a4a4f89d3ba0..23ed038cf7f9 100644
---- a/net/mac80211/sta_info.c
-+++ b/net/mac80211/sta_info.c
-@@ -257,11 +257,11 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
- }
- 
- /* Caller must hold local->sta_mtx */
--static void sta_info_hash_add(struct ieee80211_local *local,
--			      struct sta_info *sta)
-+static int sta_info_hash_add(struct ieee80211_local *local,
-+			     struct sta_info *sta)
- {
--	rhashtable_insert_fast(&local->sta_hash, &sta->hash_node,
--			       sta_rht_params);
-+	return rhashtable_insert_fast(&local->sta_hash, &sta->hash_node,
-+				      sta_rht_params);
- }
- 
- static void sta_deliver_ps_frames(struct work_struct *wk)
-@@ -498,11 +498,17 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
+ static void tco_timer_start(void)
  {
- 	struct ieee80211_local *local = sta->local;
- 	struct ieee80211_sub_if_data *sdata = sta->sdata;
--	struct station_info sinfo;
-+	struct station_info *sinfo;
- 	int err = 0;
- 
- 	lockdep_assert_held(&local->sta_mtx);
- 
-+	sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL);
-+	if (!sinfo) {
-+		err = -ENOMEM;
-+		goto out_err;
-+	}
-+
- 	/* check if STA exists already */
- 	if (sta_info_get_bss(sdata, sta->sta.addr)) {
- 		err = -EEXIST;
-@@ -517,7 +523,9 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
- 	set_sta_flag(sta, WLAN_STA_BLOCK_BA);
- 
- 	/* make the station visible */
--	sta_info_hash_add(local, sta);
-+	err = sta_info_hash_add(local, sta);
-+	if (err)
-+		goto out_drop_sta;
- 
- 	list_add_tail_rcu(&sta->list, &local->sta_list);
- 
-@@ -534,10 +542,9 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
- 	ieee80211_sta_debugfs_add(sta);
- 	rate_control_add_sta_debugfs(sta);
- 
--	memset(&sinfo, 0, sizeof(sinfo));
--	sinfo.filled = 0;
--	sinfo.generation = local->sta_generation;
--	cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
-+	sinfo->generation = local->sta_generation;
-+	cfg80211_new_sta(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
-+	kfree(sinfo);
- 
- 	sta_dbg(sdata, "Inserted STA %pM\n", sta->sta.addr);
- 
-@@ -552,6 +559,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
-  out_remove:
- 	sta_info_hash_del(local, sta);
- 	list_del_rcu(&sta->list);
-+ out_drop_sta:
- 	local->num_sta--;
- 	synchronize_net();
- 	__cleanup_single_sta(sta);
-@@ -898,7 +906,7 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
+ 	u32 val;
+@@ -129,7 +136,7 @@ static void tco_timer_enable(void)
  {
- 	struct ieee80211_local *local = sta->local;
- 	struct ieee80211_sub_if_data *sdata = sta->sdata;
--	struct station_info sinfo = {};
-+	struct station_info *sinfo;
- 	int ret;
+ 	int val;
  
+-	if (sp5100_tco_pci->revision >= 0x40) {
++	if (!tco_has_sp5100_reg_layout(sp5100_tco_pci)) {
+ 		/* For SB800 or later */
+ 		/* Set the Watchdog timer resolution to 1 sec */
+ 		outb(SB800_PM_WATCHDOG_CONFIG, SB800_IO_PM_INDEX_REG);
+@@ -342,8 +349,7 @@ static unsigned char sp5100_tco_setupdevice(void)
  	/*
-@@ -936,8 +944,11 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
- 
- 	sta_dbg(sdata, "Removed STA %pM\n", sta->sta.addr);
- 
--	sta_set_sinfo(sta, &sinfo);
--	cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
-+	sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
-+	if (sinfo)
-+		sta_set_sinfo(sta, sinfo);
-+	cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
-+	kfree(sinfo);
- 
- 	rate_control_remove_sta_debugfs(sta);
- 	ieee80211_sta_debugfs_remove(sta);
-diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
-index b18c5ed42d95..0b80a7140cc4 100644
---- a/net/mpls/af_mpls.c
-+++ b/net/mpls/af_mpls.c
-@@ -543,6 +543,9 @@ static struct net_device *find_outdev(struct net *net,
- 	if (!dev)
- 		return ERR_PTR(-ENODEV);
- 
-+	if (IS_ERR(dev))
-+		return dev;
-+
- 	/* The caller is holding rtnl anyways, so release the dev reference */
- 	dev_put(dev);
- 
-diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
-index f57b4dcdb233..4da560005b0e 100644
---- a/net/netfilter/ipvs/ip_vs_core.c
-+++ b/net/netfilter/ipvs/ip_vs_core.c
-@@ -1757,15 +1757,34 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
- 	cp = pp->conn_in_get(ipvs, af, skb, &iph);
- 
- 	conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
--	if (conn_reuse_mode && !iph.fragoffs &&
--	    is_new_conn(skb, &iph) && cp &&
--	    ((unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
--	      unlikely(!atomic_read(&cp->dest->weight))) ||
--	     unlikely(is_new_conn_expected(cp, conn_reuse_mode)))) {
--		if (!atomic_read(&cp->n_control))
--			ip_vs_conn_expire_now(cp);
--		__ip_vs_conn_put(cp);
--		cp = NULL;
-+	if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
-+		bool uses_ct = false, resched = false;
-+
-+		if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
-+		    unlikely(!atomic_read(&cp->dest->weight))) {
-+			resched = true;
-+			uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
-+		} else if (is_new_conn_expected(cp, conn_reuse_mode)) {
-+			uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
-+			if (!atomic_read(&cp->n_control)) {
-+				resched = true;
-+			} else {
-+				/* Do not reschedule controlling connection
-+				 * that uses conntrack while it is still
-+				 * referenced by controlled connection(s).
-+				 */
-+				resched = !uses_ct;
-+			}
+ 	 * Determine type of southbridge chipset.
+ 	 */
+-	if (sp5100_tco_pci->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
+-	    sp5100_tco_pci->revision < 0x40) {
++	if (tco_has_sp5100_reg_layout(sp5100_tco_pci)) {
+ 		dev_name = SP5100_DEVNAME;
+ 		index_reg = SP5100_IO_PM_INDEX_REG;
+ 		data_reg = SP5100_IO_PM_DATA_REG;
+@@ -388,8 +394,7 @@ static unsigned char sp5100_tco_setupdevice(void)
+ 	 * Secondly, Find the watchdog timer MMIO address
+ 	 * from SBResource_MMIO register.
+ 	 */
+-	if (sp5100_tco_pci->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
+-	    sp5100_tco_pci->revision < 0x40) {
++	if (tco_has_sp5100_reg_layout(sp5100_tco_pci)) {
+ 		/* Read SBResource_MMIO from PCI config(PCI_Reg: 9Ch) */
+ 		pci_read_config_dword(sp5100_tco_pci,
+ 				      SP5100_SB_RESOURCE_MMIO_BASE, &val);
+diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
+index 59727e32ed0f..af0ec2d5ad0e 100644
+--- a/fs/cifs/sess.c
++++ b/fs/cifs/sess.c
+@@ -400,19 +400,27 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
+ 	sec_blob->LmChallengeResponse.MaximumLength = 0;
+ 
+ 	sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
+-	rc = setup_ntlmv2_rsp(ses, nls_cp);
+-	if (rc) {
+-		cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
+-		goto setup_ntlmv2_ret;
++	if (ses->user_name != NULL) {
++		rc = setup_ntlmv2_rsp(ses, nls_cp);
++		if (rc) {
++			cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
++			goto setup_ntlmv2_ret;
 +		}
++		memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
++				ses->auth_key.len - CIFS_SESS_KEY_SIZE);
++		tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
 +
-+		if (resched) {
-+			if (!atomic_read(&cp->n_control))
-+				ip_vs_conn_expire_now(cp);
-+			__ip_vs_conn_put(cp);
-+			if (uses_ct)
-+				return NF_DROP;
-+			cp = NULL;
-+		}
++		sec_blob->NtChallengeResponse.Length =
++				cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
++		sec_blob->NtChallengeResponse.MaximumLength =
++				cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
++	} else {
++		/*
++		 * don't send an NT Response for anonymous access
++		 */
++		sec_blob->NtChallengeResponse.Length = 0;
++		sec_blob->NtChallengeResponse.MaximumLength = 0;
  	}
- 
- 	if (unlikely(!cp)) {
-diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
-index 1b8d594e493a..0a6eb5c0d9e9 100644
---- a/net/netfilter/ipvs/ip_vs_pe_sip.c
-+++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
-@@ -70,10 +70,10 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
- 	const char *dptr;
- 	int retc;
- 
--	ip_vs_fill_iph_skb(p->af, skb, false, &iph);
-+	retc = ip_vs_fill_iph_skb(p->af, skb, false, &iph);
- 
- 	/* Only useful with UDP */
--	if (iph.protocol != IPPROTO_UDP)
-+	if (!retc || iph.protocol != IPPROTO_UDP)
- 		return -EINVAL;
- 	/* todo: IPv6 fragments:
- 	 *       I think this only should be done for the first fragment. /HS
-@@ -88,7 +88,7 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
- 	dptr = skb->data + dataoff;
- 	datalen = skb->len - dataoff;
- 
--	if (get_callid(dptr, dataoff, datalen, &matchoff, &matchlen))
-+	if (get_callid(dptr, 0, datalen, &matchoff, &matchlen))
- 		return -EINVAL;
- 
- 	/* N.B: pe_data is only set on success,
-diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
-index f60b4fdeeb8c..6b94f0bc11b8 100644
---- a/net/netfilter/nf_conntrack_core.c
-+++ b/net/netfilter/nf_conntrack_core.c
-@@ -1780,6 +1780,7 @@ void nf_conntrack_init_end(void)
- 
- int nf_conntrack_init_net(struct net *net)
- {
-+	static atomic64_t unique_id;
- 	int ret = -ENOMEM;
- 	int cpu;
- 
-@@ -1802,7 +1803,8 @@ int nf_conntrack_init_net(struct net *net)
- 	if (!net->ct.stat)
- 		goto err_pcpu_lists;
- 
--	net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
-+	net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%llu",
-+				(u64)atomic64_inc_return(&unique_id));
- 	if (!net->ct.slabname)
- 		goto err_slabname;
- 
-diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
-index f1ffb34e253f..d2bc03f0b4d7 100644
---- a/net/netlink/af_netlink.c
-+++ b/net/netlink/af_netlink.c
-@@ -1305,7 +1305,7 @@ static int netlink_release(struct socket *sock)
- 
- 	skb_queue_purge(&sk->sk_write_queue);
- 
--	if (nlk->portid) {
-+	if (nlk->portid && nlk->bound) {
- 		struct netlink_notify n = {
- 						.net = sock_net(sk),
- 						.protocol = sk->sk_protocol,
-diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
-index 2d59df521915..879185fe183f 100644
---- a/net/openvswitch/actions.c
-+++ b/net/openvswitch/actions.c
-@@ -158,9 +158,7 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
- 	new_mpls_lse = (__be32 *)skb_mpls_header(skb);
- 	*new_mpls_lse = mpls->mpls_lse;
- 
--	if (skb->ip_summed == CHECKSUM_COMPLETE)
--		skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse,
--							     MPLS_HLEN, 0));
-+	skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);
- 
- 	hdr = eth_hdr(skb);
- 	hdr->h_proto = mpls->mpls_ethertype;
-@@ -280,7 +278,7 @@ static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
- 	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
- 			       mask->eth_dst);
- 
--	ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
-+	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
- 
- 	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
- 	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
-@@ -463,7 +461,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
- 		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);
- 
- 		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
--			set_ipv6_addr(skb, key->ipv6_proto, saddr, masked,
-+			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
- 				      true);
- 			memcpy(&flow_key->ipv6.addr.src, masked,
- 			       sizeof(flow_key->ipv6.addr.src));
-@@ -485,7 +483,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
- 							     NULL, &flags)
- 					       != NEXTHDR_ROUTING);
- 
--			set_ipv6_addr(skb, key->ipv6_proto, daddr, masked,
-+			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
- 				      recalc_csum);
- 			memcpy(&flow_key->ipv6.addr.dst, masked,
- 			       sizeof(flow_key->ipv6.addr.dst));
-@@ -639,7 +637,7 @@ static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *sk
- 	/* Reconstruct the MAC header.  */
- 	skb_push(skb, data->l2_len);
- 	memcpy(skb->data, &data->l2_data, data->l2_len);
--	ovs_skb_postpush_rcsum(skb, skb->data, data->l2_len);
-+	skb_postpush_rcsum(skb, skb->data, data->l2_len);
- 	skb_reset_mac_header(skb);
- 
- 	ovs_vport_send(vport, skb);
-diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
-index ee6ff8ffc12d..9e5b9fc805fb 100644
---- a/net/openvswitch/conntrack.c
-+++ b/net/openvswitch/conntrack.c
-@@ -320,6 +320,7 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key,
- 	} else if (key->eth.type == htons(ETH_P_IPV6)) {
- 		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
- 
-+		skb_orphan(skb);
- 		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
- 		err = nf_ct_frag6_gather(net, skb, user);
- 		if (err)
-diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
-index 6a6adf314363..4e3972344aa6 100644
---- a/net/openvswitch/vport-netdev.c
-+++ b/net/openvswitch/vport-netdev.c
-@@ -58,7 +58,7 @@ static void netdev_port_receive(struct sk_buff *skb)
- 		return;
- 
- 	skb_push(skb, ETH_HLEN);
--	ovs_skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
-+	skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
- 	ovs_vport_receive(vport, skb, skb_tunnel_info(skb));
- 	return;
- error:
-diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
-index c10899cb9040..f01f28a567ad 100644
---- a/net/openvswitch/vport.h
-+++ b/net/openvswitch/vport.h
-@@ -185,13 +185,6 @@ static inline struct vport *vport_from_priv(void *priv)
- int ovs_vport_receive(struct vport *, struct sk_buff *,
- 		      const struct ip_tunnel_info *);
- 
--static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
--				      const void *start, unsigned int len)
--{
--	if (skb->ip_summed == CHECKSUM_COMPLETE)
--		skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
--}
+-	memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
+-			ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+-	tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
 -
- static inline const char *ovs_vport_name(struct vport *vport)
- {
- 	return vport->dev->name;
-diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
-index 992396aa635c..9cc7b512b472 100644
---- a/net/packet/af_packet.c
-+++ b/net/packet/af_packet.c
-@@ -1916,6 +1916,10 @@ retry:
- 		goto retry;
- 	}
+-	sec_blob->NtChallengeResponse.Length =
+-			cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+-	sec_blob->NtChallengeResponse.MaximumLength =
+-			cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
  
-+	if (!dev_validate_header(dev, skb->data, len)) {
-+		err = -EINVAL;
-+		goto out_unlock;
-+	}
- 	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
- 	    !packet_extra_vlan_len_allowed(dev, skb)) {
- 		err = -EMSGSIZE;
-@@ -2326,18 +2330,6 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
- 	sock_wfree(skb);
- }
+ 	if (ses->domainName == NULL) {
+ 		sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
+@@ -670,20 +678,24 @@ sess_auth_lanman(struct sess_data *sess_data)
  
--static bool ll_header_truncated(const struct net_device *dev, int len)
--{
--	/* net device doesn't like empty head */
--	if (unlikely(len < dev->hard_header_len)) {
--		net_warn_ratelimited("%s: packet size is too short (%d < %d)\n",
--				     current->comm, len, dev->hard_header_len);
--		return true;
--	}
+ 	pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE;
+ 
+-	/* no capabilities flags in old lanman negotiation */
+-	pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
 -
--	return false;
--}
+-	/* Calculate hash with password and copy into bcc_ptr.
+-	 * Encryption Key (stored as in cryptkey) gets used if the
+-	 * security mode bit in Negottiate Protocol response states
+-	 * to use challenge/response method (i.e. Password bit is 1).
+-	 */
+-	rc = calc_lanman_hash(ses->password, ses->server->cryptkey,
+-			      ses->server->sec_mode & SECMODE_PW_ENCRYPT ?
+-			      true : false, lnm_session_key);
 -
- static void tpacket_set_protocol(const struct net_device *dev,
- 				 struct sk_buff *skb)
- {
-@@ -2420,19 +2412,19 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
- 		if (unlikely(err < 0))
- 			return -EINVAL;
- 	} else if (dev->hard_header_len) {
--		if (ll_header_truncated(dev, tp_len))
--			return -EINVAL;
-+		int hdrlen = min_t(int, dev->hard_header_len, tp_len);
- 
- 		skb_push(skb, dev->hard_header_len);
--		err = skb_store_bits(skb, 0, data,
--				dev->hard_header_len);
-+		err = skb_store_bits(skb, 0, data, hdrlen);
- 		if (unlikely(err))
- 			return err;
-+		if (!dev_validate_header(dev, skb->data, hdrlen))
-+			return -EINVAL;
- 		if (!skb->protocol)
- 			tpacket_set_protocol(dev, skb);
- 
--		data += dev->hard_header_len;
--		to_write -= dev->hard_header_len;
-+		data += hdrlen;
-+		to_write -= hdrlen;
- 	}
- 
- 	offset = offset_in_page(data);
-@@ -2763,9 +2755,6 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
- 		offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
- 		if (unlikely(offset < 0))
- 			goto out_free;
--	} else {
--		if (ll_header_truncated(dev, len))
--			goto out_free;
- 	}
- 
- 	/* Returns -EFAULT on error */
-@@ -2773,6 +2762,12 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
- 	if (err)
- 		goto out_free;
- 
-+	if (sock->type == SOCK_RAW &&
-+	    !dev_validate_header(dev, skb->data, len)) {
-+		err = -EINVAL;
-+		goto out_free;
-+	}
+-	memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE);
+-	bcc_ptr += CIFS_AUTH_RESP_SIZE;
++	if (ses->user_name != NULL) {
++		/* no capabilities flags in old lanman negotiation */
++		pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
 +
- 	sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
- 
- 	if (!gso_type && (len > dev->mtu + reserve + extra_len) &&
-@@ -3441,6 +3436,7 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
- 	i->ifindex = mreq->mr_ifindex;
- 	i->alen = mreq->mr_alen;
- 	memcpy(i->addr, mreq->mr_address, i->alen);
-+	memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
- 	i->count = 1;
- 	i->next = po->mclist;
- 	po->mclist = i;
-diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
-index af1acf009866..95b560f0b253 100644
---- a/net/sched/sch_api.c
-+++ b/net/sched/sch_api.c
-@@ -744,14 +744,15 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
- 	return 0;
- }
- 
--void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
-+void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
-+			       unsigned int len)
- {
- 	const struct Qdisc_class_ops *cops;
- 	unsigned long cl;
- 	u32 parentid;
- 	int drops;
- 
--	if (n == 0)
-+	if (n == 0 && len == 0)
- 		return;
- 	drops = max_t(int, n, 0);
- 	rcu_read_lock();
-@@ -774,11 +775,12 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
- 			cops->put(sch, cl);
- 		}
- 		sch->q.qlen -= n;
-+		sch->qstats.backlog -= len;
- 		__qdisc_qstats_drop(sch, drops);
- 	}
- 	rcu_read_unlock();
- }
--EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
-+EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
- 
- static void notify_and_destroy(struct net *net, struct sk_buff *skb,
- 			       struct nlmsghdr *n, u32 clid,
-diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
-index c538d9e4a8f6..baafddf229ce 100644
---- a/net/sched/sch_cbq.c
-+++ b/net/sched/sch_cbq.c
-@@ -1624,13 +1624,8 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
- 			new->reshape_fail = cbq_reshape_fail;
- #endif
- 	}
--	sch_tree_lock(sch);
--	*old = cl->q;
--	cl->q = new;
--	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
--	qdisc_reset(*old);
--	sch_tree_unlock(sch);
- 
-+	*old = qdisc_replace(sch, new, &cl->q);
- 	return 0;
- }
- 
-@@ -1914,7 +1909,7 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
- {
- 	struct cbq_sched_data *q = qdisc_priv(sch);
- 	struct cbq_class *cl = (struct cbq_class *)arg;
--	unsigned int qlen;
-+	unsigned int qlen, backlog;
- 
- 	if (cl->filters || cl->children || cl == &q->link)
- 		return -EBUSY;
-@@ -1922,8 +1917,9 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
- 	sch_tree_lock(sch);
- 
- 	qlen = cl->q->q.qlen;
-+	backlog = cl->q->qstats.backlog;
- 	qdisc_reset(cl->q);
--	qdisc_tree_decrease_qlen(cl->q, qlen);
-+	qdisc_tree_reduce_backlog(cl->q, qlen, backlog);
- 
- 	if (cl->next_alive)
- 		cbq_deactivate_class(cl);
-diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
-index 5ffb8b8337c7..0a08c860eee4 100644
---- a/net/sched/sch_choke.c
-+++ b/net/sched/sch_choke.c
-@@ -128,8 +128,8 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
- 		choke_zap_tail_holes(q);
- 
- 	qdisc_qstats_backlog_dec(sch, skb);
-+	qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
- 	qdisc_drop(skb, sch);
--	qdisc_tree_decrease_qlen(sch, 1);
- 	--sch->q.qlen;
- }
- 
-@@ -456,6 +456,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
- 		old = q->tab;
- 		if (old) {
- 			unsigned int oqlen = sch->q.qlen, tail = 0;
-+			unsigned dropped = 0;
- 
- 			while (q->head != q->tail) {
- 				struct sk_buff *skb = q->tab[q->head];
-@@ -467,11 +468,12 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
- 					ntab[tail++] = skb;
- 					continue;
- 				}
-+				dropped += qdisc_pkt_len(skb);
- 				qdisc_qstats_backlog_dec(sch, skb);
- 				--sch->q.qlen;
- 				qdisc_drop(skb, sch);
- 			}
--			qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
-+			qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
- 			q->head = 0;
- 			q->tail = tail;
- 		}
-diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
-index 535007d5f0b5..9b7e2980ee5c 100644
---- a/net/sched/sch_codel.c
-+++ b/net/sched/sch_codel.c
-@@ -79,12 +79,13 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
- 
- 	skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
- 
--	/* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
-+	/* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
- 	 * or HTB crashes. Defer it for next round.
- 	 */
- 	if (q->stats.drop_count && sch->q.qlen) {
--		qdisc_tree_decrease_qlen(sch, q->stats.drop_count);
-+		qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
- 		q->stats.drop_count = 0;
-+		q->stats.drop_len = 0;
- 	}
- 	if (skb)
- 		qdisc_bstats_update(sch, skb);
-@@ -116,7 +117,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
- {
- 	struct codel_sched_data *q = qdisc_priv(sch);
- 	struct nlattr *tb[TCA_CODEL_MAX + 1];
--	unsigned int qlen;
-+	unsigned int qlen, dropped = 0;
- 	int err;
- 
- 	if (!opt)
-@@ -156,10 +157,11 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
- 	while (sch->q.qlen > sch->limit) {
- 		struct sk_buff *skb = __skb_dequeue(&sch->q);
- 
-+		dropped += qdisc_pkt_len(skb);
- 		qdisc_qstats_backlog_dec(sch, skb);
- 		qdisc_drop(skb, sch);
- 	}
--	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
-+	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
- 
- 	sch_tree_unlock(sch);
- 	return 0;
-diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
-index a1cd778240cd..a63e879e8975 100644
---- a/net/sched/sch_drr.c
-+++ b/net/sched/sch_drr.c
-@@ -53,9 +53,10 @@ static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
- static void drr_purge_queue(struct drr_class *cl)
- {
- 	unsigned int len = cl->qdisc->q.qlen;
-+	unsigned int backlog = cl->qdisc->qstats.backlog;
- 
- 	qdisc_reset(cl->qdisc);
--	qdisc_tree_decrease_qlen(cl->qdisc, len);
-+	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
- }
- 
- static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
-@@ -226,11 +227,7 @@ static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
- 			new = &noop_qdisc;
- 	}
- 
--	sch_tree_lock(sch);
--	drr_purge_queue(cl);
--	*old = cl->qdisc;
--	cl->qdisc = new;
--	sch_tree_unlock(sch);
-+	*old = qdisc_replace(sch, new, &cl->qdisc);
- 	return 0;
- }
- 
-diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
-index f357f34d02d2..d0dff0cd8186 100644
---- a/net/sched/sch_dsmark.c
-+++ b/net/sched/sch_dsmark.c
-@@ -73,13 +73,7 @@ static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
- 			new = &noop_qdisc;
- 	}
- 
--	sch_tree_lock(sch);
--	*old = p->q;
--	p->q = new;
--	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
--	qdisc_reset(*old);
--	sch_tree_unlock(sch);
--
-+	*old = qdisc_replace(sch, new, &p->q);
- 	return 0;
- }
- 
-@@ -264,6 +258,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
- 		return err;
- 	}
- 
-+	qdisc_qstats_backlog_inc(sch, skb);
- 	sch->q.qlen++;
- 
- 	return NET_XMIT_SUCCESS;
-@@ -286,6 +281,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
- 		return NULL;
- 
- 	qdisc_bstats_update(sch, skb);
-+	qdisc_qstats_backlog_dec(sch, skb);
- 	sch->q.qlen--;
- 
- 	index = skb->tc_index & (p->indices - 1);
-@@ -401,6 +397,7 @@ static void dsmark_reset(struct Qdisc *sch)
- 
- 	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
- 	qdisc_reset(p->q);
-+	sch->qstats.backlog = 0;
- 	sch->q.qlen = 0;
- }
- 
-diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
-index 109b2322778f..3c6a47d66a04 100644
---- a/net/sched/sch_fq.c
-+++ b/net/sched/sch_fq.c
-@@ -662,6 +662,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
- 	struct fq_sched_data *q = qdisc_priv(sch);
- 	struct nlattr *tb[TCA_FQ_MAX + 1];
- 	int err, drop_count = 0;
-+	unsigned drop_len = 0;
- 	u32 fq_log;
- 
- 	if (!opt)
-@@ -736,10 +737,11 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
- 
- 		if (!skb)
- 			break;
-+		drop_len += qdisc_pkt_len(skb);
- 		kfree_skb(skb);
- 		drop_count++;
- 	}
--	qdisc_tree_decrease_qlen(sch, drop_count);
-+	qdisc_tree_reduce_backlog(sch, drop_count, drop_len);
++		/* Calculate hash with password and copy into bcc_ptr.
++		 * Encryption Key (stored as in cryptkey) gets used if the
++		 * security mode bit in Negottiate Protocol response states
++		 * to use challenge/response method (i.e. Password bit is 1).
++		 */
++		rc = calc_lanman_hash(ses->password, ses->server->cryptkey,
++				      ses->server->sec_mode & SECMODE_PW_ENCRYPT ?
++				      true : false, lnm_session_key);
++
++		memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE);
++		bcc_ptr += CIFS_AUTH_RESP_SIZE;
++	} else {
++		pSMB->old_req.PasswordLength = 0;
++	}
  
- 	sch_tree_unlock(sch);
- 	return err;
-diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
-index 4c834e93dafb..d3fc8f9dd3d4 100644
---- a/net/sched/sch_fq_codel.c
-+++ b/net/sched/sch_fq_codel.c
-@@ -175,7 +175,7 @@ static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch)
- static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
- {
- 	struct fq_codel_sched_data *q = qdisc_priv(sch);
--	unsigned int idx;
-+	unsigned int idx, prev_backlog;
- 	struct fq_codel_flow *flow;
- 	int uninitialized_var(ret);
- 
-@@ -203,6 +203,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
- 	if (++sch->q.qlen <= sch->limit)
- 		return NET_XMIT_SUCCESS;
- 
-+	prev_backlog = sch->qstats.backlog;
- 	q->drop_overlimit++;
- 	/* Return Congestion Notification only if we dropped a packet
- 	 * from this flow.
-@@ -211,7 +212,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
- 		return NET_XMIT_CN;
- 
- 	/* As we dropped a packet, better let upper stack know this */
--	qdisc_tree_decrease_qlen(sch, 1);
-+	qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
- 	return NET_XMIT_SUCCESS;
- }
+ 	/*
+ 	 * can not sign if LANMAN negotiated so no need
+@@ -769,26 +781,31 @@ sess_auth_ntlm(struct sess_data *sess_data)
+ 	capabilities = cifs_ssetup_hdr(ses, pSMB);
  
-@@ -241,6 +242,7 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
- 	struct fq_codel_flow *flow;
- 	struct list_head *head;
- 	u32 prev_drop_count, prev_ecn_mark;
-+	unsigned int prev_backlog;
+ 	pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
+-	pSMB->req_no_secext.CaseInsensitivePasswordLength =
+-			cpu_to_le16(CIFS_AUTH_RESP_SIZE);
+-	pSMB->req_no_secext.CaseSensitivePasswordLength =
+-			cpu_to_le16(CIFS_AUTH_RESP_SIZE);
+-
+-	/* calculate ntlm response and session key */
+-	rc = setup_ntlm_response(ses, sess_data->nls_cp);
+-	if (rc) {
+-		cifs_dbg(VFS, "Error %d during NTLM authentication\n",
+-				 rc);
+-		goto out;
+-	}
++	if (ses->user_name != NULL) {
++		pSMB->req_no_secext.CaseInsensitivePasswordLength =
++				cpu_to_le16(CIFS_AUTH_RESP_SIZE);
++		pSMB->req_no_secext.CaseSensitivePasswordLength =
++				cpu_to_le16(CIFS_AUTH_RESP_SIZE);
++
++		/* calculate ntlm response and session key */
++		rc = setup_ntlm_response(ses, sess_data->nls_cp);
++		if (rc) {
++			cifs_dbg(VFS, "Error %d during NTLM authentication\n",
++					 rc);
++			goto out;
++		}
  
- begin:
- 	head = &q->new_flows;
-@@ -259,6 +261,7 @@ begin:
+-	/* copy ntlm response */
+-	memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
+-			CIFS_AUTH_RESP_SIZE);
+-	bcc_ptr += CIFS_AUTH_RESP_SIZE;
+-	memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
+-			CIFS_AUTH_RESP_SIZE);
+-	bcc_ptr += CIFS_AUTH_RESP_SIZE;
++		/* copy ntlm response */
++		memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
++				CIFS_AUTH_RESP_SIZE);
++		bcc_ptr += CIFS_AUTH_RESP_SIZE;
++		memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
++				CIFS_AUTH_RESP_SIZE);
++		bcc_ptr += CIFS_AUTH_RESP_SIZE;
++	} else {
++		pSMB->req_no_secext.CaseInsensitivePasswordLength = 0;
++		pSMB->req_no_secext.CaseSensitivePasswordLength = 0;
++	}
  
- 	prev_drop_count = q->cstats.drop_count;
- 	prev_ecn_mark = q->cstats.ecn_mark;
-+	prev_backlog = sch->qstats.backlog;
+ 	if (ses->capabilities & CAP_UNICODE) {
+ 		/* unicode strings must be word aligned */
+@@ -878,22 +895,26 @@ sess_auth_ntlmv2(struct sess_data *sess_data)
+ 	/* LM2 password would be here if we supported it */
+ 	pSMB->req_no_secext.CaseInsensitivePasswordLength = 0;
  
- 	skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
- 			    dequeue);
-@@ -276,12 +279,14 @@ begin:
- 	}
- 	qdisc_bstats_update(sch, skb);
- 	flow->deficit -= qdisc_pkt_len(skb);
--	/* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
-+	/* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
- 	 * or HTB crashes. Defer it for next round.
- 	 */
- 	if (q->cstats.drop_count && sch->q.qlen) {
--		qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
-+		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
-+					  q->cstats.drop_len);
- 		q->cstats.drop_count = 0;
-+		q->cstats.drop_len = 0;
- 	}
- 	return skb;
- }
-@@ -372,11 +377,13 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
- 	while (sch->q.qlen > sch->limit) {
- 		struct sk_buff *skb = fq_codel_dequeue(sch);
+-	/* calculate nlmv2 response and session key */
+-	rc = setup_ntlmv2_rsp(ses, sess_data->nls_cp);
+-	if (rc) {
+-		cifs_dbg(VFS, "Error %d during NTLMv2 authentication\n", rc);
+-		goto out;
+-	}
++	if (ses->user_name != NULL) {
++		/* calculate nlmv2 response and session key */
++		rc = setup_ntlmv2_rsp(ses, sess_data->nls_cp);
++		if (rc) {
++			cifs_dbg(VFS, "Error %d during NTLMv2 authentication\n", rc);
++			goto out;
++		}
  
-+		q->cstats.drop_len += qdisc_pkt_len(skb);
- 		kfree_skb(skb);
- 		q->cstats.drop_count++;
- 	}
--	qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
-+	qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
- 	q->cstats.drop_count = 0;
-+	q->cstats.drop_len = 0;
+-	memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
+-			ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+-	bcc_ptr += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
++		memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
++				ses->auth_key.len - CIFS_SESS_KEY_SIZE);
++		bcc_ptr += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
  
- 	sch_tree_unlock(sch);
- 	return 0;
-diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
-index 16bc83b2842a..aa4725038f94 100644
---- a/net/sched/sch_generic.c
-+++ b/net/sched/sch_generic.c
-@@ -159,12 +159,15 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
- 	if (validate)
- 		skb = validate_xmit_skb_list(skb, dev);
- 
--	if (skb) {
-+	if (likely(skb)) {
- 		HARD_TX_LOCK(dev, txq, smp_processor_id());
- 		if (!netif_xmit_frozen_or_stopped(txq))
- 			skb = dev_hard_start_xmit(skb, dev, txq, &ret);
- 
- 		HARD_TX_UNLOCK(dev, txq);
+-	/* set case sensitive password length after tilen may get
+-	 * assigned, tilen is 0 otherwise.
+-	 */
+-	pSMB->req_no_secext.CaseSensitivePasswordLength =
+-		cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
++		/* set case sensitive password length after tilen may get
++		 * assigned, tilen is 0 otherwise.
++		 */
++		pSMB->req_no_secext.CaseSensitivePasswordLength =
++			cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
 +	} else {
-+		spin_lock(root_lock);
-+		return qdisc_qlen(q);
- 	}
- 	spin_lock(root_lock);
- 
-diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
-index b7ebe2c87586..d783d7cc3348 100644
---- a/net/sched/sch_hfsc.c
-+++ b/net/sched/sch_hfsc.c
-@@ -895,9 +895,10 @@ static void
- hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
- {
- 	unsigned int len = cl->qdisc->q.qlen;
-+	unsigned int backlog = cl->qdisc->qstats.backlog;
- 
- 	qdisc_reset(cl->qdisc);
--	qdisc_tree_decrease_qlen(cl->qdisc, len);
-+	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
- }
++		pSMB->req_no_secext.CaseSensitivePasswordLength = 0;
++	}
  
- static void
-@@ -1215,11 +1216,7 @@ hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
- 			new = &noop_qdisc;
- 	}
+ 	if (ses->capabilities & CAP_UNICODE) {
+ 		if (sess_data->iov[0].iov_len % 2) {
+diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
+index bc0bb9c34f72..0ffa18094335 100644
+--- a/fs/cifs/smb2glob.h
++++ b/fs/cifs/smb2glob.h
+@@ -44,6 +44,7 @@
+ #define SMB2_OP_DELETE 7
+ #define SMB2_OP_HARDLINK 8
+ #define SMB2_OP_SET_EOF 9
++#define SMB2_OP_RMDIR 10
  
--	sch_tree_lock(sch);
--	hfsc_purge_queue(sch, cl);
--	*old = cl->qdisc;
--	cl->qdisc = new;
--	sch_tree_unlock(sch);
-+	*old = qdisc_replace(sch, new, &cl->qdisc);
- 	return 0;
+ /* Used when constructing chained read requests. */
+ #define CHAINED_REQUEST 1
+diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
+index 899bbc86f73e..4f0231e685a9 100644
+--- a/fs/cifs/smb2inode.c
++++ b/fs/cifs/smb2inode.c
+@@ -80,6 +80,10 @@ smb2_open_op_close(const unsigned int xid, struct cifs_tcon *tcon,
+ 		 * SMB2_open() call.
+ 		 */
+ 		break;
++	case SMB2_OP_RMDIR:
++		tmprc = SMB2_rmdir(xid, tcon, fid.persistent_fid,
++				   fid.volatile_fid);
++		break;
+ 	case SMB2_OP_RENAME:
+ 		tmprc = SMB2_rename(xid, tcon, fid.persistent_fid,
+ 				    fid.volatile_fid, (__le16 *)data);
+@@ -191,8 +195,8 @@ smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
+ 	   struct cifs_sb_info *cifs_sb)
+ {
+ 	return smb2_open_op_close(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
+-				  CREATE_NOT_FILE | CREATE_DELETE_ON_CLOSE,
+-				  NULL, SMB2_OP_DELETE);
++				  CREATE_NOT_FILE,
++				  NULL, SMB2_OP_RMDIR);
  }
  
-diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
-index 86b04e31e60b..13d6f83ec491 100644
---- a/net/sched/sch_hhf.c
-+++ b/net/sched/sch_hhf.c
-@@ -382,6 +382,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
- 	struct hhf_sched_data *q = qdisc_priv(sch);
- 	enum wdrr_bucket_idx idx;
- 	struct wdrr_bucket *bucket;
-+	unsigned int prev_backlog;
- 
- 	idx = hhf_classify(skb, sch);
- 
-@@ -409,6 +410,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
- 	if (++sch->q.qlen <= sch->limit)
- 		return NET_XMIT_SUCCESS;
- 
-+	prev_backlog = sch->qstats.backlog;
- 	q->drop_overlimit++;
- 	/* Return Congestion Notification only if we dropped a packet from this
- 	 * bucket.
-@@ -417,7 +419,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
- 		return NET_XMIT_CN;
- 
- 	/* As we dropped a packet, better let upper stack know this. */
--	qdisc_tree_decrease_qlen(sch, 1);
-+	qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
- 	return NET_XMIT_SUCCESS;
+ int
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 42e1f440eb1e..8f38e33d365b 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -2575,6 +2575,22 @@ SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
  }
  
-@@ -527,7 +529,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
+ int
++SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon,
++		  u64 persistent_fid, u64 volatile_fid)
++{
++	__u8 delete_pending = 1;
++	void *data;
++	unsigned int size;
++
++	data = &delete_pending;
++	size = 1; /* sizeof __u8 */
++
++	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
++			current->tgid, FILE_DISPOSITION_INFORMATION, 1, &data,
++			&size);
++}
++
++int
+ SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
+ 		  u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
  {
- 	struct hhf_sched_data *q = qdisc_priv(sch);
- 	struct nlattr *tb[TCA_HHF_MAX + 1];
--	unsigned int qlen;
-+	unsigned int qlen, prev_backlog;
- 	int err;
- 	u64 non_hh_quantum;
- 	u32 new_quantum = q->quantum;
-@@ -577,12 +579,14 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
- 	}
+diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
+index 4f07dc93608d..eb2cde2f64ba 100644
+--- a/fs/cifs/smb2proto.h
++++ b/fs/cifs/smb2proto.h
+@@ -141,6 +141,8 @@ extern int SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
+ extern int SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
+ 		       u64 persistent_fid, u64 volatile_fid,
+ 		       __le16 *target_file);
++extern int SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon,
++		      u64 persistent_fid, u64 volatile_fid);
+ extern int SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
+ 			     u64 persistent_fid, u64 volatile_fid,
+ 			     __le16 *target_file);
+diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
+index 458cf463047b..82067ca22f2b 100644
+--- a/fs/hpfs/super.c
++++ b/fs/hpfs/super.c
+@@ -15,6 +15,7 @@
+ #include <linux/sched.h>
+ #include <linux/bitmap.h>
+ #include <linux/slab.h>
++#include <linux/seq_file.h>
  
- 	qlen = sch->q.qlen;
-+	prev_backlog = sch->qstats.backlog;
- 	while (sch->q.qlen > sch->limit) {
- 		struct sk_buff *skb = hhf_dequeue(sch);
+ /* Mark the filesystem dirty, so that chkdsk checks it when os/2 booted */
  
- 		kfree_skb(skb);
- 	}
--	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
-+	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
-+				  prev_backlog - sch->qstats.backlog);
+@@ -453,10 +454,6 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
+ 	int lowercase, eas, chk, errs, chkdsk, timeshift;
+ 	int o;
+ 	struct hpfs_sb_info *sbi = hpfs_sb(s);
+-	char *new_opts = kstrdup(data, GFP_KERNEL);
+-
+-	if (!new_opts)
+-		return -ENOMEM;
  
- 	sch_tree_unlock(sch);
- 	return 0;
-diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
-index 15ccd7f8fb2a..87b02ed3d5f2 100644
---- a/net/sched/sch_htb.c
-+++ b/net/sched/sch_htb.c
-@@ -600,6 +600,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
- 		htb_activate(q, cl);
- 	}
+ 	sync_filesystem(s);
  
-+	qdisc_qstats_backlog_inc(sch, skb);
- 	sch->q.qlen++;
- 	return NET_XMIT_SUCCESS;
- }
-@@ -889,6 +890,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
- ok:
- 		qdisc_bstats_update(sch, skb);
- 		qdisc_unthrottled(sch);
-+		qdisc_qstats_backlog_dec(sch, skb);
- 		sch->q.qlen--;
- 		return skb;
- 	}
-@@ -955,6 +957,7 @@ static unsigned int htb_drop(struct Qdisc *sch)
- 			unsigned int len;
- 			if (cl->un.leaf.q->ops->drop &&
- 			    (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
-+				sch->qstats.backlog -= len;
- 				sch->q.qlen--;
- 				if (!cl->un.leaf.q->q.qlen)
- 					htb_deactivate(q, cl);
-@@ -984,12 +987,12 @@ static void htb_reset(struct Qdisc *sch)
- 			}
- 			cl->prio_activity = 0;
- 			cl->cmode = HTB_CAN_SEND;
--
- 		}
- 	}
- 	qdisc_watchdog_cancel(&q->watchdog);
- 	__skb_queue_purge(&q->direct_queue);
- 	sch->q.qlen = 0;
-+	sch->qstats.backlog = 0;
- 	memset(q->hlevel, 0, sizeof(q->hlevel));
- 	memset(q->row_mask, 0, sizeof(q->row_mask));
- 	for (i = 0; i < TC_HTB_NUMPRIO; i++)
-@@ -1163,14 +1166,7 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
- 				     cl->common.classid)) == NULL)
- 		return -ENOBUFS;
- 
--	sch_tree_lock(sch);
--	*old = cl->un.leaf.q;
--	cl->un.leaf.q = new;
--	if (*old != NULL) {
--		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
--		qdisc_reset(*old);
--	}
--	sch_tree_unlock(sch);
-+	*old = qdisc_replace(sch, new, &cl->un.leaf.q);
- 	return 0;
- }
+@@ -493,17 +490,44 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
  
-@@ -1272,7 +1268,6 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
- {
- 	struct htb_sched *q = qdisc_priv(sch);
- 	struct htb_class *cl = (struct htb_class *)arg;
--	unsigned int qlen;
- 	struct Qdisc *new_q = NULL;
- 	int last_child = 0;
- 
-@@ -1292,9 +1287,11 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
- 	sch_tree_lock(sch);
- 
- 	if (!cl->level) {
--		qlen = cl->un.leaf.q->q.qlen;
-+		unsigned int qlen = cl->un.leaf.q->q.qlen;
-+		unsigned int backlog = cl->un.leaf.q->qstats.backlog;
-+
- 		qdisc_reset(cl->un.leaf.q);
--		qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
-+		qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog);
- 	}
+ 	if (!(*flags & MS_RDONLY)) mark_dirty(s, 1);
  
- 	/* delete from hash and active; remainder in destroy_class */
-@@ -1428,10 +1425,11 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
- 		sch_tree_lock(sch);
- 		if (parent && !parent->level) {
- 			unsigned int qlen = parent->un.leaf.q->q.qlen;
-+			unsigned int backlog = parent->un.leaf.q->qstats.backlog;
- 
- 			/* turn parent into inner node */
- 			qdisc_reset(parent->un.leaf.q);
--			qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
-+			qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog);
- 			qdisc_destroy(parent->un.leaf.q);
- 			if (parent->prio_activity)
- 				htb_deactivate(q, parent);
-diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
-index 4e904ca0af9d..bcdd54bb101c 100644
---- a/net/sched/sch_multiq.c
-+++ b/net/sched/sch_multiq.c
-@@ -218,7 +218,8 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
- 		if (q->queues[i] != &noop_qdisc) {
- 			struct Qdisc *child = q->queues[i];
- 			q->queues[i] = &noop_qdisc;
--			qdisc_tree_decrease_qlen(child, child->q.qlen);
-+			qdisc_tree_reduce_backlog(child, child->q.qlen,
-+						  child->qstats.backlog);
- 			qdisc_destroy(child);
- 		}
- 	}
-@@ -238,8 +239,9 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
- 				q->queues[i] = child;
- 
- 				if (old != &noop_qdisc) {
--					qdisc_tree_decrease_qlen(old,
--								 old->q.qlen);
-+					qdisc_tree_reduce_backlog(old,
-+								  old->q.qlen,
-+								  old->qstats.backlog);
- 					qdisc_destroy(old);
- 				}
- 				sch_tree_unlock(sch);
-@@ -303,13 +305,7 @@ static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
- 	if (new == NULL)
- 		new = &noop_qdisc;
- 
--	sch_tree_lock(sch);
--	*old = q->queues[band];
--	q->queues[band] = new;
--	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
--	qdisc_reset(*old);
--	sch_tree_unlock(sch);
+-	replace_mount_options(s, new_opts);
 -
-+	*old = qdisc_replace(sch, new, &q->queues[band]);
+ 	hpfs_unlock(s);
  	return 0;
- }
  
-diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
-index 5abd1d9de989..4befe97a9034 100644
---- a/net/sched/sch_netem.c
-+++ b/net/sched/sch_netem.c
-@@ -395,6 +395,25 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
- 	sch->q.qlen++;
+ out_err:
+ 	hpfs_unlock(s);
+-	kfree(new_opts);
+ 	return -EINVAL;
  }
  
-+/* netem can't properly corrupt a megapacket (like we get from GSO), so instead
-+ * when we statistically choose to corrupt one, we instead segment it, returning
-+ * the first packet to be corrupted, and re-enqueue the remaining frames
-+ */
-+static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
++static int hpfs_show_options(struct seq_file *seq, struct dentry *root)
 +{
-+	struct sk_buff *segs;
-+	netdev_features_t features = netif_skb_features(skb);
-+
-+	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
-+
-+	if (IS_ERR_OR_NULL(segs)) {
-+		qdisc_reshape_fail(skb, sch);
-+		return NULL;
-+	}
-+	consume_skb(skb);
-+	return segs;
-+}
-+
- /*
-  * Insert one skb into qdisc.
-  * Note: parent depends on return value to account for queue length.
-@@ -407,7 +426,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
- 	/* We don't fill cb now as skb_unshare() may invalidate it */
- 	struct netem_skb_cb *cb;
- 	struct sk_buff *skb2;
-+	struct sk_buff *segs = NULL;
-+	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
-+	int nb = 0;
- 	int count = 1;
-+	int rc = NET_XMIT_SUCCESS;
- 
- 	/* Random duplication */
- 	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
-@@ -453,10 +476,23 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
- 	 * do it now in software before we mangle it.
- 	 */
- 	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
-+		if (skb_is_gso(skb)) {
-+			segs = netem_segment(skb, sch);
-+			if (!segs)
-+				return NET_XMIT_DROP;
-+		} else {
-+			segs = skb;
-+		}
-+
-+		skb = segs;
-+		segs = segs->next;
++	struct hpfs_sb_info *sbi = hpfs_sb(root->d_sb);
 +
- 		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
- 		    (skb->ip_summed == CHECKSUM_PARTIAL &&
--		     skb_checksum_help(skb)))
--			return qdisc_drop(skb, sch);
-+		     skb_checksum_help(skb))) {
-+			rc = qdisc_drop(skb, sch);
-+			goto finish_segs;
-+		}
- 
- 		skb->data[prandom_u32() % skb_headlen(skb)] ^=
- 			1<<(prandom_u32() % 8);
-@@ -516,6 +552,27 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
- 		sch->qstats.requeues++;
- 	}
- 
-+finish_segs:
-+	if (segs) {
-+		while (segs) {
-+			skb2 = segs->next;
-+			segs->next = NULL;
-+			qdisc_skb_cb(segs)->pkt_len = segs->len;
-+			last_len = segs->len;
-+			rc = qdisc_enqueue(segs, sch);
-+			if (rc != NET_XMIT_SUCCESS) {
-+				if (net_xmit_drop_count(rc))
-+					qdisc_qstats_drop(sch);
-+			} else {
-+				nb++;
-+				len += last_len;
-+			}
-+			segs = skb2;
-+		}
-+		sch->q.qlen += nb;
-+		if (nb > 1)
-+			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
-+	}
- 	return NET_XMIT_SUCCESS;
- }
- 
-@@ -598,7 +655,8 @@ deliver:
- 				if (unlikely(err != NET_XMIT_SUCCESS)) {
- 					if (net_xmit_drop_count(err)) {
- 						qdisc_qstats_drop(sch);
--						qdisc_tree_decrease_qlen(sch, 1);
-+						qdisc_tree_reduce_backlog(sch, 1,
-+									  qdisc_pkt_len(skb));
- 					}
- 				}
- 				goto tfifo_dequeue;
-@@ -1037,15 +1095,7 @@ static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
- {
- 	struct netem_sched_data *q = qdisc_priv(sch);
- 
--	sch_tree_lock(sch);
--	*old = q->qdisc;
--	q->qdisc = new;
--	if (*old) {
--		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
--		qdisc_reset(*old);
--	}
--	sch_tree_unlock(sch);
--
-+	*old = qdisc_replace(sch, new, &q->qdisc);
- 	return 0;
- }
++	seq_printf(seq, ",uid=%u", from_kuid_munged(&init_user_ns, sbi->sb_uid));
++	seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, sbi->sb_gid));
++	seq_printf(seq, ",umask=%03o", (~sbi->sb_mode & 0777));
++	if (sbi->sb_lowercase)
++		seq_printf(seq, ",case=lower");
++	if (!sbi->sb_chk)
++		seq_printf(seq, ",check=none");
++	if (sbi->sb_chk == 2)
++		seq_printf(seq, ",check=strict");
++	if (!sbi->sb_err)
++		seq_printf(seq, ",errors=continue");
++	if (sbi->sb_err == 2)
++		seq_printf(seq, ",errors=panic");
++	if (!sbi->sb_chkdsk)
++		seq_printf(seq, ",chkdsk=no");
++	if (sbi->sb_chkdsk == 2)
++		seq_printf(seq, ",chkdsk=always");
++	if (!sbi->sb_eas)
++		seq_printf(seq, ",eas=no");
++	if (sbi->sb_eas == 1)
++		seq_printf(seq, ",eas=ro");
++	if (sbi->sb_timeshift)
++		seq_printf(seq, ",timeshift=%d", sbi->sb_timeshift);
++	return 0;
++}
++
+ /* Super operations */
  
-diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
-index b783a446d884..71ae3b9629f9 100644
---- a/net/sched/sch_pie.c
-+++ b/net/sched/sch_pie.c
-@@ -183,7 +183,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
- {
- 	struct pie_sched_data *q = qdisc_priv(sch);
- 	struct nlattr *tb[TCA_PIE_MAX + 1];
--	unsigned int qlen;
-+	unsigned int qlen, dropped = 0;
- 	int err;
+ static const struct super_operations hpfs_sops =
+@@ -514,7 +538,7 @@ static const struct super_operations hpfs_sops =
+ 	.put_super	= hpfs_put_super,
+ 	.statfs		= hpfs_statfs,
+ 	.remount_fs	= hpfs_remount_fs,
+-	.show_options	= generic_show_options,
++	.show_options	= hpfs_show_options,
+ };
  
- 	if (!opt)
-@@ -232,10 +232,11 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
- 	while (sch->q.qlen > sch->limit) {
- 		struct sk_buff *skb = __skb_dequeue(&sch->q);
+ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
+@@ -537,8 +561,6 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
  
-+		dropped += qdisc_pkt_len(skb);
- 		qdisc_qstats_backlog_dec(sch, skb);
- 		qdisc_drop(skb, sch);
- 	}
--	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
-+	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
+ 	int o;
  
- 	sch_tree_unlock(sch);
- 	return 0;
-diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
-index ba6487f2741f..fee1b15506b2 100644
---- a/net/sched/sch_prio.c
-+++ b/net/sched/sch_prio.c
-@@ -191,7 +191,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
- 		struct Qdisc *child = q->queues[i];
- 		q->queues[i] = &noop_qdisc;
- 		if (child != &noop_qdisc) {
--			qdisc_tree_decrease_qlen(child, child->q.qlen);
-+			qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog);
- 			qdisc_destroy(child);
- 		}
- 	}
-@@ -210,8 +210,9 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
- 				q->queues[i] = child;
- 
- 				if (old != &noop_qdisc) {
--					qdisc_tree_decrease_qlen(old,
--								 old->q.qlen);
-+					qdisc_tree_reduce_backlog(old,
-+								  old->q.qlen,
-+								  old->qstats.backlog);
- 					qdisc_destroy(old);
- 				}
- 				sch_tree_unlock(sch);
-@@ -268,13 +269,7 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
- 	if (new == NULL)
- 		new = &noop_qdisc;
- 
--	sch_tree_lock(sch);
--	*old = q->queues[band];
--	q->queues[band] = new;
--	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
--	qdisc_reset(*old);
--	sch_tree_unlock(sch);
+-	save_mount_options(s, options);
 -
-+	*old = qdisc_replace(sch, new, &q->queues[band]);
- 	return 0;
- }
- 
-diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
-index 3dc3a6e56052..8d2d8d953432 100644
---- a/net/sched/sch_qfq.c
-+++ b/net/sched/sch_qfq.c
-@@ -220,9 +220,10 @@ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
- static void qfq_purge_queue(struct qfq_class *cl)
+ 	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
+ 	if (!sbi) {
+ 		return -ENOMEM;
+diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
+index 39e1cb201b8e..332da3ad8eb5 100644
+--- a/include/asm-generic/qspinlock.h
++++ b/include/asm-generic/qspinlock.h
+@@ -28,7 +28,30 @@
+  */
+ static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
  {
- 	unsigned int len = cl->qdisc->q.qlen;
-+	unsigned int backlog = cl->qdisc->qstats.backlog;
- 
- 	qdisc_reset(cl->qdisc);
--	qdisc_tree_decrease_qlen(cl->qdisc, len);
-+	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
- }
- 
- static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
-@@ -617,11 +618,7 @@ static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
- 			new = &noop_qdisc;
- 	}
- 
--	sch_tree_lock(sch);
--	qfq_purge_queue(cl);
--	*old = cl->qdisc;
--	cl->qdisc = new;
--	sch_tree_unlock(sch);
-+	*old = qdisc_replace(sch, new, &cl->qdisc);
- 	return 0;
- }
- 
-diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
-index 6c0534cc7758..8c0508c0e287 100644
---- a/net/sched/sch_red.c
-+++ b/net/sched/sch_red.c
-@@ -210,7 +210,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
- 	q->flags = ctl->flags;
- 	q->limit = ctl->limit;
- 	if (child) {
--		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
-+		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
-+					  q->qdisc->qstats.backlog);
- 		qdisc_destroy(q->qdisc);
- 		q->qdisc = child;
- 	}
-@@ -313,12 +314,7 @@ static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
- 	if (new == NULL)
- 		new = &noop_qdisc;
- 
--	sch_tree_lock(sch);
--	*old = q->qdisc;
--	q->qdisc = new;
--	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
--	qdisc_reset(*old);
--	sch_tree_unlock(sch);
-+	*old = qdisc_replace(sch, new, &q->qdisc);
- 	return 0;
- }
- 
-diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
-index 5bbb6332ec57..c69611640fa5 100644
---- a/net/sched/sch_sfb.c
-+++ b/net/sched/sch_sfb.c
-@@ -510,7 +510,8 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
- 
- 	sch_tree_lock(sch);
- 
--	qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
-+	qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
-+				  q->qdisc->qstats.backlog);
- 	qdisc_destroy(q->qdisc);
- 	q->qdisc = child;
- 
-@@ -606,12 +607,7 @@ static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
- 	if (new == NULL)
- 		new = &noop_qdisc;
- 
--	sch_tree_lock(sch);
--	*old = q->qdisc;
--	q->qdisc = new;
--	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
--	qdisc_reset(*old);
--	sch_tree_unlock(sch);
-+	*old = qdisc_replace(sch, new, &q->qdisc);
- 	return 0;
+-	return atomic_read(&lock->val);
++	/*
++	 * queued_spin_lock_slowpath() can ACQUIRE the lock before
++	 * issuing the unordered store that sets _Q_LOCKED_VAL.
++	 *
++	 * See both smp_cond_acquire() sites for more detail.
++	 *
++	 * This however means that in code like:
++	 *
++	 *   spin_lock(A)		spin_lock(B)
++	 *   spin_unlock_wait(B)	spin_is_locked(A)
++	 *   do_something()		do_something()
++	 *
++	 * Both CPUs can end up running do_something() because the store
++	 * setting _Q_LOCKED_VAL will pass through the loads in
++	 * spin_unlock_wait() and/or spin_is_locked().
++	 *
++	 * Avoid this by issuing a full memory barrier between the spin_lock()
++	 * and the loads in spin_unlock_wait() and spin_is_locked().
++	 *
++	 * Note that regular mutual exclusion doesn't care about this
++	 * delayed store.
++	 */
++	smp_mb();
++	return atomic_read(&lock->val) & _Q_LOCKED_MASK;
  }
  
-diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
-index 3abab534eb5c..498f0a2cb47f 100644
---- a/net/sched/sch_sfq.c
-+++ b/net/sched/sch_sfq.c
-@@ -346,7 +346,7 @@ static int
- sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ /**
+@@ -108,6 +131,8 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
+  */
+ static inline void queued_spin_unlock_wait(struct qspinlock *lock)
  {
- 	struct sfq_sched_data *q = qdisc_priv(sch);
--	unsigned int hash;
-+	unsigned int hash, dropped;
- 	sfq_index x, qlen;
- 	struct sfq_slot *slot;
- 	int uninitialized_var(ret);
-@@ -461,7 +461,7 @@ enqueue:
- 		return NET_XMIT_SUCCESS;
- 
- 	qlen = slot->qlen;
--	sfq_drop(sch);
-+	dropped = sfq_drop(sch);
- 	/* Return Congestion Notification only if we dropped a packet
- 	 * from this flow.
- 	 */
-@@ -469,7 +469,7 @@ enqueue:
- 		return NET_XMIT_CN;
- 
- 	/* As we dropped a packet, better let upper stack know this */
--	qdisc_tree_decrease_qlen(sch, 1);
-+	qdisc_tree_reduce_backlog(sch, 1, dropped);
- 	return NET_XMIT_SUCCESS;
- }
- 
-@@ -537,6 +537,7 @@ static void sfq_rehash(struct Qdisc *sch)
- 	struct sfq_slot *slot;
- 	struct sk_buff_head list;
- 	int dropped = 0;
-+	unsigned int drop_len = 0;
- 
- 	__skb_queue_head_init(&list);
- 
-@@ -565,6 +566,7 @@ static void sfq_rehash(struct Qdisc *sch)
- 			if (x >= SFQ_MAX_FLOWS) {
- drop:
- 				qdisc_qstats_backlog_dec(sch, skb);
-+				drop_len += qdisc_pkt_len(skb);
- 				kfree_skb(skb);
- 				dropped++;
- 				continue;
-@@ -594,7 +596,7 @@ drop:
- 		}
- 	}
- 	sch->q.qlen -= dropped;
--	qdisc_tree_decrease_qlen(sch, dropped);
-+	qdisc_tree_reduce_backlog(sch, dropped, drop_len);
++	/* See queued_spin_is_locked() */
++	smp_mb();
+ 	while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
+ 		cpu_relax();
  }
+diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h
+index 3d1a3af5cf59..a2508a8f9a9c 100644
+--- a/include/asm-generic/siginfo.h
++++ b/include/asm-generic/siginfo.h
+@@ -17,21 +17,6 @@
+ struct siginfo;
+ void do_schedule_next_timer(struct siginfo *info);
  
- static void sfq_perturbation(unsigned long arg)
-@@ -618,7 +620,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
- 	struct sfq_sched_data *q = qdisc_priv(sch);
- 	struct tc_sfq_qopt *ctl = nla_data(opt);
- 	struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
--	unsigned int qlen;
-+	unsigned int qlen, dropped = 0;
- 	struct red_parms *p = NULL;
- 
- 	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
-@@ -667,8 +669,8 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
- 
- 	qlen = sch->q.qlen;
- 	while (sch->q.qlen > q->limit)
--		sfq_drop(sch);
--	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
-+		dropped += sfq_drop(sch);
-+	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
- 
- 	del_timer(&q->perturb_timer);
- 	if (q->perturb_period) {
-diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
-index a4afde14e865..c2fbde742f37 100644
---- a/net/sched/sch_tbf.c
-+++ b/net/sched/sch_tbf.c
-@@ -160,6 +160,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
- 	struct tbf_sched_data *q = qdisc_priv(sch);
- 	struct sk_buff *segs, *nskb;
- 	netdev_features_t features = netif_skb_features(skb);
-+	unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
- 	int ret, nb;
- 
- 	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
-@@ -172,6 +173,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
- 		nskb = segs->next;
- 		segs->next = NULL;
- 		qdisc_skb_cb(segs)->pkt_len = segs->len;
-+		len += segs->len;
- 		ret = qdisc_enqueue(segs, q->qdisc);
- 		if (ret != NET_XMIT_SUCCESS) {
- 			if (net_xmit_drop_count(ret))
-@@ -183,7 +185,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
- 	}
- 	sch->q.qlen += nb;
- 	if (nb > 1)
--		qdisc_tree_decrease_qlen(sch, 1 - nb);
-+		qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
- 	consume_skb(skb);
- 	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
- }
-@@ -399,7 +401,8 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
- 
- 	sch_tree_lock(sch);
- 	if (child) {
--		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
-+		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
-+					  q->qdisc->qstats.backlog);
- 		qdisc_destroy(q->qdisc);
- 		q->qdisc = child;
- 	}
-@@ -502,13 +505,7 @@ static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
- 	if (new == NULL)
- 		new = &noop_qdisc;
- 
--	sch_tree_lock(sch);
--	*old = q->qdisc;
--	q->qdisc = new;
--	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
--	qdisc_reset(*old);
--	sch_tree_unlock(sch);
+-#ifndef HAVE_ARCH_COPY_SIGINFO
 -
-+	*old = qdisc_replace(sch, new, &q->qdisc);
- 	return 0;
- }
+-#include <linux/string.h>
+-
+-static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
+-{
+-	if (from->si_code < 0)
+-		memcpy(to, from, sizeof(*to));
+-	else
+-		/* _sigchld is currently the largest know union member */
+-		memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
+-}
+-
+-#endif
+-
+ extern int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from);
  
-diff --git a/net/socket.c b/net/socket.c
-index c044d1e8508c..db13ae893dce 100644
---- a/net/socket.c
-+++ b/net/socket.c
-@@ -2240,31 +2240,31 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
- 		cond_resched();
- 	}
+ #endif
+diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
+index 735f9f8c4e43..5261751f6bd4 100644
+--- a/include/linux/can/dev.h
++++ b/include/linux/can/dev.h
+@@ -40,8 +40,11 @@ struct can_priv {
+ 	struct can_clock clock;
  
--out_put:
--	fput_light(sock->file, fput_needed);
--
- 	if (err == 0)
--		return datagrams;
-+		goto out_put;
- 
--	if (datagrams != 0) {
-+	if (datagrams == 0) {
-+		datagrams = err;
-+		goto out_put;
-+	}
+ 	enum can_state state;
+-	u32 ctrlmode;
+-	u32 ctrlmode_supported;
 +
-+	/*
-+	 * We may return less entries than requested (vlen) if the
-+	 * sock is non block and there aren't enough datagrams...
-+	 */
-+	if (err != -EAGAIN) {
- 		/*
--		 * We may return less entries than requested (vlen) if the
--		 * sock is non block and there aren't enough datagrams...
-+		 * ... or  if recvmsg returns an error after we
-+		 * received some datagrams, where we record the
-+		 * error to return on the next call or if the
-+		 * app asks about it using getsockopt(SO_ERROR).
- 		 */
--		if (err != -EAGAIN) {
--			/*
--			 * ... or  if recvmsg returns an error after we
--			 * received some datagrams, where we record the
--			 * error to return on the next call or if the
--			 * app asks about it using getsockopt(SO_ERROR).
--			 */
--			sock->sk->sk_err = -err;
--		}
--
--		return datagrams;
-+		sock->sk->sk_err = -err;
- 	}
-+out_put:
-+	fput_light(sock->file, fput_needed);
++	/* CAN controller features - see include/uapi/linux/can/netlink.h */
++	u32 ctrlmode;		/* current options setting */
++	u32 ctrlmode_supported;	/* options that can be modified by netlink */
++	u32 ctrlmode_static;	/* static enabled options for driver/hardware */
  
--	return err;
-+	return datagrams;
+ 	int restart_ms;
+ 	struct timer_list restart_timer;
+@@ -108,6 +111,21 @@ static inline bool can_is_canfd_skb(const struct sk_buff *skb)
+ 	return skb->len == CANFD_MTU;
  }
  
- SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg,
-diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
-index 273bc3a35425..008c25d1b9f9 100644
---- a/net/sunrpc/cache.c
-+++ b/net/sunrpc/cache.c
-@@ -1182,14 +1182,14 @@ int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
- 	}
++/* helper to define static CAN controller features at device creation time */
++static inline void can_set_static_ctrlmode(struct net_device *dev,
++					   u32 static_mode)
++{
++	struct can_priv *priv = netdev_priv(dev);
++
++	/* alloc_candev() succeeded => netdev_priv() is valid at this point */
++	priv->ctrlmode = static_mode;
++	priv->ctrlmode_static = static_mode;
++
++	/* override MTU which was set by default in can_setup()? */
++	if (static_mode & CAN_CTRLMODE_FD)
++		dev->mtu = CANFD_MTU;
++}
++
+ /* get data length from can_dlc with sanitized can_dlc */
+ u8 can_dlc2len(u8 can_dlc);
  
- 	crq->q.reader = 0;
--	crq->item = cache_get(h);
- 	crq->buf = buf;
- 	crq->len = 0;
- 	crq->readers = 0;
- 	spin_lock(&queue_lock);
--	if (test_bit(CACHE_PENDING, &h->flags))
-+	if (test_bit(CACHE_PENDING, &h->flags)) {
-+		crq->item = cache_get(h);
- 		list_add_tail(&crq->q.list, &detail->queue);
--	else
-+	} else
- 		/* Lost a race, no longer PENDING, so don't enqueue */
- 		ret = -EAGAIN;
- 	spin_unlock(&queue_lock);
-diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
-index bbe65dcb9738..c93e67beaea7 100644
---- a/net/vmw_vsock/af_vsock.c
-+++ b/net/vmw_vsock/af_vsock.c
-@@ -1789,27 +1789,8 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
- 	else if (sk->sk_shutdown & RCV_SHUTDOWN)
- 		err = 0;
- 
--	if (copied > 0) {
--		/* We only do these additional bookkeeping/notification steps
--		 * if we actually copied something out of the queue pair
--		 * instead of just peeking ahead.
--		 */
--
--		if (!(flags & MSG_PEEK)) {
--			/* If the other side has shutdown for sending and there
--			 * is nothing more to read, then modify the socket
--			 * state.
--			 */
--			if (vsk->peer_shutdown & SEND_SHUTDOWN) {
--				if (vsock_stream_has_data(vsk) <= 0) {
--					sk->sk_state = SS_UNCONNECTED;
--					sock_set_flag(sk, SOCK_DONE);
--					sk->sk_state_change(sk);
--				}
--			}
--		}
-+	if (copied > 0)
- 		err = copied;
--	}
+diff --git a/include/linux/signal.h b/include/linux/signal.h
+index 92557bbce7e7..d80259afb9e5 100644
+--- a/include/linux/signal.h
++++ b/include/linux/signal.h
+@@ -28,6 +28,21 @@ struct sigpending {
+ 	sigset_t signal;
+ };
  
- out:
- 	release_sock(sk);
-diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
-index 711cb7ad6ae0..ab62d305b48b 100644
---- a/net/wireless/nl80211.c
-+++ b/net/wireless/nl80211.c
-@@ -13201,7 +13201,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
- 	struct wireless_dev *wdev;
- 	struct cfg80211_beacon_registration *reg, *tmp;
++#ifndef HAVE_ARCH_COPY_SIGINFO
++
++#include <linux/string.h>
++
++static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
++{
++	if (from->si_code < 0)
++		memcpy(to, from, sizeof(*to));
++	else
++		/* _sigchld is currently the largest know union member */
++		memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
++}
++
++#endif
++
+ /*
+  * Define some primitives to manipulate sigset_t.
+  */
+diff --git a/include/linux/tty.h b/include/linux/tty.h
+index 19199c26783f..e5b996d887ce 100644
+--- a/include/linux/tty.h
++++ b/include/linux/tty.h
+@@ -338,7 +338,6 @@ struct tty_file_private {
+ #define TTY_EXCLUSIVE 		3	/* Exclusive open mode */
+ #define TTY_DEBUG 		4	/* Debugging */
+ #define TTY_DO_WRITE_WAKEUP 	5	/* Call write_wakeup after queuing new */
+-#define TTY_OTHER_DONE		6	/* Closed pty has completed input processing */
+ #define TTY_LDISC_OPEN	 	11	/* Line discipline is open */
+ #define TTY_PTY_LOCK 		16	/* pty private */
+ #define TTY_NO_WRITE_SPLIT 	17	/* Preserve write boundaries to driver */
+@@ -464,6 +463,7 @@ extern void tty_buffer_init(struct tty_port *port);
+ extern void tty_buffer_set_lock_subclass(struct tty_port *port);
+ extern bool tty_buffer_restart_work(struct tty_port *port);
+ extern bool tty_buffer_cancel_work(struct tty_port *port);
++extern void tty_buffer_flush_work(struct tty_port *port);
+ extern speed_t tty_termios_baud_rate(struct ktermios *termios);
+ extern speed_t tty_termios_input_baud_rate(struct ktermios *termios);
+ extern void tty_termios_encode_baud_rate(struct ktermios *termios,
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index 89533ba38691..f3dbc217ff41 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -1066,7 +1066,7 @@ struct usbdrv_wrap {
+  *	for interfaces bound to this driver.
+  * @soft_unbind: if set to 1, the USB core will not kill URBs and disable
+  *	endpoints before calling the driver's disconnect method.
+- * @disable_hub_initiated_lpm: if set to 0, the USB core will not allow hubs
++ * @disable_hub_initiated_lpm: if set to 1, the USB core will not allow hubs
+  *	to initiate lower power link state transitions when an idle timeout
+  *	occurs.  Device-initiated USB 3.0 link PM will still be allowed.
+  *
+diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
+index ba93c0f69870..a5d31f794cac 100644
+--- a/include/scsi/scsi_device.h
++++ b/include/scsi/scsi_device.h
+@@ -240,6 +240,7 @@ scmd_printk(const char *, const struct scsi_cmnd *, const char *, ...);
+ enum scsi_target_state {
+ 	STARGET_CREATED = 1,
+ 	STARGET_RUNNING,
++	STARGET_REMOVE,
+ 	STARGET_DEL,
+ };
  
--	if (state != NETLINK_URELEASE)
-+	if (state != NETLINK_URELEASE || notify->protocol != NETLINK_GENERIC)
- 		return NOTIFY_DONE;
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index a0ef98b258d7..477fb6b8ca20 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -1090,6 +1090,7 @@ static void put_ctx(struct perf_event_context *ctx)
+  * function.
+  *
+  * Lock order:
++ *    cred_guard_mutex
+  *	task_struct::perf_event_mutex
+  *	  perf_event_context::mutex
+  *	    perf_event::child_mutex;
+@@ -3415,7 +3416,6 @@ static struct task_struct *
+ find_lively_task_by_vpid(pid_t vpid)
+ {
+ 	struct task_struct *task;
+-	int err;
  
  	rcu_read_lock();
-diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
-index 7ecd04c21360..997ff7b2509b 100644
---- a/net/x25/x25_facilities.c
-+++ b/net/x25/x25_facilities.c
-@@ -277,6 +277,7 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
- 
- 	memset(&theirs, 0, sizeof(theirs));
- 	memcpy(new, ours, sizeof(*new));
-+	memset(dte, 0, sizeof(*dte));
- 
- 	len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask);
- 	if (len < 0)
-diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
-index ad7f5b3f9b61..1c4ad477ce93 100644
---- a/net/xfrm/xfrm_input.c
-+++ b/net/xfrm/xfrm_input.c
-@@ -292,12 +292,15 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
- 		XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
- 
- 		skb_dst_force(skb);
-+		dev_hold(skb->dev);
- 
- 		nexthdr = x->type->input(x, skb);
- 
- 		if (nexthdr == -EINPROGRESS)
- 			return 0;
- resume:
-+		dev_put(skb->dev);
-+
- 		spin_lock(&x->lock);
- 		if (nexthdr <= 0) {
- 			if (nexthdr == -EBADMSG) {
-diff --git a/samples/bpf/trace_output_kern.c b/samples/bpf/trace_output_kern.c
-index 8d8d1ec429eb..9b96f4fb8cea 100644
---- a/samples/bpf/trace_output_kern.c
-+++ b/samples/bpf/trace_output_kern.c
-@@ -18,7 +18,6 @@ int bpf_prog1(struct pt_regs *ctx)
- 		u64 cookie;
- 	} data;
- 
--	memset(&data, 0, sizeof(data));
- 	data.pid = bpf_get_current_pid_tgid();
- 	data.cookie = 0x12345678;
- 
-diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
-index f9e47a70509c..53449a6ff6aa 100644
---- a/scripts/Makefile.extrawarn
-+++ b/scripts/Makefile.extrawarn
-@@ -24,6 +24,7 @@ warning-1 += $(call cc-option, -Wmissing-prototypes)
- warning-1 += -Wold-style-definition
- warning-1 += $(call cc-option, -Wmissing-include-dirs)
- warning-1 += $(call cc-option, -Wunused-but-set-variable)
-+warning-1 += $(call cc-option, -Wunused-const-variable)
- warning-1 += $(call cc-disable-warning, missing-field-initializers)
- warning-1 += $(call cc-disable-warning, sign-compare)
- 
-diff --git a/scripts/coccinelle/iterators/use_after_iter.cocci b/scripts/coccinelle/iterators/use_after_iter.cocci
-index f085f5968c52..ce8cc9c006e5 100644
---- a/scripts/coccinelle/iterators/use_after_iter.cocci
-+++ b/scripts/coccinelle/iterators/use_after_iter.cocci
-@@ -123,7 +123,7 @@ list_remove_head(x,c,...)
- |
- sizeof(<+...c...+>)
- |
--&c->member
-+ &c->member
- |
- c = E
- |
-diff --git a/scripts/gdb/linux/modules.py b/scripts/gdb/linux/modules.py
-index 25db8cff44a2..0a35d6dbfb80 100644
---- a/scripts/gdb/linux/modules.py
-+++ b/scripts/gdb/linux/modules.py
-@@ -73,10 +73,11 @@ class LxLsmod(gdb.Command):
-                 "        " if utils.get_long_type().sizeof == 8 else ""))
- 
-         for module in module_list():
-+            layout = module['core_layout']
-             gdb.write("{address} {name:<19} {size:>8}  {ref}".format(
--                address=str(module['module_core']).split()[0],
-+                address=str(layout['base']).split()[0],
-                 name=module['name'].string(),
--                size=str(module['core_size']),
-+                size=str(layout['size']),
-                 ref=str(module['refcnt']['counter'])))
- 
-             source_list = module['source_list']
-diff --git a/scripts/gdb/linux/symbols.py b/scripts/gdb/linux/symbols.py
-index 627750cb420d..9a0f8923f67c 100644
---- a/scripts/gdb/linux/symbols.py
-+++ b/scripts/gdb/linux/symbols.py
-@@ -108,7 +108,7 @@ lx-symbols command."""
- 
-     def load_module_symbols(self, module):
-         module_name = module['name'].string()
--        module_addr = str(module['module_core']).split()[0]
-+        module_addr = str(module['core_layout']['base']).split()[0]
- 
-         module_file = self._get_module_file(module_name)
-         if not module_file and not self.module_files_updated:
-diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
-index d79cba4ce3eb..ebced77deb9c 100644
---- a/scripts/kconfig/Makefile
-+++ b/scripts/kconfig/Makefile
-@@ -96,13 +96,15 @@ savedefconfig: $(obj)/conf
- defconfig: $(obj)/conf
- ifeq ($(KBUILD_DEFCONFIG),)
- 	$< $(silent) --defconfig $(Kconfig)
--else ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG)),)
-+else
-+ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG)),)
- 	@$(kecho) "*** Default configuration is based on '$(KBUILD_DEFCONFIG)'"
- 	$(Q)$< $(silent) --defconfig=arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG) $(Kconfig)
- else
- 	@$(kecho) "*** Default configuration is based on target '$(KBUILD_DEFCONFIG)'"
- 	$(Q)$(MAKE) -f $(srctree)/Makefile $(KBUILD_DEFCONFIG)
- endif
-+endif
- 
- %_defconfig: $(obj)/conf
- 	$(Q)$< $(silent) --defconfig=arch/$(SRCARCH)/configs/$@ $(Kconfig)
-diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
-index 0b7dc2fd7bac..dd243d2abd87 100644
---- a/scripts/kconfig/confdata.c
-+++ b/scripts/kconfig/confdata.c
-@@ -267,10 +267,8 @@ int conf_read_simple(const char *name, int def)
- 		if (in)
- 			goto load;
- 		sym_add_change_count(1);
--		if (!sym_defconfig_list) {
--			sym_calc_value(modules_sym);
-+		if (!sym_defconfig_list)
- 			return 1;
--		}
+ 	if (!vpid)
+@@ -3429,16 +3429,7 @@ find_lively_task_by_vpid(pid_t vpid)
+ 	if (!task)
+ 		return ERR_PTR(-ESRCH);
  
- 		for_all_defaults(sym_defconfig_list, prop) {
- 			if (expr_calc_value(prop->visible.expr) == no ||
-@@ -403,7 +401,6 @@ setsym:
- 	}
- 	free(line);
- 	fclose(in);
--	sym_calc_value(modules_sym);
- 	return 0;
+-	/* Reuse ptrace permission checks for now. */
+-	err = -EACCES;
+-	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
+-		goto errout;
+-
+ 	return task;
+-errout:
+-	put_task_struct(task);
+-	return ERR_PTR(err);
+-
  }
  
-@@ -414,8 +411,12 @@ int conf_read(const char *name)
+ /*
+@@ -8360,6 +8351,24 @@ SYSCALL_DEFINE5(perf_event_open,
  
- 	sym_set_change_count(0);
+ 	get_online_cpus();
  
--	if (conf_read_simple(name, S_DEF_USER))
-+	if (conf_read_simple(name, S_DEF_USER)) {
-+		sym_calc_value(modules_sym);
- 		return 1;
++	if (task) {
++		err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
++		if (err)
++			goto err_cpus;
++
++		/*
++		 * Reuse ptrace permission checks for now.
++		 *
++		 * We must hold cred_guard_mutex across this and any potential
++		 * perf_install_in_context() call for this new event to
++		 * serialize against exec() altering our credentials (and the
++		 * perf_event_exit_task() that could imply).
++		 */
++		err = -EACCES;
++		if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
++			goto err_cred;
 +	}
 +
-+	sym_calc_value(modules_sym);
- 
- 	for_all_symbols(i, sym) {
- 		sym_calc_value(sym);
-@@ -846,6 +847,7 @@ static int conf_split_config(void)
- 
- 	name = conf_get_autoconfig_name();
- 	conf_read_simple(name, S_DEF_AUTO);
-+	sym_calc_value(modules_sym);
- 
- 	if (chdir("include/config"))
- 		return 1;
-diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
-index 161dd0d67da8..a9155077feef 100644
---- a/scripts/mod/file2alias.c
-+++ b/scripts/mod/file2alias.c
-@@ -371,6 +371,49 @@ static void do_usb_table(void *symval, unsigned long size,
- 		do_usb_entry_multi(symval + i, mod);
- }
+ 	if (flags & PERF_FLAG_PID_CGROUP)
+ 		cgroup_fd = pid;
  
-+static void do_of_entry_multi(void *symval, struct module *mod)
-+{
-+	char alias[500];
-+	int len;
-+	char *tmp;
-+
-+	DEF_FIELD_ADDR(symval, of_device_id, name);
-+	DEF_FIELD_ADDR(symval, of_device_id, type);
-+	DEF_FIELD_ADDR(symval, of_device_id, compatible);
-+
-+	len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
-+		      (*type)[0] ? *type : "*");
-+
-+	if (compatible[0])
-+		sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
-+			*compatible);
-+
-+	/* Replace all whitespace with underscores */
-+	for (tmp = alias; tmp && *tmp; tmp++)
-+		if (isspace(*tmp))
-+			*tmp = '_';
-+
-+	buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"%s\");\n", alias);
-+	strcat(alias, "C");
-+	add_wildcard(alias);
-+	buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"%s\");\n", alias);
-+}
-+
-+static void do_of_table(void *symval, unsigned long size,
-+			struct module *mod)
-+{
-+	unsigned int i;
-+	const unsigned long id_size = SIZE_of_device_id;
-+
-+	device_id_check(mod->name, "of", size, id_size, symval);
-+
-+	/* Leave last one: it's the terminator. */
-+	size -= id_size;
-+
-+	for (i = 0; i < size; i += id_size)
-+		do_of_entry_multi(symval + i, mod);
-+}
-+
- /* Looks like: hid:bNvNpN */
- static int do_hid_entry(const char *filename,
- 			     void *symval, char *alias)
-@@ -684,30 +727,6 @@ static int do_pcmcia_entry(const char *filename,
- }
- ADD_TO_DEVTABLE("pcmcia", pcmcia_device_id, do_pcmcia_entry);
+@@ -8367,7 +8376,7 @@ SYSCALL_DEFINE5(perf_event_open,
+ 				 NULL, NULL, cgroup_fd);
+ 	if (IS_ERR(event)) {
+ 		err = PTR_ERR(event);
+-		goto err_cpus;
++		goto err_cred;
+ 	}
  
--static int do_of_entry (const char *filename, void *symval, char *alias)
--{
--	int len;
--	char *tmp;
--	DEF_FIELD_ADDR(symval, of_device_id, name);
--	DEF_FIELD_ADDR(symval, of_device_id, type);
--	DEF_FIELD_ADDR(symval, of_device_id, compatible);
--
--	len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
--		      (*type)[0] ? *type : "*");
--
--	if (compatible[0])
--		sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
--			*compatible);
--
--	/* Replace all whitespace with underscores */
--	for (tmp = alias; tmp && *tmp; tmp++)
--		if (isspace (*tmp))
--			*tmp = '_';
--
--	return 1;
--}
--ADD_TO_DEVTABLE("of", of_device_id, do_of_entry);
--
- static int do_vio_entry(const char *filename, void *symval,
- 		char *alias)
- {
-@@ -1348,6 +1367,8 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
- 	/* First handle the "special" cases */
- 	if (sym_is(name, namelen, "usb"))
- 		do_usb_table(symval, sym->st_size, mod);
-+	if (sym_is(name, namelen, "of"))
-+		do_of_table(symval, sym->st_size, mod);
- 	else if (sym_is(name, namelen, "pnp"))
- 		do_pnp_device_entry(symval, sym->st_size, mod);
- 	else if (sym_is(name, namelen, "pnp_card"))
-diff --git a/scripts/package/mkspec b/scripts/package/mkspec
-index 71004daefe31..fe44d68e9344 100755
---- a/scripts/package/mkspec
-+++ b/scripts/package/mkspec
-@@ -131,11 +131,11 @@ echo 'rm -rf $RPM_BUILD_ROOT'
- echo ""
- echo "%post"
- echo "if [ -x /sbin/installkernel -a -r /boot/vmlinuz-$KERNELRELEASE -a -r /boot/System.map-$KERNELRELEASE ]; then"
--echo "cp /boot/vmlinuz-$KERNELRELEASE /boot/vmlinuz-$KERNELRELEASE-rpm"
--echo "cp /boot/System.map-$KERNELRELEASE /boot/System.map-$KERNELRELEASE-rpm"
-+echo "cp /boot/vmlinuz-$KERNELRELEASE /boot/.vmlinuz-$KERNELRELEASE-rpm"
-+echo "cp /boot/System.map-$KERNELRELEASE /boot/.System.map-$KERNELRELEASE-rpm"
- echo "rm -f /boot/vmlinuz-$KERNELRELEASE /boot/System.map-$KERNELRELEASE"
--echo "/sbin/installkernel $KERNELRELEASE /boot/vmlinuz-$KERNELRELEASE-rpm /boot/System.map-$KERNELRELEASE-rpm"
--echo "rm -f /boot/vmlinuz-$KERNELRELEASE-rpm /boot/System.map-$KERNELRELEASE-rpm"
-+echo "/sbin/installkernel $KERNELRELEASE /boot/.vmlinuz-$KERNELRELEASE-rpm /boot/.System.map-$KERNELRELEASE-rpm"
-+echo "rm -f /boot/.vmlinuz-$KERNELRELEASE-rpm /boot/.System.map-$KERNELRELEASE-rpm"
- echo "fi"
- echo ""
- echo "%files"
-diff --git a/security/keys/trusted.c b/security/keys/trusted.c
-index 0dcab20cdacd..90d61751ff12 100644
---- a/security/keys/trusted.c
-+++ b/security/keys/trusted.c
-@@ -744,6 +744,7 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
- 	unsigned long handle;
- 	unsigned long lock;
- 	unsigned long token_mask = 0;
-+	unsigned int digest_len;
- 	int i;
- 	int tpm2;
- 
-@@ -752,7 +753,6 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
- 		return tpm2;
- 
- 	opt->hash = tpm2 ? HASH_ALGO_SHA256 : HASH_ALGO_SHA1;
--	opt->digest_len = hash_digest_size[opt->hash];
- 
- 	while ((p = strsep(&c, " \t"))) {
- 		if (*p == '\0' || *p == ' ' || *p == '\t')
-@@ -812,8 +812,6 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
- 			for (i = 0; i < HASH_ALGO__LAST; i++) {
- 				if (!strcmp(args[0].from, hash_algo_name[i])) {
- 					opt->hash = i;
--					opt->digest_len =
--						hash_digest_size[opt->hash];
- 					break;
- 				}
- 			}
-@@ -825,13 +823,14 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
- 			}
- 			break;
- 		case Opt_policydigest:
--			if (!tpm2 ||
--			    strlen(args[0].from) != (2 * opt->digest_len))
-+			digest_len = hash_digest_size[opt->hash];
-+			if (!tpm2 || strlen(args[0].from) != (2 * digest_len))
- 				return -EINVAL;
- 			res = hex2bin(opt->policydigest, args[0].from,
--				      opt->digest_len);
-+				      digest_len);
- 			if (res < 0)
- 				return -EINVAL;
-+			opt->policydigest_len = digest_len;
- 			break;
- 		case Opt_policyhandle:
- 			if (!tpm2)
-diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
-index 6b5a811e01a5..3a9b66c6e09c 100644
---- a/sound/core/pcm_lib.c
-+++ b/sound/core/pcm_lib.c
-@@ -322,7 +322,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
- 			char name[16];
- 			snd_pcm_debug_name(substream, name, sizeof(name));
- 			pcm_err(substream->pcm,
--				"BUG: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
-+				"invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
- 				name, pos, runtime->buffer_size,
- 				runtime->period_size);
- 		}
-diff --git a/sound/core/timer.c b/sound/core/timer.c
-index dca817fc7894..e5e7e4368996 100644
---- a/sound/core/timer.c
-+++ b/sound/core/timer.c
-@@ -1041,8 +1041,8 @@ static int snd_timer_s_start(struct snd_timer * timer)
- 		njiff += timer->sticks - priv->correction;
- 		priv->correction = 0;
+ 	if (is_sampling_event(event)) {
+@@ -8426,11 +8435,6 @@ SYSCALL_DEFINE5(perf_event_open,
+ 		goto err_context;
  	}
--	priv->last_expires = priv->tlist.expires = njiff;
--	add_timer(&priv->tlist);
-+	priv->last_expires = njiff;
-+	mod_timer(&priv->tlist, njiff);
- 	return 0;
- }
  
-diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c
-index e361024eabb6..d1a4d6973330 100644
---- a/sound/hda/hdac_device.c
-+++ b/sound/hda/hdac_device.c
-@@ -611,6 +611,22 @@ int snd_hdac_power_up_pm(struct hdac_device *codec)
- }
- EXPORT_SYMBOL_GPL(snd_hdac_power_up_pm);
+-	if (task) {
+-		put_task_struct(task);
+-		task = NULL;
+-	}
+-
+ 	/*
+ 	 * Look up the group leader (we will attach this event to it):
+ 	 */
+@@ -8528,6 +8532,11 @@ SYSCALL_DEFINE5(perf_event_open,
  
-+/* like snd_hdac_power_up_pm(), but only increment the pm count when
-+ * already powered up.  Returns -1 if not powered up, 1 if incremented
-+ * or 0 if unchanged.  Only used in hdac_regmap.c
-+ */
-+int snd_hdac_keep_power_up(struct hdac_device *codec)
-+{
-+	if (!atomic_inc_not_zero(&codec->in_pm)) {
-+		int ret = pm_runtime_get_if_in_use(&codec->dev);
-+		if (!ret)
-+			return -1;
-+		if (ret < 0)
-+			return 0;
-+	}
-+	return 1;
-+}
+ 	WARN_ON_ONCE(ctx->parent_ctx);
+ 
++	/*
++	 * This is the point on no return; we cannot fail hereafter. This is
++	 * where we start modifying current state.
++	 */
 +
- /**
-  * snd_hdac_power_down_pm - power down the codec
-  * @codec: the codec object
-diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
-index f6854dbd7d8d..69ead7150a5c 100644
---- a/sound/hda/hdac_i915.c
-+++ b/sound/hda/hdac_i915.c
-@@ -20,6 +20,7 @@
- #include <sound/core.h>
- #include <sound/hdaudio.h>
- #include <sound/hda_i915.h>
-+#include <sound/hda_register.h>
- 
- static struct i915_audio_component *hdac_acomp;
- 
-@@ -97,26 +98,65 @@ int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
- }
- EXPORT_SYMBOL_GPL(snd_hdac_display_power);
+ 	if (move_group) {
+ 		/*
+ 		 * See perf_event_ctx_lock() for comments on the details
+@@ -8599,6 +8608,11 @@ SYSCALL_DEFINE5(perf_event_open,
+ 		mutex_unlock(&gctx->mutex);
+ 	mutex_unlock(&ctx->mutex);
  
-+#define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \
-+				((pci)->device == 0x0c0c) || \
-+				((pci)->device == 0x0d0c) || \
-+				((pci)->device == 0x160c))
++	if (task) {
++		mutex_unlock(&task->signal->cred_guard_mutex);
++		put_task_struct(task);
++	}
 +
- /**
-- * snd_hdac_get_display_clk - Get CDCLK in kHz
-+ * snd_hdac_i915_set_bclk - Reprogram BCLK for HSW/BDW
-  * @bus: HDA core bus
-  *
-- * This function is supposed to be used only by a HD-audio controller
-- * driver that needs the interaction with i915 graphics.
-+ * Intel HSW/BDW display HDA controller is in GPU. Both its power and link BCLK
-+ * depends on GPU. Two Extended Mode registers EM4 (M value) and EM5 (N Value)
-+ * are used to convert CDClk (Core Display Clock) to 24MHz BCLK:
-+ * BCLK = CDCLK * M / N
-+ * The values will be lost when the display power well is disabled and need to
-+ * be restored to avoid abnormal playback speed.
-  *
-- * This function queries CDCLK value in kHz from the graphics driver and
-- * returns the value.  A negative code is returned in error.
-+ * Call this function at initializing and changing power well, as well as
-+ * at ELD notifier for the hotplug.
+ 	put_online_cpus();
+ 
+ 	mutex_lock(&current->perf_event_mutex);
+@@ -8631,6 +8645,9 @@ err_alloc:
+ 	 */
+ 	if (!event_file)
+ 		free_event(event);
++err_cred:
++	if (task)
++		mutex_unlock(&task->signal->cred_guard_mutex);
+ err_cpus:
+ 	put_online_cpus();
+ err_task:
+@@ -8915,6 +8932,9 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
+ 
+ /*
+  * When a child task exits, feed back event values to parent events.
++ *
++ * Can be called with cred_guard_mutex held when called from
++ * install_exec_creds().
   */
--int snd_hdac_get_display_clk(struct hdac_bus *bus)
-+void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
+ void perf_event_exit_task(struct task_struct *child)
  {
- 	struct i915_audio_component *acomp = bus->audio_component;
-+	struct pci_dev *pci = to_pci_dev(bus->dev);
-+	int cdclk_freq;
-+	unsigned int bclk_m, bclk_n;
-+
-+	if (!acomp || !acomp->ops || !acomp->ops->get_cdclk_freq)
-+		return; /* only for i915 binding */
-+	if (!CONTROLLER_IN_GPU(pci))
-+		return; /* only HSW/BDW */
-+
-+	cdclk_freq = acomp->ops->get_cdclk_freq(acomp->dev);
-+	switch (cdclk_freq) {
-+	case 337500:
-+		bclk_m = 16;
-+		bclk_n = 225;
-+		break;
-+
-+	case 450000:
-+	default: /* default CDCLK 450MHz */
-+		bclk_m = 4;
-+		bclk_n = 75;
-+		break;
+diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
+index ef7159012cf3..b0b93fd33af9 100644
+--- a/kernel/sched/loadavg.c
++++ b/kernel/sched/loadavg.c
+@@ -99,10 +99,13 @@ long calc_load_fold_active(struct rq *this_rq)
+ static unsigned long
+ calc_load(unsigned long load, unsigned long exp, unsigned long active)
+ {
+-	load *= exp;
+-	load += active * (FIXED_1 - exp);
+-	load += 1UL << (FSHIFT - 1);
+-	return load >> FSHIFT;
++	unsigned long newload;
 +
-+	case 540000:
-+		bclk_m = 4;
-+		bclk_n = 90;
-+		break;
++	newload = load * exp + active * (FIXED_1 - exp);
++	if (active >= load)
++		newload += FIXED_1-1;
 +
-+	case 675000:
-+		bclk_m = 8;
-+		bclk_n = 225;
-+		break;
-+	}
- 
--	if (!acomp || !acomp->ops)
--		return -ENODEV;
--
--	return acomp->ops->get_cdclk_freq(acomp->dev);
-+	snd_hdac_chip_writew(bus, HSW_EM4, bclk_m);
-+	snd_hdac_chip_writew(bus, HSW_EM5, bclk_n);
++	return newload / FIXED_1;
  }
--EXPORT_SYMBOL_GPL(snd_hdac_get_display_clk);
-+EXPORT_SYMBOL_GPL(snd_hdac_i915_set_bclk);
- 
- /* There is a fixed mapping between audio pin node and display port
-  * on current Intel platforms:
-diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
-index eb8f7c30cb09..bdbcd6b75ff6 100644
---- a/sound/hda/hdac_regmap.c
-+++ b/sound/hda/hdac_regmap.c
-@@ -21,13 +21,16 @@
- #include <sound/hdaudio.h>
- #include <sound/hda_regmap.h>
- 
--#ifdef CONFIG_PM
--#define codec_is_running(codec)				\
--	(atomic_read(&(codec)->in_pm) ||		\
--	 !pm_runtime_suspended(&(codec)->dev))
--#else
--#define codec_is_running(codec)		true
--#endif
-+static int codec_pm_lock(struct hdac_device *codec)
-+{
-+	return snd_hdac_keep_power_up(codec);
-+}
-+
-+static void codec_pm_unlock(struct hdac_device *codec, int lock)
-+{
-+	if (lock == 1)
-+		snd_hdac_power_down_pm(codec);
-+}
- 
- #define get_verb(reg)	(((reg) >> 8) & 0xfff)
  
-@@ -238,20 +241,28 @@ static int hda_reg_read(void *context, unsigned int reg, unsigned int *val)
- 	struct hdac_device *codec = context;
- 	int verb = get_verb(reg);
- 	int err;
-+	int pm_lock = 0;
- 
--	if (!codec_is_running(codec) && verb != AC_VERB_GET_POWER_STATE)
--		return -EAGAIN;
-+	if (verb != AC_VERB_GET_POWER_STATE) {
-+		pm_lock = codec_pm_lock(codec);
-+		if (pm_lock < 0)
-+			return -EAGAIN;
-+	}
- 	reg |= (codec->addr << 28);
--	if (is_stereo_amp_verb(reg))
--		return hda_reg_read_stereo_amp(codec, reg, val);
--	if (verb == AC_VERB_GET_PROC_COEF)
--		return hda_reg_read_coef(codec, reg, val);
-+	if (is_stereo_amp_verb(reg)) {
-+		err = hda_reg_read_stereo_amp(codec, reg, val);
-+		goto out;
-+	}
-+	if (verb == AC_VERB_GET_PROC_COEF) {
-+		err = hda_reg_read_coef(codec, reg, val);
-+		goto out;
-+	}
- 	if ((verb & 0x700) == AC_VERB_SET_AMP_GAIN_MUTE)
- 		reg &= ~AC_AMP_FAKE_MUTE;
- 
- 	err = snd_hdac_exec_verb(codec, reg, 0, val);
- 	if (err < 0)
--		return err;
-+		goto out;
- 	/* special handling for asymmetric reads */
- 	if (verb == AC_VERB_GET_POWER_STATE) {
- 		if (*val & AC_PWRST_ERROR)
-@@ -259,7 +270,9 @@ static int hda_reg_read(void *context, unsigned int reg, unsigned int *val)
- 		else /* take only the actual state */
- 			*val = (*val >> 4) & 0x0f;
- 	}
--	return 0;
-+ out:
-+	codec_pm_unlock(codec, pm_lock);
-+	return err;
+ #ifdef CONFIG_NO_HZ_COMMON
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 95181e36891a..9c143739b8d7 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -437,7 +437,7 @@ struct ring_buffer_per_cpu {
+ 	raw_spinlock_t			reader_lock;	/* serialize readers */
+ 	arch_spinlock_t			lock;
+ 	struct lock_class_key		lock_key;
+-	unsigned int			nr_pages;
++	unsigned long			nr_pages;
+ 	unsigned int			current_context;
+ 	struct list_head		*pages;
+ 	struct buffer_page		*head_page;	/* read from head */
+@@ -458,7 +458,7 @@ struct ring_buffer_per_cpu {
+ 	u64				write_stamp;
+ 	u64				read_stamp;
+ 	/* ring buffer pages to update, > 0 to add, < 0 to remove */
+-	int				nr_pages_to_update;
++	long				nr_pages_to_update;
+ 	struct list_head		new_pages; /* new pages to add */
+ 	struct work_struct		update_pages_work;
+ 	struct completion		update_done;
+@@ -1128,10 +1128,10 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
+ 	return 0;
  }
  
- static int hda_reg_write(void *context, unsigned int reg, unsigned int val)
-@@ -267,6 +280,7 @@ static int hda_reg_write(void *context, unsigned int reg, unsigned int val)
- 	struct hdac_device *codec = context;
- 	unsigned int verb;
- 	int i, bytes, err;
-+	int pm_lock = 0;
- 
- 	if (codec->caps_overwriting)
- 		return 0;
-@@ -275,14 +289,21 @@ static int hda_reg_write(void *context, unsigned int reg, unsigned int val)
- 	reg |= (codec->addr << 28);
- 	verb = get_verb(reg);
- 
--	if (!codec_is_running(codec) && verb != AC_VERB_SET_POWER_STATE)
--		return codec->lazy_cache ? 0 : -EAGAIN;
-+	if (verb != AC_VERB_SET_POWER_STATE) {
-+		pm_lock = codec_pm_lock(codec);
-+		if (pm_lock < 0)
-+			return codec->lazy_cache ? 0 : -EAGAIN;
-+	}
+-static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
++static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
+ {
+-	int i;
+ 	struct buffer_page *bpage, *tmp;
++	long i;
  
--	if (is_stereo_amp_verb(reg))
--		return hda_reg_write_stereo_amp(codec, reg, val);
-+	if (is_stereo_amp_verb(reg)) {
-+		err = hda_reg_write_stereo_amp(codec, reg, val);
-+		goto out;
-+	}
+ 	for (i = 0; i < nr_pages; i++) {
+ 		struct page *page;
+@@ -1168,7 +1168,7 @@ free_pages:
+ }
  
--	if (verb == AC_VERB_SET_PROC_COEF)
--		return hda_reg_write_coef(codec, reg, val);
-+	if (verb == AC_VERB_SET_PROC_COEF) {
-+		err = hda_reg_write_coef(codec, reg, val);
-+		goto out;
-+	}
+ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
+-			     unsigned nr_pages)
++			     unsigned long nr_pages)
+ {
+ 	LIST_HEAD(pages);
  
- 	switch (verb & 0xf00) {
- 	case AC_VERB_SET_AMP_GAIN_MUTE:
-@@ -319,10 +340,12 @@ static int hda_reg_write(void *context, unsigned int reg, unsigned int val)
- 		reg |= (verb + i) << 8 | ((val >> (8 * i)) & 0xff);
- 		err = snd_hdac_exec_verb(codec, reg, 0, NULL);
- 		if (err < 0)
--			return err;
-+			goto out;
- 	}
+@@ -1193,7 +1193,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
+ }
  
--	return 0;
-+ out:
-+	codec_pm_unlock(codec, pm_lock);
-+	return err;
+ static struct ring_buffer_per_cpu *
+-rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
++rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
+ {
+ 	struct ring_buffer_per_cpu *cpu_buffer;
+ 	struct buffer_page *bpage;
+@@ -1293,8 +1293,9 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
+ 					struct lock_class_key *key)
+ {
+ 	struct ring_buffer *buffer;
++	long nr_pages;
+ 	int bsize;
+-	int cpu, nr_pages;
++	int cpu;
+ 
+ 	/* keep it in its own cache line */
+ 	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
+@@ -1420,12 +1421,12 @@ static inline unsigned long rb_page_write(struct buffer_page *bpage)
  }
  
- static const struct regmap_config hda_regmap_cfg = {
-diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
-index 7ca5b89f088a..dfaf1a93fb8a 100644
---- a/sound/pci/hda/hda_generic.c
-+++ b/sound/pci/hda/hda_generic.c
-@@ -826,7 +826,7 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
- 				   bool allow_powerdown)
- {
- 	hda_nid_t nid, changed = 0;
--	int i, state;
-+	int i, state, power;
- 
- 	for (i = 0; i < path->depth; i++) {
- 		nid = path->path[i];
-@@ -838,7 +838,9 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
- 			state = AC_PWRST_D0;
- 		else
- 			state = AC_PWRST_D3;
--		if (!snd_hda_check_power_state(codec, nid, state)) {
-+		power = snd_hda_codec_read(codec, nid, 0,
-+					   AC_VERB_GET_POWER_STATE, 0);
-+		if (power != (state | (state << 4))) {
- 			snd_hda_codec_write(codec, nid, 0,
- 					    AC_VERB_SET_POWER_STATE, state);
- 			changed = nid;
-diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
-index e5240cb3749f..c0b772bb49af 100644
---- a/sound/pci/hda/hda_intel.c
-+++ b/sound/pci/hda/hda_intel.c
-@@ -857,50 +857,6 @@ static int param_set_xint(const char *val, const struct kernel_param *kp)
- #define azx_del_card_list(chip) /* NOP */
- #endif /* CONFIG_PM */
- 
--/* Intel HSW/BDW display HDA controller is in GPU. Both its power and link BCLK
-- * depends on GPU. Two Extended Mode registers EM4 (M value) and EM5 (N Value)
-- * are used to convert CDClk (Core Display Clock) to 24MHz BCLK:
-- * BCLK = CDCLK * M / N
-- * The values will be lost when the display power well is disabled and need to
-- * be restored to avoid abnormal playback speed.
-- */
--static void haswell_set_bclk(struct hda_intel *hda)
--{
--	struct azx *chip = &hda->chip;
--	int cdclk_freq;
--	unsigned int bclk_m, bclk_n;
--
--	if (!hda->need_i915_power)
--		return;
--
--	cdclk_freq = snd_hdac_get_display_clk(azx_bus(chip));
--	switch (cdclk_freq) {
--	case 337500:
--		bclk_m = 16;
--		bclk_n = 225;
--		break;
--
--	case 450000:
--	default: /* default CDCLK 450MHz */
--		bclk_m = 4;
--		bclk_n = 75;
--		break;
--
--	case 540000:
--		bclk_m = 4;
--		bclk_n = 90;
--		break;
--
--	case 675000:
--		bclk_m = 8;
--		bclk_n = 225;
--		break;
--	}
--
--	azx_writew(chip, HSW_EM4, bclk_m);
--	azx_writew(chip, HSW_EM5, bclk_n);
--}
--
- #if defined(CONFIG_PM_SLEEP) || defined(SUPPORT_VGA_SWITCHEROO)
- /*
-  * power management
-@@ -958,7 +914,7 @@ static int azx_resume(struct device *dev)
- 	if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
- 		&& hda->need_i915_power) {
- 		snd_hdac_display_power(azx_bus(chip), true);
--		haswell_set_bclk(hda);
-+		snd_hdac_i915_set_bclk(azx_bus(chip));
- 	}
- 	if (chip->msi)
- 		if (pci_enable_msi(pci) < 0)
-@@ -1058,7 +1014,7 @@ static int azx_runtime_resume(struct device *dev)
- 		bus = azx_bus(chip);
- 		if (hda->need_i915_power) {
- 			snd_hdac_display_power(bus, true);
--			haswell_set_bclk(hda);
-+			snd_hdac_i915_set_bclk(bus);
- 		} else {
- 			/* toggle codec wakeup bit for STATESTS read */
- 			snd_hdac_set_codec_wakeup(bus, true);
-@@ -1796,12 +1752,8 @@ static int azx_first_init(struct azx *chip)
- 	/* initialize chip */
- 	azx_init_pci(chip);
- 
--	if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
--		struct hda_intel *hda;
--
--		hda = container_of(chip, struct hda_intel, chip);
--		haswell_set_bclk(hda);
--	}
-+	if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
-+		snd_hdac_i915_set_bclk(bus);
- 
- 	hda_intel_init_chip(chip, (probe_only[dev] & 2) == 0);
- 
-@@ -2232,6 +2184,9 @@ static const struct pci_device_id azx_ids[] = {
- 	/* Broxton-P(Apollolake) */
- 	{ PCI_DEVICE(0x8086, 0x5a98),
- 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
-+	/* Broxton-T */
-+	{ PCI_DEVICE(0x8086, 0x1a98),
-+	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
- 	/* Haswell */
- 	{ PCI_DEVICE(0x8086, 0x0a0c),
- 	  .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
-diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c
-index 64e0d1d81ca5..9739fce9e032 100644
---- a/sound/pci/hda/hda_sysfs.c
-+++ b/sound/pci/hda/hda_sysfs.c
-@@ -141,14 +141,6 @@ static int reconfig_codec(struct hda_codec *codec)
- 	err = snd_hda_codec_configure(codec);
- 	if (err < 0)
- 		goto error;
--	/* rebuild PCMs */
--	err = snd_hda_codec_build_pcms(codec);
--	if (err < 0)
--		goto error;
--	/* rebuild mixers */
--	err = snd_hda_codec_build_controls(codec);
--	if (err < 0)
--		goto error;
- 	err = snd_card_register(codec->card);
-  error:
- 	snd_hda_power_down(codec);
-diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
-index c1c855a6c0af..80bbadc83721 100644
---- a/sound/pci/hda/patch_cirrus.c
-+++ b/sound/pci/hda/patch_cirrus.c
-@@ -174,8 +174,12 @@ static void cs_automute(struct hda_codec *codec)
- 	snd_hda_gen_update_outputs(codec);
- 
- 	if (spec->gpio_eapd_hp || spec->gpio_eapd_speaker) {
--		spec->gpio_data = spec->gen.hp_jack_present ?
--			spec->gpio_eapd_hp : spec->gpio_eapd_speaker;
-+		if (spec->gen.automute_speaker)
-+			spec->gpio_data = spec->gen.hp_jack_present ?
-+				spec->gpio_eapd_hp : spec->gpio_eapd_speaker;
-+		else
-+			spec->gpio_data =
-+				spec->gpio_eapd_hp | spec->gpio_eapd_speaker;
- 		snd_hda_codec_write(codec, 0x01, 0,
- 				    AC_VERB_SET_GPIO_DATA, spec->gpio_data);
- 	}
-@@ -357,6 +361,7 @@ static int cs_parse_auto_config(struct hda_codec *codec)
+ static int
+-rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
++rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
  {
- 	struct cs_spec *spec = codec->spec;
- 	int err;
-+	int i;
- 
- 	err = snd_hda_parse_pin_defcfg(codec, &spec->gen.autocfg, NULL, 0);
- 	if (err < 0)
-@@ -366,6 +371,19 @@ static int cs_parse_auto_config(struct hda_codec *codec)
- 	if (err < 0)
- 		return err;
- 
-+	/* keep the ADCs powered up when it's dynamically switchable */
-+	if (spec->gen.dyn_adc_switch) {
-+		unsigned int done = 0;
-+		for (i = 0; i < spec->gen.input_mux.num_items; i++) {
-+			int idx = spec->gen.dyn_adc_idx[i];
-+			if (done & (1 << idx))
-+				continue;
-+			snd_hda_gen_fix_pin_power(codec,
-+						  spec->gen.adc_nids[idx]);
-+			done |= 1 << idx;
-+		}
-+	}
-+
- 	return 0;
- }
+ 	struct list_head *tail_page, *to_remove, *next_page;
+ 	struct buffer_page *to_remove_page, *tmp_iter_page;
+ 	struct buffer_page *last_page, *first_page;
+-	unsigned int nr_removed;
++	unsigned long nr_removed;
+ 	unsigned long head_bit;
+ 	int page_entries;
  
-diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
-index 6122b8ca872f..56fefbd85782 100644
---- a/sound/pci/hda/patch_conexant.c
-+++ b/sound/pci/hda/patch_conexant.c
-@@ -204,8 +204,13 @@ static void cx_auto_reboot_notify(struct hda_codec *codec)
+@@ -1642,7 +1643,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
+ 			int cpu_id)
  {
- 	struct conexant_spec *spec = codec->spec;
+ 	struct ring_buffer_per_cpu *cpu_buffer;
+-	unsigned nr_pages;
++	unsigned long nr_pages;
+ 	int cpu, err = 0;
  
--	if (codec->core.vendor_id != 0x14f150f2)
-+	switch (codec->core.vendor_id) {
-+	case 0x14f150f2: /* CX20722 */
-+	case 0x14f150f4: /* CX20724 */
-+		break;
-+	default:
- 		return;
-+	}
+ 	/*
+@@ -1656,14 +1657,13 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
+ 	    !cpumask_test_cpu(cpu_id, buffer->cpumask))
+ 		return size;
  
- 	/* Turn the CX20722 codec into D3 to avoid spurious noises
- 	   from the internal speaker during (and after) reboot */
-diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
-index bcbc4ee10130..c98e404afbe0 100644
---- a/sound/pci/hda/patch_hdmi.c
-+++ b/sound/pci/hda/patch_hdmi.c
-@@ -152,13 +152,17 @@ struct hdmi_spec {
- 	struct hda_pcm_stream pcm_playback;
- 
- 	/* i915/powerwell (Haswell+/Valleyview+) specific */
-+	bool use_acomp_notifier; /* use i915 eld_notify callback for hotplug */
- 	struct i915_audio_component_audio_ops i915_audio_ops;
- 	bool i915_bound; /* was i915 bound in this driver? */
- };
+-	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
+-	size *= BUF_PAGE_SIZE;
++	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
  
- #ifdef CONFIG_SND_HDA_I915
--#define codec_has_acomp(codec) \
--	((codec)->bus->core.audio_component != NULL)
-+static inline bool codec_has_acomp(struct hda_codec *codec)
-+{
-+	struct hdmi_spec *spec = codec->spec;
-+	return spec->use_acomp_notifier;
-+}
- #else
- #define codec_has_acomp(codec)	false
- #endif
-@@ -1562,6 +1566,7 @@ static void update_eld(struct hda_codec *codec,
- 			   eld->eld_size) != 0)
- 			eld_changed = true;
- 
-+	pin_eld->monitor_present = eld->monitor_present;
- 	pin_eld->eld_valid = eld->eld_valid;
- 	pin_eld->eld_size = eld->eld_size;
- 	if (eld->eld_valid)
-@@ -1618,6 +1623,8 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
- 
- 	mutex_lock(&per_pin->lock);
- 	pin_eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE);
-+	eld->monitor_present = pin_eld->monitor_present;
-+
- 	if (pin_eld->monitor_present)
- 		eld->eld_valid  = !!(present & AC_PINSENSE_ELDV);
- 	else
-@@ -1665,11 +1672,10 @@ static void sync_eld_via_acomp(struct hda_codec *codec,
- 	int size;
- 
- 	mutex_lock(&per_pin->lock);
-+	eld->monitor_present = false;
- 	size = snd_hdac_acomp_get_eld(&codec->bus->core, per_pin->pin_nid,
- 				      &eld->monitor_present, eld->eld_buffer,
- 				      ELD_MAX_SIZE);
--	if (size < 0)
--		goto unlock;
- 	if (size > 0) {
- 		size = min(size, ELD_MAX_SIZE);
- 		if (snd_hdmi_parse_eld(codec, &eld->info,
-@@ -1873,7 +1879,8 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
- 
- 	/* Call sync_audio_rate to set the N/CTS/M manually if necessary */
- 	/* Todo: add DP1.2 MST audio support later */
--	snd_hdac_sync_audio_rate(&codec->bus->core, pin_nid, runtime->rate);
-+	if (codec_has_acomp(codec))
-+		snd_hdac_sync_audio_rate(&codec->bus->core, pin_nid, runtime->rate);
- 
- 	non_pcm = check_non_pcm_per_cvt(codec, cvt_nid);
- 	mutex_lock(&per_pin->lock);
-@@ -2432,6 +2439,10 @@ static void intel_pin_eld_notify(void *audio_ptr, int port)
- 	struct hda_codec *codec = audio_ptr;
- 	int pin_nid = port + 0x04;
- 
-+	/* we assume only from port-B to port-D */
-+	if (port < 1 || port > 3)
-+		return;
-+
- 	/* skip notification during system suspend (but not in runtime PM);
- 	 * the state will be updated at resume
- 	 */
-@@ -2441,6 +2452,7 @@ static void intel_pin_eld_notify(void *audio_ptr, int port)
- 	if (atomic_read(&(codec)->core.in_pm))
- 		return;
+ 	/* we need a minimum of two pages */
+-	if (size < BUF_PAGE_SIZE * 2)
+-		size = BUF_PAGE_SIZE * 2;
++	if (nr_pages < 2)
++		nr_pages = 2;
  
-+	snd_hdac_i915_set_bclk(&codec->bus->core);
- 	check_presence_and_report(codec, pin_nid);
- }
+-	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
++	size = nr_pages * BUF_PAGE_SIZE;
  
-@@ -2456,11 +2468,24 @@ static int patch_generic_hdmi(struct hda_codec *codec)
- 	codec->spec = spec;
- 	hdmi_array_init(spec, 4);
- 
--	/* Try to bind with i915 for any Intel codecs (if not done yet) */
--	if (!codec_has_acomp(codec) &&
--	    (codec->core.vendor_id >> 16) == 0x8086)
--		if (!snd_hdac_i915_init(&codec->bus->core))
--			spec->i915_bound = true;
-+#ifdef CONFIG_SND_HDA_I915
-+	/* Try to bind with i915 for Intel HSW+ codecs (if not done yet) */
-+	if ((codec->core.vendor_id >> 16) == 0x8086 &&
-+	    is_haswell_plus(codec)) {
-+#if 0
-+		/* on-demand binding leads to an unbalanced refcount when
-+		 * both i915 and hda drivers are probed concurrently;
-+		 * disabled temporarily for now
-+		 */
-+		if (!codec->bus->core.audio_component)
-+			if (!snd_hdac_i915_init(&codec->bus->core))
-+				spec->i915_bound = true;
-+#endif
-+		/* use i915 audio component notifier for hotplug */
-+		if (codec->bus->core.audio_component)
-+			spec->use_acomp_notifier = true;
-+	}
-+#endif
+ 	/*
+ 	 * Don't succeed if resizing is disabled, as a reader might be
+@@ -4640,8 +4640,9 @@ static int rb_cpu_notify(struct notifier_block *self,
+ 	struct ring_buffer *buffer =
+ 		container_of(self, struct ring_buffer, cpu_notify);
+ 	long cpu = (long)hcpu;
+-	int cpu_i, nr_pages_same;
+-	unsigned int nr_pages;
++	long nr_pages_same;
++	int cpu_i;
++	unsigned long nr_pages;
+ 
+ 	switch (action) {
+ 	case CPU_UP_PREPARE:
+diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
+index f9e47a70509c..53449a6ff6aa 100644
+--- a/scripts/Makefile.extrawarn
++++ b/scripts/Makefile.extrawarn
+@@ -24,6 +24,7 @@ warning-1 += $(call cc-option, -Wmissing-prototypes)
+ warning-1 += -Wold-style-definition
+ warning-1 += $(call cc-option, -Wmissing-include-dirs)
+ warning-1 += $(call cc-option, -Wunused-but-set-variable)
++warning-1 += $(call cc-option, -Wunused-const-variable)
+ warning-1 += $(call cc-disable-warning, missing-field-initializers)
+ warning-1 += $(call cc-disable-warning, sign-compare)
  
- 	if (is_haswell_plus(codec)) {
- 		intel_haswell_enable_all_pins(codec, true);
-@@ -3659,6 +3684,7 @@ HDA_CODEC_ENTRY(0x10de0070, "GPU 70 HDMI/DP",	patch_nvhdmi),
- HDA_CODEC_ENTRY(0x10de0071, "GPU 71 HDMI/DP",	patch_nvhdmi),
- HDA_CODEC_ENTRY(0x10de0072, "GPU 72 HDMI/DP",	patch_nvhdmi),
- HDA_CODEC_ENTRY(0x10de007d, "GPU 7d HDMI/DP",	patch_nvhdmi),
-+HDA_CODEC_ENTRY(0x10de0082, "GPU 82 HDMI/DP",	patch_nvhdmi),
- HDA_CODEC_ENTRY(0x10de0083, "GPU 83 HDMI/DP",	patch_nvhdmi),
- HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI",	patch_nvhdmi_2ch),
- HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP",	patch_via_hdmi),
 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
-index 93d2156b6241..d53c25e7a1c1 100644
+index 4918ffa5ba68..d53c25e7a1c1 100644
 --- a/sound/pci/hda/patch_realtek.c
 +++ b/sound/pci/hda/patch_realtek.c
 @@ -335,6 +335,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
@@ -31153,44 +3929,7 @@ index 93d2156b6241..d53c25e7a1c1 100644
  		alc_process_coef_fw(codec, coef0225);
  		msleep(800);
  		val = alc_read_coef_idx(codec, 0x46);
-@@ -4759,6 +4774,8 @@ enum {
- 	ALC255_FIXUP_DELL_SPK_NOISE,
- 	ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
- 	ALC280_FIXUP_HP_HEADSET_MIC,
-+	ALC221_FIXUP_HP_FRONT_MIC,
-+	ALC292_FIXUP_TPT460,
- };
- 
- static const struct hda_fixup alc269_fixups[] = {
-@@ -5401,6 +5418,19 @@ static const struct hda_fixup alc269_fixups[] = {
- 		.chained = true,
- 		.chain_id = ALC269_FIXUP_HEADSET_MIC,
- 	},
-+	[ALC221_FIXUP_HP_FRONT_MIC] = {
-+		.type = HDA_FIXUP_PINS,
-+		.v.pins = (const struct hda_pintbl[]) {
-+			{ 0x19, 0x02a19020 }, /* Front Mic */
-+			{ }
-+		},
-+	},
-+	[ALC292_FIXUP_TPT460] = {
-+		.type = HDA_FIXUP_FUNC,
-+		.v.func = alc_fixup_tpt440_dock,
-+		.chained = true,
-+		.chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE,
-+	},
- };
- 
- static const struct snd_pci_quirk alc269_fixup_tbl[] = {
-@@ -5434,6 +5464,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
- 	SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
- 	SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
- 	SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13),
-+	SND_PCI_QUIRK(0x1028, 0x0669, "Dell Optiplex 9020m", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
- 	SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK),
- 	SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
- 	SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
-@@ -5443,8 +5474,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -5459,8 +5474,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
  	SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
  	SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
  	SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
@@ -31201,42 +3940,7 @@ index 93d2156b6241..d53c25e7a1c1 100644
  	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
  	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
  	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
-@@ -5506,6 +5538,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
- 	SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- 	SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
- 	SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
-+	SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
- 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
- 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
- 	SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
-@@ -5554,8 +5587,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
- 	SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
- 	SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
- 	SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
--	SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
-+	SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
- 	SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
-+	SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
- 	SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
- 	SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
- 	SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
-@@ -5566,6 +5600,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
- 	SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
- 	SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
- 	SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
-+	SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
- 	SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
- 	SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
- 	SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
-@@ -5648,6 +5683,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
- 	{.id = ALC283_FIXUP_SENSE_COMBO_JACK, .name = "alc283-sense-combo"},
- 	{.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"},
- 	{.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
-+	{.id = ALC292_FIXUP_TPT460, .name = "tpt460"},
- 	{}
- };
- #define ALC225_STANDARD_PINS \
-@@ -5684,6 +5720,9 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+@@ -5704,6 +5720,9 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
  		{0x14, 0x90170110},
  		{0x21, 0x02211020}),
  	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
@@ -31246,7 +3950,7 @@ index 93d2156b6241..d53c25e7a1c1 100644
  		{0x12, 0x90a60140},
  		{0x14, 0x90170110},
  		{0x21, 0x02211020}),
-@@ -6006,8 +6045,14 @@ static int patch_alc269(struct hda_codec *codec)
+@@ -6026,8 +6045,14 @@ static int patch_alc269(struct hda_codec *codec)
  		alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
  		break;
  	case 0x10ec0225:
@@ -31261,54 +3965,7 @@ index 93d2156b6241..d53c25e7a1c1 100644
  	}
  
  	if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) {
-@@ -6405,6 +6450,8 @@ enum {
- 	ALC668_FIXUP_AUTO_MUTE,
- 	ALC668_FIXUP_DELL_DISABLE_AAMIX,
- 	ALC668_FIXUP_DELL_XPS13,
-+	ALC662_FIXUP_ASUS_Nx50,
-+	ALC668_FIXUP_ASUS_Nx51,
- };
- 
- static const struct hda_fixup alc662_fixups[] = {
-@@ -6645,6 +6692,21 @@ static const struct hda_fixup alc662_fixups[] = {
- 		.type = HDA_FIXUP_FUNC,
- 		.v.func = alc_fixup_bass_chmap,
- 	},
-+	[ALC662_FIXUP_ASUS_Nx50] = {
-+		.type = HDA_FIXUP_FUNC,
-+		.v.func = alc_fixup_auto_mute_via_amp,
-+		.chained = true,
-+		.chain_id = ALC662_FIXUP_BASS_1A
-+	},
-+	[ALC668_FIXUP_ASUS_Nx51] = {
-+		.type = HDA_FIXUP_PINS,
-+		.v.pins = (const struct hda_pintbl[]) {
-+			{0x1a, 0x90170151}, /* bass speaker */
-+			{}
-+		},
-+		.chained = true,
-+		.chain_id = ALC662_FIXUP_BASS_CHMAP,
-+	},
- };
- 
- static const struct snd_pci_quirk alc662_fixup_tbl[] = {
-@@ -6667,10 +6729,14 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
- 	SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
- 	SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
- 	SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
--	SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A),
-+	SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
-+	SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
- 	SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
-+	SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50),
- 	SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
- 	SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
-+	SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
-+	SND_PCI_QUIRK(0x1043, 0x17bd, "ASUS N751", ALC668_FIXUP_ASUS_Nx51),
- 	SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16),
- 	SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
- 	SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
-@@ -6901,6 +6967,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
+@@ -6942,6 +6967,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
  	HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269),
  	HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269),
  	HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
@@ -31316,7 +3973,7 @@ index 93d2156b6241..d53c25e7a1c1 100644
  	HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269),
  	HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269),
  	HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269),
-@@ -6911,6 +6978,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
+@@ -6952,6 +6978,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
  	HDA_CODEC_ENTRY(0x10ec0269, "ALC269", patch_alc269),
  	HDA_CODEC_ENTRY(0x10ec0270, "ALC270", patch_alc269),
  	HDA_CODEC_ENTRY(0x10ec0272, "ALC272", patch_alc662),
@@ -31324,7 +3981,7 @@ index 93d2156b6241..d53c25e7a1c1 100644
  	HDA_CODEC_ENTRY(0x10ec0275, "ALC275", patch_alc269),
  	HDA_CODEC_ENTRY(0x10ec0276, "ALC276", patch_alc269),
  	HDA_CODEC_ENTRY(0x10ec0280, "ALC280", patch_alc269),
-@@ -6923,6 +6991,8 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
+@@ -6964,6 +6991,8 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
  	HDA_CODEC_ENTRY(0x10ec0290, "ALC290", patch_alc269),
  	HDA_CODEC_ENTRY(0x10ec0292, "ALC292", patch_alc269),
  	HDA_CODEC_ENTRY(0x10ec0293, "ALC293", patch_alc269),
@@ -31333,860 +3990,3 @@ index 93d2156b6241..d53c25e7a1c1 100644
  	HDA_CODEC_ENTRY(0x10ec0298, "ALC298", patch_alc269),
  	HDA_CODEC_REV_ENTRY(0x10ec0861, 0x100340, "ALC660", patch_alc861),
  	HDA_CODEC_ENTRY(0x10ec0660, "ALC660-VD", patch_alc861vd),
-diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
-index 42bcbac801a3..ccdab29a8b66 100644
---- a/sound/pci/intel8x0.c
-+++ b/sound/pci/intel8x0.c
-@@ -2879,6 +2879,7 @@ static void intel8x0_measure_ac97_clock(struct intel8x0 *chip)
- 
- static struct snd_pci_quirk intel8x0_clock_list[] = {
- 	SND_PCI_QUIRK(0x0e11, 0x008a, "AD1885", 41000),
-+	SND_PCI_QUIRK(0x1014, 0x0581, "AD1981B", 48000),
- 	SND_PCI_QUIRK(0x1028, 0x00be, "AD1885", 44100),
- 	SND_PCI_QUIRK(0x1028, 0x0177, "AD1980", 48000),
- 	SND_PCI_QUIRK(0x1028, 0x01ad, "AD1981B", 48000),
-diff --git a/sound/pci/pcxhr/pcxhr_core.c b/sound/pci/pcxhr/pcxhr_core.c
-index c5194f5b150a..d7e71f309299 100644
---- a/sound/pci/pcxhr/pcxhr_core.c
-+++ b/sound/pci/pcxhr/pcxhr_core.c
-@@ -1341,5 +1341,6 @@ irqreturn_t pcxhr_threaded_irq(int irq, void *dev_id)
- 	}
- 
- 	pcxhr_msg_thread(mgr);
-+	mutex_unlock(&mgr->lock);
- 	return IRQ_HANDLED;
- }
-diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
-index 11d032cdc658..48dbb2fdeb09 100644
---- a/sound/soc/codecs/rt5640.c
-+++ b/sound/soc/codecs/rt5640.c
-@@ -359,7 +359,7 @@ static const DECLARE_TLV_DB_RANGE(bst_tlv,
- 
- /* Interface data select */
- static const char * const rt5640_data_select[] = {
--	"Normal", "left copy to right", "right copy to left", "Swap"};
-+	"Normal", "Swap", "left copy to right", "right copy to left"};
- 
- static SOC_ENUM_SINGLE_DECL(rt5640_if1_dac_enum, RT5640_DIG_INF_DATA,
- 			    RT5640_IF1_DAC_SEL_SFT, rt5640_data_select);
-diff --git a/sound/soc/codecs/rt5640.h b/sound/soc/codecs/rt5640.h
-index 83a7150ddc24..f84231e7d1dd 100644
---- a/sound/soc/codecs/rt5640.h
-+++ b/sound/soc/codecs/rt5640.h
-@@ -442,39 +442,39 @@
- #define RT5640_IF1_DAC_SEL_MASK			(0x3 << 14)
- #define RT5640_IF1_DAC_SEL_SFT			14
- #define RT5640_IF1_DAC_SEL_NOR			(0x0 << 14)
--#define RT5640_IF1_DAC_SEL_L2R			(0x1 << 14)
--#define RT5640_IF1_DAC_SEL_R2L			(0x2 << 14)
--#define RT5640_IF1_DAC_SEL_SWAP			(0x3 << 14)
-+#define RT5640_IF1_DAC_SEL_SWAP			(0x1 << 14)
-+#define RT5640_IF1_DAC_SEL_L2R			(0x2 << 14)
-+#define RT5640_IF1_DAC_SEL_R2L			(0x3 << 14)
- #define RT5640_IF1_ADC_SEL_MASK			(0x3 << 12)
- #define RT5640_IF1_ADC_SEL_SFT			12
- #define RT5640_IF1_ADC_SEL_NOR			(0x0 << 12)
--#define RT5640_IF1_ADC_SEL_L2R			(0x1 << 12)
--#define RT5640_IF1_ADC_SEL_R2L			(0x2 << 12)
--#define RT5640_IF1_ADC_SEL_SWAP			(0x3 << 12)
-+#define RT5640_IF1_ADC_SEL_SWAP			(0x1 << 12)
-+#define RT5640_IF1_ADC_SEL_L2R			(0x2 << 12)
-+#define RT5640_IF1_ADC_SEL_R2L			(0x3 << 12)
- #define RT5640_IF2_DAC_SEL_MASK			(0x3 << 10)
- #define RT5640_IF2_DAC_SEL_SFT			10
- #define RT5640_IF2_DAC_SEL_NOR			(0x0 << 10)
--#define RT5640_IF2_DAC_SEL_L2R			(0x1 << 10)
--#define RT5640_IF2_DAC_SEL_R2L			(0x2 << 10)
--#define RT5640_IF2_DAC_SEL_SWAP			(0x3 << 10)
-+#define RT5640_IF2_DAC_SEL_SWAP			(0x1 << 10)
-+#define RT5640_IF2_DAC_SEL_L2R			(0x2 << 10)
-+#define RT5640_IF2_DAC_SEL_R2L			(0x3 << 10)
- #define RT5640_IF2_ADC_SEL_MASK			(0x3 << 8)
- #define RT5640_IF2_ADC_SEL_SFT			8
- #define RT5640_IF2_ADC_SEL_NOR			(0x0 << 8)
--#define RT5640_IF2_ADC_SEL_L2R			(0x1 << 8)
--#define RT5640_IF2_ADC_SEL_R2L			(0x2 << 8)
--#define RT5640_IF2_ADC_SEL_SWAP			(0x3 << 8)
-+#define RT5640_IF2_ADC_SEL_SWAP			(0x1 << 8)
-+#define RT5640_IF2_ADC_SEL_L2R			(0x2 << 8)
-+#define RT5640_IF2_ADC_SEL_R2L			(0x3 << 8)
- #define RT5640_IF3_DAC_SEL_MASK			(0x3 << 6)
- #define RT5640_IF3_DAC_SEL_SFT			6
- #define RT5640_IF3_DAC_SEL_NOR			(0x0 << 6)
--#define RT5640_IF3_DAC_SEL_L2R			(0x1 << 6)
--#define RT5640_IF3_DAC_SEL_R2L			(0x2 << 6)
--#define RT5640_IF3_DAC_SEL_SWAP			(0x3 << 6)
-+#define RT5640_IF3_DAC_SEL_SWAP			(0x1 << 6)
-+#define RT5640_IF3_DAC_SEL_L2R			(0x2 << 6)
-+#define RT5640_IF3_DAC_SEL_R2L			(0x3 << 6)
- #define RT5640_IF3_ADC_SEL_MASK			(0x3 << 4)
- #define RT5640_IF3_ADC_SEL_SFT			4
- #define RT5640_IF3_ADC_SEL_NOR			(0x0 << 4)
--#define RT5640_IF3_ADC_SEL_L2R			(0x1 << 4)
--#define RT5640_IF3_ADC_SEL_R2L			(0x2 << 4)
--#define RT5640_IF3_ADC_SEL_SWAP			(0x3 << 4)
-+#define RT5640_IF3_ADC_SEL_SWAP			(0x1 << 4)
-+#define RT5640_IF3_ADC_SEL_L2R			(0x2 << 4)
-+#define RT5640_IF3_ADC_SEL_R2L			(0x3 << 4)
- 
- /* REC Left Mixer Control 1 (0x3b) */
- #define RT5640_G_HP_L_RM_L_MASK			(0x7 << 13)
-diff --git a/sound/soc/codecs/ssm4567.c b/sound/soc/codecs/ssm4567.c
-index e619d5651b09..080c78e88e10 100644
---- a/sound/soc/codecs/ssm4567.c
-+++ b/sound/soc/codecs/ssm4567.c
-@@ -352,6 +352,11 @@ static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable)
- 	regcache_cache_only(ssm4567->regmap, !enable);
- 
- 	if (enable) {
-+		ret = regmap_write(ssm4567->regmap, SSM4567_REG_SOFT_RESET,
-+			0x00);
-+		if (ret)
-+			return ret;
-+
- 		ret = regmap_update_bits(ssm4567->regmap,
- 			SSM4567_REG_POWER_CTRL,
- 			SSM4567_POWER_SPWDN, 0x00);
-diff --git a/sound/soc/samsung/s3c-i2s-v2.c b/sound/soc/samsung/s3c-i2s-v2.c
-index df65c5b494b1..b6ab3fc5789e 100644
---- a/sound/soc/samsung/s3c-i2s-v2.c
-+++ b/sound/soc/samsung/s3c-i2s-v2.c
-@@ -709,7 +709,7 @@ static int s3c2412_i2s_resume(struct snd_soc_dai *dai)
- #endif
- 
- int s3c_i2sv2_register_component(struct device *dev, int id,
--			   struct snd_soc_component_driver *cmp_drv,
-+			   const struct snd_soc_component_driver *cmp_drv,
- 			   struct snd_soc_dai_driver *dai_drv)
- {
- 	struct snd_soc_dai_ops *ops = (struct snd_soc_dai_ops *)dai_drv->ops;
-diff --git a/sound/soc/samsung/s3c-i2s-v2.h b/sound/soc/samsung/s3c-i2s-v2.h
-index 90abab364b49..d0684145ed1f 100644
---- a/sound/soc/samsung/s3c-i2s-v2.h
-+++ b/sound/soc/samsung/s3c-i2s-v2.h
-@@ -101,7 +101,7 @@ extern int s3c_i2sv2_probe(struct snd_soc_dai *dai,
-  * soc core.
-  */
- extern int s3c_i2sv2_register_component(struct device *dev, int id,
--					struct snd_soc_component_driver *cmp_drv,
-+					const struct snd_soc_component_driver *cmp_drv,
- 					struct snd_soc_dai_driver *dai_drv);
- 
- #endif /* __SND_SOC_S3C24XX_S3C_I2SV2_I2S_H */
-diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
-index 581175a51ecf..5e811dc02fb9 100644
---- a/sound/soc/soc-dapm.c
-+++ b/sound/soc/soc-dapm.c
-@@ -2188,6 +2188,13 @@ static ssize_t dapm_widget_show_component(struct snd_soc_component *cmpnt,
- 	int count = 0;
- 	char *state = "not set";
- 
-+	/* card won't be set for the dummy component, as a spot fix
-+	 * we're checking for that case specifically here but in future
-+	 * we will ensure that the dummy component looks like others.
-+	 */
-+	if (!cmpnt->card)
-+		return 0;
-+
- 	list_for_each_entry(w, &cmpnt->card->widgets, list) {
- 		if (w->dapm != dapm)
- 			continue;
-diff --git a/sound/usb/clock.c b/sound/usb/clock.c
-index 2ed260b10f6d..7ccbcaf6a147 100644
---- a/sound/usb/clock.c
-+++ b/sound/usb/clock.c
-@@ -285,6 +285,8 @@ static int set_sample_rate_v1(struct snd_usb_audio *chip, int iface,
- 	unsigned char data[3];
- 	int err, crate;
- 
-+	if (get_iface_desc(alts)->bNumEndpoints < 1)
-+		return -EINVAL;
- 	ep = get_endpoint(alts, 0)->bEndpointAddress;
- 
- 	/* if endpoint doesn't have sampling rate control, bail out */
-diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
-index 7b1cb365ffab..c07a7eda42a2 100644
---- a/sound/usb/endpoint.c
-+++ b/sound/usb/endpoint.c
-@@ -438,6 +438,9 @@ exit_clear:
-  *
-  * New endpoints will be added to chip->ep_list and must be freed by
-  * calling snd_usb_endpoint_free().
-+ *
-+ * For SND_USB_ENDPOINT_TYPE_SYNC, the caller needs to guarantee that
-+ * bNumEndpoints > 1 beforehand.
-  */
- struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
- 					      struct usb_host_interface *alts,
-diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
-index ddca6547399b..1f8fb0d904e0 100644
---- a/sound/usb/mixer_maps.c
-+++ b/sound/usb/mixer_maps.c
-@@ -349,6 +349,16 @@ static struct usbmix_name_map bose_companion5_map[] = {
- };
- 
- /*
-+ * Dell usb dock with ALC4020 codec had a firmware problem where it got
-+ * screwed up when zero volume is passed; just skip it as a workaround
-+ */
-+static const struct usbmix_name_map dell_alc4020_map[] = {
-+	{ 16, NULL },
-+	{ 19, NULL },
-+	{ 0 }
-+};
-+
-+/*
-  * Control map entries
-  */
- 
-@@ -431,6 +441,10 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
- 		.map = aureon_51_2_map,
- 	},
- 	{
-+		.id = USB_ID(0x0bda, 0x4014),
-+		.map = dell_alc4020_map,
-+	},
-+	{
- 		.id = USB_ID(0x0dba, 0x1000),
- 		.map = mbox1_map,
- 	},
-diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
-index 279025650568..f6c3bf79af9a 100644
---- a/sound/usb/mixer_quirks.c
-+++ b/sound/usb/mixer_quirks.c
-@@ -1519,7 +1519,11 @@ static int snd_microii_spdif_default_get(struct snd_kcontrol *kcontrol,
- 
- 	/* use known values for that card: interface#1 altsetting#1 */
- 	iface = usb_ifnum_to_if(chip->dev, 1);
-+	if (!iface || iface->num_altsetting < 2)
-+		return -EINVAL;
- 	alts = &iface->altsetting[1];
-+	if (get_iface_desc(alts)->bNumEndpoints < 1)
-+		return -EINVAL;
- 	ep = get_endpoint(alts, 0)->bEndpointAddress;
- 
- 	err = snd_usb_ctl_msg(chip->dev,
-diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
-index 9245f52d43bd..44d178ee9177 100644
---- a/sound/usb/pcm.c
-+++ b/sound/usb/pcm.c
-@@ -159,6 +159,8 @@ static int init_pitch_v1(struct snd_usb_audio *chip, int iface,
- 	unsigned char data[1];
- 	int err;
- 
-+	if (get_iface_desc(alts)->bNumEndpoints < 1)
-+		return -EINVAL;
- 	ep = get_endpoint(alts, 0)->bEndpointAddress;
- 
- 	data[0] = 1;
-diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
-index c458d60d5030..db11ecf0b74d 100644
---- a/sound/usb/quirks.c
-+++ b/sound/usb/quirks.c
-@@ -150,6 +150,7 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
- 		usb_audio_err(chip, "cannot memdup\n");
- 		return -ENOMEM;
- 	}
-+	INIT_LIST_HEAD(&fp->list);
- 	if (fp->nr_rates > MAX_NR_RATES) {
- 		kfree(fp);
- 		return -EINVAL;
-@@ -167,19 +168,20 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
- 	stream = (fp->endpoint & USB_DIR_IN)
- 		? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
- 	err = snd_usb_add_audio_stream(chip, stream, fp);
--	if (err < 0) {
--		kfree(fp);
--		kfree(rate_table);
--		return err;
--	}
-+	if (err < 0)
-+		goto error;
- 	if (fp->iface != get_iface_desc(&iface->altsetting[0])->bInterfaceNumber ||
- 	    fp->altset_idx >= iface->num_altsetting) {
--		kfree(fp);
--		kfree(rate_table);
--		return -EINVAL;
-+		err = -EINVAL;
-+		goto error;
- 	}
- 	alts = &iface->altsetting[fp->altset_idx];
- 	altsd = get_iface_desc(alts);
-+	if (altsd->bNumEndpoints < 1) {
-+		err = -EINVAL;
-+		goto error;
-+	}
-+
- 	fp->protocol = altsd->bInterfaceProtocol;
- 
- 	if (fp->datainterval == 0)
-@@ -190,6 +192,12 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
- 	snd_usb_init_pitch(chip, fp->iface, alts, fp);
- 	snd_usb_init_sample_rate(chip, fp->iface, alts, fp, fp->rate_max);
- 	return 0;
-+
-+ error:
-+	list_del(&fp->list); /* unlink for avoiding double-free */
-+	kfree(fp);
-+	kfree(rate_table);
-+	return err;
- }
- 
- static int create_auto_pcm_quirk(struct snd_usb_audio *chip,
-@@ -462,6 +470,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
- 	fp->ep_attr = get_endpoint(alts, 0)->bmAttributes;
- 	fp->datainterval = 0;
- 	fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize);
-+	INIT_LIST_HEAD(&fp->list);
- 
- 	switch (fp->maxpacksize) {
- 	case 0x120:
-@@ -485,6 +494,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
- 		? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
- 	err = snd_usb_add_audio_stream(chip, stream, fp);
- 	if (err < 0) {
-+		list_del(&fp->list); /* unlink for avoiding double-free */
- 		kfree(fp);
- 		return err;
- 	}
-@@ -1121,12 +1131,18 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
- 	switch (chip->usb_id) {
- 	case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema  */
- 	case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
-+	case USB_ID(0x045E, 0x076E): /* MS Lifecam HD-5001 */
- 	case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
- 	case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
- 	case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
-+	case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
- 	case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
- 	case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
-+	case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
- 	case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
-+	case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
-+	case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
-+	case USB_ID(0x1de7, 0x0114): /* Phoenix Audio MT202pcs */
- 	case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
- 		return true;
- 	}
-diff --git a/sound/usb/stream.c b/sound/usb/stream.c
-index c4dc577ab1bd..8e9548bc1f1a 100644
---- a/sound/usb/stream.c
-+++ b/sound/usb/stream.c
-@@ -314,7 +314,9 @@ static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits,
- /*
-  * add this endpoint to the chip instance.
-  * if a stream with the same endpoint already exists, append to it.
-- * if not, create a new pcm stream.
-+ * if not, create a new pcm stream. note, fp is added to the substream
-+ * fmt_list and will be freed on the chip instance release. do not free
-+ * fp or do remove it from the substream fmt_list to avoid double-free.
-  */
- int snd_usb_add_audio_stream(struct snd_usb_audio *chip,
- 			     int stream,
-@@ -675,6 +677,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
- 					* (fp->maxpacksize & 0x7ff);
- 		fp->attributes = parse_uac_endpoint_attributes(chip, alts, protocol, iface_no);
- 		fp->clock = clock;
-+		INIT_LIST_HEAD(&fp->list);
- 
- 		/* some quirks for attributes here */
- 
-@@ -723,6 +726,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
- 		dev_dbg(&dev->dev, "%u:%d: add audio endpoint %#x\n", iface_no, altno, fp->endpoint);
- 		err = snd_usb_add_audio_stream(chip, stream, fp);
- 		if (err < 0) {
-+			list_del(&fp->list); /* unlink for avoiding double-free */
- 			kfree(fp->rate_table);
- 			kfree(fp->chmap);
- 			kfree(fp);
-diff --git a/tools/hv/Makefile b/tools/hv/Makefile
-index a8ab79556926..a8c4644022a6 100644
---- a/tools/hv/Makefile
-+++ b/tools/hv/Makefile
-@@ -5,6 +5,8 @@ PTHREAD_LIBS = -lpthread
- WARNINGS = -Wall -Wextra
- CFLAGS = $(WARNINGS) -g $(PTHREAD_LIBS) $(shell getconf LFS_CFLAGS)
- 
-+CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
-+
- all: hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon
- %: %.c
- 	$(CC) $(CFLAGS) -o $@ $^
-diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
-index 0144b3d1bb77..88cccea3ca99 100644
---- a/tools/lib/traceevent/parse-filter.c
-+++ b/tools/lib/traceevent/parse-filter.c
-@@ -1164,11 +1164,11 @@ process_filter(struct event_format *event, struct filter_arg **parg,
- 		current_op = current_exp;
- 
- 	ret = collapse_tree(current_op, parg, error_str);
-+	/* collapse_tree() may free current_op, and updates parg accordingly */
-+	current_op = NULL;
- 	if (ret < 0)
- 		goto fail;
- 
--	*parg = current_op;
--
- 	free(token);
- 	return 0;
- 
-diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
-index 52ef7a9d50aa..14d9e8ffaff7 100644
---- a/tools/perf/Documentation/perf-stat.txt
-+++ b/tools/perf/Documentation/perf-stat.txt
-@@ -69,6 +69,14 @@ report::
- --scale::
- 	scale/normalize counter values
- 
-+-d::
-+--detailed::
-+	print more detailed statistics, can be specified up to 3 times
-+
-+	   -d:          detailed events, L1 and LLC data cache
-+        -d -d:     more detailed events, dTLB and iTLB events
-+     -d -d -d:     very detailed events, adding prefetch events
-+
- -r::
- --repeat=<n>::
- 	repeat command and print average + stddev (max: 100). 0 means forever.
-diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
-index 08c09ad755d2..7bb47424bc49 100644
---- a/tools/perf/ui/browsers/hists.c
-+++ b/tools/perf/ui/browsers/hists.c
-@@ -302,7 +302,7 @@ static void callchain_node__init_have_children(struct callchain_node *node,
- 	chain = list_entry(node->val.next, struct callchain_list, list);
- 	chain->has_children = has_sibling;
- 
--	if (node->val.next != node->val.prev) {
-+	if (!list_empty(&node->val)) {
- 		chain = list_entry(node->val.prev, struct callchain_list, list);
- 		chain->has_children = !RB_EMPTY_ROOT(&node->rb_root);
- 	}
-@@ -844,7 +844,7 @@ next:
- 	return row - first_row;
- }
- 
--static int hist_browser__show_callchain(struct hist_browser *browser,
-+static int hist_browser__show_callchain_graph(struct hist_browser *browser,
- 					struct rb_root *root, int level,
- 					unsigned short row, u64 total,
- 					print_callchain_entry_fn print,
-@@ -898,7 +898,7 @@ static int hist_browser__show_callchain(struct hist_browser *browser,
- 			else
- 				new_total = total;
- 
--			row += hist_browser__show_callchain(browser, &child->rb_root,
-+			row += hist_browser__show_callchain_graph(browser, &child->rb_root,
- 							    new_level, row, new_total,
- 							    print, arg, is_output_full);
- 		}
-@@ -910,6 +910,43 @@ out:
- 	return row - first_row;
- }
- 
-+static int hist_browser__show_callchain(struct hist_browser *browser,
-+					struct hist_entry *entry, int level,
-+					unsigned short row,
-+					print_callchain_entry_fn print,
-+					struct callchain_print_arg *arg,
-+					check_output_full_fn is_output_full)
-+{
-+	u64 total = hists__total_period(entry->hists);
-+	int printed;
-+
-+	if (callchain_param.mode == CHAIN_GRAPH_REL) {
-+		if (symbol_conf.cumulate_callchain)
-+			total = entry->stat_acc->period;
-+		else
-+			total = entry->stat.period;
-+	}
-+
-+	if (callchain_param.mode == CHAIN_FLAT) {
-+		printed = hist_browser__show_callchain_flat(browser,
-+						&entry->sorted_chain, row, total,
-+						print, arg, is_output_full);
-+	} else if (callchain_param.mode == CHAIN_FOLDED) {
-+		printed = hist_browser__show_callchain_folded(browser,
-+						&entry->sorted_chain, row, total,
-+						print, arg, is_output_full);
-+	} else {
-+		printed = hist_browser__show_callchain_graph(browser,
-+						&entry->sorted_chain, level, row, total,
-+						print, arg, is_output_full);
-+	}
-+
-+	if (arg->is_current_entry)
-+		browser->he_selection = entry;
-+
-+	return printed;
-+}
-+
- struct hpp_arg {
- 	struct ui_browser *b;
- 	char folded_sign;
-@@ -1084,38 +1121,14 @@ static int hist_browser__show_entry(struct hist_browser *browser,
- 		--row_offset;
- 
- 	if (folded_sign == '-' && row != browser->b.rows) {
--		u64 total = hists__total_period(entry->hists);
- 		struct callchain_print_arg arg = {
- 			.row_offset = row_offset,
- 			.is_current_entry = current_entry,
- 		};
- 
--		if (callchain_param.mode == CHAIN_GRAPH_REL) {
--			if (symbol_conf.cumulate_callchain)
--				total = entry->stat_acc->period;
--			else
--				total = entry->stat.period;
--		}
--
--		if (callchain_param.mode == CHAIN_FLAT) {
--			printed += hist_browser__show_callchain_flat(browser,
--					&entry->sorted_chain, row, total,
-+		printed += hist_browser__show_callchain(browser, entry, 1, row,
- 					hist_browser__show_callchain_entry, &arg,
- 					hist_browser__check_output_full);
--		} else if (callchain_param.mode == CHAIN_FOLDED) {
--			printed += hist_browser__show_callchain_folded(browser,
--					&entry->sorted_chain, row, total,
--					hist_browser__show_callchain_entry, &arg,
--					hist_browser__check_output_full);
--		} else {
--			printed += hist_browser__show_callchain(browser,
--					&entry->sorted_chain, 1, row, total,
--					hist_browser__show_callchain_entry, &arg,
--					hist_browser__check_output_full);
--		}
--
--		if (arg.is_current_entry)
--			browser->he_selection = entry;
- 	}
- 
- 	return printed;
-@@ -1380,15 +1393,11 @@ do_offset:
- static int hist_browser__fprintf_callchain(struct hist_browser *browser,
- 					   struct hist_entry *he, FILE *fp)
- {
--	u64 total = hists__total_period(he->hists);
- 	struct callchain_print_arg arg  = {
- 		.fp = fp,
- 	};
- 
--	if (symbol_conf.cumulate_callchain)
--		total = he->stat_acc->period;
--
--	hist_browser__show_callchain(browser, &he->sorted_chain, 1, 0, total,
-+	hist_browser__show_callchain(browser, he, 1, 0,
- 				     hist_browser__fprintf_callchain_entry, &arg,
- 				     hist_browser__check_dump_full);
- 	return arg.printed;
-@@ -2320,10 +2329,12 @@ skip_annotation:
- 			 *
- 			 * See hist_browser__show_entry.
- 			 */
--			nr_options += add_script_opt(browser,
--						     &actions[nr_options],
--						     &options[nr_options],
--						     NULL, browser->selection->sym);
-+			if (sort__has_sym && browser->selection->sym) {
-+				nr_options += add_script_opt(browser,
-+							     &actions[nr_options],
-+							     &options[nr_options],
-+							     NULL, browser->selection->sym);
-+			}
- 		}
- 		nr_options += add_script_opt(browser, &actions[nr_options],
- 					     &options[nr_options], NULL, NULL);
-diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
-index 85155e91b61b..7bad5c3fa7b7 100644
---- a/tools/perf/util/event.c
-+++ b/tools/perf/util/event.c
-@@ -282,7 +282,7 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
- 		strcpy(execname, "");
- 
- 		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
--		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n",
-+		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
- 		       &event->mmap2.start, &event->mmap2.len, prot,
- 		       &event->mmap2.pgoff, &event->mmap2.maj,
- 		       &event->mmap2.min,
-diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
-index d81f13de2476..a7eb0eae9938 100644
---- a/tools/perf/util/evlist.c
-+++ b/tools/perf/util/evlist.c
-@@ -1181,12 +1181,12 @@ void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
- 	 */
- 	if (cpus != evlist->cpus) {
- 		cpu_map__put(evlist->cpus);
--		evlist->cpus = cpus;
-+		evlist->cpus = cpu_map__get(cpus);
- 	}
- 
- 	if (threads != evlist->threads) {
- 		thread_map__put(evlist->threads);
--		evlist->threads = threads;
-+		evlist->threads = thread_map__get(threads);
- 	}
- 
- 	perf_evlist__propagate_maps(evlist);
-diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
-index 8e75434bd01c..4d8037a3d8a4 100644
---- a/tools/perf/util/evsel.h
-+++ b/tools/perf/util/evsel.h
-@@ -93,10 +93,8 @@ struct perf_evsel {
- 	const char		*unit;
- 	struct event_format	*tp_format;
- 	off_t			id_offset;
--	union {
--		void		*priv;
--		u64		db_id;
--	};
-+	void			*priv;
-+	u64			db_id;
- 	struct cgroup_sel	*cgrp;
- 	void			*handler;
- 	struct cpu_map		*cpus;
-diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
-index 05d815851be1..4e1590ba8902 100644
---- a/tools/perf/util/intel-pt.c
-+++ b/tools/perf/util/intel-pt.c
-@@ -1127,7 +1127,7 @@ static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
- 		pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n",
- 		       ret);
- 
--	if (pt->synth_opts.callchain)
-+	if (pt->synth_opts.last_branch)
- 		intel_pt_reset_last_branch_rb(ptq);
- 
- 	return ret;
-diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
-index 813d9b272c81..48a1c5e7dc0d 100644
---- a/tools/perf/util/parse-events.c
-+++ b/tools/perf/util/parse-events.c
-@@ -2101,11 +2101,11 @@ char *parse_events_formats_error_string(char *additional_terms)
- 
- 	/* valid terms */
- 	if (additional_terms) {
--		if (!asprintf(&str, "valid terms: %s,%s",
--			      additional_terms, static_terms))
-+		if (asprintf(&str, "valid terms: %s,%s",
-+			     additional_terms, static_terms) < 0)
- 			goto fail;
- 	} else {
--		if (!asprintf(&str, "valid terms: %s", static_terms))
-+		if (asprintf(&str, "valid terms: %s", static_terms) < 0)
- 			goto fail;
- 	}
- 	return str;
-diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
-index b597bcc8fc78..37b4f5070353 100644
---- a/tools/perf/util/pmu.c
-+++ b/tools/perf/util/pmu.c
-@@ -284,13 +284,12 @@ static int pmu_aliases_parse(char *dir, struct list_head *head)
- {
- 	struct dirent *evt_ent;
- 	DIR *event_dir;
--	int ret = 0;
- 
- 	event_dir = opendir(dir);
- 	if (!event_dir)
- 		return -EINVAL;
- 
--	while (!ret && (evt_ent = readdir(event_dir))) {
-+	while ((evt_ent = readdir(event_dir))) {
- 		char path[PATH_MAX];
- 		char *name = evt_ent->d_name;
- 		FILE *file;
-@@ -306,17 +305,19 @@ static int pmu_aliases_parse(char *dir, struct list_head *head)
- 
- 		snprintf(path, PATH_MAX, "%s/%s", dir, name);
- 
--		ret = -EINVAL;
- 		file = fopen(path, "r");
--		if (!file)
--			break;
-+		if (!file) {
-+			pr_debug("Cannot open %s\n", path);
-+			continue;
-+		}
- 
--		ret = perf_pmu__new_alias(head, dir, name, file);
-+		if (perf_pmu__new_alias(head, dir, name, file) < 0)
-+			pr_debug("Cannot set up %s\n", name);
- 		fclose(file);
- 	}
- 
- 	closedir(event_dir);
--	return ret;
-+	return 0;
- }
- 
- /*
-diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
-index 1833103768cb..c8680984d2d6 100644
---- a/tools/perf/util/setup.py
-+++ b/tools/perf/util/setup.py
-@@ -22,6 +22,7 @@ cflags = getenv('CFLAGS', '').split()
- # switch off several checks (need to be at the end of cflags list)
- cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]
- 
-+src_perf  = getenv('srctree') + '/tools/perf'
- build_lib = getenv('PYTHON_EXTBUILD_LIB')
- build_tmp = getenv('PYTHON_EXTBUILD_TMP')
- libtraceevent = getenv('LIBTRACEEVENT')
-@@ -30,6 +31,9 @@ libapikfs = getenv('LIBAPI')
- ext_sources = [f.strip() for f in file('util/python-ext-sources')
- 				if len(f.strip()) > 0 and f[0] != '#']
- 
-+# use full paths with source files
-+ext_sources = map(lambda x: '%s/%s' % (src_perf, x) , ext_sources)
-+
- perf = Extension('perf',
- 		  sources = ext_sources,
- 		  include_dirs = ['util/include'],
-diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
-index ec722346e6ff..16892a7ca27e 100644
---- a/tools/perf/util/sort.c
-+++ b/tools/perf/util/sort.c
-@@ -2272,6 +2272,9 @@ static char *prefix_if_not_in(const char *pre, char *str)
- 
- static char *setup_overhead(char *keys)
- {
-+	if (sort__mode == SORT_MODE__DIFF)
-+		return keys;
-+
- 	keys = prefix_if_not_in("overhead", keys);
- 
- 	if (symbol_conf.cumulate_callchain)
-diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
-index ea6064696fe4..a7b9022b5c8f 100644
---- a/virt/kvm/arm/arch_timer.c
-+++ b/virt/kvm/arm/arch_timer.c
-@@ -86,6 +86,8 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
- 	vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
- 	vcpu->arch.timer_cpu.armed = false;
- 
-+	WARN_ON(!kvm_timer_should_fire(vcpu));
-+
- 	/*
- 	 * If the vcpu is blocked we want to wake it up so that it will see
- 	 * the timer has expired when entering the guest.
-@@ -93,10 +95,46 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
- 	kvm_vcpu_kick(vcpu);
- }
- 
-+static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu)
-+{
-+	cycle_t cval, now;
-+
-+	cval = vcpu->arch.timer_cpu.cntv_cval;
-+	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
-+
-+	if (now < cval) {
-+		u64 ns;
-+
-+		ns = cyclecounter_cyc2ns(timecounter->cc,
-+					 cval - now,
-+					 timecounter->mask,
-+					 &timecounter->frac);
-+		return ns;
-+	}
-+
-+	return 0;
-+}
-+
- static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
- {
- 	struct arch_timer_cpu *timer;
-+	struct kvm_vcpu *vcpu;
-+	u64 ns;
-+
- 	timer = container_of(hrt, struct arch_timer_cpu, timer);
-+	vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);
-+
-+	/*
-+	 * Check that the timer has really expired from the guest's
-+	 * PoV (NTP on the host may have forced it to expire
-+	 * early). If we should have slept longer, restart it.
-+	 */
-+	ns = kvm_timer_compute_delta(vcpu);
-+	if (unlikely(ns)) {
-+		hrtimer_forward_now(hrt, ns_to_ktime(ns));
-+		return HRTIMER_RESTART;
-+	}
-+
- 	queue_work(wqueue, &timer->expired);
- 	return HRTIMER_NORESTART;
- }
-@@ -170,8 +208,6 @@ static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
- void kvm_timer_schedule(struct kvm_vcpu *vcpu)
- {
- 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
--	u64 ns;
--	cycle_t cval, now;
- 
- 	BUG_ON(timer_is_armed(timer));
- 
-@@ -191,14 +227,7 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu)
- 		return;
- 
- 	/*  The timer has not yet expired, schedule a background timer */
--	cval = timer->cntv_cval;
--	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
--
--	ns = cyclecounter_cyc2ns(timecounter->cc,
--				 cval - now,
--				 timecounter->mask,
--				 &timecounter->frac);
--	timer_arm(timer, ns);
-+	timer_arm(timer, kvm_timer_compute_delta(vcpu));
- }
- 
- void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
-diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
-index 9102ae172d2a..298473707f17 100644
---- a/virt/kvm/kvm_main.c
-+++ b/virt/kvm/kvm_main.c
-@@ -537,6 +537,16 @@ static struct kvm *kvm_create_vm(unsigned long type)
- 	if (!kvm)
- 		return ERR_PTR(-ENOMEM);
- 
-+	spin_lock_init(&kvm->mmu_lock);
-+	atomic_inc(&current->mm->mm_count);
-+	kvm->mm = current->mm;
-+	kvm_eventfd_init(kvm);
-+	mutex_init(&kvm->lock);
-+	mutex_init(&kvm->irq_lock);
-+	mutex_init(&kvm->slots_lock);
-+	atomic_set(&kvm->users_count, 1);
-+	INIT_LIST_HEAD(&kvm->devices);
-+
- 	r = kvm_arch_init_vm(kvm, type);
- 	if (r)
- 		goto out_err_no_disable;
-@@ -569,16 +579,6 @@ static struct kvm *kvm_create_vm(unsigned long type)
- 			goto out_err;
- 	}
- 
--	spin_lock_init(&kvm->mmu_lock);
--	kvm->mm = current->mm;
--	atomic_inc(&kvm->mm->mm_count);
--	kvm_eventfd_init(kvm);
--	mutex_init(&kvm->lock);
--	mutex_init(&kvm->irq_lock);
--	mutex_init(&kvm->slots_lock);
--	atomic_set(&kvm->users_count, 1);
--	INIT_LIST_HEAD(&kvm->devices);
--
- 	r = kvm_init_mmu_notifier(kvm);
- 	if (r)
- 		goto out_err;
-@@ -603,6 +603,7 @@ out_err_no_disable:
- 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
- 		kvm_free_memslots(kvm, kvm->memslots[i]);
- 	kvm_arch_free_vm(kvm);
-+	mmdrop(current->mm);
- 	return ERR_PTR(r);
- }
- 


^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [gentoo-commits] proj/linux-patches:4.5 commit in: /
@ 2016-06-08 10:11 Mike Pagano
  0 siblings, 0 replies; 12+ messages in thread
From: Mike Pagano @ 2016-06-08 10:11 UTC (permalink / raw
  To: gentoo-commits

commit:     0ee489d1a3a6a217d26da42a6151dc67d33ca20c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jun  8 10:11:15 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jun  8 10:11:15 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0ee489d1

Linux patch 4.5.7

 0000_README            |    4 +
 1006_linux-4.5.7.patch | 3944 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3948 insertions(+)

diff --git a/0000_README b/0000_README
index 9a3ebeb..67c9b4b 100644
--- a/0000_README
+++ b/0000_README
@@ -67,6 +67,10 @@ Patch:  1005_linux-4.5.6.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.5.6
 
+Patch:  1006_linux-4.5.7.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.5.7
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1006_linux-4.5.7.patch b/1006_linux-4.5.7.patch
new file mode 100644
index 0000000..a6fbea9
--- /dev/null
+++ b/1006_linux-4.5.7.patch
@@ -0,0 +1,3944 @@
+diff --git a/Documentation/devicetree/bindings/clock/imx35-clock.txt b/Documentation/devicetree/bindings/clock/imx35-clock.txt
+index a70356452a82..f49783213c56 100644
+--- a/Documentation/devicetree/bindings/clock/imx35-clock.txt
++++ b/Documentation/devicetree/bindings/clock/imx35-clock.txt
+@@ -94,6 +94,7 @@ clocks and IDs.
+ 	csi_sel			79
+ 	iim_gate		80
+ 	gpu2d_gate		81
++	ckli_gate		82
+ 
+ Examples:
+ 
+diff --git a/Makefile b/Makefile
+index 07a1786f695a..90e4bd904d36 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 5
+-SUBLEVEL = 6
++SUBLEVEL = 7
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+ 
+@@ -364,7 +364,7 @@ AFLAGS_MODULE   =
+ LDFLAGS_MODULE  =
+ CFLAGS_KERNEL	=
+ AFLAGS_KERNEL	=
+-CFLAGS_GCOV	= -fprofile-arcs -ftest-coverage
++CFLAGS_GCOV	= -fprofile-arcs -ftest-coverage -fno-tree-loop-im
+ 
+ 
+ # Use USERINCLUDE when you must reference the UAPI directories only.
+diff --git a/arch/arm/boot/dts/armada-385-linksys.dtsi b/arch/arm/boot/dts/armada-385-linksys.dtsi
+index 85d2c377c332..8450944b28e6 100644
+--- a/arch/arm/boot/dts/armada-385-linksys.dtsi
++++ b/arch/arm/boot/dts/armada-385-linksys.dtsi
+@@ -245,7 +245,7 @@
+ 		button@2 {
+ 			label = "Factory Reset Button";
+ 			linux,code = <KEY_RESTART>;
+-			gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
++			gpios = <&gpio0 29 GPIO_ACTIVE_LOW>;
+ 		};
+ 	};
+ 
+@@ -260,7 +260,7 @@
+ 		};
+ 
+ 		sata {
+-			gpios = <&gpio1 22 GPIO_ACTIVE_HIGH>;
++			gpios = <&gpio1 22 GPIO_ACTIVE_LOW>;
+ 			default-state = "off";
+ 		};
+ 	};
+@@ -313,7 +313,7 @@
+ 
+ &pinctrl {
+ 	keys_pin: keys-pin {
+-		marvell,pins = "mpp24", "mpp47";
++		marvell,pins = "mpp24", "mpp29";
+ 		marvell,function = "gpio";
+ 	};
+ 
+diff --git a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts
+index b89e6cf1271a..7a461541ce50 100644
+--- a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts
++++ b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts
+@@ -304,13 +304,13 @@
+ 		button@1 {
+ 			label = "WPS";
+ 			linux,code = <KEY_WPS_BUTTON>;
+-			gpios = <&gpio1 0 GPIO_ACTIVE_HIGH>;
++			gpios = <&gpio1 0 GPIO_ACTIVE_LOW>;
+ 		};
+ 
+ 		button@2 {
+ 			label = "Factory Reset Button";
+ 			linux,code = <KEY_RESTART>;
+-			gpios = <&gpio1 1 GPIO_ACTIVE_HIGH>;
++			gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+ 		};
+ 	};
+ 
+diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts
+index a50be640f1b0..59411e447fa0 100644
+--- a/arch/arm/boot/dts/exynos4210-trats.dts
++++ b/arch/arm/boot/dts/exynos4210-trats.dts
+@@ -298,6 +298,8 @@
+ 		compatible = "maxim,max8997-pmic";
+ 
+ 		reg = <0x66>;
++		interrupt-parent = <&gpx0>;
++		interrupts = <7 0>;
+ 
+ 		max8997,pmic-buck1-uses-gpio-dvs;
+ 		max8997,pmic-buck2-uses-gpio-dvs;
+diff --git a/arch/arm/boot/dts/sama5d2-pinfunc.h b/arch/arm/boot/dts/sama5d2-pinfunc.h
+index b0c912feaa2f..8a394f336003 100644
+--- a/arch/arm/boot/dts/sama5d2-pinfunc.h
++++ b/arch/arm/boot/dts/sama5d2-pinfunc.h
+@@ -837,8 +837,8 @@
+ #define PIN_PD23__ISC_FIELD		PINMUX_PIN(PIN_PD23, 6, 4)
+ #define PIN_PD24			120
+ #define PIN_PD24__GPIO			PINMUX_PIN(PIN_PD24, 0, 0)
+-#define PIN_PD24__UTXD2			PINMUX_PIN(PIN_PD23, 1, 2)
+-#define PIN_PD24__FLEXCOM4_IO3		PINMUX_PIN(PIN_PD23, 3, 3)
++#define PIN_PD24__UTXD2			PINMUX_PIN(PIN_PD24, 1, 2)
++#define PIN_PD24__FLEXCOM4_IO3		PINMUX_PIN(PIN_PD24, 3, 3)
+ #define PIN_PD25			121
+ #define PIN_PD25__GPIO			PINMUX_PIN(PIN_PD25, 0, 0)
+ #define PIN_PD25__SPI1_SPCK		PINMUX_PIN(PIN_PD25, 1, 3)
+diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
+index 2c8f5e6ad905..bf70d0ae93ce 100644
+--- a/arch/arm/boot/dts/sun4i-a10.dtsi
++++ b/arch/arm/boot/dts/sun4i-a10.dtsi
+@@ -96,7 +96,7 @@
+ 			allwinner,pipeline = "de_fe0-de_be0-lcd0-tve0";
+ 			clocks = <&pll5 1>, <&ahb_gates 34>, <&ahb_gates 36>,
+ 				 <&ahb_gates 44>, <&ahb_gates 46>,
+-				 <&dram_gates 25>, <&dram_gates 26>;
++				 <&dram_gates 5>, <&dram_gates 25>, <&dram_gates 26>;
+ 			status = "disabled";
+ 		};
+ 	};
+diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
+index 0940a788f824..ee4e8e7d3e30 100644
+--- a/arch/arm/boot/dts/sun7i-a20.dtsi
++++ b/arch/arm/boot/dts/sun7i-a20.dtsi
+@@ -85,8 +85,9 @@
+ 			compatible = "allwinner,simple-framebuffer",
+ 				     "simple-framebuffer";
+ 			allwinner,pipeline = "de_be0-lcd0-tve0";
+-			clocks = <&pll5 1>, <&ahb_gates 34>, <&ahb_gates 36>,
+-				 <&ahb_gates 44>, <&dram_gates 26>;
++			clocks = <&pll5 1>,
++				 <&ahb_gates 34>, <&ahb_gates 36>, <&ahb_gates 44>,
++				 <&dram_gates 5>, <&dram_gates 26>;
+ 			status = "disabled";
+ 		};
+ 	};
+diff --git a/arch/mips/ath79/early_printk.c b/arch/mips/ath79/early_printk.c
+index b955fafc58ba..d1adc59af5bf 100644
+--- a/arch/mips/ath79/early_printk.c
++++ b/arch/mips/ath79/early_printk.c
+@@ -31,13 +31,15 @@ static inline void prom_putchar_wait(void __iomem *reg, u32 mask, u32 val)
+ 	} while (1);
+ }
+ 
++#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
++
+ static void prom_putchar_ar71xx(unsigned char ch)
+ {
+ 	void __iomem *base = (void __iomem *)(KSEG1ADDR(AR71XX_UART_BASE));
+ 
+-	prom_putchar_wait(base + UART_LSR * 4, UART_LSR_THRE, UART_LSR_THRE);
++	prom_putchar_wait(base + UART_LSR * 4, BOTH_EMPTY, BOTH_EMPTY);
+ 	__raw_writel(ch, base + UART_TX * 4);
+-	prom_putchar_wait(base + UART_LSR * 4, UART_LSR_THRE, UART_LSR_THRE);
++	prom_putchar_wait(base + UART_LSR * 4, BOTH_EMPTY, BOTH_EMPTY);
+ }
+ 
+ static void prom_putchar_ar933x(unsigned char ch)
+diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
+index 867f924b05c7..e689b894353c 100644
+--- a/arch/mips/include/asm/asmmacro.h
++++ b/arch/mips/include/asm/asmmacro.h
+@@ -298,21 +298,21 @@
+ 	.set	pop
+ 	.endm
+ 
+-	.macro	copy_u_w	ws, n
++	.macro	copy_s_w	ws, n
+ 	.set	push
+ 	.set	mips32r2
+ 	.set	fp=64
+ 	.set	msa
+-	copy_u.w $1, $w\ws[\n]
++	copy_s.w $1, $w\ws[\n]
+ 	.set	pop
+ 	.endm
+ 
+-	.macro	copy_u_d	ws, n
++	.macro	copy_s_d	ws, n
+ 	.set	push
+ 	.set	mips64r2
+ 	.set	fp=64
+ 	.set	msa
+-	copy_u.d $1, $w\ws[\n]
++	copy_s.d $1, $w\ws[\n]
+ 	.set	pop
+ 	.endm
+ 
+@@ -346,8 +346,8 @@
+ #define STH_MSA_INSN		0x5800081f
+ #define STW_MSA_INSN		0x5800082f
+ #define STD_MSA_INSN		0x5800083f
+-#define COPY_UW_MSA_INSN	0x58f00056
+-#define COPY_UD_MSA_INSN	0x58f80056
++#define COPY_SW_MSA_INSN	0x58b00056
++#define COPY_SD_MSA_INSN	0x58b80056
+ #define INSERT_W_MSA_INSN	0x59300816
+ #define INSERT_D_MSA_INSN	0x59380816
+ #else
+@@ -361,8 +361,8 @@
+ #define STH_MSA_INSN		0x78000825
+ #define STW_MSA_INSN		0x78000826
+ #define STD_MSA_INSN		0x78000827
+-#define COPY_UW_MSA_INSN	0x78f00059
+-#define COPY_UD_MSA_INSN	0x78f80059
++#define COPY_SW_MSA_INSN	0x78b00059
++#define COPY_SD_MSA_INSN	0x78b80059
+ #define INSERT_W_MSA_INSN	0x79300819
+ #define INSERT_D_MSA_INSN	0x79380819
+ #endif
+@@ -393,7 +393,7 @@
+ 	.set	push
+ 	.set	noat
+ 	SET_HARDFLOAT
+-	addu	$1, \base, \off
++	PTR_ADDU $1, \base, \off
+ 	.word	LDB_MSA_INSN | (\wd << 6)
+ 	.set	pop
+ 	.endm
+@@ -402,7 +402,7 @@
+ 	.set	push
+ 	.set	noat
+ 	SET_HARDFLOAT
+-	addu	$1, \base, \off
++	PTR_ADDU $1, \base, \off
+ 	.word	LDH_MSA_INSN | (\wd << 6)
+ 	.set	pop
+ 	.endm
+@@ -411,7 +411,7 @@
+ 	.set	push
+ 	.set	noat
+ 	SET_HARDFLOAT
+-	addu	$1, \base, \off
++	PTR_ADDU $1, \base, \off
+ 	.word	LDW_MSA_INSN | (\wd << 6)
+ 	.set	pop
+ 	.endm
+@@ -420,7 +420,7 @@
+ 	.set	push
+ 	.set	noat
+ 	SET_HARDFLOAT
+-	addu	$1, \base, \off
++	PTR_ADDU $1, \base, \off
+ 	.word	LDD_MSA_INSN | (\wd << 6)
+ 	.set	pop
+ 	.endm
+@@ -429,7 +429,7 @@
+ 	.set	push
+ 	.set	noat
+ 	SET_HARDFLOAT
+-	addu	$1, \base, \off
++	PTR_ADDU $1, \base, \off
+ 	.word	STB_MSA_INSN | (\wd << 6)
+ 	.set	pop
+ 	.endm
+@@ -438,7 +438,7 @@
+ 	.set	push
+ 	.set	noat
+ 	SET_HARDFLOAT
+-	addu	$1, \base, \off
++	PTR_ADDU $1, \base, \off
+ 	.word	STH_MSA_INSN | (\wd << 6)
+ 	.set	pop
+ 	.endm
+@@ -447,7 +447,7 @@
+ 	.set	push
+ 	.set	noat
+ 	SET_HARDFLOAT
+-	addu	$1, \base, \off
++	PTR_ADDU $1, \base, \off
+ 	.word	STW_MSA_INSN | (\wd << 6)
+ 	.set	pop
+ 	.endm
+@@ -456,26 +456,26 @@
+ 	.set	push
+ 	.set	noat
+ 	SET_HARDFLOAT
+-	addu	$1, \base, \off
++	PTR_ADDU $1, \base, \off
+ 	.word	STD_MSA_INSN | (\wd << 6)
+ 	.set	pop
+ 	.endm
+ 
+-	.macro	copy_u_w	ws, n
++	.macro	copy_s_w	ws, n
+ 	.set	push
+ 	.set	noat
+ 	SET_HARDFLOAT
+ 	.insn
+-	.word	COPY_UW_MSA_INSN | (\n << 16) | (\ws << 11)
++	.word	COPY_SW_MSA_INSN | (\n << 16) | (\ws << 11)
+ 	.set	pop
+ 	.endm
+ 
+-	.macro	copy_u_d	ws, n
++	.macro	copy_s_d	ws, n
+ 	.set	push
+ 	.set	noat
+ 	SET_HARDFLOAT
+ 	.insn
+-	.word	COPY_UD_MSA_INSN | (\n << 16) | (\ws << 11)
++	.word	COPY_SD_MSA_INSN | (\n << 16) | (\ws << 11)
+ 	.set	pop
+ 	.endm
+ 
+diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h
+index 723229f4cf27..176de586a71a 100644
+--- a/arch/mips/include/asm/cacheflush.h
++++ b/arch/mips/include/asm/cacheflush.h
+@@ -51,7 +51,6 @@ extern void (*flush_cache_range)(struct vm_area_struct *vma,
+ 	unsigned long start, unsigned long end);
+ extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
+ extern void __flush_dcache_page(struct page *page);
+-extern void __flush_icache_page(struct vm_area_struct *vma, struct page *page);
+ 
+ #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+ static inline void flush_dcache_page(struct page *page)
+@@ -77,11 +76,6 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
+ static inline void flush_icache_page(struct vm_area_struct *vma,
+ 	struct page *page)
+ {
+-	if (!cpu_has_ic_fills_f_dc && (vma->vm_flags & VM_EXEC) &&
+-	    Page_dcache_dirty(page)) {
+-		__flush_icache_page(vma, page);
+-		ClearPageDcacheDirty(page);
+-	}
+ }
+ 
+ extern void (*flush_icache_range)(unsigned long start, unsigned long end);
+diff --git a/arch/mips/include/asm/msa.h b/arch/mips/include/asm/msa.h
+index bbb85fe21642..6e4effa6f626 100644
+--- a/arch/mips/include/asm/msa.h
++++ b/arch/mips/include/asm/msa.h
+@@ -147,6 +147,19 @@ static inline void restore_msa(struct task_struct *t)
+ 		_restore_msa(t);
+ }
+ 
++static inline void init_msa_upper(void)
++{
++	/*
++	 * Check cpu_has_msa only if it's a constant. This will allow the
++	 * compiler to optimise out code for CPUs without MSA without adding
++	 * an extra redundant check for CPUs with MSA.
++	 */
++	if (__builtin_constant_p(cpu_has_msa) && !cpu_has_msa)
++		return;
++
++	_init_msa_upper();
++}
++
+ #ifdef TOOLCHAIN_SUPPORTS_MSA
+ 
+ #define __BUILD_MSA_CTL_REG(name, cs)				\
+diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
+index 9a4fe0133ff1..65bf2c065db5 100644
+--- a/arch/mips/include/asm/pgtable.h
++++ b/arch/mips/include/asm/pgtable.h
+@@ -127,10 +127,14 @@ do {									\
+ 	}								\
+ } while(0)
+ 
++static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
++			      pte_t *ptep, pte_t pteval);
++
+ #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+ 
+ #define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
+ #define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)
++#define pte_no_exec(pte)	((pte).pte_low & _PAGE_NO_EXEC)
+ 
+ static inline void set_pte(pte_t *ptep, pte_t pte)
+ {
+@@ -148,7 +152,6 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
+ 			buddy->pte_high |= _PAGE_GLOBAL;
+ 	}
+ }
+-#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
+ 
+ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+ {
+@@ -166,6 +169,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
+ 
+ #define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
+ #define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
++#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)
+ 
+ /*
+  * Certain architectures need to do special things when pte's
+@@ -218,7 +222,6 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
+ 	}
+ #endif
+ }
+-#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
+ 
+ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+ {
+@@ -234,6 +237,22 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
+ }
+ #endif
+ 
++static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
++			      pte_t *ptep, pte_t pteval)
++{
++	extern void __update_cache(unsigned long address, pte_t pte);
++
++	if (!pte_present(pteval))
++		goto cache_sync_done;
++
++	if (pte_present(*ptep) && (pte_pfn(*ptep) == pte_pfn(pteval)))
++		goto cache_sync_done;
++
++	__update_cache(addr, pteval);
++cache_sync_done:
++	set_pte(ptep, pteval);
++}
++
+ /*
+  * (pmds are folded into puds so this doesn't get actually called,
+  * but the define is needed for a generic inline function.)
+@@ -430,15 +449,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+ 
+ extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
+ 	pte_t pte);
+-extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
+-	pte_t pte);
+ 
+ static inline void update_mmu_cache(struct vm_area_struct *vma,
+ 	unsigned long address, pte_t *ptep)
+ {
+ 	pte_t pte = *ptep;
+ 	__update_tlb(vma, address, pte);
+-	__update_cache(vma, address, pte);
+ }
+ 
+ static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
+diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
+index 28b5d84a5022..ebb5c0f2f90d 100644
+--- a/arch/mips/include/asm/switch_to.h
++++ b/arch/mips/include/asm/switch_to.h
+@@ -105,7 +105,7 @@ do {									\
+ 	__clear_software_ll_bit();					\
+ 	if (cpu_has_userlocal)						\
+ 		write_c0_userlocal(task_thread_info(next)->tp_value);	\
+-	__restore_watch();						\
++	__restore_watch(next);						\
+ 	(last) = resume(prev, next, task_thread_info(next));		\
+ } while (0)
+ 
+diff --git a/arch/mips/include/asm/watch.h b/arch/mips/include/asm/watch.h
+index 20126ec79359..6ffe3eadf105 100644
+--- a/arch/mips/include/asm/watch.h
++++ b/arch/mips/include/asm/watch.h
+@@ -12,21 +12,21 @@
+ 
+ #include <asm/mipsregs.h>
+ 
+-void mips_install_watch_registers(void);
++void mips_install_watch_registers(struct task_struct *t);
+ void mips_read_watch_registers(void);
+ void mips_clear_watch_registers(void);
+ void mips_probe_watch_registers(struct cpuinfo_mips *c);
+ 
+ #ifdef CONFIG_HARDWARE_WATCHPOINTS
+-#define __restore_watch() do {						\
++#define __restore_watch(task) do {					\
+ 	if (unlikely(test_bit(TIF_LOAD_WATCH,				\
+-			      &current_thread_info()->flags))) {	\
+-		mips_install_watch_registers();				\
++			      &task_thread_info(task)->flags))) {	\
++		mips_install_watch_registers(task);			\
+ 	}								\
+ } while (0)
+ 
+ #else
+-#define __restore_watch() do {} while (0)
++#define __restore_watch(task) do {} while (0)
+ #endif
+ 
+ #endif /* _ASM_WATCH_H */
+diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h
+index 2cb7fdead570..e2b5337e840f 100644
+--- a/arch/mips/include/uapi/asm/siginfo.h
++++ b/arch/mips/include/uapi/asm/siginfo.h
+@@ -28,7 +28,7 @@
+ 
+ #define __ARCH_SIGSYS
+ 
+-#include <uapi/asm-generic/siginfo.h>
++#include <asm-generic/siginfo.h>
+ 
+ /* We can't use generic siginfo_t, because our si_code and si_errno are swapped */
+ typedef struct siginfo {
+@@ -42,13 +42,13 @@ typedef struct siginfo {
+ 
+ 		/* kill() */
+ 		struct {
+-			pid_t _pid;		/* sender's pid */
++			__kernel_pid_t _pid;	/* sender's pid */
+ 			__ARCH_SI_UID_T _uid;	/* sender's uid */
+ 		} _kill;
+ 
+ 		/* POSIX.1b timers */
+ 		struct {
+-			timer_t _tid;		/* timer id */
++			__kernel_timer_t _tid;	/* timer id */
+ 			int _overrun;		/* overrun count */
+ 			char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)];
+ 			sigval_t _sigval;	/* same as below */
+@@ -57,26 +57,26 @@ typedef struct siginfo {
+ 
+ 		/* POSIX.1b signals */
+ 		struct {
+-			pid_t _pid;		/* sender's pid */
++			__kernel_pid_t _pid;	/* sender's pid */
+ 			__ARCH_SI_UID_T _uid;	/* sender's uid */
+ 			sigval_t _sigval;
+ 		} _rt;
+ 
+ 		/* SIGCHLD */
+ 		struct {
+-			pid_t _pid;		/* which child */
++			__kernel_pid_t _pid;	/* which child */
+ 			__ARCH_SI_UID_T _uid;	/* sender's uid */
+ 			int _status;		/* exit code */
+-			clock_t _utime;
+-			clock_t _stime;
++			__kernel_clock_t _utime;
++			__kernel_clock_t _stime;
+ 		} _sigchld;
+ 
+ 		/* IRIX SIGCHLD */
+ 		struct {
+-			pid_t _pid;		/* which child */
+-			clock_t _utime;
++			__kernel_pid_t _pid;	/* which child */
++			__kernel_clock_t _utime;
+ 			int _status;		/* exit code */
+-			clock_t _stime;
++			__kernel_clock_t _stime;
+ 		} _irix_sigchld;
+ 
+ 		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+@@ -118,6 +118,4 @@ typedef struct siginfo {
+ #define SI_TIMER __SI_CODE(__SI_TIMER, -3) /* sent by timer expiration */
+ #define SI_MESGQ __SI_CODE(__SI_MESGQ, -4) /* sent by real time mesq state change */
+ 
+-#include <asm-generic/siginfo.h>
+-
+ #endif /* _UAPI_ASM_SIGINFO_H */
+diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
+index 1f5aac7f9ec3..4674a74a08b5 100644
+--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
++++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
+@@ -28,6 +28,7 @@
+ #include <asm/inst.h>
+ #include <asm/mips-r2-to-r6-emul.h>
+ #include <asm/local.h>
++#include <asm/mipsregs.h>
+ #include <asm/ptrace.h>
+ #include <asm/uaccess.h>
+ 
+@@ -1251,10 +1252,10 @@ fpu_emul:
+ 			"	j	10b\n"
+ 			"	.previous\n"
+ 			"	.section	__ex_table,\"a\"\n"
+-			"	.word	1b,8b\n"
+-			"	.word	2b,8b\n"
+-			"	.word	3b,8b\n"
+-			"	.word	4b,8b\n"
++			STR(PTR) " 1b,8b\n"
++			STR(PTR) " 2b,8b\n"
++			STR(PTR) " 3b,8b\n"
++			STR(PTR) " 4b,8b\n"
+ 			"	.previous\n"
+ 			"	.set	pop\n"
+ 			: "+&r"(rt), "=&r"(rs),
+@@ -1326,10 +1327,10 @@ fpu_emul:
+ 			"	j	10b\n"
+ 			"       .previous\n"
+ 			"	.section	__ex_table,\"a\"\n"
+-			"	.word	1b,8b\n"
+-			"	.word	2b,8b\n"
+-			"	.word	3b,8b\n"
+-			"	.word	4b,8b\n"
++			STR(PTR) " 1b,8b\n"
++			STR(PTR) " 2b,8b\n"
++			STR(PTR) " 3b,8b\n"
++			STR(PTR) " 4b,8b\n"
+ 			"	.previous\n"
+ 			"	.set	pop\n"
+ 			: "+&r"(rt), "=&r"(rs),
+@@ -1397,10 +1398,10 @@ fpu_emul:
+ 			"	j	9b\n"
+ 			"	.previous\n"
+ 			"	.section        __ex_table,\"a\"\n"
+-			"	.word	1b,8b\n"
+-			"	.word	2b,8b\n"
+-			"	.word	3b,8b\n"
+-			"	.word	4b,8b\n"
++			STR(PTR) " 1b,8b\n"
++			STR(PTR) " 2b,8b\n"
++			STR(PTR) " 3b,8b\n"
++			STR(PTR) " 4b,8b\n"
+ 			"	.previous\n"
+ 			"	.set	pop\n"
+ 			: "+&r"(rt), "=&r"(rs),
+@@ -1467,10 +1468,10 @@ fpu_emul:
+ 			"	j	9b\n"
+ 			"	.previous\n"
+ 			"	.section        __ex_table,\"a\"\n"
+-			"	.word	1b,8b\n"
+-			"	.word	2b,8b\n"
+-			"	.word	3b,8b\n"
+-			"	.word	4b,8b\n"
++			STR(PTR) " 1b,8b\n"
++			STR(PTR) " 2b,8b\n"
++			STR(PTR) " 3b,8b\n"
++			STR(PTR) " 4b,8b\n"
+ 			"	.previous\n"
+ 			"	.set	pop\n"
+ 			: "+&r"(rt), "=&r"(rs),
+@@ -1582,14 +1583,14 @@ fpu_emul:
+ 			"	j	9b\n"
+ 			"	.previous\n"
+ 			"	.section        __ex_table,\"a\"\n"
+-			"	.word	1b,8b\n"
+-			"	.word	2b,8b\n"
+-			"	.word	3b,8b\n"
+-			"	.word	4b,8b\n"
+-			"	.word	5b,8b\n"
+-			"	.word	6b,8b\n"
+-			"	.word	7b,8b\n"
+-			"	.word	0b,8b\n"
++			STR(PTR) " 1b,8b\n"
++			STR(PTR) " 2b,8b\n"
++			STR(PTR) " 3b,8b\n"
++			STR(PTR) " 4b,8b\n"
++			STR(PTR) " 5b,8b\n"
++			STR(PTR) " 6b,8b\n"
++			STR(PTR) " 7b,8b\n"
++			STR(PTR) " 0b,8b\n"
+ 			"	.previous\n"
+ 			"	.set	pop\n"
+ 			: "+&r"(rt), "=&r"(rs),
+@@ -1701,14 +1702,14 @@ fpu_emul:
+ 			"	j      9b\n"
+ 			"	.previous\n"
+ 			"	.section        __ex_table,\"a\"\n"
+-			"	.word  1b,8b\n"
+-			"	.word  2b,8b\n"
+-			"	.word  3b,8b\n"
+-			"	.word  4b,8b\n"
+-			"	.word  5b,8b\n"
+-			"	.word  6b,8b\n"
+-			"	.word  7b,8b\n"
+-			"	.word  0b,8b\n"
++			STR(PTR) " 1b,8b\n"
++			STR(PTR) " 2b,8b\n"
++			STR(PTR) " 3b,8b\n"
++			STR(PTR) " 4b,8b\n"
++			STR(PTR) " 5b,8b\n"
++			STR(PTR) " 6b,8b\n"
++			STR(PTR) " 7b,8b\n"
++			STR(PTR) " 0b,8b\n"
+ 			"	.previous\n"
+ 			"	.set    pop\n"
+ 			: "+&r"(rt), "=&r"(rs),
+@@ -1820,14 +1821,14 @@ fpu_emul:
+ 			"	j	9b\n"
+ 			"	.previous\n"
+ 			"	.section        __ex_table,\"a\"\n"
+-			"	.word	1b,8b\n"
+-			"	.word	2b,8b\n"
+-			"	.word	3b,8b\n"
+-			"	.word	4b,8b\n"
+-			"	.word	5b,8b\n"
+-			"	.word	6b,8b\n"
+-			"	.word	7b,8b\n"
+-			"	.word	0b,8b\n"
++			STR(PTR) " 1b,8b\n"
++			STR(PTR) " 2b,8b\n"
++			STR(PTR) " 3b,8b\n"
++			STR(PTR) " 4b,8b\n"
++			STR(PTR) " 5b,8b\n"
++			STR(PTR) " 6b,8b\n"
++			STR(PTR) " 7b,8b\n"
++			STR(PTR) " 0b,8b\n"
+ 			"	.previous\n"
+ 			"	.set	pop\n"
+ 			: "+&r"(rt), "=&r"(rs),
+@@ -1938,14 +1939,14 @@ fpu_emul:
+ 			"       j	9b\n"
+ 			"       .previous\n"
+ 			"       .section        __ex_table,\"a\"\n"
+-			"       .word	1b,8b\n"
+-			"       .word	2b,8b\n"
+-			"       .word	3b,8b\n"
+-			"       .word	4b,8b\n"
+-			"       .word	5b,8b\n"
+-			"       .word	6b,8b\n"
+-			"       .word	7b,8b\n"
+-			"       .word	0b,8b\n"
++			STR(PTR) " 1b,8b\n"
++			STR(PTR) " 2b,8b\n"
++			STR(PTR) " 3b,8b\n"
++			STR(PTR) " 4b,8b\n"
++			STR(PTR) " 5b,8b\n"
++			STR(PTR) " 6b,8b\n"
++			STR(PTR) " 7b,8b\n"
++			STR(PTR) " 0b,8b\n"
+ 			"       .previous\n"
+ 			"       .set	pop\n"
+ 			: "+&r"(rt), "=&r"(rs),
+@@ -2000,7 +2001,7 @@ fpu_emul:
+ 			"j	2b\n"
+ 			".previous\n"
+ 			".section        __ex_table,\"a\"\n"
+-			".word  1b, 3b\n"
++			STR(PTR) " 1b,3b\n"
+ 			".previous\n"
+ 			: "=&r"(res), "+&r"(err)
+ 			: "r"(vaddr), "i"(SIGSEGV)
+@@ -2058,7 +2059,7 @@ fpu_emul:
+ 			"j	2b\n"
+ 			".previous\n"
+ 			".section        __ex_table,\"a\"\n"
+-			".word	1b, 3b\n"
++			STR(PTR) " 1b,3b\n"
+ 			".previous\n"
+ 			: "+&r"(res), "+&r"(err)
+ 			: "r"(vaddr), "i"(SIGSEGV));
+@@ -2119,7 +2120,7 @@ fpu_emul:
+ 			"j	2b\n"
+ 			".previous\n"
+ 			".section        __ex_table,\"a\"\n"
+-			".word  1b, 3b\n"
++			STR(PTR) " 1b,3b\n"
+ 			".previous\n"
+ 			: "=&r"(res), "+&r"(err)
+ 			: "r"(vaddr), "i"(SIGSEGV)
+@@ -2182,7 +2183,7 @@ fpu_emul:
+ 			"j	2b\n"
+ 			".previous\n"
+ 			".section        __ex_table,\"a\"\n"
+-			".word	1b, 3b\n"
++			STR(PTR) " 1b,3b\n"
+ 			".previous\n"
+ 			: "+&r"(res), "+&r"(err)
+ 			: "r"(vaddr), "i"(SIGSEGV));
+diff --git a/arch/mips/kernel/pm.c b/arch/mips/kernel/pm.c
+index fefdf39d3df3..dc814892133c 100644
+--- a/arch/mips/kernel/pm.c
++++ b/arch/mips/kernel/pm.c
+@@ -56,7 +56,7 @@ static void mips_cpu_restore(void)
+ 		write_c0_userlocal(current_thread_info()->tp_value);
+ 
+ 	/* Restore watch registers */
+-	__restore_watch();
++	__restore_watch(current);
+ }
+ 
+ /**
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index eddd5fd6fdfa..0c0456f0c217 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -455,7 +455,7 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
+ 		    *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
+ 			regs = (struct pt_regs *)*sp;
+ 			pc = regs->cp0_epc;
+-			if (__kernel_text_address(pc)) {
++			if (!user_mode(regs) && __kernel_text_address(pc)) {
+ 				*sp = regs->regs[29];
+ 				*ra = regs->regs[31];
+ 				return pc;
+@@ -601,6 +601,9 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
+ 	if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
+ 		return -EOPNOTSUPP;
+ 
++	/* Proceed with the mode switch */
++	preempt_disable();
++
+ 	/* Save FP & vector context, then disable FPU & MSA */
+ 	if (task->signal == current->signal)
+ 		lose_fpu(1);
+@@ -659,6 +662,7 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
+ 
+ 	/* Allow threads to use FP again */
+ 	atomic_set(&task->mm->context.fp_mode_switching, 0);
++	preempt_enable();
+ 
+ 	return 0;
+ }
+diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
+index a5279b2f3198..4285d8b7c566 100644
+--- a/arch/mips/kernel/ptrace.c
++++ b/arch/mips/kernel/ptrace.c
+@@ -57,8 +57,7 @@ static void init_fp_ctx(struct task_struct *target)
+ 	/* Begin with data registers set to all 1s... */
+ 	memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
+ 
+-	/* ...and FCSR zeroed */
+-	target->thread.fpu.fcr31 = 0;
++	/* FCSR has been preset by `mips_set_personality_nan'.  */
+ 
+ 	/*
+ 	 * Record that the target has "used" math, such that the context
+@@ -80,6 +79,22 @@ void ptrace_disable(struct task_struct *child)
+ }
+ 
+ /*
++ * Poke at FCSR according to its mask.  Don't set the cause bits as
++ * this is currently not handled correctly in FP context restoration
++ * and will cause an oops if a corresponding enable bit is set.
++ */
++static void ptrace_setfcr31(struct task_struct *child, u32 value)
++{
++	u32 fcr31;
++	u32 mask;
++
++	value &= ~FPU_CSR_ALL_X;
++	fcr31 = child->thread.fpu.fcr31;
++	mask = boot_cpu_data.fpu_msk31;
++	child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
++}
++
++/*
+  * Read a general register set.	 We always use the 64-bit format, even
+  * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
+  * Registers are sign extended to fill the available space.
+@@ -159,9 +174,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
+ {
+ 	union fpureg *fregs;
+ 	u64 fpr_val;
+-	u32 fcr31;
+ 	u32 value;
+-	u32 mask;
+ 	int i;
+ 
+ 	if (!access_ok(VERIFY_READ, data, 33 * 8))
+@@ -176,9 +189,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
+ 	}
+ 
+ 	__get_user(value, data + 64);
+-	fcr31 = child->thread.fpu.fcr31;
+-	mask = boot_cpu_data.fpu_msk31;
+-	child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
++	ptrace_setfcr31(child, value);
+ 
+ 	/* FIR may not be written.  */
+ 
+@@ -805,7 +816,7 @@ long arch_ptrace(struct task_struct *child, long request,
+ 			break;
+ #endif
+ 		case FPC_CSR:
+-			child->thread.fpu.fcr31 = data & ~FPU_CSR_ALL_X;
++			ptrace_setfcr31(child, data);
+ 			break;
+ 		case DSP_BASE ... DSP_BASE + 5: {
+ 			dspreg_t *dregs;
+diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
+index 17732f876eff..56d86b09c917 100644
+--- a/arch/mips/kernel/r4k_fpu.S
++++ b/arch/mips/kernel/r4k_fpu.S
+@@ -244,17 +244,17 @@ LEAF(\name)
+ 	.set	push
+ 	.set	noat
+ #ifdef CONFIG_64BIT
+-	copy_u_d \wr, 1
++	copy_s_d \wr, 1
+ 	EX sd	$1, \off(\base)
+ #elif defined(CONFIG_CPU_LITTLE_ENDIAN)
+-	copy_u_w \wr, 2
++	copy_s_w \wr, 2
+ 	EX sw	$1, \off(\base)
+-	copy_u_w \wr, 3
++	copy_s_w \wr, 3
+ 	EX sw	$1, (\off+4)(\base)
+ #else /* CONFIG_CPU_BIG_ENDIAN */
+-	copy_u_w \wr, 2
++	copy_s_w \wr, 2
+ 	EX sw	$1, (\off+4)(\base)
+-	copy_u_w \wr, 3
++	copy_s_w \wr, 3
+ 	EX sw	$1, \off(\base)
+ #endif
+ 	.set	pop
+diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
+index 5fdaf8bdcd2e..6f68cddc0573 100644
+--- a/arch/mips/kernel/setup.c
++++ b/arch/mips/kernel/setup.c
+@@ -706,6 +706,9 @@ static void __init arch_mem_init(char **cmdline_p)
+ 	for_each_memblock(reserved, reg)
+ 		if (reg->size != 0)
+ 			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
++
++	reserve_bootmem_region(__pa_symbol(&__nosave_begin),
++			__pa_symbol(&__nosave_end)); /* Reserve for hibernation */
+ }
+ 
+ static void __init resource_init(void)
+diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
+index bf792e2839a6..9e35b6b26aa8 100644
+--- a/arch/mips/kernel/signal.c
++++ b/arch/mips/kernel/signal.c
+@@ -195,6 +195,9 @@ static int restore_msa_extcontext(void __user *buf, unsigned int size)
+ 	unsigned int csr;
+ 	int i, err;
+ 
++	if (!config_enabled(CONFIG_CPU_HAS_MSA))
++		return SIGSYS;
++
+ 	if (size != sizeof(*msa))
+ 		return -EINVAL;
+ 
+@@ -398,8 +401,8 @@ int protected_restore_fp_context(void __user *sc)
+ 	}
+ 
+ fp_done:
+-	if (used & USED_EXTCONTEXT)
+-		err |= restore_extcontext(sc_to_extcontext(sc));
++	if (!err && (used & USED_EXTCONTEXT))
++		err = restore_extcontext(sc_to_extcontext(sc));
+ 
+ 	return err ?: sig;
+ }
+@@ -767,15 +770,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+ 	sigset_t *oldset = sigmask_to_save();
+ 	int ret;
+ 	struct mips_abi *abi = current->thread.abi;
+-#ifdef CONFIG_CPU_MICROMIPS
+-	void *vdso;
+-	unsigned long tmp = (unsigned long)current->mm->context.vdso;
+-
+-	set_isa16_mode(tmp);
+-	vdso = (void *)tmp;
+-#else
+ 	void *vdso = current->mm->context.vdso;
+-#endif
+ 
+ 	if (regs->regs[0]) {
+ 		switch(regs->regs[2]) {
+diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
+index bf14da9f3e33..58a7cad0381b 100644
+--- a/arch/mips/kernel/traps.c
++++ b/arch/mips/kernel/traps.c
+@@ -144,7 +144,7 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
+ 	if (!task)
+ 		task = current;
+ 
+-	if (raw_show_trace || !__kernel_text_address(pc)) {
++	if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
+ 		show_raw_backtrace(sp);
+ 		return;
+ 	}
+@@ -1242,7 +1242,7 @@ static int enable_restore_fp_context(int msa)
+ 		err = init_fpu();
+ 		if (msa && !err) {
+ 			enable_msa();
+-			_init_msa_upper();
++			init_msa_upper();
+ 			set_thread_flag(TIF_USEDMSA);
+ 			set_thread_flag(TIF_MSA_CTX_LIVE);
+ 		}
+@@ -1305,7 +1305,7 @@ static int enable_restore_fp_context(int msa)
+ 	 */
+ 	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
+ 	if (!prior_msa && was_fpu_owner) {
+-		_init_msa_upper();
++		init_msa_upper();
+ 
+ 		goto out;
+ 	}
+@@ -1322,7 +1322,7 @@ static int enable_restore_fp_context(int msa)
+ 		 * of each vector register such that it cannot see data left
+ 		 * behind by another task.
+ 		 */
+-		_init_msa_upper();
++		init_msa_upper();
+ 	} else {
+ 		/* We need to restore the vector context. */
+ 		restore_msa(current);
+diff --git a/arch/mips/kernel/watch.c b/arch/mips/kernel/watch.c
+index 2a03abb5bd2c..9b78e375118e 100644
+--- a/arch/mips/kernel/watch.c
++++ b/arch/mips/kernel/watch.c
+@@ -15,10 +15,9 @@
+  * Install the watch registers for the current thread.	A maximum of
+  * four registers are installed although the machine may have more.
+  */
+-void mips_install_watch_registers(void)
++void mips_install_watch_registers(struct task_struct *t)
+ {
+-	struct mips3264_watch_reg_state *watches =
+-		&current->thread.watch.mips3264;
++	struct mips3264_watch_reg_state *watches = &t->thread.watch.mips3264;
+ 	switch (current_cpu_data.watch_reg_use_cnt) {
+ 	default:
+ 		BUG();
+diff --git a/arch/mips/lib/ashldi3.c b/arch/mips/lib/ashldi3.c
+index beb80f316095..927dc94a030f 100644
+--- a/arch/mips/lib/ashldi3.c
++++ b/arch/mips/lib/ashldi3.c
+@@ -2,7 +2,7 @@
+ 
+ #include "libgcc.h"
+ 
+-long long __ashldi3(long long u, word_type b)
++long long notrace __ashldi3(long long u, word_type b)
+ {
+ 	DWunion uu, w;
+ 	word_type bm;
+diff --git a/arch/mips/lib/ashrdi3.c b/arch/mips/lib/ashrdi3.c
+index c884a912b660..9fdf1a598428 100644
+--- a/arch/mips/lib/ashrdi3.c
++++ b/arch/mips/lib/ashrdi3.c
+@@ -2,7 +2,7 @@
+ 
+ #include "libgcc.h"
+ 
+-long long __ashrdi3(long long u, word_type b)
++long long notrace __ashrdi3(long long u, word_type b)
+ {
+ 	DWunion uu, w;
+ 	word_type bm;
+diff --git a/arch/mips/lib/bswapdi.c b/arch/mips/lib/bswapdi.c
+index 77e5f9c1f005..e3e77aa52c95 100644
+--- a/arch/mips/lib/bswapdi.c
++++ b/arch/mips/lib/bswapdi.c
+@@ -1,6 +1,6 @@
+ #include <linux/module.h>
+ 
+-unsigned long long __bswapdi2(unsigned long long u)
++unsigned long long notrace __bswapdi2(unsigned long long u)
+ {
+ 	return (((u) & 0xff00000000000000ull) >> 56) |
+ 	       (((u) & 0x00ff000000000000ull) >> 40) |
+diff --git a/arch/mips/lib/bswapsi.c b/arch/mips/lib/bswapsi.c
+index 2b302ff121d2..530a8afe6fda 100644
+--- a/arch/mips/lib/bswapsi.c
++++ b/arch/mips/lib/bswapsi.c
+@@ -1,6 +1,6 @@
+ #include <linux/module.h>
+ 
+-unsigned int __bswapsi2(unsigned int u)
++unsigned int notrace __bswapsi2(unsigned int u)
+ {
+ 	return (((u) & 0xff000000) >> 24) |
+ 	       (((u) & 0x00ff0000) >>  8) |
+diff --git a/arch/mips/lib/cmpdi2.c b/arch/mips/lib/cmpdi2.c
+index 8c1306437ed1..06857da96993 100644
+--- a/arch/mips/lib/cmpdi2.c
++++ b/arch/mips/lib/cmpdi2.c
+@@ -2,7 +2,7 @@
+ 
+ #include "libgcc.h"
+ 
+-word_type __cmpdi2(long long a, long long b)
++word_type notrace __cmpdi2(long long a, long long b)
+ {
+ 	const DWunion au = {
+ 		.ll = a
+diff --git a/arch/mips/lib/lshrdi3.c b/arch/mips/lib/lshrdi3.c
+index dcf8d6810b7c..364547449c65 100644
+--- a/arch/mips/lib/lshrdi3.c
++++ b/arch/mips/lib/lshrdi3.c
+@@ -2,7 +2,7 @@
+ 
+ #include "libgcc.h"
+ 
+-long long __lshrdi3(long long u, word_type b)
++long long notrace __lshrdi3(long long u, word_type b)
+ {
+ 	DWunion uu, w;
+ 	word_type bm;
+diff --git a/arch/mips/lib/ucmpdi2.c b/arch/mips/lib/ucmpdi2.c
+index bb4cb2f828ea..bd599f58234c 100644
+--- a/arch/mips/lib/ucmpdi2.c
++++ b/arch/mips/lib/ucmpdi2.c
+@@ -2,7 +2,7 @@
+ 
+ #include "libgcc.h"
+ 
+-word_type __ucmpdi2(unsigned long long a, unsigned long long b)
++word_type notrace __ucmpdi2(unsigned long long a, unsigned long long b)
+ {
+ 	const DWunion au = {.ll = a};
+ 	const DWunion bu = {.ll = b};
+diff --git a/arch/mips/loongson64/Platform b/arch/mips/loongson64/Platform
+index 85d808924c94..0fce4608aa88 100644
+--- a/arch/mips/loongson64/Platform
++++ b/arch/mips/loongson64/Platform
+@@ -31,7 +31,7 @@ cflags-$(CONFIG_CPU_LOONGSON3)	+= -Wa,--trap
+ # can't easily be used safely within the kbuild framework.
+ #
+ ifeq ($(call cc-ifversion, -ge, 0409, y), y)
+-  ifeq ($(call ld-ifversion, -ge, 22500000, y), y)
++  ifeq ($(call ld-ifversion, -ge, 225000000, y), y)
+     cflags-$(CONFIG_CPU_LOONGSON3)  += \
+       $(call cc-option,-march=loongson3a -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64)
+   else
+diff --git a/arch/mips/loongson64/loongson-3/numa.c b/arch/mips/loongson64/loongson-3/numa.c
+index 6f9e010cec4d..282c5a8c2fcd 100644
+--- a/arch/mips/loongson64/loongson-3/numa.c
++++ b/arch/mips/loongson64/loongson-3/numa.c
+@@ -213,10 +213,10 @@ static void __init node_mem_init(unsigned int node)
+ 		BOOTMEM_DEFAULT);
+ 
+ 	if (node == 0 && node_end_pfn(0) >= (0xffffffff >> PAGE_SHIFT)) {
+-		/* Reserve 0xff800000~0xffffffff for RS780E integrated GPU */
++		/* Reserve 0xfe000000~0xffffffff for RS780E integrated GPU */
+ 		reserve_bootmem_node(NODE_DATA(node),
+-				(node_addrspace_offset | 0xff800000),
+-				8 << 20, BOOTMEM_DEFAULT);
++				(node_addrspace_offset | 0xfe000000),
++				32 << 20, BOOTMEM_DEFAULT);
+ 	}
+ 
+ 	sparse_memory_present_with_active_regions(node);
+diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
+index cdfd44ffa51c..41d3e0e7defa 100644
+--- a/arch/mips/math-emu/cp1emu.c
++++ b/arch/mips/math-emu/cp1emu.c
+@@ -445,9 +445,11 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+ 	case spec_op:
+ 		switch (insn.r_format.func) {
+ 		case jalr_op:
+-			regs->regs[insn.r_format.rd] =
+-				regs->cp0_epc + dec_insn.pc_inc +
+-				dec_insn.next_pc_inc;
++			if (insn.r_format.rd != 0) {
++				regs->regs[insn.r_format.rd] =
++					regs->cp0_epc + dec_insn.pc_inc +
++					dec_insn.next_pc_inc;
++			}
+ 			/* Fall through */
+ 		case jr_op:
+ 			/* For R6, JR already emulated in jalr_op */
+diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
+index 3f159caf6dbc..bf04c6c479a4 100644
+--- a/arch/mips/mm/cache.c
++++ b/arch/mips/mm/cache.c
+@@ -16,6 +16,7 @@
+ #include <linux/mm.h>
+ 
+ #include <asm/cacheflush.h>
++#include <asm/highmem.h>
+ #include <asm/processor.h>
+ #include <asm/cpu.h>
+ #include <asm/cpu-features.h>
+@@ -83,8 +84,6 @@ void __flush_dcache_page(struct page *page)
+ 	struct address_space *mapping = page_mapping(page);
+ 	unsigned long addr;
+ 
+-	if (PageHighMem(page))
+-		return;
+ 	if (mapping && !mapping_mapped(mapping)) {
+ 		SetPageDcacheDirty(page);
+ 		return;
+@@ -95,8 +94,15 @@ void __flush_dcache_page(struct page *page)
+ 	 * case is for exec env/arg pages and those are %99 certainly going to
+ 	 * get faulted into the tlb (and thus flushed) anyways.
+ 	 */
+-	addr = (unsigned long) page_address(page);
++	if (PageHighMem(page))
++		addr = (unsigned long)kmap_atomic(page);
++	else
++		addr = (unsigned long)page_address(page);
++
+ 	flush_data_cache_page(addr);
++
++	if (PageHighMem(page))
++		__kunmap_atomic((void *)addr);
+ }
+ 
+ EXPORT_SYMBOL(__flush_dcache_page);
+@@ -119,33 +125,28 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
+ 
+ EXPORT_SYMBOL(__flush_anon_page);
+ 
+-void __flush_icache_page(struct vm_area_struct *vma, struct page *page)
+-{
+-	unsigned long addr;
+-
+-	if (PageHighMem(page))
+-		return;
+-
+-	addr = (unsigned long) page_address(page);
+-	flush_data_cache_page(addr);
+-}
+-EXPORT_SYMBOL_GPL(__flush_icache_page);
+-
+-void __update_cache(struct vm_area_struct *vma, unsigned long address,
+-	pte_t pte)
++void __update_cache(unsigned long address, pte_t pte)
+ {
+ 	struct page *page;
+ 	unsigned long pfn, addr;
+-	int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;
++	int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;
+ 
+ 	pfn = pte_pfn(pte);
+ 	if (unlikely(!pfn_valid(pfn)))
+ 		return;
+ 	page = pfn_to_page(pfn);
+-	if (page_mapping(page) && Page_dcache_dirty(page)) {
+-		addr = (unsigned long) page_address(page);
++	if (Page_dcache_dirty(page)) {
++		if (PageHighMem(page))
++			addr = (unsigned long)kmap_atomic(page);
++		else
++			addr = (unsigned long)page_address(page);
++
+ 		if (exec || pages_do_alias(addr, address & PAGE_MASK))
+ 			flush_data_cache_page(addr);
++
++		if (PageHighMem(page))
++			__kunmap_atomic((void *)addr);
++
+ 		ClearPageDcacheDirty(page);
+ 	}
+ }
+diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
+index ee3617c0c5e2..f37e6ba40f52 100644
+--- a/arch/mips/vdso/Makefile
++++ b/arch/mips/vdso/Makefile
+@@ -5,10 +5,12 @@ obj-vdso-y := elf.o gettimeofday.o sigreturn.o
+ ccflags-vdso := \
+ 	$(filter -I%,$(KBUILD_CFLAGS)) \
+ 	$(filter -E%,$(KBUILD_CFLAGS)) \
++	$(filter -mmicromips,$(KBUILD_CFLAGS)) \
+ 	$(filter -march=%,$(KBUILD_CFLAGS))
+ cflags-vdso := $(ccflags-vdso) \
+ 	$(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
+-	-O2 -g -fPIC -fno-common -fno-builtin -G 0 -DDISABLE_BRANCH_PROFILING \
++	-O2 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \
++	-DDISABLE_BRANCH_PROFILING \
+ 	$(call cc-option, -fno-stack-protector)
+ aflags-vdso := $(ccflags-vdso) \
+ 	$(filter -I%,$(KBUILD_CFLAGS)) \
+diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
+index 40e4d4a27663..b34e8a54f7db 100644
+--- a/arch/powerpc/kernel/eeh.c
++++ b/arch/powerpc/kernel/eeh.c
+@@ -1072,7 +1072,7 @@ void eeh_add_device_early(struct pci_dn *pdn)
+ 	struct pci_controller *phb;
+ 	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
+ 
+-	if (!edev || !eeh_enabled())
++	if (!edev)
+ 		return;
+ 
+ 	if (!eeh_has_flag(EEH_PROBE_MODE_DEVTREE))
+diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
+index 650cfb31ea3d..54958282604a 100644
+--- a/arch/powerpc/kernel/eeh_driver.c
++++ b/arch/powerpc/kernel/eeh_driver.c
+@@ -166,6 +166,16 @@ static void *eeh_dev_save_state(void *data, void *userdata)
+ 	if (!edev)
+ 		return NULL;
+ 
++	/*
++	 * We cannot access the config space on some adapters.
++	 * Otherwise, it will cause fenced PHB. We don't save
++	 * the content in their config space and will restore
++	 * from the initial config space saved when the EEH
++	 * device is created.
++	 */
++	if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED))
++		return NULL;
++
+ 	pdev = eeh_dev_to_pci_dev(edev);
+ 	if (!pdev)
+ 		return NULL;
+@@ -305,6 +315,19 @@ static void *eeh_dev_restore_state(void *data, void *userdata)
+ 	if (!edev)
+ 		return NULL;
+ 
++	/*
++	 * The content in the config space isn't saved because
++	 * the blocked config space on some adapters. We have
++	 * to restore the initial saved config space when the
++	 * EEH device is created.
++	 */
++	if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED)) {
++		if (list_is_last(&edev->list, &edev->pe->edevs))
++			eeh_pe_restore_bars(edev->pe);
++
++		return NULL;
++	}
++
+ 	pdev = eeh_dev_to_pci_dev(edev);
+ 	if (!pdev)
+ 		return NULL;
+@@ -504,9 +527,6 @@ int eeh_pe_reset_and_recover(struct eeh_pe *pe)
+ 	/* Save states */
+ 	eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL);
+ 
+-	/* Report error */
+-	eeh_pe_dev_traverse(pe, eeh_report_error, &result);
+-
+ 	/* Issue reset */
+ 	ret = eeh_reset_pe(pe);
+ 	if (ret) {
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index 7716cebf4b8e..2b66f25d40db 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -953,11 +953,6 @@ hv_facility_unavailable_relon_trampoline:
+ #endif
+ 	STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)
+ 
+-	/* Other future vectors */
+-	.align	7
+-	.globl	__end_interrupts
+-__end_interrupts:
+-
+ 	.align	7
+ system_call_entry:
+ 	b	system_call_common
+@@ -1244,6 +1239,17 @@ __end_handlers:
+ 	STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
+ 	STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)
+ 
++	/*
++	 * The __end_interrupts marker must be past the out-of-line (OOL)
++	 * handlers, so that they are copied to real address 0x100 when running
++	 * a relocatable kernel. This ensures they can be reached from the short
++	 * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
++	 * directly, without using LOAD_HANDLER().
++	 */
++	.align	7
++	.globl	__end_interrupts
++__end_interrupts:
++
+ #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
+ /*
+  * Data area reserved for FWNMI option.
+diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
+index 7ab29518a3b9..e345891450c3 100644
+--- a/arch/x86/xen/setup.c
++++ b/arch/x86/xen/setup.c
+@@ -393,6 +393,9 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
+ 	unsigned long i = 0;
+ 	unsigned long n = end_pfn - start_pfn;
+ 
++	if (remap_pfn == 0)
++		remap_pfn = nr_pages;
++
+ 	while (i < n) {
+ 		unsigned long cur_pfn = start_pfn + i;
+ 		unsigned long left = n - i;
+@@ -438,17 +441,29 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
+ 	return remap_pfn;
+ }
+ 
+-static void __init xen_set_identity_and_remap(unsigned long nr_pages)
++static unsigned long __init xen_count_remap_pages(
++	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
++	unsigned long remap_pages)
++{
++	if (start_pfn >= nr_pages)
++		return remap_pages;
++
++	return remap_pages + min(end_pfn, nr_pages) - start_pfn;
++}
++
++static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
++	unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
++			      unsigned long nr_pages, unsigned long last_val))
+ {
+ 	phys_addr_t start = 0;
+-	unsigned long last_pfn = nr_pages;
++	unsigned long ret_val = 0;
+ 	const struct e820entry *entry = xen_e820_map;
+ 	int i;
+ 
+ 	/*
+ 	 * Combine non-RAM regions and gaps until a RAM region (or the
+-	 * end of the map) is reached, then set the 1:1 map and
+-	 * remap the memory in those non-RAM regions.
++	 * end of the map) is reached, then call the provided function
++	 * to perform its duty on the non-RAM region.
+ 	 *
+ 	 * The combined non-RAM regions are rounded to a whole number
+ 	 * of pages so any partial pages are accessible via the 1:1
+@@ -466,14 +481,13 @@ static void __init xen_set_identity_and_remap(unsigned long nr_pages)
+ 				end_pfn = PFN_UP(entry->addr);
+ 
+ 			if (start_pfn < end_pfn)
+-				last_pfn = xen_set_identity_and_remap_chunk(
+-						start_pfn, end_pfn, nr_pages,
+-						last_pfn);
++				ret_val = func(start_pfn, end_pfn, nr_pages,
++					       ret_val);
+ 			start = end;
+ 		}
+ 	}
+ 
+-	pr_info("Released %ld page(s)\n", xen_released_pages);
++	return ret_val;
+ }
+ 
+ /*
+@@ -596,35 +610,6 @@ static void __init xen_ignore_unusable(void)
+ 	}
+ }
+ 
+-static unsigned long __init xen_count_remap_pages(unsigned long max_pfn)
+-{
+-	unsigned long extra = 0;
+-	unsigned long start_pfn, end_pfn;
+-	const struct e820entry *entry = xen_e820_map;
+-	int i;
+-
+-	end_pfn = 0;
+-	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
+-		start_pfn = PFN_DOWN(entry->addr);
+-		/* Adjacent regions on non-page boundaries handling! */
+-		end_pfn = min(end_pfn, start_pfn);
+-
+-		if (start_pfn >= max_pfn)
+-			return extra + max_pfn - end_pfn;
+-
+-		/* Add any holes in map to result. */
+-		extra += start_pfn - end_pfn;
+-
+-		end_pfn = PFN_UP(entry->addr + entry->size);
+-		end_pfn = min(end_pfn, max_pfn);
+-
+-		if (entry->type != E820_RAM)
+-			extra += end_pfn - start_pfn;
+-	}
+-
+-	return extra;
+-}
+-
+ bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
+ {
+ 	struct e820entry *entry;
+@@ -804,7 +789,7 @@ char * __init xen_memory_setup(void)
+ 	max_pages = xen_get_max_pages();
+ 
+ 	/* How many extra pages do we need due to remapping? */
+-	max_pages += xen_count_remap_pages(max_pfn);
++	max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);
+ 
+ 	if (max_pages > max_pfn)
+ 		extra_pages += max_pages - max_pfn;
+@@ -922,7 +907,9 @@ char * __init xen_memory_setup(void)
+ 	 * Set identity map on non-RAM pages and prepare remapping the
+ 	 * underlying RAM.
+ 	 */
+-	xen_set_identity_and_remap(max_pfn);
++	xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk);
++
++	pr_info("Released %ld page(s)\n", xen_released_pages);
+ 
+ 	return "Xen";
+ }
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index 6e7c3ccea24b..27aea96036c4 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -1267,14 +1267,15 @@ int dpm_suspend_late(pm_message_t state)
+ 		error = device_suspend_late(dev);
+ 
+ 		mutex_lock(&dpm_list_mtx);
++		if (!list_empty(&dev->power.entry))
++			list_move(&dev->power.entry, &dpm_late_early_list);
++
+ 		if (error) {
+ 			pm_dev_err(dev, state, " late", error);
+ 			dpm_save_failed_dev(dev_name(dev));
+ 			put_device(dev);
+ 			break;
+ 		}
+-		if (!list_empty(&dev->power.entry))
+-			list_move(&dev->power.entry, &dpm_late_early_list);
+ 		put_device(dev);
+ 
+ 		if (async_error)
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index 4c7055009bd6..b74690418504 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -1506,11 +1506,16 @@ int pm_runtime_force_resume(struct device *dev)
+ 		goto out;
+ 	}
+ 
+-	ret = callback(dev);
++	ret = pm_runtime_set_active(dev);
+ 	if (ret)
+ 		goto out;
+ 
+-	pm_runtime_set_active(dev);
++	ret = callback(dev);
++	if (ret) {
++		pm_runtime_set_suspended(dev);
++		goto out;
++	}
++
+ 	pm_runtime_mark_last_busy(dev);
+ out:
+ 	pm_runtime_enable(dev);
+diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
+index 30cf4623184f..aa30af5f0f2b 100644
+--- a/drivers/char/hw_random/exynos-rng.c
++++ b/drivers/char/hw_random/exynos-rng.c
+@@ -89,6 +89,7 @@ static int exynos_read(struct hwrng *rng, void *buf,
+ 						struct exynos_rng, rng);
+ 	u32 *data = buf;
+ 	int retry = 100;
++	int ret = 4;
+ 
+ 	pm_runtime_get_sync(exynos_rng->dev);
+ 
+@@ -97,17 +98,20 @@ static int exynos_read(struct hwrng *rng, void *buf,
+ 	while (!(exynos_rng_readl(exynos_rng,
+ 			EXYNOS_PRNG_STATUS_OFFSET) & PRNG_DONE) && --retry)
+ 		cpu_relax();
+-	if (!retry)
+-		return -ETIMEDOUT;
++	if (!retry) {
++		ret = -ETIMEDOUT;
++		goto out;
++	}
+ 
+ 	exynos_rng_writel(exynos_rng, PRNG_DONE, EXYNOS_PRNG_STATUS_OFFSET);
+ 
+ 	*data = exynos_rng_readl(exynos_rng, EXYNOS_PRNG_OUT1_OFFSET);
+ 
++out:
+ 	pm_runtime_mark_last_busy(exynos_rng->dev);
+ 	pm_runtime_put_sync_autosuspend(exynos_rng->dev);
+ 
+-	return 4;
++	return ret;
+ }
+ 
+ static int exynos_rng_probe(struct platform_device *pdev)
+diff --git a/drivers/clk/at91/clk-h32mx.c b/drivers/clk/at91/clk-h32mx.c
+index 61566bcefa53..a165230e7eda 100644
+--- a/drivers/clk/at91/clk-h32mx.c
++++ b/drivers/clk/at91/clk-h32mx.c
+@@ -116,7 +116,7 @@ void __init of_sama5d4_clk_h32mx_setup(struct device_node *np,
+ 	h32mxclk->pmc = pmc;
+ 
+ 	clk = clk_register(NULL, &h32mxclk->hw);
+-	if (!clk) {
++	if (IS_ERR(clk)) {
+ 		kfree(h32mxclk);
+ 		return;
+ 	}
+diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
+index dd2856b5633c..af19245e6a61 100644
+--- a/drivers/clk/bcm/clk-bcm2835.c
++++ b/drivers/clk/bcm/clk-bcm2835.c
+@@ -51,6 +51,7 @@
+ #define CM_GNRICCTL		0x000
+ #define CM_GNRICDIV		0x004
+ # define CM_DIV_FRAC_BITS	12
++# define CM_DIV_FRAC_MASK	GENMASK(CM_DIV_FRAC_BITS - 1, 0)
+ 
+ #define CM_VPUCTL		0x008
+ #define CM_VPUDIV		0x00c
+@@ -115,6 +116,7 @@
+ # define CM_GATE			BIT(CM_GATE_BIT)
+ # define CM_BUSY			BIT(7)
+ # define CM_BUSYD			BIT(8)
++# define CM_FRAC			BIT(9)
+ # define CM_SRC_SHIFT			0
+ # define CM_SRC_BITS			4
+ # define CM_SRC_MASK			0xf
+@@ -634,6 +636,7 @@ struct bcm2835_clock_data {
+ 	u32 frac_bits;
+ 
+ 	bool is_vpu_clock;
++	bool is_mash_clock;
+ };
+ 
+ static const char *const bcm2835_clock_per_parents[] = {
+@@ -815,6 +818,7 @@ static const struct bcm2835_clock_data bcm2835_clock_pwm_data = {
+ 	.div_reg = CM_PWMDIV,
+ 	.int_bits = 12,
+ 	.frac_bits = 12,
++	.is_mash_clock = true,
+ };
+ 
+ struct bcm2835_pll {
+@@ -900,8 +904,14 @@ static void bcm2835_pll_off(struct clk_hw *hw)
+ 	struct bcm2835_cprman *cprman = pll->cprman;
+ 	const struct bcm2835_pll_data *data = pll->data;
+ 
+-	cprman_write(cprman, data->cm_ctrl_reg, CM_PLL_ANARST);
+-	cprman_write(cprman, data->a2w_ctrl_reg, A2W_PLL_CTRL_PWRDN);
++	spin_lock(&cprman->regs_lock);
++	cprman_write(cprman, data->cm_ctrl_reg,
++		     cprman_read(cprman, data->cm_ctrl_reg) |
++		     CM_PLL_ANARST);
++	cprman_write(cprman, data->a2w_ctrl_reg,
++		     cprman_read(cprman, data->a2w_ctrl_reg) |
++		     A2W_PLL_CTRL_PWRDN);
++	spin_unlock(&cprman->regs_lock);
+ }
+ 
+ static int bcm2835_pll_on(struct clk_hw *hw)
+@@ -911,6 +921,10 @@ static int bcm2835_pll_on(struct clk_hw *hw)
+ 	const struct bcm2835_pll_data *data = pll->data;
+ 	ktime_t timeout;
+ 
++	cprman_write(cprman, data->a2w_ctrl_reg,
++		     cprman_read(cprman, data->a2w_ctrl_reg) &
++		     ~A2W_PLL_CTRL_PWRDN);
++
+ 	/* Take the PLL out of reset. */
+ 	cprman_write(cprman, data->cm_ctrl_reg,
+ 		     cprman_read(cprman, data->cm_ctrl_reg) & ~CM_PLL_ANARST);
+@@ -1173,7 +1187,7 @@ static u32 bcm2835_clock_choose_div(struct clk_hw *hw,
+ 		GENMASK(CM_DIV_FRAC_BITS - data->frac_bits, 0) >> 1;
+ 	u64 temp = (u64)parent_rate << CM_DIV_FRAC_BITS;
+ 	u64 rem;
+-	u32 div;
++	u32 div, mindiv, maxdiv;
+ 
+ 	rem = do_div(temp, rate);
+ 	div = temp;
+@@ -1183,10 +1197,23 @@ static u32 bcm2835_clock_choose_div(struct clk_hw *hw,
+ 		div += unused_frac_mask + 1;
+ 	div &= ~unused_frac_mask;
+ 
+-	/* Clamp to the limits. */
+-	div = max(div, unused_frac_mask + 1);
+-	div = min_t(u32, div, GENMASK(data->int_bits + CM_DIV_FRAC_BITS - 1,
+-				      CM_DIV_FRAC_BITS - data->frac_bits));
++	/* different clamping limits apply for a mash clock */
++	if (data->is_mash_clock) {
++		/* clamp to min divider of 2 */
++		mindiv = 2 << CM_DIV_FRAC_BITS;
++		/* clamp to the highest possible integer divider */
++		maxdiv = (BIT(data->int_bits) - 1) << CM_DIV_FRAC_BITS;
++	} else {
++		/* clamp to min divider of 1 */
++		mindiv = 1 << CM_DIV_FRAC_BITS;
++		/* clamp to the highest possible fractional divider */
++		maxdiv = GENMASK(data->int_bits + CM_DIV_FRAC_BITS - 1,
++				 CM_DIV_FRAC_BITS - data->frac_bits);
++	}
++
++	/* apply the clamping  limits */
++	div = max_t(u32, div, mindiv);
++	div = min_t(u32, div, maxdiv);
+ 
+ 	return div;
+ }
+@@ -1280,9 +1307,26 @@ static int bcm2835_clock_set_rate(struct clk_hw *hw,
+ 	struct bcm2835_cprman *cprman = clock->cprman;
+ 	const struct bcm2835_clock_data *data = clock->data;
+ 	u32 div = bcm2835_clock_choose_div(hw, rate, parent_rate, false);
++	u32 ctl;
++
++	spin_lock(&cprman->regs_lock);
++
++	/*
++	 * Setting up frac support
++	 *
++	 * In principle it is recommended to stop/start the clock first,
++	 * but as we set CLK_SET_RATE_GATE during registration of the
++	 * clock this requirement should be take care of by the
++	 * clk-framework.
++	 */
++	ctl = cprman_read(cprman, data->ctl_reg) & ~CM_FRAC;
++	ctl |= (div & CM_DIV_FRAC_MASK) ? CM_FRAC : 0;
++	cprman_write(cprman, data->ctl_reg, ctl);
+ 
+ 	cprman_write(cprman, data->div_reg, div);
+ 
++	spin_unlock(&cprman->regs_lock);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/clk/imx/clk-imx35.c b/drivers/clk/imx/clk-imx35.c
+index a71d24cb4c06..b0978d3b83e2 100644
+--- a/drivers/clk/imx/clk-imx35.c
++++ b/drivers/clk/imx/clk-imx35.c
+@@ -66,7 +66,7 @@ static const char *std_sel[] = {"ppll", "arm"};
+ static const char *ipg_per_sel[] = {"ahb_per_div", "arm_per_div"};
+ 
+ enum mx35_clks {
+-	ckih, ckil, mpll, ppll, mpll_075, arm, hsp, hsp_div, hsp_sel, ahb, ipg,
++	ckih, mpll, ppll, mpll_075, arm, hsp, hsp_div, hsp_sel, ahb, ipg,
+ 	arm_per_div, ahb_per_div, ipg_per, uart_sel, uart_div, esdhc_sel,
+ 	esdhc1_div, esdhc2_div, esdhc3_div, spdif_sel, spdif_div_pre,
+ 	spdif_div_post, ssi_sel, ssi1_div_pre, ssi1_div_post, ssi2_div_pre,
+@@ -79,7 +79,7 @@ enum mx35_clks {
+ 	rtc_gate, rtic_gate, scc_gate, sdma_gate, spba_gate, spdif_gate,
+ 	ssi1_gate, ssi2_gate, uart1_gate, uart2_gate, uart3_gate, usbotg_gate,
+ 	wdog_gate, max_gate, admux_gate, csi_gate, csi_div, csi_sel, iim_gate,
+-	gpu2d_gate, clk_max
++	gpu2d_gate, ckil, clk_max
+ };
+ 
+ static struct clk *clk[clk_max];
+diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
+index f996efc56605..0db185762a10 100644
+--- a/drivers/cpuidle/cpuidle.c
++++ b/drivers/cpuidle/cpuidle.c
+@@ -214,7 +214,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
+ 		tick_broadcast_exit();
+ 	}
+ 
+-	if (!cpuidle_state_is_coupled(drv, entered_state))
++	if (!cpuidle_state_is_coupled(drv, index))
+ 		local_irq_enable();
+ 
+ 	diff = ktime_to_us(ktime_sub(time_end, time_start));
+@@ -433,6 +433,8 @@ static void __cpuidle_unregister_device(struct cpuidle_device *dev)
+ 	list_del(&dev->device_list);
+ 	per_cpu(cpuidle_devices, dev->cpu) = NULL;
+ 	module_put(drv->owner);
++
++	dev->registered = 0;
+ }
+ 
+ static void __cpuidle_device_init(struct cpuidle_device *dev)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+index 119cdc2c43e7..7ef2c13921b4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+@@ -194,12 +194,12 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector)
+ 				bpc = 8;
+ 				DRM_DEBUG("%s: HDMI deep color 10 bpc exceeds max tmds clock. Using %d bpc.\n",
+ 					  connector->name, bpc);
+-			} else if (bpc > 8) {
+-				/* max_tmds_clock missing, but hdmi spec mandates it for deep color. */
+-				DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n",
+-					  connector->name);
+-				bpc = 8;
+ 			}
++		} else if (bpc > 8) {
++			/* max_tmds_clock missing, but hdmi spec mandates it for deep color. */
++			DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n",
++				  connector->name);
++			bpc = 8;
+ 		}
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+index 7b7f4aba60c0..fe36caf1b7d7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+@@ -150,7 +150,7 @@ u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
+ 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ 			amdgpu_crtc = to_amdgpu_crtc(crtc);
+ 			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
+-				vrefresh = amdgpu_crtc->hw_mode.vrefresh;
++				vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
+ 				break;
+ 			}
+ 		}
+diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
+index 9a7b44616b55..ee9914d0e5c6 100644
+--- a/drivers/gpu/drm/drm_atomic.c
++++ b/drivers/gpu/drm/drm_atomic.c
+@@ -138,7 +138,7 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
+ 	for (i = 0; i < state->num_connector; i++) {
+ 		struct drm_connector *connector = state->connectors[i];
+ 
+-		if (!connector)
++		if (!connector || !connector->funcs)
+ 			continue;
+ 
+ 		/*
+diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
+index 1e103c4c6ee0..f8e07477b273 100644
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -1899,7 +1899,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
+ 			  int n, int width, int height)
+ {
+ 	int c, o;
+-	struct drm_device *dev = fb_helper->dev;
+ 	struct drm_connector *connector;
+ 	const struct drm_connector_helper_funcs *connector_funcs;
+ 	struct drm_encoder *encoder;
+@@ -1918,7 +1917,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
+ 	if (modes[n] == NULL)
+ 		return best_score;
+ 
+-	crtcs = kzalloc(dev->mode_config.num_connector *
++	crtcs = kzalloc(fb_helper->connector_count *
+ 			sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
+ 	if (!crtcs)
+ 		return best_score;
+@@ -1964,7 +1963,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
+ 		if (score > best_score) {
+ 			best_score = score;
+ 			memcpy(best_crtcs, crtcs,
+-			       dev->mode_config.num_connector *
++			       fb_helper->connector_count *
+ 			       sizeof(struct drm_fb_helper_crtc *));
+ 		}
+ 	}
+diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
+index 6b43ae3ffd73..1616af209bfc 100644
+--- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
++++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
+@@ -72,7 +72,7 @@ static const char *const dsi_errors[] = {
+ 	"RX Prot Violation",
+ 	"HS Generic Write FIFO Full",
+ 	"LP Generic Write FIFO Full",
+-	"Generic Read Data Avail"
++	"Generic Read Data Avail",
+ 	"Special Packet Sent",
+ 	"Tearing Effect",
+ };
+diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
+index d0b1c9afa35e..6314446f58fa 100644
+--- a/drivers/gpu/drm/i915/intel_atomic.c
++++ b/drivers/gpu/drm/i915/intel_atomic.c
+@@ -96,7 +96,8 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
+ 	crtc_state->update_pipe = false;
+ 	crtc_state->disable_lp_wm = false;
+ 	crtc_state->disable_cxsr = false;
+-	crtc_state->wm_changed = false;
++	crtc_state->update_wm_pre = false;
++	crtc_state->update_wm_post = false;
+ 
+ 	return &crtc_state->base;
+ }
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 39b00b9daf2d..207391e89599 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -4816,7 +4816,7 @@ static void intel_post_plane_update(struct intel_crtc *crtc)
+ 
+ 	crtc->wm.cxsr_allowed = true;
+ 
+-	if (pipe_config->wm_changed && pipe_config->base.active)
++	if (pipe_config->update_wm_post && pipe_config->base.active)
+ 		intel_update_watermarks(&crtc->base);
+ 
+ 	if (atomic->update_fbc)
+@@ -4850,7 +4850,7 @@ static void intel_pre_plane_update(struct intel_crtc *crtc)
+ 		intel_set_memory_cxsr(dev_priv, false);
+ 	}
+ 
+-	if (!needs_modeset(&pipe_config->base) && pipe_config->wm_changed)
++	if (!needs_modeset(&pipe_config->base) && pipe_config->update_wm_pre)
+ 		intel_update_watermarks(&crtc->base);
+ }
+ 
+@@ -6229,6 +6229,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
+ 
+ 	intel_crtc_load_lut(crtc);
+ 
++	intel_update_watermarks(crtc);
+ 	intel_enable_pipe(intel_crtc);
+ 
+ 	assert_vblank_disabled(crtc);
+@@ -11881,8 +11882,14 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
+ 			 plane->base.id, was_visible, visible,
+ 			 turn_off, turn_on, mode_changed);
+ 
+-	if (turn_on || turn_off) {
+-		pipe_config->wm_changed = true;
++	if (turn_on) {
++		pipe_config->update_wm_pre = true;
++
++		/* must disable cxsr around plane enable/disable */
++		if (plane->type != DRM_PLANE_TYPE_CURSOR)
++			pipe_config->disable_cxsr = true;
++	} else if (turn_off) {
++		pipe_config->update_wm_post = true;
+ 
+ 		/* must disable cxsr around plane enable/disable */
+ 		if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+@@ -11891,7 +11898,9 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
+ 			pipe_config->disable_cxsr = true;
+ 		}
+ 	} else if (intel_wm_need_update(plane, plane_state)) {
+-		pipe_config->wm_changed = true;
++		/* FIXME bollocks */
++		pipe_config->update_wm_pre = true;
++		pipe_config->update_wm_post = true;
+ 	}
+ 
+ 	if (visible || was_visible)
+@@ -12036,7 +12045,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
+ 	}
+ 
+ 	if (mode_changed && !crtc_state->active)
+-		pipe_config->wm_changed = true;
++		pipe_config->update_wm_post = true;
+ 
+ 	if (mode_changed && crtc_state->enable &&
+ 	    dev_priv->display.crtc_compute_clock &&
+diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
+index 1ae61f488987..c6f045ecb15a 100644
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -367,7 +367,7 @@ struct intel_crtc_state {
+ 
+ 	bool update_pipe; /* can a fast modeset be performed? */
+ 	bool disable_cxsr;
+-	bool wm_changed; /* watermarks are updated */
++	bool update_wm_pre, update_wm_post; /* watermarks are updated */
+ 
+ 	/* Pipe source size (ie. panel fitter input size)
+ 	 * All planes will be positioned inside this space,
+diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
+index bea75cafc623..1281cfce1343 100644
+--- a/drivers/gpu/drm/i915/intel_fbdev.c
++++ b/drivers/gpu/drm/i915/intel_fbdev.c
+@@ -368,12 +368,12 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
+ 	uint64_t conn_configured = 0, mask;
+ 	int pass = 0;
+ 
+-	save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool),
++	save_enabled = kcalloc(fb_helper->connector_count, sizeof(bool),
+ 			       GFP_KERNEL);
+ 	if (!save_enabled)
+ 		return false;
+ 
+-	memcpy(save_enabled, enabled, dev->mode_config.num_connector);
++	memcpy(save_enabled, enabled, fb_helper->connector_count);
+ 	mask = (1 << fb_helper->connector_count) - 1;
+ retry:
+ 	for (i = 0; i < fb_helper->connector_count; i++) {
+@@ -507,7 +507,7 @@ retry:
+ 	if (fallback) {
+ bail:
+ 		DRM_DEBUG_KMS("Not using firmware configuration\n");
+-		memcpy(enabled, save_enabled, dev->mode_config.num_connector);
++		memcpy(enabled, save_enabled, fb_helper->connector_count);
+ 		kfree(save_enabled);
+ 		return false;
+ 	}
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index 0eae3994e5e3..52bb509a1025 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -3812,6 +3812,8 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
+ 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ 		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
+ 
++	memset(active, 0, sizeof(*active));
++
+ 	active->pipe_enabled = intel_crtc->active;
+ 
+ 	if (active->pipe_enabled) {
+diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
+index 9ccff3011523..ab0e4b69ce21 100644
+--- a/drivers/gpu/drm/i915/intel_psr.c
++++ b/drivers/gpu/drm/i915/intel_psr.c
+@@ -275,19 +275,58 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
+ 	 * with the 5 or 6 idle patterns.
+ 	 */
+ 	uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
+-	uint32_t val = 0x0;
++	uint32_t val = EDP_PSR_ENABLE;
++
++	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
++	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
+ 
+ 	if (IS_HASWELL(dev))
+ 		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
+ 
+-	I915_WRITE(EDP_PSR_CTL, val |
+-		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
+-		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
+-		   EDP_PSR_ENABLE);
++	if (dev_priv->vbt.psr.tp1_wakeup_time > 5)
++		val |= EDP_PSR_TP1_TIME_2500us;
++	else if (dev_priv->vbt.psr.tp1_wakeup_time > 1)
++		val |= EDP_PSR_TP1_TIME_500us;
++	else if (dev_priv->vbt.psr.tp1_wakeup_time > 0)
++		val |= EDP_PSR_TP1_TIME_100us;
++	else
++		val |= EDP_PSR_TP1_TIME_0us;
++
++	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
++		val |= EDP_PSR_TP2_TP3_TIME_2500us;
++	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
++		val |= EDP_PSR_TP2_TP3_TIME_500us;
++	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
++		val |= EDP_PSR_TP2_TP3_TIME_100us;
++	else
++		val |= EDP_PSR_TP2_TP3_TIME_0us;
++
++	if (intel_dp_source_supports_hbr2(intel_dp) &&
++	    drm_dp_tps3_supported(intel_dp->dpcd))
++		val |= EDP_PSR_TP1_TP3_SEL;
++	else
++		val |= EDP_PSR_TP1_TP2_SEL;
++
++	I915_WRITE(EDP_PSR_CTL, val);
++
++	if (!dev_priv->psr.psr2_support)
++		return;
++
++	/* FIXME: selective update is probably totally broken because it doesn't
++	 * mesh at all with our frontbuffer tracking. And the hw alone isn't
++	 * good enough. */
++	val = EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
++
++	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
++		val |= EDP_PSR2_TP2_TIME_2500;
++	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
++		val |= EDP_PSR2_TP2_TIME_500;
++	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
++		val |= EDP_PSR2_TP2_TIME_100;
++	else
++		val |= EDP_PSR2_TP2_TIME_50;
+ 
+-	if (dev_priv->psr.psr2_support)
+-		I915_WRITE(EDP_PSR2_CTL, EDP_PSR2_ENABLE |
+-				EDP_SU_TRACK_ENABLE | EDP_PSR2_TP2_TIME_100);
++	I915_WRITE(EDP_PSR2_CTL, val);
+ }
+ 
+ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
+diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
+index 2f57d7967417..286f539e0038 100644
+--- a/drivers/gpu/drm/imx/imx-drm-core.c
++++ b/drivers/gpu/drm/imx/imx-drm-core.c
+@@ -26,6 +26,7 @@
+ #include <drm/drm_fb_cma_helper.h>
+ #include <drm/drm_plane_helper.h>
+ #include <drm/drm_of.h>
++#include <video/imx-ipu-v3.h>
+ 
+ #include "imx-drm.h"
+ 
+@@ -498,6 +499,13 @@ static int compare_of(struct device *dev, void *data)
+ {
+ 	struct device_node *np = data;
+ 
++	/* Special case for DI, dev->of_node may not be set yet */
++	if (strcmp(dev->driver->name, "imx-ipuv3-crtc") == 0) {
++		struct ipu_client_platformdata *pdata = dev->platform_data;
++
++		return pdata->of_node == np;
++	}
++
+ 	/* Special case for LDB, one device for two channels */
+ 	if (of_node_cmp(np->name, "lvds-channel") == 0) {
+ 		np = of_get_parent(np);
+diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
+index 287226311413..8fc4f6d11339 100644
+--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
++++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
+@@ -371,7 +371,7 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
+ 
+ 	ret = imx_drm_add_crtc(drm, &ipu_crtc->base, &ipu_crtc->imx_crtc,
+ 			&ipu_crtc->plane[0]->base, &ipu_crtc_helper_funcs,
+-			ipu_crtc->dev->of_node);
++			pdata->of_node);
+ 	if (ret) {
+ 		dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret);
+ 		goto err_put_resources;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index 5da5de0cb522..4948c1529836 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -3273,19 +3273,19 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
+ 		    &vmw_cmd_dx_cid_check, true, false, true),
+ 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
+ 		    true, false, true),
+-	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_ok,
++	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
+ 		    true, false, true),
+ 	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
+ 		    true, false, true),
+ 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
+-		    &vmw_cmd_ok, true, false, true),
+-	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_ok,
++		    &vmw_cmd_dx_cid_check, true, false, true),
++	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
+ 		    true, false, true),
+-	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_ok,
++	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
+ 		    true, false, true),
+ 	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
+ 		    true, false, true),
+-	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid,
++	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
+ 		    true, false, true),
+ 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
+ 		    true, false, true),
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+index 3b1faf7862a5..679a4cb98ee3 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+@@ -573,9 +573,9 @@ static int vmw_fb_set_par(struct fb_info *info)
+ 		mode = old_mode;
+ 		old_mode = NULL;
+ 	} else if (!vmw_kms_validate_mode_vram(vmw_priv,
+-					       mode->hdisplay *
+-					       (var->bits_per_pixel + 7) / 8,
+-					       mode->vdisplay)) {
++					mode->hdisplay *
++					DIV_ROUND_UP(var->bits_per_pixel, 8),
++					mode->vdisplay)) {
+ 		drm_mode_destroy(vmw_priv->dev, mode);
+ 		return -EINVAL;
+ 	}
+diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
+index abb98c77bad2..99dcacf05b99 100644
+--- a/drivers/gpu/ipu-v3/ipu-common.c
++++ b/drivers/gpu/ipu-v3/ipu-common.c
+@@ -997,7 +997,7 @@ struct ipu_platform_reg {
+ };
+ 
+ /* These must be in the order of the corresponding device tree port nodes */
+-static const struct ipu_platform_reg client_reg[] = {
++static struct ipu_platform_reg client_reg[] = {
+ 	{
+ 		.pdata = {
+ 			.csi = 0,
+@@ -1048,7 +1048,7 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
+ 	mutex_unlock(&ipu_client_id_mutex);
+ 
+ 	for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
+-		const struct ipu_platform_reg *reg = &client_reg[i];
++		struct ipu_platform_reg *reg = &client_reg[i];
+ 		struct platform_device *pdev;
+ 		struct device_node *of_node;
+ 
+@@ -1070,6 +1070,7 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
+ 
+ 		pdev->dev.parent = dev;
+ 
++		reg->pdata.of_node = of_node;
+ 		ret = platform_device_add_data(pdev, &reg->pdata,
+ 					       sizeof(reg->pdata));
+ 		if (!ret)
+diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
+index 6c99ee7bafa3..ee396ff167d9 100644
+--- a/drivers/hwmon/ads7828.c
++++ b/drivers/hwmon/ads7828.c
+@@ -120,6 +120,7 @@ static int ads7828_probe(struct i2c_client *client,
+ 	unsigned int vref_mv = ADS7828_INT_VREF_MV;
+ 	bool diff_input = false;
+ 	bool ext_vref = false;
++	unsigned int regval;
+ 
+ 	data = devm_kzalloc(dev, sizeof(struct ads7828_data), GFP_KERNEL);
+ 	if (!data)
+@@ -154,6 +155,15 @@ static int ads7828_probe(struct i2c_client *client,
+ 	if (!diff_input)
+ 		data->cmd_byte |= ADS7828_CMD_SD_SE;
+ 
++	/*
++	 * Datasheet specifies internal reference voltage is disabled by
++	 * default. The internal reference voltage needs to be enabled and
++	 * voltage needs to settle before getting valid ADC data. So perform a
++	 * dummy read to enable the internal reference voltage.
++	 */
++	if (!ext_vref)
++		regmap_read(data->regmap, data->cmd_byte, &regval);
++
+ 	hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ 							   data,
+ 							   ads7828_groups);
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index e8a84d12b7ff..03c7ecfc5732 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -455,6 +455,10 @@ static void xpad_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *d
+ static void xpad360_process_packet(struct usb_xpad *xpad, struct input_dev *dev,
+ 				   u16 cmd, unsigned char *data)
+ {
++	/* valid pad data */
++	if (data[0] != 0x00)
++		return;
++
+ 	/* digital pad */
+ 	if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
+ 		/* dpad as buttons (left, right, up, down) */
+@@ -754,6 +758,7 @@ static bool xpad_prepare_next_out_packet(struct usb_xpad *xpad)
+ 	if (packet) {
+ 		memcpy(xpad->odata, packet->data, packet->len);
+ 		xpad->irq_out->transfer_buffer_length = packet->len;
++		packet->pending = false;
+ 		return true;
+ 	}
+ 
+@@ -795,7 +800,6 @@ static void xpad_irq_out(struct urb *urb)
+ 	switch (status) {
+ 	case 0:
+ 		/* success */
+-		xpad->out_packets[xpad->last_out_packet].pending = false;
+ 		xpad->irq_out_active = xpad_prepare_next_out_packet(xpad);
+ 		break;
+ 
+diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
+index 4eb9e4d94f46..79338f4bdecb 100644
+--- a/drivers/input/misc/uinput.c
++++ b/drivers/input/misc/uinput.c
+@@ -981,9 +981,15 @@ static long uinput_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ }
+ 
+ #ifdef CONFIG_COMPAT
++
++#define UI_SET_PHYS_COMPAT	_IOW(UINPUT_IOCTL_BASE, 108, compat_uptr_t)
++
+ static long uinput_compat_ioctl(struct file *file,
+ 				unsigned int cmd, unsigned long arg)
+ {
++	if (cmd == UI_SET_PHYS_COMPAT)
++		cmd = UI_SET_PHYS;
++
+ 	return uinput_ioctl_handler(file, cmd, arg, compat_ptr(arg));
+ }
+ #endif
+diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+index 019644ff627d..bacecbd68a6d 100644
+--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
++++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+@@ -280,7 +280,8 @@ static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user
+ static int put_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
+ {
+ 	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_create_buffers32)) ||
+-	    copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format)))
++	    copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format)) ||
++	    copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
+ 		return -EFAULT;
+ 	return __put_v4l2_format32(&kp->format, &up->format);
+ }
+diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
+index 1bbbe877ba7e..807a3e3ec29a 100644
+--- a/drivers/mfd/intel-lpss.c
++++ b/drivers/mfd/intel-lpss.c
+@@ -34,6 +34,7 @@
+ #define LPSS_DEV_SIZE		0x200
+ #define LPSS_PRIV_OFFSET	0x200
+ #define LPSS_PRIV_SIZE		0x100
++#define LPSS_PRIV_REG_COUNT	(LPSS_PRIV_SIZE / 4)
+ #define LPSS_IDMA64_OFFSET	0x800
+ #define LPSS_IDMA64_SIZE	0x800
+ 
+@@ -76,6 +77,7 @@ struct intel_lpss {
+ 	struct mfd_cell *cell;
+ 	struct device *dev;
+ 	void __iomem *priv;
++	u32 priv_ctx[LPSS_PRIV_REG_COUNT];
+ 	int devid;
+ 	u32 caps;
+ 	u32 active_ltr;
+@@ -493,6 +495,16 @@ EXPORT_SYMBOL_GPL(intel_lpss_prepare);
+ 
+ int intel_lpss_suspend(struct device *dev)
+ {
++	struct intel_lpss *lpss = dev_get_drvdata(dev);
++	unsigned int i;
++
++	/* Save device context */
++	for (i = 0; i < LPSS_PRIV_REG_COUNT; i++)
++		lpss->priv_ctx[i] = readl(lpss->priv + i * 4);
++
++	/* Put the device into reset state */
++	writel(0, lpss->priv + LPSS_PRIV_RESETS);
++
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(intel_lpss_suspend);
+@@ -500,8 +512,13 @@ EXPORT_SYMBOL_GPL(intel_lpss_suspend);
+ int intel_lpss_resume(struct device *dev)
+ {
+ 	struct intel_lpss *lpss = dev_get_drvdata(dev);
++	unsigned int i;
+ 
+-	intel_lpss_init_dev(lpss);
++	intel_lpss_deassert_reset(lpss);
++
++	/* Restore device context */
++	for (i = 0; i < LPSS_PRIV_REG_COUNT; i++)
++		writel(lpss->priv_ctx[i], lpss->priv + i * 4);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c
+index d9e15cf7c6c8..12d6ebb4ae5d 100644
+--- a/drivers/mfd/intel_soc_pmic_core.c
++++ b/drivers/mfd/intel_soc_pmic_core.c
+@@ -35,6 +35,7 @@ static struct gpiod_lookup_table panel_gpio_table = {
+ 	.table = {
+ 		/* Panel EN/DISABLE */
+ 		GPIO_LOOKUP("gpio_crystalcove", 94, "panel", GPIO_ACTIVE_HIGH),
++		{ },
+ 	},
+ };
+ 
+diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
+index b41eb3f4ee56..8dd0a15dd6e0 100644
+--- a/drivers/net/wireless/ath/ath10k/core.c
++++ b/drivers/net/wireless/ath/ath10k/core.c
+@@ -1687,6 +1687,10 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
+ 		goto err_hif_stop;
+ 	}
+ 
++	ar->free_vdev_map = (1LL << ar->max_num_vdevs) - 1;
++
++	INIT_LIST_HEAD(&ar->arvifs);
++
+ 	/* we don't care about HTT in UTF mode */
+ 	if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
+ 		status = ath10k_htt_setup(&ar->htt);
+@@ -1700,10 +1704,6 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
+ 	if (status)
+ 		goto err_hif_stop;
+ 
+-	ar->free_vdev_map = (1LL << ar->max_num_vdevs) - 1;
+-
+-	INIT_LIST_HEAD(&ar->arvifs);
+-
+ 	return 0;
+ 
+ err_hif_stop:
+diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
+index 2bdf5408b0d9..5d621428a8cb 100644
+--- a/drivers/net/wireless/ath/ath10k/debug.c
++++ b/drivers/net/wireless/ath/ath10k/debug.c
+@@ -2011,7 +2011,12 @@ static ssize_t ath10k_write_pktlog_filter(struct file *file,
+ 		goto out;
+ 	}
+ 
+-	if (filter && (filter != ar->debug.pktlog_filter)) {
++	if (filter == ar->debug.pktlog_filter) {
++		ret = count;
++		goto out;
++	}
++
++	if (filter) {
+ 		ret = ath10k_wmi_pdev_pktlog_enable(ar, filter);
+ 		if (ret) {
+ 			ath10k_warn(ar, "failed to enable pktlog filter %x: %d\n",
+diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
+index 6146a293601a..aaae4941a5df 100644
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -4474,7 +4474,10 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
+ 		goto err_vdev_delete;
+ 	}
+ 
+-	if (ar->cfg_tx_chainmask) {
++	/* Configuring number of spatial stream for monitor interface is causing
++	 * target assert in qca9888 and qca6174.
++	 */
++	if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) {
+ 		u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
+ 
+ 		vdev_param = ar->wmi.vdev_param->nss;
+@@ -6434,7 +6437,13 @@ ath10k_mac_update_rx_channel(struct ath10k *ar,
+ 			def = &vifs[0].new_ctx->def;
+ 
+ 		ar->rx_channel = def->chan;
+-	} else if (ctx && ath10k_mac_num_chanctxs(ar) == 0) {
++	} else if ((ctx && ath10k_mac_num_chanctxs(ar) == 0) ||
++		   (ctx && (ar->state == ATH10K_STATE_RESTARTED))) {
++		/* During driver restart due to firmware assert, since mac80211
++		 * already has valid channel context for given radio, channel
++		 * context iteration return num_chanctx > 0. So fix rx_channel
++		 * when restart is in progress.
++		 */
+ 		ar->rx_channel = ctx->def.chan;
+ 	} else {
+ 		ar->rx_channel = NULL;
+diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
+index 803030fd17d3..6a2a16856763 100644
+--- a/drivers/net/wireless/ath/ath5k/led.c
++++ b/drivers/net/wireless/ath/ath5k/led.c
+@@ -77,7 +77,7 @@ static const struct pci_device_id ath5k_led_devices[] = {
+ 	/* HP Compaq CQ60-206US (ddreggors@jumptv.com) */
+ 	{ ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137a), ATH_LED(3, 1) },
+ 	/* HP Compaq C700 (nitrousnrg@gmail.com) */
+-	{ ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 1) },
++	{ ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 0) },
+ 	/* LiteOn AR5BXB63 (magooz@salug.it) */
+ 	{ ATH_SDEVICE(PCI_VENDOR_ID_ATHEROS, 0x3067), ATH_LED(3, 0) },
+ 	/* IBM-specific AR5212 (all others) */
+diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
+index ab7a1ac37849..b66a30e25616 100644
+--- a/drivers/net/wireless/ath/ath9k/init.c
++++ b/drivers/net/wireless/ath/ath9k/init.c
+@@ -49,6 +49,10 @@ int ath9k_led_blink;
+ module_param_named(blink, ath9k_led_blink, int, 0444);
+ MODULE_PARM_DESC(blink, "Enable LED blink on activity");
+ 
++static int ath9k_led_active_high = -1;
++module_param_named(led_active_high, ath9k_led_active_high, int, 0444);
++MODULE_PARM_DESC(led_active_high, "Invert LED polarity");
++
+ static int ath9k_btcoex_enable;
+ module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
+ MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
+@@ -600,6 +604,9 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
+ 	if (ret)
+ 		return ret;
+ 
++	if (ath9k_led_active_high != -1)
++		ah->config.led_active_high = ath9k_led_active_high == 1;
++
+ 	/*
+ 	 * Enable WLAN/BT RX Antenna diversity only when:
+ 	 *
+diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
+index e6fef1be9977..7cdaf40c3057 100644
+--- a/drivers/net/wireless/ath/ath9k/pci.c
++++ b/drivers/net/wireless/ath/ath9k/pci.c
+@@ -28,6 +28,16 @@ static const struct pci_device_id ath_pci_id_table[] = {
+ 	{ PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */
+ 	{ PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI   */
+ 	{ PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI   */
++
++#ifdef CONFIG_ATH9K_PCOEM
++	/* Mini PCI AR9220 MB92 cards: Compex WLM200NX, Wistron DNMA-92 */
++	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
++			 0x0029,
++			 PCI_VENDOR_ID_ATHEROS,
++			 0x2096),
++	  .driver_data = ATH9K_PCI_LED_ACT_HI },
++#endif
++
+ 	{ PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
+ 
+ #ifdef CONFIG_ATH9K_PCOEM
+diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
+index 0517a4f2d3f2..7a40d8dffa36 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/base.c
++++ b/drivers/net/wireless/realtek/rtlwifi/base.c
+@@ -1660,9 +1660,9 @@ void rtl_watchdog_wq_callback(void *data)
+ 		if (((rtlpriv->link_info.num_rx_inperiod +
+ 		      rtlpriv->link_info.num_tx_inperiod) > 8) ||
+ 		    (rtlpriv->link_info.num_rx_inperiod > 2))
+-			rtl_lps_enter(hw);
+-		else
+ 			rtl_lps_leave(hw);
++		else
++			rtl_lps_enter(hw);
+ 	}
+ 
+ 	rtlpriv->link_info.num_rx_inperiod = 0;
+diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
+index c43ab59a690a..77cbd10e807d 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
++++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
+@@ -1203,7 +1203,6 @@ static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist,
+ 
+ 		/* Force GNT_BT to low */
+ 		btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x0);
+-		btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
+ 
+ 		if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) {
+ 			/* tell firmware "no antenna inverse" */
+@@ -1211,19 +1210,25 @@ static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist,
+ 			h2c_parameter[1] = 1;  /* ext switch type */
+ 			btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+ 						h2c_parameter);
++			btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
+ 		} else {
+ 			/* tell firmware "antenna inverse" */
+ 			h2c_parameter[0] = 1;
+ 			h2c_parameter[1] = 1;  /* ext switch type */
+ 			btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+ 						h2c_parameter);
++			btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);
+ 		}
+ 	}
+ 
+ 	/* ext switch setting */
+ 	if (use_ext_switch) {
+ 		/* fixed internal switch S1->WiFi, S0->BT */
+-		btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
++		if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
++			btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
++		else
++			btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);
++
+ 		switch (antpos_type) {
+ 		case BTC_ANT_WIFI_AT_MAIN:
+ 			/* ext switch main at wifi */
+diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+index b2791c893417..babd1490f20c 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
++++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+@@ -965,13 +965,38 @@ void exhalbtc_set_chip_type(u8 chip_type)
+ 	}
+ }
+ 
+-void exhalbtc_set_ant_num(u8 type, u8 ant_num)
++void exhalbtc_set_ant_num(struct rtl_priv *rtlpriv, u8 type, u8 ant_num)
+ {
+ 	if (BT_COEX_ANT_TYPE_PG == type) {
+ 		gl_bt_coexist.board_info.pg_ant_num = ant_num;
+ 		gl_bt_coexist.board_info.btdm_ant_num = ant_num;
++		/* The antenna position:
++		 * Main (default) or Aux for pgAntNum=2 && btdmAntNum =1.
++		 * The antenna position should be determined by
++		 * auto-detect mechanism.
++		 * The following is assumed to main,
++		 * and those must be modified
++		 * if y auto-detect mechanism is ready
++		 */
++		if ((gl_bt_coexist.board_info.pg_ant_num == 2) &&
++		    (gl_bt_coexist.board_info.btdm_ant_num == 1))
++			gl_bt_coexist.board_info.btdm_ant_pos =
++						       BTC_ANTENNA_AT_MAIN_PORT;
++		else
++			gl_bt_coexist.board_info.btdm_ant_pos =
++						       BTC_ANTENNA_AT_MAIN_PORT;
+ 	} else if (BT_COEX_ANT_TYPE_ANTDIV == type) {
+ 		gl_bt_coexist.board_info.btdm_ant_num = ant_num;
++		gl_bt_coexist.board_info.btdm_ant_pos =
++						       BTC_ANTENNA_AT_MAIN_PORT;
++	} else if (type == BT_COEX_ANT_TYPE_DETECTED) {
++		gl_bt_coexist.board_info.btdm_ant_num = ant_num;
++		if (rtlpriv->cfg->mod_params->ant_sel == 1)
++			gl_bt_coexist.board_info.btdm_ant_pos =
++				BTC_ANTENNA_AT_AUX_PORT;
++		else
++			gl_bt_coexist.board_info.btdm_ant_pos =
++				BTC_ANTENNA_AT_MAIN_PORT;
+ 	}
+ }
+ 
+diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
+index 0a903ea179ef..f41ca57dd8a7 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
++++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
+@@ -535,7 +535,7 @@ void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version);
+ void exhalbtc_update_min_bt_rssi(char bt_rssi);
+ void exhalbtc_set_bt_exist(bool bt_exist);
+ void exhalbtc_set_chip_type(u8 chip_type);
+-void exhalbtc_set_ant_num(u8 type, u8 ant_num);
++void exhalbtc_set_ant_num(struct rtl_priv *rtlpriv, u8 type, u8 ant_num);
+ void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist);
+ void exhalbtc_signal_compensation(struct btc_coexist *btcoexist,
+ 				  u8 *rssi_wifi, u8 *rssi_bt);
+diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
+index b9b0cb7af8ea..d3fd9211b3a4 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
++++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
+@@ -72,7 +72,10 @@ void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv)
+ 		 __func__, bt_type);
+ 	exhalbtc_set_chip_type(bt_type);
+ 
+-	exhalbtc_set_ant_num(BT_COEX_ANT_TYPE_PG, ant_num);
++	if (rtlpriv->cfg->mod_params->ant_sel == 1)
++		exhalbtc_set_ant_num(rtlpriv, BT_COEX_ANT_TYPE_DETECTED, 1);
++	else
++		exhalbtc_set_ant_num(rtlpriv, BT_COEX_ANT_TYPE_PG, ant_num);
+ }
+ 
+ void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv)
+diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
+index 7f471bff435c..5b4048041147 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
++++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
+@@ -1573,7 +1573,7 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
+ 							 true,
+ 							 HW_DESC_TXBUFF_ADDR),
+ 						 skb->len, PCI_DMA_TODEVICE);
+-				kfree_skb(skb);
++				dev_kfree_skb_irq(skb);
+ 				ring->idx = (ring->idx + 1) % ring->entries;
+ 			}
+ 			ring->idx = 0;
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+index c983d2fe147f..5a3df9198ddf 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+@@ -2684,6 +2684,7 @@ void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
+ 					      bool auto_load_fail, u8 *hwinfo)
+ {
+ 	struct rtl_priv *rtlpriv = rtl_priv(hw);
++	struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params;
+ 	u8 value;
+ 	u32 tmpu_32;
+ 
+@@ -2702,6 +2703,10 @@ void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
+ 		rtlpriv->btcoexist.btc_info.ant_num = ANT_X2;
+ 	}
+ 
++	/* override ant_num / ant_path */
++	if (mod_params->ant_sel)
++		rtlpriv->btcoexist.btc_info.ant_num =
++			(mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1);
+ }
+ 
+ void rtl8723be_bt_reg_init(struct ieee80211_hw *hw)
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+index a78eaeda0008..2101793438ed 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+@@ -273,6 +273,7 @@ static struct rtl_mod_params rtl8723be_mod_params = {
+ 	.msi_support = false,
+ 	.disable_watchdog = false,
+ 	.debug = DBG_EMERG,
++	.ant_sel = 0,
+ };
+ 
+ static struct rtl_hal_cfg rtl8723be_hal_cfg = {
+@@ -394,6 +395,7 @@ module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
+ module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444);
+ module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog,
+ 		   bool, 0444);
++module_param_named(ant_sel, rtl8723be_mod_params.ant_sel, int, 0444);
+ MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
+ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
+ MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
+@@ -402,6 +404,7 @@ MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n");
+ MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+ MODULE_PARM_DESC(disable_watchdog,
+ 		 "Set to 1 to disable the watchdog (default 0)\n");
++MODULE_PARM_DESC(ant_sel, "Set to 1 or 2 to force antenna number (default 0)\n");
+ 
+ static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
+ 
+diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
+index 84397b190cc0..3c18902d21be 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
++++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
+@@ -2251,6 +2251,9 @@ struct rtl_mod_params {
+ 
+ 	/* default 0: 1 means do not disable interrupts */
+ 	bool int_clear;
++
++	/* select antenna */
++	int ant_sel;
+ };
+ 
+ struct rtl_hal_usbint_cfg {
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 6b0056e9c33e..6e3feecf0301 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -179,9 +179,6 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ 	u16 orig_cmd;
+ 	struct pci_bus_region region, inverted_region;
+ 
+-	if (dev->non_compliant_bars)
+-		return 0;
+-
+ 	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
+ 
+ 	/* No printks while decoding is disabled! */
+@@ -322,6 +319,9 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
+ {
+ 	unsigned int pos, reg;
+ 
++	if (dev->non_compliant_bars)
++		return;
++
+ 	for (pos = 0; pos < howmany; pos++) {
+ 		struct resource *res = &dev->resource[pos];
+ 		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
+diff --git a/drivers/pinctrl/samsung/pinctrl-exynos5440.c b/drivers/pinctrl/samsung/pinctrl-exynos5440.c
+index 00ab63abf1d9..dbbdf652c34a 100644
+--- a/drivers/pinctrl/samsung/pinctrl-exynos5440.c
++++ b/drivers/pinctrl/samsung/pinctrl-exynos5440.c
+@@ -107,6 +107,7 @@ struct exynos5440_pmx_func {
+  * @nr_groups: number of pin groups available.
+  * @pmx_functions: list of pin functions parsed from device tree.
+  * @nr_functions: number of pin functions available.
++ * @range: gpio range to register with pinctrl
+  */
+ struct exynos5440_pinctrl_priv_data {
+ 	void __iomem			*reg_base;
+@@ -117,6 +118,7 @@ struct exynos5440_pinctrl_priv_data {
+ 	unsigned int			nr_groups;
+ 	const struct exynos5440_pmx_func	*pmx_functions;
+ 	unsigned int			nr_functions;
++	struct pinctrl_gpio_range	range;
+ };
+ 
+ /**
+@@ -742,7 +744,6 @@ static int exynos5440_pinctrl_register(struct platform_device *pdev,
+ 	struct pinctrl_desc *ctrldesc;
+ 	struct pinctrl_dev *pctl_dev;
+ 	struct pinctrl_pin_desc *pindesc, *pdesc;
+-	struct pinctrl_gpio_range grange;
+ 	char *pin_names;
+ 	int pin, ret;
+ 
+@@ -794,12 +795,12 @@ static int exynos5440_pinctrl_register(struct platform_device *pdev,
+ 		return PTR_ERR(pctl_dev);
+ 	}
+ 
+-	grange.name = "exynos5440-pctrl-gpio-range";
+-	grange.id = 0;
+-	grange.base = 0;
+-	grange.npins = EXYNOS5440_MAX_PINS;
+-	grange.gc = priv->gc;
+-	pinctrl_add_gpio_range(pctl_dev, &grange);
++	priv->range.name = "exynos5440-pctrl-gpio-range";
++	priv->range.id = 0;
++	priv->range.base = 0;
++	priv->range.npins = EXYNOS5440_MAX_PINS;
++	priv->range.gc = priv->gc;
++	pinctrl_add_gpio_range(pctl_dev, &priv->range);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
+index d044f3f273be..467773033a20 100644
+--- a/drivers/scsi/aacraid/aacraid.h
++++ b/drivers/scsi/aacraid/aacraid.h
+@@ -29,6 +29,7 @@ enum {
+ #define AAC_INT_MODE_MSI		(1<<1)
+ #define AAC_INT_MODE_AIF		(1<<2)
+ #define AAC_INT_MODE_SYNC		(1<<3)
++#define AAC_INT_MODE_MSIX		(1<<16)
+ 
+ #define AAC_INT_ENABLE_TYPE1_INTX	0xfffffffb
+ #define AAC_INT_ENABLE_TYPE1_MSIX	0xfffffffa
+diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
+index 0e954e37f0b5..0d351cd3191b 100644
+--- a/drivers/scsi/aacraid/comminit.c
++++ b/drivers/scsi/aacraid/comminit.c
+@@ -37,6 +37,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/slab.h>
+ #include <linux/blkdev.h>
++#include <linux/delay.h>
+ #include <linux/completion.h>
+ #include <linux/mm.h>
+ #include <scsi/scsi_host.h>
+@@ -47,6 +48,20 @@ struct aac_common aac_config = {
+ 	.irq_mod = 1
+ };
+ 
++static inline int aac_is_msix_mode(struct aac_dev *dev)
++{
++	u32 status;
++
++	status = src_readl(dev, MUnit.OMR);
++	return (status & AAC_INT_MODE_MSIX);
++}
++
++static inline void aac_change_to_intx(struct aac_dev *dev)
++{
++	aac_src_access_devreg(dev, AAC_DISABLE_MSIX);
++	aac_src_access_devreg(dev, AAC_ENABLE_INTX);
++}
++
+ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long commsize, unsigned long commalign)
+ {
+ 	unsigned char *base;
+@@ -425,6 +440,15 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
+ 	dev->comm_interface = AAC_COMM_PRODUCER;
+ 	dev->raw_io_interface = dev->raw_io_64 = 0;
+ 
++
++	/*
++	 * Enable INTX mode, if not done already Enabled
++	 */
++	if (aac_is_msix_mode(dev)) {
++		aac_change_to_intx(dev);
++		dev_info(&dev->pdev->dev, "Changed firmware to INTX mode");
++	}
++
+ 	if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
+ 		0, 0, 0, 0, 0, 0,
+ 		status+0, status+1, status+2, status+3, NULL)) &&
+diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
+index 4cbf54928640..8c758c36fc70 100644
+--- a/drivers/scsi/aacraid/commsup.c
++++ b/drivers/scsi/aacraid/commsup.c
+@@ -611,10 +611,10 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
+ 					}
+ 					return -EFAULT;
+ 				}
+-				/* We used to udelay() here but that absorbed
+-				 * a CPU when a timeout occured. Not very
+-				 * useful. */
+-				cpu_relax();
++				/*
++				 * Allow other processes / CPUS to use core
++				 */
++				schedule();
+ 			}
+ 		} else if (down_interruptible(&fibptr->event_wait)) {
+ 			/* Do nothing ... satisfy
+@@ -1970,6 +1970,10 @@ int aac_command_thread(void *data)
+ 		if (difference <= 0)
+ 			difference = 1;
+ 		set_current_state(TASK_INTERRUPTIBLE);
++
++		if (kthread_should_stop())
++			break;
++
+ 		schedule_timeout(difference);
+ 
+ 		if (kthread_should_stop())
+diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
+index 4fb3581d4614..3fa65338d3f5 100644
+--- a/drivers/scsi/lpfc/lpfc_mem.c
++++ b/drivers/scsi/lpfc/lpfc_mem.c
+@@ -231,13 +231,15 @@ lpfc_mem_free(struct lpfc_hba *phba)
+ 	if (phba->lpfc_hbq_pool)
+ 		pci_pool_destroy(phba->lpfc_hbq_pool);
+ 	phba->lpfc_hbq_pool = NULL;
+-	mempool_destroy(phba->rrq_pool);
++
++	if (phba->rrq_pool)
++		mempool_destroy(phba->rrq_pool);
+ 	phba->rrq_pool = NULL;
+ 
+ 	/* Free NLP memory pool */
+ 	mempool_destroy(phba->nlp_mem_pool);
+ 	phba->nlp_mem_pool = NULL;
+-	if (phba->sli_rev == LPFC_SLI_REV4) {
++	if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) {
+ 		mempool_destroy(phba->active_rrq_pool);
+ 		phba->active_rrq_pool = NULL;
+ 	}
+diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
+index 1a7dc3c590b1..481eb2989a1e 100644
+--- a/drivers/tty/serial/ucc_uart.c
++++ b/drivers/tty/serial/ucc_uart.c
+@@ -1478,6 +1478,9 @@ static const struct of_device_id ucc_uart_match[] = {
+ 		.type = "serial",
+ 		.compatible = "ucc_uart",
+ 	},
++	{
++		.compatible = "fsl,t1040-ucc-uart",
++	},
+ 	{},
+ };
+ MODULE_DEVICE_TABLE(of, ucc_uart_match);
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
+index 44367783f07a..83ec7b89d308 100644
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -487,7 +487,8 @@ static void eoi_pirq(struct irq_data *data)
+ 	if (!VALID_EVTCHN(evtchn))
+ 		return;
+ 
+-	if (unlikely(irqd_is_setaffinity_pending(data))) {
++	if (unlikely(irqd_is_setaffinity_pending(data)) &&
++	    likely(!irqd_irq_disabled(data))) {
+ 		int masked = test_and_set_mask(evtchn);
+ 
+ 		clear_evtchn(evtchn);
+@@ -1370,7 +1371,8 @@ static void ack_dynirq(struct irq_data *data)
+ 	if (!VALID_EVTCHN(evtchn))
+ 		return;
+ 
+-	if (unlikely(irqd_is_setaffinity_pending(data))) {
++	if (unlikely(irqd_is_setaffinity_pending(data)) &&
++	    likely(!irqd_irq_disabled(data))) {
+ 		int masked = test_and_set_mask(evtchn);
+ 
+ 		clear_evtchn(evtchn);
+diff --git a/fs/affs/super.c b/fs/affs/super.c
+index 2a6713b6b9f4..d6384863192c 100644
+--- a/fs/affs/super.c
++++ b/fs/affs/super.c
+@@ -528,7 +528,7 @@ affs_remount(struct super_block *sb, int *flags, char *data)
+ 	char			*prefix = NULL;
+ 
+ 	new_opts = kstrdup(data, GFP_KERNEL);
+-	if (!new_opts)
++	if (data && !new_opts)
+ 		return -ENOMEM;
+ 
+ 	pr_debug("%s(flags=0x%x,opts=\"%s\")\n", __func__, *flags, data);
+@@ -546,7 +546,8 @@ affs_remount(struct super_block *sb, int *flags, char *data)
+ 	}
+ 
+ 	flush_delayed_work(&sbi->sb_work);
+-	replace_mount_options(sb, new_opts);
++	if (new_opts)
++		replace_mount_options(sb, new_opts);
+ 
+ 	sbi->s_flags = mount_flags;
+ 	sbi->s_mode  = mode;
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index f6dac40f87ff..6442a31c2119 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -1995,7 +1995,7 @@ struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
+ 
+ 	ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
+ 	if (!ifp) {
+-		kfree(fspath);
++		vfree(fspath);
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+ 
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 6661ad8b4088..7c9a9c026e21 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -4089,6 +4089,7 @@ void btrfs_test_inode_set_ops(struct inode *inode);
+ 
+ /* ioctl.c */
+ long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
++long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+ void btrfs_update_iflags(struct inode *inode);
+ void btrfs_inherit_iflags(struct inode *inode, struct inode *dir);
+ int btrfs_is_empty_uuid(u8 *uuid);
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index e2287c7c10be..95e57320dd75 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -4620,7 +4620,7 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
+ 
+ 	/* Calc the number of the pages we need flush for space reservation */
+ 	items = calc_reclaim_items_nr(root, to_reclaim);
+-	to_reclaim = items * EXTENT_SIZE_PER_ITEM;
++	to_reclaim = (u64)items * EXTENT_SIZE_PER_ITEM;
+ 
+ 	trans = (struct btrfs_trans_handle *)current->journal_info;
+ 	block_rsv = &root->fs_info->delalloc_block_rsv;
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 392592dc7010..b5ff48d6a0c5 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -875,8 +875,14 @@ __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ 	bits |= EXTENT_FIRST_DELALLOC;
+ again:
+ 	if (!prealloc && gfpflags_allow_blocking(mask)) {
++		/*
++		 * Don't care for allocation failure here because we might end
++		 * up not needing the pre-allocated extent state at all, which
++		 * is the case if we only have in the tree extent states that
++		 * cover our input range and don't cover too any other range.
++		 * If we end up needing a new extent state we allocate it later.
++		 */
+ 		prealloc = alloc_extent_state(mask);
+-		BUG_ON(!prealloc);
+ 	}
+ 
+ 	spin_lock(&tree->lock);
+@@ -4385,8 +4391,12 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ 	if (ret < 0) {
+ 		btrfs_free_path(path);
+ 		return ret;
++	} else {
++		WARN_ON(!ret);
++		if (ret == 1)
++			ret = 0;
+ 	}
+-	WARN_ON(!ret);
++
+ 	path->slots[0]--;
+ 	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
+ 	found_type = found_key.type;
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 5d956b869e03..187fa7b9bae7 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -2931,7 +2931,7 @@ const struct file_operations btrfs_file_operations = {
+ 	.fallocate	= btrfs_fallocate,
+ 	.unlocked_ioctl	= btrfs_ioctl,
+ #ifdef CONFIG_COMPAT
+-	.compat_ioctl	= btrfs_ioctl,
++	.compat_ioctl	= btrfs_compat_ioctl,
+ #endif
+ 	.copy_file_range = btrfs_copy_file_range,
+ 	.clone_file_range = btrfs_clone_file_range,
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index f407e487c687..0db33cb4a2ac 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -10088,7 +10088,7 @@ static const struct file_operations btrfs_dir_file_operations = {
+ 	.iterate	= btrfs_real_readdir,
+ 	.unlocked_ioctl	= btrfs_ioctl,
+ #ifdef CONFIG_COMPAT
+-	.compat_ioctl	= btrfs_ioctl,
++	.compat_ioctl	= btrfs_compat_ioctl,
+ #endif
+ 	.release        = btrfs_release_file,
+ 	.fsync		= btrfs_sync_file,
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index e3791f268489..4b035cc7ed54 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -4849,8 +4849,8 @@ static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
+ 	/* update qgroup status and info */
+ 	err = btrfs_run_qgroups(trans, root->fs_info);
+ 	if (err < 0)
+-		btrfs_std_error(root->fs_info, ret,
+-			    "failed to update qgroup status and info\n");
++		btrfs_std_error(root->fs_info, err,
++			    "failed to update qgroup status and info");
+ 	err = btrfs_end_transaction(trans, root);
+ 	if (err && !ret)
+ 		ret = err;
+@@ -5397,9 +5397,15 @@ static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
+ 	if (ret)
+ 		return ret;
+ 
++	ret = mnt_want_write_file(file);
++	if (ret)
++		return ret;
++
+ 	trans = btrfs_start_transaction(root, 0);
+-	if (IS_ERR(trans))
+-		return PTR_ERR(trans);
++	if (IS_ERR(trans)) {
++		ret = PTR_ERR(trans);
++		goto out_drop_write;
++	}
+ 
+ 	spin_lock(&root->fs_info->super_lock);
+ 	newflags = btrfs_super_compat_flags(super_block);
+@@ -5418,7 +5424,11 @@ static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
+ 	btrfs_set_super_incompat_flags(super_block, newflags);
+ 	spin_unlock(&root->fs_info->super_lock);
+ 
+-	return btrfs_commit_transaction(trans, root);
++	ret = btrfs_commit_transaction(trans, root);
++out_drop_write:
++	mnt_drop_write_file(file);
++
++	return ret;
+ }
+ 
+ long btrfs_ioctl(struct file *file, unsigned int
+@@ -5555,3 +5565,24 @@ long btrfs_ioctl(struct file *file, unsigned int
+ 
+ 	return -ENOTTY;
+ }
++
++#ifdef CONFIG_COMPAT
++long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++	switch (cmd) {
++	case FS_IOC32_GETFLAGS:
++		cmd = FS_IOC_GETFLAGS;
++		break;
++	case FS_IOC32_SETFLAGS:
++		cmd = FS_IOC_SETFLAGS;
++		break;
++	case FS_IOC32_GETVERSION:
++		cmd = FS_IOC_GETVERSION;
++		break;
++	default:
++		return -ENOIOCTLCMD;
++	}
++
++	return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
++}
++#endif
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index 92bf5ee732fb..2b7986e9df03 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -2125,6 +2125,8 @@ static void scrub_missing_raid56_end_io(struct bio *bio)
+ 	if (bio->bi_error)
+ 		sblock->no_io_error_seen = 0;
+ 
++	bio_put(bio);
++
+ 	btrfs_queue_work(fs_info->scrub_workers, &sblock->work);
+ }
+ 
+@@ -2177,7 +2179,7 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
+ 	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
+ 	u64 length = sblock->page_count * PAGE_SIZE;
+ 	u64 logical = sblock->pagev[0]->logical;
+-	struct btrfs_bio *bbio;
++	struct btrfs_bio *bbio = NULL;
+ 	struct bio *bio;
+ 	struct btrfs_raid_bio *rbio;
+ 	int ret;
+@@ -2858,7 +2860,7 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
+ 	int extent_mirror_num;
+ 	int stop_loop = 0;
+ 
+-	nsectors = map->stripe_len / root->sectorsize;
++	nsectors = div_u64(map->stripe_len, root->sectorsize);
+ 	bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
+ 	sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
+ 			  GFP_NOFS);
+@@ -2978,6 +2980,7 @@ again:
+ 						       extent_len);
+ 
+ 			mapped_length = extent_len;
++			bbio = NULL;
+ 			ret = btrfs_map_block(fs_info, READ, extent_logical,
+ 					      &mapped_length, &bbio, 0);
+ 			if (!ret) {
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index d41e09fe8e38..468e988d8110 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -2024,6 +2024,7 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ 	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
+ 	int ret;
+ 	u64 thresh = 0;
++	int mixed = 0;
+ 
+ 	/*
+ 	 * holding chunk_muext to avoid allocating new chunks, holding
+@@ -2049,8 +2050,17 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ 				}
+ 			}
+ 		}
+-		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
+-			total_free_meta += found->disk_total - found->disk_used;
++
++		/*
++		 * Metadata in mixed block goup profiles are accounted in data
++		 */
++		if (!mixed && found->flags & BTRFS_BLOCK_GROUP_METADATA) {
++			if (found->flags & BTRFS_BLOCK_GROUP_DATA)
++				mixed = 1;
++			else
++				total_free_meta += found->disk_total -
++					found->disk_used;
++		}
+ 
+ 		total_used += found->disk_used;
+ 	}
+@@ -2063,7 +2073,11 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ 
+ 	/* Account global block reserve as used, it's in logical size already */
+ 	spin_lock(&block_rsv->lock);
+-	buf->f_bfree -= block_rsv->size >> bits;
++	/* Mixed block groups accounting is not byte-accurate, avoid overflow */
++	if (buf->f_bfree >= block_rsv->size >> bits)
++		buf->f_bfree -= block_rsv->size >> bits;
++	else
++		buf->f_bfree = 0;
+ 	spin_unlock(&block_rsv->lock);
+ 
+ 	buf->f_bavail = div_u64(total_free_data, factor);
+@@ -2088,7 +2102,7 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ 	 */
+ 	thresh = 4 * 1024 * 1024;
+ 
+-	if (total_free_meta - thresh < block_rsv->size)
++	if (!mixed && total_free_meta - thresh < block_rsv->size)
+ 		buf->f_bavail = 0;
+ 
+ 	buf->f_type = BTRFS_SUPER_MAGIC;
+diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
+index 539e7b5e3f86..3d14618ce54b 100644
+--- a/fs/btrfs/sysfs.c
++++ b/fs/btrfs/sysfs.c
+@@ -120,6 +120,9 @@ static ssize_t btrfs_feature_attr_store(struct kobject *kobj,
+ 	if (!fs_info)
+ 		return -EPERM;
+ 
++	if (fs_info->sb->s_flags & MS_RDONLY)
++		return -EROFS;
++
+ 	ret = kstrtoul(skip_spaces(buf), 0, &val);
+ 	if (ret)
+ 		return ret;
+@@ -374,6 +377,9 @@ static ssize_t btrfs_label_store(struct kobject *kobj,
+ 	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
+ 	size_t p_len;
+ 
++	if (!fs_info)
++		return -EPERM;
++
+ 	if (fs_info->sb->s_flags & MS_RDONLY)
+ 		return -EROFS;
+ 
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 58ae0a2ce65c..a2cd42107945 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -5157,7 +5157,7 @@ process_leaf:
+ 			}
+ 
+ 			ctx->log_new_dentries = false;
+-			if (type == BTRFS_FT_DIR)
++			if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK)
+ 				log_mode = LOG_INODE_ALL;
+ 			btrfs_release_path(path);
+ 			ret = btrfs_log_inode(trans, root, di_inode,
+@@ -5277,11 +5277,16 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
+ 			if (IS_ERR(dir_inode))
+ 				continue;
+ 
++			if (ctx)
++				ctx->log_new_dentries = false;
+ 			ret = btrfs_log_inode(trans, root, dir_inode,
+ 					      LOG_INODE_ALL, 0, LLONG_MAX, ctx);
+ 			if (!ret &&
+ 			    btrfs_must_commit_transaction(trans, dir_inode))
+ 				ret = 1;
++			if (!ret && ctx && ctx->log_new_dentries)
++				ret = log_new_dir_dentries(trans, root,
++							   dir_inode, ctx);
+ 			iput(dir_inode);
+ 			if (ret)
+ 				goto out;
+@@ -5651,11 +5656,9 @@ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
+ 	 * into the file.  When the file is logged we check it and
+ 	 * don't log the parents if the file is fully on disk.
+ 	 */
+-	if (S_ISREG(inode->i_mode)) {
+-		mutex_lock(&BTRFS_I(inode)->log_mutex);
+-		BTRFS_I(inode)->last_unlink_trans = trans->transid;
+-		mutex_unlock(&BTRFS_I(inode)->log_mutex);
+-	}
++	mutex_lock(&BTRFS_I(inode)->log_mutex);
++	BTRFS_I(inode)->last_unlink_trans = trans->transid;
++	mutex_unlock(&BTRFS_I(inode)->log_mutex);
+ 
+ 	/*
+ 	 * if this directory was already logged any new
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 366b335946fa..542c04388ba5 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -1971,11 +1971,8 @@ void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
+ 	if (srcdev->missing)
+ 		fs_devices->missing_devices--;
+ 
+-	if (srcdev->writeable) {
++	if (srcdev->writeable)
+ 		fs_devices->rw_devices--;
+-		/* zero out the old super if it is writable */
+-		btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
+-	}
+ 
+ 	if (srcdev->bdev)
+ 		fs_devices->open_devices--;
+@@ -1986,6 +1983,10 @@ void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
+ {
+ 	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
+ 
++	if (srcdev->writeable) {
++		/* zero out the old super if it is writable */
++		btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
++	}
+ 	call_rcu(&srcdev->rcu, free_device);
+ 
+ 	/*
+@@ -2023,10 +2024,9 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
+ 
+ 	btrfs_sysfs_rm_device_link(fs_info->fs_devices, tgtdev);
+ 
+-	if (tgtdev->bdev) {
+-		btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);
++	if (tgtdev->bdev)
+ 		fs_info->fs_devices->open_devices--;
+-	}
++
+ 	fs_info->fs_devices->num_devices--;
+ 
+ 	next_device = list_entry(fs_info->fs_devices->devices.next,
+@@ -2037,10 +2037,18 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
+ 		fs_info->fs_devices->latest_bdev = next_device->bdev;
+ 	list_del_rcu(&tgtdev->dev_list);
+ 
+-	call_rcu(&tgtdev->rcu, free_device);
+-
+ 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ 	mutex_unlock(&uuid_mutex);
++
++	/*
++	 * The update_dev_time() with in btrfs_scratch_superblocks()
++	 * may lead to a call to btrfs_show_devname() which will try
++	 * to hold device_list_mutex. And here this device
++	 * is already out of device list, so we don't have to hold
++	 * the device_list_mutex lock.
++	 */
++	btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);
++	call_rcu(&tgtdev->rcu, free_device);
+ }
+ 
+ static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
+@@ -3401,6 +3409,7 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info)
+ 	u32 count_meta = 0;
+ 	u32 count_sys = 0;
+ 	int chunk_reserved = 0;
++	u64 bytes_used = 0;
+ 
+ 	/* step one make some room on all the devices */
+ 	devices = &fs_info->fs_devices->devices;
+@@ -3539,7 +3548,13 @@ again:
+ 			goto loop;
+ 		}
+ 
+-		if ((chunk_type & BTRFS_BLOCK_GROUP_DATA) && !chunk_reserved) {
++		ASSERT(fs_info->data_sinfo);
++		spin_lock(&fs_info->data_sinfo->lock);
++		bytes_used = fs_info->data_sinfo->bytes_used;
++		spin_unlock(&fs_info->data_sinfo->lock);
++
++		if ((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
++		    !chunk_reserved && !bytes_used) {
+ 			trans = btrfs_start_transaction(chunk_root, 0);
+ 			if (IS_ERR(trans)) {
+ 				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
+@@ -3692,10 +3707,8 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
+ 		num_devices--;
+ 	}
+ 	btrfs_dev_replace_unlock(&fs_info->dev_replace);
+-	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+-	if (num_devices == 1)
+-		allowed |= BTRFS_BLOCK_GROUP_DUP;
+-	else if (num_devices > 1)
++	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE | BTRFS_BLOCK_GROUP_DUP;
++	if (num_devices > 1)
+ 		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
+ 	if (num_devices > 2)
+ 		allowed |= BTRFS_BLOCK_GROUP_RAID5;
+@@ -5277,7 +5290,15 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
+ 	stripe_nr = div64_u64(stripe_nr, stripe_len);
+ 
+ 	stripe_offset = stripe_nr * stripe_len;
+-	BUG_ON(offset < stripe_offset);
++	if (offset < stripe_offset) {
++		btrfs_crit(fs_info, "stripe math has gone wrong, "
++			   "stripe_offset=%llu, offset=%llu, start=%llu, "
++			   "logical=%llu, stripe_len=%llu",
++			   stripe_offset, offset, em->start, logical,
++			   stripe_len);
++		free_extent_map(em);
++		return -EINVAL;
++	}
+ 
+ 	/* stripe_offset is the offset of this block in its stripe*/
+ 	stripe_offset = offset - stripe_offset;
+@@ -5516,7 +5537,13 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
+ 				&stripe_index);
+ 		mirror_num = stripe_index + 1;
+ 	}
+-	BUG_ON(stripe_index >= map->num_stripes);
++	if (stripe_index >= map->num_stripes) {
++		btrfs_crit(fs_info, "stripe index math went horribly wrong, "
++			   "got stripe_index=%u, num_stripes=%u",
++			   stripe_index, map->num_stripes);
++		ret = -EINVAL;
++		goto out;
++	}
+ 
+ 	num_alloc_stripes = num_stripes;
+ 	if (dev_replace_is_ongoing) {
+@@ -6237,7 +6264,7 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
+ 			"invalid chunk length %llu", length);
+ 		return -EIO;
+ 	}
+-	if (!is_power_of_2(stripe_len)) {
++	if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) {
+ 		btrfs_err(root->fs_info, "invalid chunk stripe length: %llu",
+ 			  stripe_len);
+ 		return -EIO;
+diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
+index 1939ebde63df..7507be74f7da 100644
+--- a/fs/btrfs/volumes.h
++++ b/fs/btrfs/volumes.h
+@@ -347,7 +347,7 @@ struct map_lookup {
+ 	u64 type;
+ 	int io_align;
+ 	int io_width;
+-	int stripe_len;
++	u64 stripe_len;
+ 	int sector_size;
+ 	int num_stripes;
+ 	int sub_stripes;
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index acc0ad56bf2f..50c0c540a2cf 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -1150,25 +1150,20 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
+ 	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
+ 	ext4_group_t block_group;
+ 	int bit;
+-	struct buffer_head *bitmap_bh;
++	struct buffer_head *bitmap_bh = NULL;
+ 	struct inode *inode = NULL;
+-	long err = -EIO;
++	int err = -EFSCORRUPTED;
+ 
+-	/* Error cases - e2fsck has already cleaned up for us */
+-	if (ino > max_ino) {
+-		ext4_warning(sb, "bad orphan ino %lu!  e2fsck was run?", ino);
+-		err = -EFSCORRUPTED;
+-		goto error;
+-	}
++	if (ino < EXT4_FIRST_INO(sb) || ino > max_ino)
++		goto bad_orphan;
+ 
+ 	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
+ 	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
+ 	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
+ 	if (IS_ERR(bitmap_bh)) {
+-		err = PTR_ERR(bitmap_bh);
+-		ext4_warning(sb, "inode bitmap error %ld for orphan %lu",
+-			     ino, err);
+-		goto error;
++		ext4_error(sb, "inode bitmap error %ld for orphan %lu",
++			   ino, PTR_ERR(bitmap_bh));
++		return (struct inode *) bitmap_bh;
+ 	}
+ 
+ 	/* Having the inode bit set should be a 100% indicator that this
+@@ -1179,15 +1174,21 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
+ 		goto bad_orphan;
+ 
+ 	inode = ext4_iget(sb, ino);
+-	if (IS_ERR(inode))
+-		goto iget_failed;
++	if (IS_ERR(inode)) {
++		err = PTR_ERR(inode);
++		ext4_error(sb, "couldn't read orphan inode %lu (err %d)",
++			   ino, err);
++		return inode;
++	}
+ 
+ 	/*
+-	 * If the orphans has i_nlinks > 0 then it should be able to be
+-	 * truncated, otherwise it won't be removed from the orphan list
+-	 * during processing and an infinite loop will result.
++	 * If the orphans has i_nlinks > 0 then it should be able to
++	 * be truncated, otherwise it won't be removed from the orphan
++	 * list during processing and an infinite loop will result.
++	 * Similarly, it must not be a bad inode.
+ 	 */
+-	if (inode->i_nlink && !ext4_can_truncate(inode))
++	if ((inode->i_nlink && !ext4_can_truncate(inode)) ||
++	    is_bad_inode(inode))
+ 		goto bad_orphan;
+ 
+ 	if (NEXT_ORPHAN(inode) > max_ino)
+@@ -1195,29 +1196,25 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
+ 	brelse(bitmap_bh);
+ 	return inode;
+ 
+-iget_failed:
+-	err = PTR_ERR(inode);
+-	inode = NULL;
+ bad_orphan:
+-	ext4_warning(sb, "bad orphan inode %lu!  e2fsck was run?", ino);
+-	printk(KERN_WARNING "ext4_test_bit(bit=%d, block=%llu) = %d\n",
+-	       bit, (unsigned long long)bitmap_bh->b_blocknr,
+-	       ext4_test_bit(bit, bitmap_bh->b_data));
+-	printk(KERN_WARNING "inode=%p\n", inode);
++	ext4_error(sb, "bad orphan inode %lu", ino);
++	if (bitmap_bh)
++		printk(KERN_ERR "ext4_test_bit(bit=%d, block=%llu) = %d\n",
++		       bit, (unsigned long long)bitmap_bh->b_blocknr,
++		       ext4_test_bit(bit, bitmap_bh->b_data));
+ 	if (inode) {
+-		printk(KERN_WARNING "is_bad_inode(inode)=%d\n",
++		printk(KERN_ERR "is_bad_inode(inode)=%d\n",
+ 		       is_bad_inode(inode));
+-		printk(KERN_WARNING "NEXT_ORPHAN(inode)=%u\n",
++		printk(KERN_ERR "NEXT_ORPHAN(inode)=%u\n",
+ 		       NEXT_ORPHAN(inode));
+-		printk(KERN_WARNING "max_ino=%lu\n", max_ino);
+-		printk(KERN_WARNING "i_nlink=%u\n", inode->i_nlink);
++		printk(KERN_ERR "max_ino=%lu\n", max_ino);
++		printk(KERN_ERR "i_nlink=%u\n", inode->i_nlink);
+ 		/* Avoid freeing blocks if we got a bad deleted inode */
+ 		if (inode->i_nlink == 0)
+ 			inode->i_blocks = 0;
+ 		iput(inode);
+ 	}
+ 	brelse(bitmap_bh);
+-error:
+ 	return ERR_PTR(err);
+ }
+ 
+diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
+index eae5917c534e..0acf8cacb2be 100644
+--- a/fs/ext4/ioctl.c
++++ b/fs/ext4/ioctl.c
+@@ -365,7 +365,7 @@ static int ext4_ioctl_setproject(struct file *filp, __u32 projid)
+ 		struct dquot *transfer_to[MAXQUOTAS] = { };
+ 
+ 		transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
+-		if (transfer_to[PRJQUOTA]) {
++		if (!IS_ERR(transfer_to[PRJQUOTA])) {
+ 			err = __dquot_transfer(inode, transfer_to);
+ 			dqput(transfer_to[PRJQUOTA]);
+ 			if (err)
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 4424b7bf8ac6..da07228035c7 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1259,6 +1259,7 @@ static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
+ static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
+ {
+ 	int order = 1;
++	int bb_incr = 1 << (e4b->bd_blkbits - 1);
+ 	void *bb;
+ 
+ 	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
+@@ -1271,7 +1272,8 @@ static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
+ 			/* this block is part of buddy of order 'order' */
+ 			return order;
+ 		}
+-		bb += 1 << (e4b->bd_blkbits - order);
++		bb += bb_incr;
++		bb_incr >>= 1;
+ 		order++;
+ 	}
+ 	return 0;
+@@ -2576,7 +2578,7 @@ int ext4_mb_init(struct super_block *sb)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	unsigned i, j;
+-	unsigned offset;
++	unsigned offset, offset_incr;
+ 	unsigned max;
+ 	int ret;
+ 
+@@ -2605,11 +2607,13 @@ int ext4_mb_init(struct super_block *sb)
+ 
+ 	i = 1;
+ 	offset = 0;
++	offset_incr = 1 << (sb->s_blocksize_bits - 1);
+ 	max = sb->s_blocksize << 2;
+ 	do {
+ 		sbi->s_mb_offsets[i] = offset;
+ 		sbi->s_mb_maxs[i] = max;
+-		offset += 1 << (sb->s_blocksize_bits - i);
++		offset += offset_incr;
++		offset_incr = offset_incr >> 1;
+ 		max = max >> 1;
+ 		i++;
+ 	} while (i <= sb->s_blocksize_bits + 1);
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 48e4b8907826..fdd151f91522 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -2828,7 +2828,7 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
+ 			 * list entries can cause panics at unmount time.
+ 			 */
+ 			mutex_lock(&sbi->s_orphan_lock);
+-			list_del(&EXT4_I(inode)->i_orphan);
++			list_del_init(&EXT4_I(inode)->i_orphan);
+ 			mutex_unlock(&sbi->s_orphan_lock);
+ 		}
+ 	}
+diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
+index 81ac6480f9e7..c6dd0c4005cb 100644
+--- a/fs/nfs/pnfs_nfs.c
++++ b/fs/nfs/pnfs_nfs.c
+@@ -246,6 +246,23 @@ void pnfs_fetch_commit_bucket_list(struct list_head *pages,
+ 
+ }
+ 
++/* Helper function for pnfs_generic_commit_pagelist to catch an empty
++ * page list. This can happen when two commits race. */
++static bool
++pnfs_generic_commit_cancel_empty_pagelist(struct list_head *pages,
++					  struct nfs_commit_data *data,
++					  struct nfs_commit_info *cinfo)
++{
++	if (list_empty(pages)) {
++		if (atomic_dec_and_test(&cinfo->mds->rpcs_out))
++			wake_up_atomic_t(&cinfo->mds->rpcs_out);
++		nfs_commitdata_release(data);
++		return true;
++	}
++
++	return false;
++}
++
+ /* This follows nfs_commit_list pretty closely */
+ int
+ pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
+@@ -280,6 +297,11 @@ pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
+ 	list_for_each_entry_safe(data, tmp, &list, pages) {
+ 		list_del_init(&data->pages);
+ 		if (data->ds_commit_index < 0) {
++			/* another commit raced with us */
++			if (pnfs_generic_commit_cancel_empty_pagelist(mds_pages,
++				data, cinfo))
++				continue;
++
+ 			nfs_init_commit(data, mds_pages, NULL, cinfo);
+ 			nfs_initiate_commit(NFS_CLIENT(inode), data,
+ 					    NFS_PROTO(data->inode),
+@@ -288,6 +310,12 @@ pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
+ 			LIST_HEAD(pages);
+ 
+ 			pnfs_fetch_commit_bucket_list(&pages, data, cinfo);
++
++			/* another commit raced with us */
++			if (pnfs_generic_commit_cancel_empty_pagelist(&pages,
++				data, cinfo))
++				continue;
++
+ 			nfs_init_commit(data, &pages, data->lseg, cinfo);
+ 			initiate_commit(data, how);
+ 		}
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 5754835a2886..505bf3e69677 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -1709,6 +1709,10 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how,
+ {
+ 	struct nfs_commit_data	*data;
+ 
++	/* another commit raced with us */
++	if (list_empty(head))
++		return 0;
++
+ 	data = nfs_commitdata_alloc();
+ 
+ 	if (!data)
+diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
+index ee3aaa0a5317..ca0d3eb44925 100644
+--- a/fs/xfs/xfs_fsops.c
++++ b/fs/xfs/xfs_fsops.c
+@@ -243,8 +243,8 @@ xfs_growfs_data_private(
+ 		agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
+ 		agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
+ 		agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
+-		agf->agf_flfirst = 0;
+-		agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1);
++		agf->agf_flfirst = cpu_to_be32(1);
++		agf->agf_fllast = 0;
+ 		agf->agf_flcount = 0;
+ 		tmpsize = agsize - XFS_PREALLOC_BLOCKS(mp);
+ 		agf->agf_freeblks = cpu_to_be32(tmpsize);
+diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
+index ceba1a83cacc..c738a5297147 100644
+--- a/fs/xfs/xfs_inode.c
++++ b/fs/xfs/xfs_inode.c
+@@ -3225,13 +3225,14 @@ xfs_iflush_cluster(
+ 		 * We need to check under the i_flags_lock for a valid inode
+ 		 * here. Skip it if it is not valid or the wrong inode.
+ 		 */
+-		spin_lock(&ip->i_flags_lock);
+-		if (!ip->i_ino ||
++		spin_lock(&iq->i_flags_lock);
++		if (!iq->i_ino ||
++		    __xfs_iflags_test(iq, XFS_ISTALE) ||
+ 		    (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) {
+-			spin_unlock(&ip->i_flags_lock);
++			spin_unlock(&iq->i_flags_lock);
+ 			continue;
+ 		}
+-		spin_unlock(&ip->i_flags_lock);
++		spin_unlock(&iq->i_flags_lock);
+ 
+ 		/*
+ 		 * Do an un-protected check to see if the inode is dirty and
+@@ -3347,7 +3348,7 @@ xfs_iflush(
+ 	struct xfs_buf		**bpp)
+ {
+ 	struct xfs_mount	*mp = ip->i_mount;
+-	struct xfs_buf		*bp;
++	struct xfs_buf		*bp = NULL;
+ 	struct xfs_dinode	*dip;
+ 	int			error;
+ 
+@@ -3389,14 +3390,22 @@ xfs_iflush(
+ 	}
+ 
+ 	/*
+-	 * Get the buffer containing the on-disk inode.
++	 * Get the buffer containing the on-disk inode. We are doing a try-lock
++	 * operation here, so we may get  an EAGAIN error. In that case, we
++	 * simply want to return with the inode still dirty.
++	 *
++	 * If we get any other error, we effectively have a corruption situation
++	 * and we cannot flush the inode, so we treat it the same as failing
++	 * xfs_iflush_int().
+ 	 */
+ 	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
+ 			       0);
+-	if (error || !bp) {
++	if (error == -EAGAIN) {
+ 		xfs_ifunlock(ip);
+ 		return error;
+ 	}
++	if (error)
++		goto corrupt_out;
+ 
+ 	/*
+ 	 * First flush out the inode that xfs_iflush was called with.
+@@ -3424,7 +3433,8 @@ xfs_iflush(
+ 	return 0;
+ 
+ corrupt_out:
+-	xfs_buf_relse(bp);
++	if (bp)
++		xfs_buf_relse(bp);
+ 	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+ cluster_corrupt_out:
+ 	error = -EFSCORRUPTED;
+diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
+index 59c9b7bd958d..5cc9ae6107af 100644
+--- a/fs/xfs/xfs_super.c
++++ b/fs/xfs/xfs_super.c
+@@ -1233,6 +1233,16 @@ xfs_fs_remount(
+ 			return -EINVAL;
+ 		}
+ 
++		if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
++		    xfs_sb_has_ro_compat_feature(sbp,
++					XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
++			xfs_warn(mp,
++"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
++				(sbp->sb_features_ro_compat &
++					XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
++			return -EINVAL;
++		}
++
+ 		mp->m_flags &= ~XFS_MOUNT_RDONLY;
+ 
+ 		/*
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index e4e59f9d82f1..5eaac6decb2b 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1763,7 +1763,7 @@ extern void free_highmem_page(struct page *page);
+ extern void adjust_managed_page_count(struct page *page, long count);
+ extern void mem_init_print_info(const char *str);
+ 
+-extern void reserve_bootmem_region(unsigned long start, unsigned long end);
++extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);
+ 
+ /* Free the reserved page into the buddy system, so it gets managed. */
+ static inline void __free_reserved_page(struct page *page)
+diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
+index eeba75395f7d..c8529ce28a3f 100644
+--- a/include/video/imx-ipu-v3.h
++++ b/include/video/imx-ipu-v3.h
+@@ -16,6 +16,7 @@
+ #include <linux/videodev2.h>
+ #include <linux/bitmap.h>
+ #include <linux/fb.h>
++#include <linux/of.h>
+ #include <media/v4l2-mediabus.h>
+ #include <video/videomode.h>
+ 
+@@ -344,6 +345,7 @@ struct ipu_client_platformdata {
+ 	int dc;
+ 	int dp;
+ 	int dma[2];
++	struct device_node *of_node;
+ };
+ 
+ #endif /* __DRM_IPU_H__ */
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 10e088237fed..c112abb736f6 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -916,17 +916,28 @@ static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
+ 		task_pid_type(p, wo->wo_type) == wo->wo_pid;
+ }
+ 
+-static int eligible_child(struct wait_opts *wo, struct task_struct *p)
++static int
++eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p)
+ {
+ 	if (!eligible_pid(wo, p))
+ 		return 0;
+-	/* Wait for all children (clone and not) if __WALL is set;
+-	 * otherwise, wait for clone children *only* if __WCLONE is
+-	 * set; otherwise, wait for non-clone children *only*.  (Note:
+-	 * A "clone" child here is one that reports to its parent
+-	 * using a signal other than SIGCHLD.) */
+-	if (((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
+-	    && !(wo->wo_flags & __WALL))
++
++	/*
++	 * Wait for all children (clone and not) if __WALL is set or
++	 * if it is traced by us.
++	 */
++	if (ptrace || (wo->wo_flags & __WALL))
++		return 1;
++
++	/*
++	 * Otherwise, wait for clone children *only* if __WCLONE is set;
++	 * otherwise, wait for non-clone children *only*.
++	 *
++	 * Note: a "clone" child here is one that reports to its parent
++	 * using a signal other than SIGCHLD, or a non-leader thread which
++	 * we can only see if it is traced by us.
++	 */
++	if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
+ 		return 0;
+ 
+ 	return 1;
+@@ -1298,7 +1309,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
+ 	if (unlikely(exit_state == EXIT_DEAD))
+ 		return 0;
+ 
+-	ret = eligible_child(wo, p);
++	ret = eligible_child(wo, ptrace, p);
+ 	if (!ret)
+ 		return ret;
+ 
+diff --git a/lib/dma-debug.c b/lib/dma-debug.c
+index 4a1515f4b452..51a76af25c66 100644
+--- a/lib/dma-debug.c
++++ b/lib/dma-debug.c
+@@ -657,9 +657,9 @@ static struct dma_debug_entry *dma_entry_alloc(void)
+ 	spin_lock_irqsave(&free_entries_lock, flags);
+ 
+ 	if (list_empty(&free_entries)) {
+-		pr_err("DMA-API: debugging out of memory - disabling\n");
+ 		global_disable = true;
+ 		spin_unlock_irqrestore(&free_entries_lock, flags);
++		pr_err("DMA-API: debugging out of memory - disabling\n");
+ 		return NULL;
+ 	}
+ 
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 629ce645cffd..1d11790639f0 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -971,7 +971,7 @@ static inline void init_reserved_page(unsigned long pfn)
+  * marks the pages PageReserved. The remaining valid pages are later
+  * sent to the buddy page allocator.
+  */
+-void __meminit reserve_bootmem_region(unsigned long start, unsigned long end)
++void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
+ {
+ 	unsigned long start_pfn = PFN_DOWN(start);
+ 	unsigned long end_pfn = PFN_UP(end);
+diff --git a/mm/rmap.c b/mm/rmap.c
+index 79f3bf047f38..be84cde3b48d 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -1112,6 +1112,8 @@ void page_move_anon_rmap(struct page *page,
+ 
+ 	VM_BUG_ON_PAGE(!PageLocked(page), page);
+ 	VM_BUG_ON_VMA(!anon_vma, vma);
++	if (IS_ENABLED(CONFIG_DEBUG_VM) && PageTransHuge(page))
++		address &= HPAGE_PMD_MASK;
+ 	VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);
+ 
+ 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
+index 8a4cc2f7f0db..e45cb9155039 100644
+--- a/net/bluetooth/6lowpan.c
++++ b/net/bluetooth/6lowpan.c
+@@ -431,15 +431,18 @@ static int setup_header(struct sk_buff *skb, struct net_device *netdev,
+ 			bdaddr_t *peer_addr, u8 *peer_addr_type)
+ {
+ 	struct in6_addr ipv6_daddr;
++	struct ipv6hdr *hdr;
+ 	struct lowpan_dev *dev;
+ 	struct lowpan_peer *peer;
+ 	bdaddr_t addr, *any = BDADDR_ANY;
+ 	u8 *daddr = any->b;
+ 	int err, status = 0;
+ 
++	hdr = ipv6_hdr(skb);
++
+ 	dev = lowpan_dev(netdev);
+ 
+-	memcpy(&ipv6_daddr, &lowpan_cb(skb)->addr, sizeof(ipv6_daddr));
++	memcpy(&ipv6_daddr, &hdr->daddr, sizeof(ipv6_daddr));
+ 
+ 	if (ipv6_addr_is_multicast(&ipv6_daddr)) {
+ 		lowpan_cb(skb)->chan = NULL;
+@@ -489,15 +492,9 @@ static int header_create(struct sk_buff *skb, struct net_device *netdev,
+ 			 unsigned short type, const void *_daddr,
+ 			 const void *_saddr, unsigned int len)
+ {
+-	struct ipv6hdr *hdr;
+-
+ 	if (type != ETH_P_IPV6)
+ 		return -EINVAL;
+ 
+-	hdr = ipv6_hdr(skb);
+-
+-	memcpy(&lowpan_cb(skb)->addr, &hdr->daddr, sizeof(struct in6_addr));
+-
+ 	return 0;
+ }
+ 
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index 1095be9c80ab..4605dc73def6 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -857,8 +857,8 @@ unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct g
+ 		goto out;
+ 	if (svc_getnl(&buf->head[0]) != seq)
+ 		goto out;
+-	/* trim off the mic at the end before returning */
+-	xdr_buf_trim(buf, mic.len + 4);
++	/* trim off the mic and padding at the end before returning */
++	xdr_buf_trim(buf, round_up_to_quad(mic.len) + 4);
+ 	stat = 0;
+ out:
+ 	kfree(mic.data);
+diff --git a/scripts/package/Makefile b/scripts/package/Makefile
+index c2c7389bfbab..71b4a8af9d4d 100644
+--- a/scripts/package/Makefile
++++ b/scripts/package/Makefile
+@@ -52,7 +52,7 @@ rpm-pkg rpm: FORCE
+ 	$(call cmd,src_tar,$(KERNELPATH),kernel.spec)
+ 	$(CONFIG_SHELL) $(srctree)/scripts/mkversion > $(objtree)/.tmp_version
+ 	mv -f $(objtree)/.tmp_version $(objtree)/.version
+-	rpmbuild --target $(UTS_MACHINE) -ta $(KERNELPATH).tar.gz
++	rpmbuild $(RPMOPTS) --target $(UTS_MACHINE) -ta $(KERNELPATH).tar.gz
+ 	rm $(KERNELPATH).tar.gz kernel.spec
+ 
+ # binrpm-pkg
+@@ -63,7 +63,7 @@ binrpm-pkg: FORCE
+ 	$(CONFIG_SHELL) $(srctree)/scripts/mkversion > $(objtree)/.tmp_version
+ 	mv -f $(objtree)/.tmp_version $(objtree)/.version
+ 
+-	rpmbuild --define "_builddir $(objtree)" --target \
++	rpmbuild $(RPMOPTS) --define "_builddir $(objtree)" --target \
+ 		$(UTS_MACHINE) -bb $(objtree)/binkernel.spec
+ 	rm binkernel.spec
+ 
+diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
+index cda27c22812a..eb8fe212e163 100644
+--- a/sound/soc/codecs/ak4642.c
++++ b/sound/soc/codecs/ak4642.c
+@@ -560,6 +560,7 @@ static const struct regmap_config ak4642_regmap = {
+ 	.max_register		= FIL1_3,
+ 	.reg_defaults		= ak4642_reg,
+ 	.num_reg_defaults	= NUM_AK4642_REG_DEFAULTS,
++	.cache_type		= REGCACHE_RBTREE,
+ };
+ 
+ static const struct regmap_config ak4643_regmap = {
+@@ -568,6 +569,7 @@ static const struct regmap_config ak4643_regmap = {
+ 	.max_register		= SPK_MS,
+ 	.reg_defaults		= ak4643_reg,
+ 	.num_reg_defaults	= ARRAY_SIZE(ak4643_reg),
++	.cache_type		= REGCACHE_RBTREE,
+ };
+ 
+ static const struct regmap_config ak4648_regmap = {
+@@ -576,6 +578,7 @@ static const struct regmap_config ak4648_regmap = {
+ 	.max_register		= EQ_FBEQE,
+ 	.reg_defaults		= ak4648_reg,
+ 	.num_reg_defaults	= ARRAY_SIZE(ak4648_reg),
++	.cache_type		= REGCACHE_RBTREE,
+ };
+ 
+ static const struct ak4642_drvdata ak4642_drvdata = {


^ permalink raw reply related	[flat|nested] 12+ messages in thread

end of thread, other threads:[~2016-06-08 10:11 UTC | newest]

Thread overview: 12+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2016-03-22 22:46 [gentoo-commits] proj/linux-patches:4.5 commit in: / Mike Pagano
  -- strict thread matches above, loose matches on Subject: below --
2016-06-08 10:11 Mike Pagano
2016-06-02 19:12 Mike Pagano
2016-06-02 19:08 Mike Pagano
2016-05-19 13:02 Mike Pagano
2016-05-12  0:16 Mike Pagano
2016-05-04 23:56 Mike Pagano
2016-04-20 11:28 Mike Pagano
2016-04-12 18:53 Mike Pagano
2016-03-29  0:45 Anthony G. Basile
2016-03-27 19:35 Anthony G. Basile
2016-03-09 17:05 Mike Pagano

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox