Subject: [gentoo-commits] proj/linux-patches:3.19 commit in: /
From: Mike Pagano @ 2014-12-23 17:32 UTC
  To: gentoo-commits

commit:     514ae3e4b0d105f8eee62133c0fedded9002638d
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Dec 23 17:32:18 2014 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Dec 23 17:32:18 2014 +0000
URL:        http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=514ae3e4

ACPI: Disable Windows 8 compatibility for some Lenovo ThinkPads. Patch to ensure that /dev/root doesn't appear in /proc/mounts when booting without an initramfs. Patch to not lock when UMH is waiting on the current thread spawned by linuxrc (bug #481344). Bootsplash ported by Uladzimir Bely (bug #513334). Kernel patch enables gcc optimizations for additional CPUs.

---
 0000_README                                        |   20 +
 2700_ThinkPad-30-brightness-control-fix.patch      |   67 +
 2900_dev-root-proc-mount-fix.patch                 |   30 +
 2905_2disk-resume-image-fix.patch                  |   24 +
 4200_fbcondecor-3.16.patch                         | 2119 ++++++++++++++++++++
 ...able-additional-cpu-optimizations-for-gcc.patch |  327 +++
 6 files changed, 2587 insertions(+)

diff --git a/0000_README b/0000_README
index 36c2b96..7a25c41 100644
--- a/0000_README
+++ b/0000_README
@@ -47,6 +47,26 @@ Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.
 
+Patch:  2700_ThinkPad-30-brightness-control-fix.patch
+From:   Seth Forshee <seth.forshee@canonical.com>
+Desc:   ACPI: Disable Windows 8 compatibility for some Lenovo ThinkPads.
+
+Patch:  2900_dev-root-proc-mount-fix.patch
+From:   https://bugs.gentoo.org/show_bug.cgi?id=438380
+Desc:   Ensure that /dev/root doesn't appear in /proc/mounts when booting without an initramfs.
+
+Patch:  2905_2disk-resume-image-fix.patch
+From:   Al Viro <viro <at> ZenIV.linux.org.uk>
+Desc:   Do not lock when UMH is waiting on the current thread spawned by linuxrc (bug #481344).
+
+Patch:  4200_fbcondecor-3.16.patch
+From:   http://www.mepiscommunity.org/fbcondecor
+Desc:   Bootsplash ported by Uladzimir Bely (bug #513334)
+
 Patch:  4567_distro-Gentoo-Kconfig.patch
 From:   Tom Wijsman <TomWij@gentoo.org>
 Desc:   Add Gentoo Linux support config settings and defaults.
+
+Patch:  5000_enable-additional-cpu-optimizations-for-gcc.patch
+From:   https://github.com/graysky2/kernel_gcc_patch/
+Desc:   Kernel patch enables gcc optimizations for additional CPUs.

diff --git a/2700_ThinkPad-30-brightness-control-fix.patch b/2700_ThinkPad-30-brightness-control-fix.patch
new file mode 100644
index 0000000..b548c6d
--- /dev/null
+++ b/2700_ThinkPad-30-brightness-control-fix.patch
@@ -0,0 +1,67 @@
+diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
+index cb96296..6c242ed 100644
+--- a/drivers/acpi/blacklist.c
++++ b/drivers/acpi/blacklist.c
+@@ -269,6 +276,61 @@  static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
+ 	},
+ 
+ 	/*
++	 * The following Lenovo models have a broken workaround in the
++	 * acpi_video backlight implementation to meet the Windows 8
++	 * requirement of 101 backlight levels. Reverting to pre-Win8
++	 * behavior fixes the problem.
++	 */
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "Lenovo ThinkPad L430",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L430"),
++		},
++	},
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "Lenovo ThinkPad T430s",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430s"),
++		},
++	},
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "Lenovo ThinkPad T530",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T530"),
++		},
++	},
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "Lenovo ThinkPad W530",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W530"),
++		},
++	},
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "Lenovo ThinkPad X1 Carbon",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X1 Carbon"),
++		},
++	},
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "Lenovo ThinkPad X230",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X230"),
++		},
++	},
++
++	/*
+ 	 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
+ 	 * Linux ignores it, except for the machines enumerated below.
+ 	 */
+

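For context: the DMI strings this quirk table keys on are also exported through
sysfs, so whether a given machine would hit one of the entries above can be
checked from userspace. A minimal sketch, not part of the patch; the paths are
the standard dmi-id sysfs files:

#include <stdio.h>

static void show(const char *path)
{
        FILE *f = fopen(path, "r");
        char buf[128];

        if (f && fgets(buf, sizeof(buf), f))
                printf("%s: %s", path, buf);    /* value already ends in '\n' */
        if (f)
                fclose(f);
}

int main(void)
{
        show("/sys/class/dmi/id/sys_vendor");      /* matched via DMI_SYS_VENDOR */
        show("/sys/class/dmi/id/product_version"); /* matched via DMI_PRODUCT_VERSION */
        return 0;
}

On an affected model this prints LENOVO plus one of the ThinkPad product
versions listed above, and the patched kernel then reverts acpi_video to
pre-Win8 backlight behavior for that machine.
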
diff --git a/2900_dev-root-proc-mount-fix.patch b/2900_dev-root-proc-mount-fix.patch
new file mode 100644
index 0000000..6ea86e2
--- /dev/null
+++ b/2900_dev-root-proc-mount-fix.patch
@@ -0,0 +1,30 @@
+--- a/init/do_mounts.c	2014-08-26 08:03:30.000013100 -0400
++++ b/init/do_mounts.c	2014-08-26 08:11:19.720014712 -0400
+@@ -484,7 +484,10 @@ void __init change_floppy(char *fmt, ...
+ 	va_start(args, fmt);
+ 	vsprintf(buf, fmt, args);
+ 	va_end(args);
+-	fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
++	if (saved_root_name[0])
++		fd = sys_open(saved_root_name, O_RDWR | O_NDELAY, 0);
++	else
++		fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
+ 	if (fd >= 0) {
+ 		sys_ioctl(fd, FDEJECT, 0);
+ 		sys_close(fd);
+@@ -527,8 +530,13 @@ void __init mount_root(void)
+ 	}
+ #endif
+ #ifdef CONFIG_BLOCK
+-	create_dev("/dev/root", ROOT_DEV);
+-	mount_block_root("/dev/root", root_mountflags);
++	if (saved_root_name[0]) {
++		create_dev(saved_root_name, ROOT_DEV);
++		mount_block_root(saved_root_name, root_mountflags);
++	} else {
++		create_dev("/dev/root", ROOT_DEV);
++		mount_block_root("/dev/root", root_mountflags);
++	}
+ #endif
+ }
+ 

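The user-visible effect of the mount_root() hunk above is that, when booting
without an initramfs, the device named on the kernel command line
(saved_root_name) is registered and mounted instead of a synthetic /dev/root
node, so /proc/mounts reports the real device. A quick userspace check,
sketched in C:

#include <stdio.h>
#include <string.h>

int main(void)
{
        FILE *f = fopen("/proc/mounts", "r");
        char dev[256], mnt[256];

        if (!f)
                return 1;
        /* each line: <device> <mountpoint> <fstype> <options> <dump> <pass> */
        while (fscanf(f, "%255s %255s %*[^\n]", dev, mnt) == 2) {
                if (!strcmp(mnt, "/")) {
                        printf("root device: %s\n", dev);
                        break;
                }
        }
        fclose(f);
        return 0;
}

Booted with root=/dev/sda1 and no initramfs, a patched kernel should report
/dev/sda1 here rather than /dev/root.
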
diff --git a/2905_2disk-resume-image-fix.patch b/2905_2disk-resume-image-fix.patch
new file mode 100644
index 0000000..7e95d29
--- /dev/null
+++ b/2905_2disk-resume-image-fix.patch
@@ -0,0 +1,24 @@
+diff --git a/kernel/kmod.c b/kernel/kmod.c
+index fb32636..d968882 100644
+--- a/kernel/kmod.c
++++ b/kernel/kmod.c
+@@ -575,7 +575,8 @@
+ 		call_usermodehelper_freeinfo(sub_info);
+ 		return -EINVAL;
+ 	}
+-	helper_lock();
++	if (!(current->flags & PF_FREEZER_SKIP))
++		helper_lock();
+ 	if (!khelper_wq || usermodehelper_disabled) {
+ 		retval = -EBUSY;
+ 		goto out;
+@@ -611,7 +612,8 @@ wait_done:
+ out:
+ 	call_usermodehelper_freeinfo(sub_info);
+ unlock:
+-	helper_unlock();
++	if (!(current->flags & PF_FREEZER_SKIP))
++		helper_unlock();
+ 	return retval;
+ }
+ EXPORT_SYMBOL(call_usermodehelper_exec);

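The 4200_fbcondecor patch that follows registers a "fbcondecor" misc device
and documents an ioctl protocol in the Documentation/fb/fbcondecor.txt file it
creates (included in the diff below). As a rough illustration of that
interface, a userspace tool could query the decor state of a console like
this; a sketch only, assuming the patched <linux/fb.h> and a /dev/fbcondecor
node for the misc device:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fb.h>   /* FBIOCONDECOR_*, struct fbcon_decor_iowrapper (patched) */

int main(void)
{
        unsigned int state = 0;
        struct fbcon_decor_iowrapper w = {
                .vc     = 1,                            /* virtual console number */
                .origin = FBCON_DECOR_IO_ORIG_USER,     /* plain userspace tool */
                .data   = &state,
        };
        int fd = open("/dev/fbcondecor", O_RDWR);

        if (fd < 0) {
                perror("open /dev/fbcondecor");
                return 1;
        }
        if (ioctl(fd, FBIOCONDECOR_GETSTATE, &w) == 0)
                printf("decor on vc%d: %s\n", w.vc, state ? "enabled" : "disabled");
        close(fd);
        return 0;
}

The kernel drives the same protocol in the other direction by execing the
helper with "<protocol version> <command> <console> <fb> <theme>" arguments,
e.g. /sbin/fbcondecor_helper 2 init 1 0 <theme>, as fbcon_decor_call_helper()
in the diff below shows.
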
diff --git a/4200_fbcondecor-3.16.patch b/4200_fbcondecor-3.16.patch
new file mode 100644
index 0000000..c96e5dc
--- /dev/null
+++ b/4200_fbcondecor-3.16.patch
@@ -0,0 +1,2119 @@
+diff --git a/Documentation/fb/00-INDEX b/Documentation/fb/00-INDEX
+index fe85e7c..2230930 100644
+--- a/Documentation/fb/00-INDEX
++++ b/Documentation/fb/00-INDEX
+@@ -23,6 +23,8 @@ ep93xx-fb.txt
+ 	- info on the driver for EP93xx LCD controller.
+ fbcon.txt
+ 	- intro to and usage guide for the framebuffer console (fbcon).
++fbcondecor.txt
++	- info on the Framebuffer Console Decoration
+ framebuffer.txt
+ 	- introduction to frame buffer devices.
+ gxfb.txt
+diff --git a/Documentation/fb/fbcondecor.txt b/Documentation/fb/fbcondecor.txt
+new file mode 100644
+index 0000000..3388c61
+--- /dev/null
++++ b/Documentation/fb/fbcondecor.txt
+@@ -0,0 +1,207 @@
++What is it?
++-----------
++
++The framebuffer decorations are a kernel feature which allows displaying a 
++background picture on selected consoles.
++
++What do I need to get it to work?
++---------------------------------
++
++To get fbcondecor up-and-running you will have to:
++ 1) get a copy of splashutils [1] or a similar program
++ 2) get some fbcondecor themes
++ 3) build the kernel helper program
++ 4) build your kernel with the FB_CON_DECOR option enabled.
++
++To get fbcondecor operational right after fbcon initialization is finished, you
++will have to include a theme and the kernel helper into your initramfs image.
++Please refer to splashutils documentation for instructions on how to do that.
++
++[1] The splashutils package can be downloaded from:
++    http://github.com/alanhaggai/fbsplash
++
++The userspace helper
++--------------------
++
++The userspace fbcondecor helper (by default: /sbin/fbcondecor_helper) is called by the
++kernel whenever an important event occurs and the kernel needs some kind of
++job to be carried out. Important events include console switches and video
++mode switches (the kernel requests background images and configuration
++parameters for the current console). The fbcondecor helper must be accessible at
++all times. If it's not, fbcondecor will be switched off automatically.
++
++It's possible to set the path to the fbcondecor helper by writing it to
++/proc/sys/kernel/fbcondecor.
++
++*****************************************************************************
++
++The information below is mostly technical stuff. There's probably no need to
++read it unless you plan to develop a userspace helper.
++
++The fbcondecor protocol
++-----------------------
++
++The fbcondecor protocol defines a communication interface between the kernel and
++the userspace fbcondecor helper.
++
++The kernel side is responsible for:
++
++ * rendering console text, using an image as a background (instead of a
++   standard solid color fbcon uses),
++ * accepting commands from the user via ioctls on the fbcondecor device,
++ * calling the userspace helper to set things up as soon as the fb subsystem 
++   is initialized.
++
++The userspace helper is responsible for everything else, including parsing
++configuration files, decompressing the image files whenever the kernel needs
++it, and communicating with the kernel if necessary.
++
++The fbcondecor protocol specifies how communication is done in both ways:
++kernel->userspace and userspace->kernel.
++  
++Kernel -> Userspace
++-------------------
++
++The kernel communicates with the userspace helper by calling it and specifying
++the task to be done in a series of arguments.
++
++The arguments follow the pattern:
++<fbcondecor protocol version> <command> <parameters>
++
++All commands defined in fbcondecor protocol v2 have the following parameters:
++ virtual console
++ framebuffer number
++ theme
++
++Fbcondecor protocol v1 specified an additional 'fbcondecor mode' after the
++framebuffer number. Fbcondecor protocol v1 is deprecated and should not be used.
++
++Fbcondecor protocol v2 specifies the following commands:
++
++getpic
++------
++ The kernel issues this command to request image data. It's up to the 
++ userspace  helper to find a background image appropriate for the specified 
++ theme and the current resolution. The userspace helper should respond by 
++ issuing the FBIOCONDECOR_SETPIC ioctl.
++
++init
++----
++ The kernel issues this command after the fbcondecor device is created and
++ the fbcondecor interface is initialized. Upon receiving 'init', the userspace
++ helper should parse the kernel command line (/proc/cmdline) or otherwise
++ decide whether fbcondecor is to be activated.
++
++ To activate fbcondecor on the first console the helper should issue the
++ FBIOCONDECOR_SETCFG, FBIOCONDECOR_SETPIC and FBIOCONDECOR_SETSTATE commands,
++ in the above-mentioned order.
++
++ When the userspace helper is called in an early phase of the boot process
++ (right after the initialization of fbcon), no filesystems will be mounted.
++ The helper program should mount sysfs and then create the appropriate
++ framebuffer, fbcondecor and tty0 devices (if they don't already exist) to get
++ current display settings and to be able to communicate with the kernel side.
++ It should probably also mount the procfs to be able to parse the kernel
++ command line parameters.
++
++ Note that the console sem is not held when the kernel calls fbcondecor_helper
++ with the 'init' command. The fbcondecor helper should perform all ioctls with
++ origin set to FBCON_DECOR_IO_ORIG_USER.
++
++modechange
++----------
++ The kernel issues this command on a mode change. The helper's response should
++ be similar to the response to the 'init' command. Note that this time the
++ console sem is held and all ioctls must be performed with origin set to
++ FBCON_DECOR_IO_ORIG_KERNEL.
++
++
++Userspace -> Kernel
++-------------------
++
++Userspace programs can communicate with fbcondecor via ioctls on the
++fbcondecor device. These ioctls are to be used by both the userspace helper
++(called only by the kernel) and userspace configuration tools (run by the users).
++
++The fbcondecor helper should set the origin field to FBCON_DECOR_IO_ORIG_KERNEL
++when doing the appropriate ioctls. All userspace configuration tools should
++use FBCON_DECOR_IO_ORIG_USER. Failure to set the appropriate value in the origin
++field when performing ioctls from the kernel helper will most likely result
++in a console deadlock.
++
++FBCON_DECOR_IO_ORIG_KERNEL instructs fbcondecor not to try to acquire the console
++semaphore. Not surprisingly, FBCON_DECOR_IO_ORIG_USER instructs it to acquire
++the console sem.
++
++The framebuffer console decoration provides the following ioctls (all defined in 
++linux/fb.h):
++
++FBIOCONDECOR_SETPIC
++description: loads a background picture for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: struct fb_image*
++notes: 
++If called for consoles other than the current foreground one, the picture data
++will be ignored.
++
++If the current virtual console is running in a 8-bpp mode, the cmap substruct
++of fb_image has to be filled appropriately: start should be set to 16 (first
++16 colors are reserved for fbcon), len to a value <= 240 and red, green and
++blue should point to valid cmap data. The transp field is ignored. The fields
++dx, dy, bg_color, fg_color in fb_image are ignored as well.
++
++FBIOCONDECOR_SETCFG
++description: sets the fbcondecor config for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: struct vc_decor*
++notes: The structure has to be filled with valid data.
++
++FBIOCONDECOR_GETCFG
++description: gets the fbcondecor config for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: struct vc_decor*
++
++FBIOCONDECOR_SETSTATE
++description: sets the fbcondecor state for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: unsigned int*
++          values: 0 = disabled, 1 = enabled.
++
++FBIOCONDECOR_GETSTATE
++description: gets the fbcondecor state for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: unsigned int*
++          values: as in FBIOCONDECOR_SETSTATE
++
++Info on used structures:
++
++Definition of struct vc_decor can be found in linux/console_decor.h. It's
++heavily commented. Note that the 'theme' field should point to a string
++no longer than FBCON_DECOR_THEME_LEN. When a FBIOCONDECOR_GETCFG call is
++performed, the theme field should point to a char buffer of length
++FBCON_DECOR_THEME_LEN.
++
++Definition of struct fbcon_decor_iowrapper can be found in linux/fb.h.
++The fields in this struct have the following meaning:
++
++vc: 
++Virtual console number.
++
++origin: 
++Specifies if the ioctl is performed as a response to a kernel request. The
++fbcondecor helper should set this field to FBCON_DECOR_IO_ORIG_KERNEL, userspace
++programs should set it to FBCON_DECOR_IO_ORIG_USER. This field is necessary to
++avoid console semaphore deadlocks.
++
++data: 
++Pointer to a data structure appropriate for the performed ioctl. Type of
++the data struct is specified in the ioctls description.
++
++*****************************************************************************
++
++Credit
++------
++
++Original 'bootsplash' project & implementation by:
++  Volker Poplawski <volker@poplawski.de>, Stefan Reinauer <stepan@suse.de>,
++  Steffen Winterfeldt <snwint@suse.de>, Michael Schroeder <mls@suse.de>,
++  Ken Wimer <wimer@suse.de>.
++
++Fbcondecor, fbcondecor protocol design, current implementation & docs by:
++  Michal Januszewski <michalj+fbcondecor@gmail.com>
++
+diff --git a/drivers/Makefile b/drivers/Makefile
+index 7183b6a..d576148 100644
+--- a/drivers/Makefile
++++ b/drivers/Makefile
+@@ -17,6 +17,10 @@ obj-y				+= pwm/
+ obj-$(CONFIG_PCI)		+= pci/
+ obj-$(CONFIG_PARISC)		+= parisc/
+ obj-$(CONFIG_RAPIDIO)		+= rapidio/
++# tty/ comes before char/ so that the VT console is the boot-time
++# default.
++obj-y				+= tty/
++obj-y				+= char/
+ obj-y				+= video/
+ obj-y				+= idle/
+ 
+@@ -42,11 +46,6 @@ obj-$(CONFIG_REGULATOR)		+= regulator/
+ # reset controllers early, since gpu drivers might rely on them to initialize
+ obj-$(CONFIG_RESET_CONTROLLER)	+= reset/
+ 
+-# tty/ comes before char/ so that the VT console is the boot-time
+-# default.
+-obj-y				+= tty/
+-obj-y				+= char/
+-
+ # gpu/ comes after char for AGP vs DRM startup
+ obj-y				+= gpu/
+ 
+diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
+index fe1cd01..6d2e87a 100644
+--- a/drivers/video/console/Kconfig
++++ b/drivers/video/console/Kconfig
+@@ -126,6 +126,19 @@ config FRAMEBUFFER_CONSOLE_ROTATION
+          such that other users of the framebuffer will remain normally
+          oriented.
+ 
++config FB_CON_DECOR
++	bool "Support for the Framebuffer Console Decorations"
++	depends on FRAMEBUFFER_CONSOLE=y && !FB_TILEBLITTING
++	default n
++	---help---
++	  This option enables support for framebuffer console decorations which
++	  makes it possible to display images in the background of the system
++	  consoles.  Note that userspace utilities are necessary in order to take 
++	  advantage of these features. Refer to Documentation/fb/fbcondecor.txt 
++	  for more information.
++
++	  If unsure, say N.
++
+ config STI_CONSOLE
+         bool "STI text console"
+         depends on PARISC
+diff --git a/drivers/video/console/Makefile b/drivers/video/console/Makefile
+index 43bfa48..cc104b6 100644
+--- a/drivers/video/console/Makefile
++++ b/drivers/video/console/Makefile
+@@ -16,4 +16,5 @@ obj-$(CONFIG_FRAMEBUFFER_CONSOLE)     += fbcon_rotate.o fbcon_cw.o fbcon_ud.o \
+                                          fbcon_ccw.o
+ endif
+ 
++obj-$(CONFIG_FB_CON_DECOR)     	  += fbcondecor.o cfbcondecor.o
+ obj-$(CONFIG_FB_STI)              += sticore.o
+diff --git a/drivers/video/console/bitblit.c b/drivers/video/console/bitblit.c
+index 61b182b..984384b 100644
+--- a/drivers/video/console/bitblit.c
++++ b/drivers/video/console/bitblit.c
+@@ -18,6 +18,7 @@
+ #include <linux/console.h>
+ #include <asm/types.h>
+ #include "fbcon.h"
++#include "fbcondecor.h"
+ 
+ /*
+  * Accelerated handlers.
+@@ -55,6 +56,13 @@ static void bit_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ 	area.height = height * vc->vc_font.height;
+ 	area.width = width * vc->vc_font.width;
+ 
++	if (fbcon_decor_active(info, vc)) {
++ 		area.sx += vc->vc_decor.tx;
++ 		area.sy += vc->vc_decor.ty;
++ 		area.dx += vc->vc_decor.tx;
++ 		area.dy += vc->vc_decor.ty;
++ 	}
++
+ 	info->fbops->fb_copyarea(info, &area);
+ }
+ 
+@@ -380,11 +388,15 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
+ 	cursor.image.depth = 1;
+ 	cursor.rop = ROP_XOR;
+ 
+-	if (info->fbops->fb_cursor)
+-		err = info->fbops->fb_cursor(info, &cursor);
++	if (fbcon_decor_active(info, vc)) {
++		fbcon_decor_cursor(info, &cursor);
++	} else {
++		if (info->fbops->fb_cursor)
++			err = info->fbops->fb_cursor(info, &cursor);
+ 
+-	if (err)
+-		soft_cursor(info, &cursor);
++		if (err)
++			soft_cursor(info, &cursor);
++	}
+ 
+ 	ops->cursor_reset = 0;
+ }
+diff --git a/drivers/video/console/cfbcondecor.c b/drivers/video/console/cfbcondecor.c
+new file mode 100644
+index 0000000..a2b4497
+--- /dev/null
++++ b/drivers/video/console/cfbcondecor.c
+@@ -0,0 +1,471 @@
++/*
++ *  linux/drivers/video/cfbcon_decor.c -- Framebuffer decor render functions
++ *
++ *  Copyright (C) 2004 Michal Januszewski <michalj+fbcondecor@gmail.com>
++ *
++ *  Code based upon "Bootdecor" (C) 2001-2003
++ *       Volker Poplawski <volker@poplawski.de>,
++ *       Stefan Reinauer <stepan@suse.de>,
++ *       Steffen Winterfeldt <snwint@suse.de>,
++ *       Michael Schroeder <mls@suse.de>,
++ *       Ken Wimer <wimer@suse.de>.
++ *
++ *  This file is subject to the terms and conditions of the GNU General Public
++ *  License.  See the file COPYING in the main directory of this archive for
++ *  more details.
++ */
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/fb.h>
++#include <linux/selection.h>
++#include <linux/slab.h>
++#include <linux/vt_kern.h>
++#include <asm/irq.h>
++
++#include "fbcon.h"
++#include "fbcondecor.h"
++
++#define parse_pixel(shift,bpp,type)						\
++	do {									\
++		if (d & (0x80 >> (shift)))					\
++			dd2[(shift)] = fgx;					\
++		else								\
++			dd2[(shift)] = transparent ? *(type *)decor_src : bgx;	\
++		decor_src += (bpp);						\
++	} while (0)								\
++
++extern int get_color(struct vc_data *vc, struct fb_info *info,
++		     u16 c, int is_fg);
++
++void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc)
++{
++	int i, j, k;
++	int minlen = min(min(info->var.red.length, info->var.green.length),
++			     info->var.blue.length);
++	u32 col;
++
++	for (j = i = 0; i < 16; i++) {
++		k = color_table[i];
++
++		col = ((vc->vc_palette[j++]  >> (8-minlen))
++			<< info->var.red.offset);
++		col |= ((vc->vc_palette[j++] >> (8-minlen))
++			<< info->var.green.offset);
++		col |= ((vc->vc_palette[j++] >> (8-minlen))
++			<< info->var.blue.offset);
++			((u32 *)info->pseudo_palette)[k] = col;
++	}
++}
++
++void fbcon_decor_renderc(struct fb_info *info, int ypos, int xpos, int height,
++		      int width, u8* src, u32 fgx, u32 bgx, u8 transparent)
++{
++	unsigned int x, y;
++	u32 dd;
++	int bytespp = ((info->var.bits_per_pixel + 7) >> 3);
++	unsigned int d = ypos * info->fix.line_length + xpos * bytespp;
++	unsigned int ds = (ypos * info->var.xres + xpos) * bytespp;
++	u16 dd2[4];
++
++	u8* decor_src = (u8 *)(info->bgdecor.data + ds);
++	u8* dst = (u8 *)(info->screen_base + d);
++
++	if ((ypos + height) > info->var.yres || (xpos + width) > info->var.xres)
++		return;
++
++	for (y = 0; y < height; y++) {
++		switch (info->var.bits_per_pixel) {
++
++		case 32:
++			for (x = 0; x < width; x++) {
++
++				if ((x & 7) == 0)
++					d = *src++;
++				if (d & 0x80)
++					dd = fgx;
++				else
++					dd = transparent ?
++					     *(u32 *)decor_src : bgx;
++
++				d <<= 1;
++				decor_src += 4;
++				fb_writel(dd, dst);
++				dst += 4;
++			}
++			break;
++		case 24:
++			for (x = 0; x < width; x++) {
++
++				if ((x & 7) == 0)
++					d = *src++;
++				if (d & 0x80)
++					dd = fgx;
++				else
++					dd = transparent ?
++					     (*(u32 *)decor_src & 0xffffff) : bgx;
++
++				d <<= 1;
++				decor_src += 3;
++#ifdef __LITTLE_ENDIAN
++				fb_writew(dd & 0xffff, dst);
++				dst += 2;
++				fb_writeb((dd >> 16), dst);
++#else
++				fb_writew(dd >> 8, dst);
++				dst += 2;
++				fb_writeb(dd & 0xff, dst);
++#endif
++				dst++;
++			}
++			break;
++		case 16:
++			for (x = 0; x < width; x += 2) {
++				if ((x & 7) == 0)
++					d = *src++;
++
++				parse_pixel(0, 2, u16);
++				parse_pixel(1, 2, u16);
++#ifdef __LITTLE_ENDIAN
++				dd = dd2[0] | (dd2[1] << 16);
++#else
++				dd = dd2[1] | (dd2[0] << 16);
++#endif
++				d <<= 2;
++				fb_writel(dd, dst);
++				dst += 4;
++			}
++			break;
++
++		case 8:
++			for (x = 0; x < width; x += 4) {
++				if ((x & 7) == 0)
++					d = *src++;
++
++				parse_pixel(0, 1, u8);
++				parse_pixel(1, 1, u8);
++				parse_pixel(2, 1, u8);
++				parse_pixel(3, 1, u8);
++
++#ifdef __LITTLE_ENDIAN
++				dd = dd2[0] | (dd2[1] << 8) | (dd2[2] << 16) | (dd2[3] << 24);
++#else
++				dd = dd2[3] | (dd2[2] << 8) | (dd2[1] << 16) | (dd2[0] << 24);
++#endif
++				d <<= 4;
++				fb_writel(dd, dst);
++				dst += 4;
++			}
++		}
++
++		dst += info->fix.line_length - width * bytespp;
++		decor_src += (info->var.xres - width) * bytespp;
++	}
++}
++
++#define cc2cx(a) 						\
++	((info->fix.visual == FB_VISUAL_TRUECOLOR || 		\
++	  info->fix.visual == FB_VISUAL_DIRECTCOLOR) ? 		\
++	 ((u32*)info->pseudo_palette)[a] : a)
++
++void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info,
++		   const unsigned short *s, int count, int yy, int xx)
++{
++	unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
++	struct fbcon_ops *ops = info->fbcon_par;
++	int fg_color, bg_color, transparent;
++	u8 *src;
++	u32 bgx, fgx;
++	u16 c = scr_readw(s);
++
++	fg_color = get_color(vc, info, c, 1);
++        bg_color = get_color(vc, info, c, 0);
++
++	/* Don't paint the background image if console is blanked */
++	transparent = ops->blank_state ? 0 :
++		(vc->vc_decor.bg_color == bg_color);
++
++	xx = xx * vc->vc_font.width + vc->vc_decor.tx;
++	yy = yy * vc->vc_font.height + vc->vc_decor.ty;
++
++	fgx = cc2cx(fg_color);
++	bgx = cc2cx(bg_color);
++
++	while (count--) {
++		c = scr_readw(s++);
++		src = vc->vc_font.data + (c & charmask) * vc->vc_font.height *
++		      ((vc->vc_font.width + 7) >> 3);
++
++		fbcon_decor_renderc(info, yy, xx, vc->vc_font.height,
++			       vc->vc_font.width, src, fgx, bgx, transparent);
++		xx += vc->vc_font.width;
++	}
++}
++
++void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor)
++{
++	int i;
++	unsigned int dsize, s_pitch;
++	struct fbcon_ops *ops = info->fbcon_par;
++	struct vc_data* vc;
++	u8 *src;
++
++	/* we really don't need any cursors while the console is blanked */
++	if (info->state != FBINFO_STATE_RUNNING || ops->blank_state)
++		return;
++
++	vc = vc_cons[ops->currcon].d;
++
++	src = kmalloc(64 + sizeof(struct fb_image), GFP_ATOMIC);
++	if (!src)
++		return;
++
++	s_pitch = (cursor->image.width + 7) >> 3;
++	dsize = s_pitch * cursor->image.height;
++	if (cursor->enable) {
++		switch (cursor->rop) {
++		case ROP_XOR:
++			for (i = 0; i < dsize; i++)
++				src[i] = cursor->image.data[i] ^ cursor->mask[i];
++                        break;
++		case ROP_COPY:
++		default:
++			for (i = 0; i < dsize; i++)
++				src[i] = cursor->image.data[i] & cursor->mask[i];
++			break;
++		}
++	} else
++		memcpy(src, cursor->image.data, dsize);
++
++	fbcon_decor_renderc(info,
++			cursor->image.dy + vc->vc_decor.ty,
++			cursor->image.dx + vc->vc_decor.tx,
++			cursor->image.height,
++			cursor->image.width,
++			(u8*)src,
++			cc2cx(cursor->image.fg_color),
++			cc2cx(cursor->image.bg_color),
++			cursor->image.bg_color == vc->vc_decor.bg_color);
++
++	kfree(src);
++}
++
++static void decorset(u8 *dst, int height, int width, int dstbytes,
++		        u32 bgx, int bpp)
++{
++	int i;
++
++	if (bpp == 8)
++		bgx |= bgx << 8;
++	if (bpp == 16 || bpp == 8)
++		bgx |= bgx << 16;
++
++	while (height-- > 0) {
++		u8 *p = dst;
++
++		switch (bpp) {
++
++		case 32:
++			for (i=0; i < width; i++) {
++				fb_writel(bgx, p); p += 4;
++			}
++			break;
++		case 24:
++			for (i=0; i < width; i++) {
++#ifdef __LITTLE_ENDIAN
++				fb_writew((bgx & 0xffff),(u16*)p); p += 2;
++				fb_writeb((bgx >> 16),p++);
++#else
++				fb_writew((bgx >> 8),(u16*)p); p += 2;
++				fb_writeb((bgx & 0xff),p++);
++#endif
++			}
++		case 16:
++			for (i=0; i < width/4; i++) {
++				fb_writel(bgx,p); p += 4;
++				fb_writel(bgx,p); p += 4;
++			}
++			if (width & 2) {
++				fb_writel(bgx,p); p += 4;
++			}
++			if (width & 1)
++				fb_writew(bgx,(u16*)p);
++			break;
++		case 8:
++			for (i=0; i < width/4; i++) {
++				fb_writel(bgx,p); p += 4;
++			}
++
++			if (width & 2) {
++				fb_writew(bgx,p); p += 2;
++			}
++			if (width & 1)
++				fb_writeb(bgx,(u8*)p);
++			break;
++
++		}
++		dst += dstbytes;
++	}
++}
++
++void fbcon_decor_copy(u8 *dst, u8 *src, int height, int width, int linebytes,
++		   int srclinebytes, int bpp)
++{
++	int i;
++
++	while (height-- > 0) {
++		u32 *p = (u32 *)dst;
++		u32 *q = (u32 *)src;
++
++		switch (bpp) {
++
++		case 32:
++			for (i=0; i < width; i++)
++				fb_writel(*q++, p++);
++			break;
++		case 24:
++			for (i=0; i < (width*3/4); i++)
++				fb_writel(*q++, p++);
++			if ((width*3) % 4) {
++				if (width & 2) {
++					fb_writeb(*(u8*)q, (u8*)p);
++				} else if (width & 1) {
++					fb_writew(*(u16*)q, (u16*)p);
++					fb_writeb(*(u8*)((u16*)q+1),(u8*)((u16*)p+2));
++				}
++			}
++			break;
++		case 16:
++			for (i=0; i < width/4; i++) {
++				fb_writel(*q++, p++);
++				fb_writel(*q++, p++);
++			}
++			if (width & 2)
++				fb_writel(*q++, p++);
++			if (width & 1)
++				fb_writew(*(u16*)q, (u16*)p);
++			break;
++		case 8:
++			for (i=0; i < width/4; i++)
++				fb_writel(*q++, p++);
++
++			if (width & 2) {
++				fb_writew(*(u16*)q, (u16*)p);
++				q = (u32*) ((u16*)q + 1);
++				p = (u32*) ((u16*)p + 1);
++			}
++			if (width & 1)
++				fb_writeb(*(u8*)q, (u8*)p);
++			break;
++		}
++
++		dst += linebytes;
++		src += srclinebytes;
++	}
++}
++
++static void decorfill(struct fb_info *info, int sy, int sx, int height,
++		       int width)
++{
++	int bytespp = ((info->var.bits_per_pixel + 7) >> 3);
++	int d  = sy * info->fix.line_length + sx * bytespp;
++	int ds = (sy * info->var.xres + sx) * bytespp;
++
++	fbcon_decor_copy((u8 *)(info->screen_base + d), (u8 *)(info->bgdecor.data + ds),
++		    height, width, info->fix.line_length, info->var.xres * bytespp,
++		    info->var.bits_per_pixel);
++}
++
++void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx,
++		    int height, int width)
++{
++	int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
++	struct fbcon_ops *ops = info->fbcon_par;
++	u8 *dst;
++	int transparent, bg_color = attr_bgcol_ec(bgshift, vc, info);
++
++	transparent = (vc->vc_decor.bg_color == bg_color);
++	sy = sy * vc->vc_font.height + vc->vc_decor.ty;
++	sx = sx * vc->vc_font.width + vc->vc_decor.tx;
++	height *= vc->vc_font.height;
++	width *= vc->vc_font.width;
++
++	/* Don't paint the background image if console is blanked */
++	if (transparent && !ops->blank_state) {
++		decorfill(info, sy, sx, height, width);
++	} else {
++		dst = (u8 *)(info->screen_base + sy * info->fix.line_length +
++			     sx * ((info->var.bits_per_pixel + 7) >> 3));
++		decorset(dst, height, width, info->fix.line_length, cc2cx(bg_color),
++			  info->var.bits_per_pixel);
++	}
++}
++
++void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info,
++			    int bottom_only)
++{
++	unsigned int tw = vc->vc_cols*vc->vc_font.width;
++	unsigned int th = vc->vc_rows*vc->vc_font.height;
++
++	if (!bottom_only) {
++		/* top margin */
++		decorfill(info, 0, 0, vc->vc_decor.ty, info->var.xres);
++		/* left margin */
++		decorfill(info, vc->vc_decor.ty, 0, th, vc->vc_decor.tx);
++		/* right margin */
++		decorfill(info, vc->vc_decor.ty, vc->vc_decor.tx + tw, th, 
++			   info->var.xres - vc->vc_decor.tx - tw);
++	}
++	decorfill(info, vc->vc_decor.ty + th, 0, 
++		   info->var.yres - vc->vc_decor.ty - th, info->var.xres);
++}
++
++void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, 
++			   int sx, int dx, int width)
++{
++	u16 *d = (u16 *) (vc->vc_origin + vc->vc_size_row * y + dx * 2);
++	u16 *s = d + (dx - sx);
++	u16 *start = d;
++	u16 *ls = d;
++	u16 *le = d + width;
++	u16 c;
++	int x = dx;
++	u16 attr = 1;
++
++	do {
++		c = scr_readw(d);
++		if (attr != (c & 0xff00)) {
++			attr = c & 0xff00;
++			if (d > start) {
++				fbcon_decor_putcs(vc, info, start, d - start, y, x);
++				x += d - start;
++				start = d;
++			}
++		}
++		if (s >= ls && s < le && c == scr_readw(s)) {
++			if (d > start) {
++				fbcon_decor_putcs(vc, info, start, d - start, y, x);
++				x += d - start + 1;
++				start = d + 1;
++			} else {
++				x++;
++				start++;
++			}
++		}
++		s++;
++		d++;
++	} while (d < le);
++	if (d > start)
++		fbcon_decor_putcs(vc, info, start, d - start, y, x);
++}
++
++void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank)
++{
++	if (blank) {
++		decorset((u8 *)info->screen_base, info->var.yres, info->var.xres,
++			  info->fix.line_length, 0, info->var.bits_per_pixel);
++	} else {
++		update_screen(vc);
++		fbcon_decor_clear_margins(vc, info, 0);
++	}
++}
++
+diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
+index f447734..1a840c2 100644
+--- a/drivers/video/console/fbcon.c
++++ b/drivers/video/console/fbcon.c
+@@ -79,6 +79,7 @@
+ #include <asm/irq.h>
+ 
+ #include "fbcon.h"
++#include "fbcondecor.h"
+ 
+ #ifdef FBCONDEBUG
+ #  define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
+@@ -94,7 +95,7 @@ enum {
+ 
+ static struct display fb_display[MAX_NR_CONSOLES];
+ 
+-static signed char con2fb_map[MAX_NR_CONSOLES];
++signed char con2fb_map[MAX_NR_CONSOLES];
+ static signed char con2fb_map_boot[MAX_NR_CONSOLES];
+ 
+ static int logo_lines;
+@@ -286,7 +287,7 @@ static inline int fbcon_is_inactive(struct vc_data *vc, struct fb_info *info)
+ 		!vt_force_oops_output(vc);
+ }
+ 
+-static int get_color(struct vc_data *vc, struct fb_info *info,
++int get_color(struct vc_data *vc, struct fb_info *info,
+ 	      u16 c, int is_fg)
+ {
+ 	int depth = fb_get_color_depth(&info->var, &info->fix);
+@@ -551,6 +552,9 @@ static int do_fbcon_takeover(int show_logo)
+ 		info_idx = -1;
+ 	} else {
+ 		fbcon_has_console_bind = 1;
++#ifdef CONFIG_FB_CON_DECOR
++		fbcon_decor_init();
++#endif
+ 	}
+ 
+ 	return err;
+@@ -1007,6 +1011,12 @@ static const char *fbcon_startup(void)
+ 	rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ 	cols /= vc->vc_font.width;
+ 	rows /= vc->vc_font.height;
++
++	if (fbcon_decor_active(info, vc)) {
++		cols = vc->vc_decor.twidth / vc->vc_font.width;
++		rows = vc->vc_decor.theight / vc->vc_font.height;
++	}
++
+ 	vc_resize(vc, cols, rows);
+ 
+ 	DPRINTK("mode:   %s\n", info->fix.id);
+@@ -1036,7 +1046,7 @@ static void fbcon_init(struct vc_data *vc, int init)
+ 	cap = info->flags;
+ 
+ 	if (vc != svc || logo_shown == FBCON_LOGO_DONTSHOW ||
+-	    (info->fix.type == FB_TYPE_TEXT))
++	    (info->fix.type == FB_TYPE_TEXT) || fbcon_decor_active(info, vc))
+ 		logo = 0;
+ 
+ 	if (var_to_display(p, &info->var, info))
+@@ -1260,6 +1270,11 @@ static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height,
+ 		fbcon_clear_margins(vc, 0);
+ 	}
+ 
++ 	if (fbcon_decor_active(info, vc)) {
++ 		fbcon_decor_clear(vc, info, sy, sx, height, width);
++ 		return;
++ 	}
++
+ 	/* Split blits that cross physical y_wrap boundary */
+ 
+ 	y_break = p->vrows - p->yscroll;
+@@ -1279,10 +1294,15 @@ static void fbcon_putcs(struct vc_data *vc, const unsigned short *s,
+ 	struct display *p = &fb_display[vc->vc_num];
+ 	struct fbcon_ops *ops = info->fbcon_par;
+ 
+-	if (!fbcon_is_inactive(vc, info))
+-		ops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
+-			   get_color(vc, info, scr_readw(s), 1),
+-			   get_color(vc, info, scr_readw(s), 0));
++	if (!fbcon_is_inactive(vc, info)) {
++
++		if (fbcon_decor_active(info, vc))
++			fbcon_decor_putcs(vc, info, s, count, ypos, xpos);
++		else
++			ops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
++				   get_color(vc, info, scr_readw(s), 1),
++				   get_color(vc, info, scr_readw(s), 0));
++	}
+ }
+ 
+ static void fbcon_putc(struct vc_data *vc, int c, int ypos, int xpos)
+@@ -1298,8 +1318,13 @@ static void fbcon_clear_margins(struct vc_data *vc, int bottom_only)
+ 	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ 	struct fbcon_ops *ops = info->fbcon_par;
+ 
+-	if (!fbcon_is_inactive(vc, info))
+-		ops->clear_margins(vc, info, bottom_only);
++	if (!fbcon_is_inactive(vc, info)) {
++	 	if (fbcon_decor_active(info, vc)) {
++	 		fbcon_decor_clear_margins(vc, info, bottom_only);
++ 		} else {
++			ops->clear_margins(vc, info, bottom_only);
++		}
++	}
+ }
+ 
+ static void fbcon_cursor(struct vc_data *vc, int mode)
+@@ -1819,7 +1844,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
+ 			count = vc->vc_rows;
+ 		if (softback_top)
+ 			fbcon_softback_note(vc, t, count);
+-		if (logo_shown >= 0)
++		if (logo_shown >= 0 || fbcon_decor_active(info, vc))
+ 			goto redraw_up;
+ 		switch (p->scrollmode) {
+ 		case SCROLL_MOVE:
+@@ -1912,6 +1937,8 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
+ 			count = vc->vc_rows;
+ 		if (logo_shown >= 0)
+ 			goto redraw_down;
++		if (fbcon_decor_active(info, vc))
++			goto redraw_down;
+ 		switch (p->scrollmode) {
+ 		case SCROLL_MOVE:
+ 			fbcon_redraw_blit(vc, info, p, b - 1, b - t - count,
+@@ -2060,6 +2087,13 @@ static void fbcon_bmove_rec(struct vc_data *vc, struct display *p, int sy, int s
+ 		}
+ 		return;
+ 	}
++
++	if (fbcon_decor_active(info, vc) && sy == dy && height == 1) {
++ 		/* must use slower redraw bmove to keep background pic intact */
++ 		fbcon_decor_bmove_redraw(vc, info, sy, sx, dx, width);
++ 		return;
++ 	}
++
+ 	ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx,
+ 		   height, width);
+ }
+@@ -2130,8 +2164,8 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
+ 	var.yres = virt_h * virt_fh;
+ 	x_diff = info->var.xres - var.xres;
+ 	y_diff = info->var.yres - var.yres;
+-	if (x_diff < 0 || x_diff > virt_fw ||
+-	    y_diff < 0 || y_diff > virt_fh) {
++	if ((x_diff < 0 || x_diff > virt_fw ||
++		y_diff < 0 || y_diff > virt_fh) && !vc->vc_decor.state) {
+ 		const struct fb_videomode *mode;
+ 
+ 		DPRINTK("attempting resize %ix%i\n", var.xres, var.yres);
+@@ -2167,6 +2201,21 @@ static int fbcon_switch(struct vc_data *vc)
+ 
+ 	info = registered_fb[con2fb_map[vc->vc_num]];
+ 	ops = info->fbcon_par;
++	prev_console = ops->currcon;
++	if (prev_console != -1)
++		old_info = registered_fb[con2fb_map[prev_console]];
++
++#ifdef CONFIG_FB_CON_DECOR
++	if (!fbcon_decor_active_vc(vc) && info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
++		struct vc_data *vc_curr = vc_cons[prev_console].d;
++		if (vc_curr && fbcon_decor_active_vc(vc_curr)) {
++			/* Clear the screen to avoid displaying funky colors during
++			 * palette updates. */
++			memset((u8*)info->screen_base + info->fix.line_length * info->var.yoffset,
++			       0, info->var.yres * info->fix.line_length);
++		}
++	}
++#endif
+ 
+ 	if (softback_top) {
+ 		if (softback_lines)
+@@ -2185,9 +2234,6 @@ static int fbcon_switch(struct vc_data *vc)
+ 		logo_shown = FBCON_LOGO_CANSHOW;
+ 	}
+ 
+-	prev_console = ops->currcon;
+-	if (prev_console != -1)
+-		old_info = registered_fb[con2fb_map[prev_console]];
+ 	/*
+ 	 * FIXME: If we have multiple fbdev's loaded, we need to
+ 	 * update all info->currcon.  Perhaps, we can place this
+@@ -2231,6 +2277,18 @@ static int fbcon_switch(struct vc_data *vc)
+ 			fbcon_del_cursor_timer(old_info);
+ 	}
+ 
++	if (fbcon_decor_active_vc(vc)) {
++		struct vc_data *vc_curr = vc_cons[prev_console].d;
++
++		if (!vc_curr->vc_decor.theme ||
++			strcmp(vc->vc_decor.theme, vc_curr->vc_decor.theme) ||
++			(fbcon_decor_active_nores(info, vc_curr) &&
++			 !fbcon_decor_active(info, vc_curr))) {
++			fbcon_decor_disable(vc, 0);
++			fbcon_decor_call_helper("modechange", vc->vc_num);
++		}
++	}
++
+ 	if (fbcon_is_inactive(vc, info) ||
+ 	    ops->blank_state != FB_BLANK_UNBLANK)
+ 		fbcon_del_cursor_timer(info);
+@@ -2339,15 +2397,20 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
+ 		}
+ 	}
+ 
+- 	if (!fbcon_is_inactive(vc, info)) {
++	if (!fbcon_is_inactive(vc, info)) {
+ 		if (ops->blank_state != blank) {
+ 			ops->blank_state = blank;
+ 			fbcon_cursor(vc, blank ? CM_ERASE : CM_DRAW);
+ 			ops->cursor_flash = (!blank);
+ 
+-			if (!(info->flags & FBINFO_MISC_USEREVENT))
+-				if (fb_blank(info, blank))
+-					fbcon_generic_blank(vc, info, blank);
++			if (!(info->flags & FBINFO_MISC_USEREVENT)) {
++				if (fb_blank(info, blank)) {
++					if (fbcon_decor_active(info, vc))
++						fbcon_decor_blank(vc, info, blank);
++					else
++						fbcon_generic_blank(vc, info, blank);
++				}
++			}
+ 		}
+ 
+ 		if (!blank)
+@@ -2522,13 +2585,22 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
+ 	}
+ 
+ 	if (resize) {
++		/* reset wrap/pan */
+ 		int cols, rows;
+ 
+ 		cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
+ 		rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
++
++		if (fbcon_decor_active(info, vc)) {
++			info->var.xoffset = info->var.yoffset = p->yscroll = 0;
++			cols = vc->vc_decor.twidth;
++			rows = vc->vc_decor.theight;
++		}
+ 		cols /= w;
+ 		rows /= h;
++
+ 		vc_resize(vc, cols, rows);
++
+ 		if (CON_IS_VISIBLE(vc) && softback_buf)
+ 			fbcon_update_softback(vc);
+ 	} else if (CON_IS_VISIBLE(vc)
+@@ -2657,7 +2729,11 @@ static int fbcon_set_palette(struct vc_data *vc, unsigned char *table)
+ 	int i, j, k, depth;
+ 	u8 val;
+ 
+-	if (fbcon_is_inactive(vc, info))
++	if (fbcon_is_inactive(vc, info)
++#ifdef CONFIG_FB_CON_DECOR
++			|| vc->vc_num != fg_console
++#endif
++		)
+ 		return -EINVAL;
+ 
+ 	if (!CON_IS_VISIBLE(vc))
+@@ -2683,14 +2759,56 @@ static int fbcon_set_palette(struct vc_data *vc, unsigned char *table)
+ 	} else
+ 		fb_copy_cmap(fb_default_cmap(1 << depth), &palette_cmap);
+ 
+-	return fb_set_cmap(&palette_cmap, info);
++	if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
++	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
++
++		u16 *red, *green, *blue;
++		int minlen = min(min(info->var.red.length, info->var.green.length),
++				     info->var.blue.length);
++		int h;
++
++		struct fb_cmap cmap = {
++			.start = 0,
++			.len = (1 << minlen),
++			.red = NULL,
++			.green = NULL,
++			.blue = NULL,
++			.transp = NULL
++		};
++
++		red = kmalloc(256 * sizeof(u16) * 3, GFP_KERNEL);
++
++		if (!red)
++			goto out;
++
++		green = red + 256;
++		blue = green + 256;
++		cmap.red = red;
++		cmap.green = green;
++		cmap.blue = blue;
++
++		for (i = 0; i < cmap.len; i++) {
++			red[i] = green[i] = blue[i] = (0xffff * i)/(cmap.len-1);
++		}
++
++		h = fb_set_cmap(&cmap, info);
++		fbcon_decor_fix_pseudo_pal(info, vc_cons[fg_console].d);
++		kfree(red);
++
++		return h;
++
++	} else if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
++		   info->var.bits_per_pixel == 8 && info->bgdecor.cmap.red != NULL)
++		fb_set_cmap(&info->bgdecor.cmap, info);
++
++out:	return fb_set_cmap(&palette_cmap, info);
+ }
+ 
+ static u16 *fbcon_screen_pos(struct vc_data *vc, int offset)
+ {
+ 	unsigned long p;
+ 	int line;
+-	
++
+ 	if (vc->vc_num != fg_console || !softback_lines)
+ 		return (u16 *) (vc->vc_origin + offset);
+ 	line = offset / vc->vc_size_row;
+@@ -2909,7 +3027,14 @@ static void fbcon_modechanged(struct fb_info *info)
+ 		rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ 		cols /= vc->vc_font.width;
+ 		rows /= vc->vc_font.height;
+-		vc_resize(vc, cols, rows);
++
++		if (!fbcon_decor_active_nores(info, vc)) {
++			vc_resize(vc, cols, rows);
++		} else {
++			fbcon_decor_disable(vc, 0);
++			fbcon_decor_call_helper("modechange", vc->vc_num);
++		}
++
+ 		updatescrollmode(p, info, vc);
+ 		scrollback_max = 0;
+ 		scrollback_current = 0;
+@@ -2954,7 +3079,9 @@ static void fbcon_set_all_vcs(struct fb_info *info)
+ 		rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ 		cols /= vc->vc_font.width;
+ 		rows /= vc->vc_font.height;
+-		vc_resize(vc, cols, rows);
++		if (!fbcon_decor_active_nores(info, vc)) {
++			vc_resize(vc, cols, rows);
++		}
+ 	}
+ 
+ 	if (fg != -1)
+@@ -3596,6 +3723,7 @@ static void fbcon_exit(void)
+ 		}
+ 	}
+ 
++	fbcon_decor_exit();
+ 	fbcon_has_exited = 1;
+ }
+ 
+diff --git a/drivers/video/console/fbcondecor.c b/drivers/video/console/fbcondecor.c
+new file mode 100644
+index 0000000..babc8c5
+--- /dev/null
++++ b/drivers/video/console/fbcondecor.c
+@@ -0,0 +1,555 @@
++/*
++ *  linux/drivers/video/console/fbcondecor.c -- Framebuffer console decorations
++ *
++ *  Copyright (C) 2004-2009 Michal Januszewski <michalj+fbcondecor@gmail.com>
++ *
++ *  Code based upon "Bootsplash" (C) 2001-2003
++ *       Volker Poplawski <volker@poplawski.de>,
++ *       Stefan Reinauer <stepan@suse.de>,
++ *       Steffen Winterfeldt <snwint@suse.de>,
++ *       Michael Schroeder <mls@suse.de>,
++ *       Ken Wimer <wimer@suse.de>.
++ *
++ *  Compat ioctl support by Thorsten Klein <TK@Thorsten-Klein.de>.
++ *
++ *  This file is subject to the terms and conditions of the GNU General Public
++ *  License.  See the file COPYING in the main directory of this archive for
++ *  more details.
++ *
++ */
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/fb.h>
++#include <linux/vt_kern.h>
++#include <linux/vmalloc.h>
++#include <linux/unistd.h>
++#include <linux/syscalls.h>
++#include <linux/init.h>
++#include <linux/proc_fs.h>
++#include <linux/workqueue.h>
++#include <linux/kmod.h>
++#include <linux/miscdevice.h>
++#include <linux/device.h>
++#include <linux/fs.h>
++#include <linux/compat.h>
++#include <linux/console.h>
++
++#include <asm/uaccess.h>
++#include <asm/irq.h>
++
++#include "fbcon.h"
++#include "fbcondecor.h"
++
++extern signed char con2fb_map[];
++static int fbcon_decor_enable(struct vc_data *vc);
++char fbcon_decor_path[KMOD_PATH_LEN] = "/sbin/fbcondecor_helper";
++static int initialized = 0;
++
++int fbcon_decor_call_helper(char* cmd, unsigned short vc)
++{
++	char *envp[] = {
++		"HOME=/",
++		"PATH=/sbin:/bin",
++		NULL
++	};
++
++	char tfb[5];
++	char tcons[5];
++	unsigned char fb = (int) con2fb_map[vc];
++
++	char *argv[] = {
++		fbcon_decor_path,
++		"2",
++		cmd,
++		tcons,
++		tfb,
++		vc_cons[vc].d->vc_decor.theme,
++		NULL
++	};
++
++	snprintf(tfb,5,"%d",fb);
++	snprintf(tcons,5,"%d",vc);
++
++	return call_usermodehelper(fbcon_decor_path, argv, envp, UMH_WAIT_EXEC);
++}
++
++/* Disables fbcondecor on a virtual console; called with console sem held. */
++int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw)
++{
++	struct fb_info* info;
++
++	if (!vc->vc_decor.state)
++		return -EINVAL;
++
++	info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++	if (info == NULL)
++		return -EINVAL;
++
++	vc->vc_decor.state = 0;
++	vc_resize(vc, info->var.xres / vc->vc_font.width,
++		  info->var.yres / vc->vc_font.height);
++
++	if (fg_console == vc->vc_num && redraw) {
++		redraw_screen(vc, 0);
++		update_region(vc, vc->vc_origin +
++			      vc->vc_size_row * vc->vc_top,
++			      vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
++	}
++
++	printk(KERN_INFO "fbcondecor: switched decor state to 'off' on console %d\n",
++			 vc->vc_num);
++
++	return 0;
++}
++
++/* Enables fbcondecor on a virtual console; called with console sem held. */
++static int fbcon_decor_enable(struct vc_data *vc)
++{
++	struct fb_info* info;
++
++	info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++	if (vc->vc_decor.twidth == 0 || vc->vc_decor.theight == 0 ||
++	    info == NULL || vc->vc_decor.state || (!info->bgdecor.data &&
++	    vc->vc_num == fg_console))
++		return -EINVAL;
++
++	vc->vc_decor.state = 1;
++	vc_resize(vc, vc->vc_decor.twidth / vc->vc_font.width,
++		  vc->vc_decor.theight / vc->vc_font.height);
++
++	if (fg_console == vc->vc_num) {
++		redraw_screen(vc, 0);
++		update_region(vc, vc->vc_origin +
++			      vc->vc_size_row * vc->vc_top,
++			      vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
++		fbcon_decor_clear_margins(vc, info, 0);
++	}
++
++	printk(KERN_INFO "fbcondecor: switched decor state to 'on' on console %d\n",
++			 vc->vc_num);
++
++	return 0;
++}
++
++static inline int fbcon_decor_ioctl_dosetstate(struct vc_data *vc, unsigned int state, unsigned char origin)
++{
++	int ret;
++
++//	if (origin == FBCON_DECOR_IO_ORIG_USER)
++		console_lock();
++	if (!state)
++		ret = fbcon_decor_disable(vc, 1);
++	else
++		ret = fbcon_decor_enable(vc);
++//	if (origin == FBCON_DECOR_IO_ORIG_USER)
++		console_unlock();
++
++	return ret;
++}
++
++static inline void fbcon_decor_ioctl_dogetstate(struct vc_data *vc, unsigned int *state)
++{
++	*state = vc->vc_decor.state;
++}
++
++static int fbcon_decor_ioctl_dosetcfg(struct vc_data *vc, struct vc_decor *cfg, unsigned char origin)
++{
++	struct fb_info *info;
++	int len;
++	char *tmp;
++
++	info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++	if (info == NULL || !cfg->twidth || !cfg->theight ||
++	    cfg->tx + cfg->twidth  > info->var.xres ||
++	    cfg->ty + cfg->theight > info->var.yres)
++		return -EINVAL;
++
++	len = strlen_user(cfg->theme);
++	if (!len || len > FBCON_DECOR_THEME_LEN)
++		return -EINVAL;
++	tmp = kmalloc(len, GFP_KERNEL);
++	if (!tmp)
++		return -ENOMEM;
++	if (copy_from_user(tmp, (void __user *)cfg->theme, len))
++		return -EFAULT;
++	cfg->theme = tmp;
++	cfg->state = 0;
++
++	/* If this ioctl is a response to a request from kernel, the console sem
++	 * is already held; we also don't need to disable decor because either the
++	 * new config and background picture will be successfully loaded, and the
++	 * decor will stay on, or in case of a failure it'll be turned off in fbcon. */
++//	if (origin == FBCON_DECOR_IO_ORIG_USER) {
++		console_lock();
++		if (vc->vc_decor.state)
++			fbcon_decor_disable(vc, 1);
++//	}
++
++	if (vc->vc_decor.theme)
++		kfree(vc->vc_decor.theme);
++
++	vc->vc_decor = *cfg;
++
++//	if (origin == FBCON_DECOR_IO_ORIG_USER)
++		console_unlock();
++
++	printk(KERN_INFO "fbcondecor: console %d using theme '%s'\n",
++			 vc->vc_num, vc->vc_decor.theme);
++	return 0;
++}
++
++static int fbcon_decor_ioctl_dogetcfg(struct vc_data *vc, struct vc_decor *decor)
++{
++	char __user *tmp;
++
++	tmp = decor->theme;
++	*decor = vc->vc_decor;
++	decor->theme = tmp;
++
++	if (vc->vc_decor.theme) {
++		if (copy_to_user(tmp, vc->vc_decor.theme, strlen(vc->vc_decor.theme) + 1))
++			return -EFAULT;
++	} else
++		if (put_user(0, tmp))
++			return -EFAULT;
++
++	return 0;
++}
++
++static int fbcon_decor_ioctl_dosetpic(struct vc_data *vc, struct fb_image *img, unsigned char origin)
++{
++	struct fb_info *info;
++	int len;
++	u8 *tmp;
++
++	if (vc->vc_num != fg_console)
++		return -EINVAL;
++
++	info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++	if (info == NULL)
++		return -EINVAL;
++
++	if (img->width != info->var.xres || img->height != info->var.yres) {
++		printk(KERN_ERR "fbcondecor: picture dimensions mismatch\n");
++		printk(KERN_ERR "%dx%d vs %dx%d\n", img->width, img->height, info->var.xres, info->var.yres);
++		return -EINVAL;
++	}
++
++	if (img->depth != info->var.bits_per_pixel) {
++		printk(KERN_ERR "fbcondecor: picture depth mismatch\n");
++		return -EINVAL;
++	}
++
++	if (img->depth == 8) {
++		if (!img->cmap.len || !img->cmap.red || !img->cmap.green ||
++		    !img->cmap.blue)
++			return -EINVAL;
++
++		tmp = vmalloc(img->cmap.len * 3 * 2);
++		if (!tmp)
++			return -ENOMEM;
++
++		if (copy_from_user(tmp,
++			    	   (void __user*)img->cmap.red, (img->cmap.len << 1)) ||
++		    copy_from_user(tmp + (img->cmap.len << 1),
++			    	   (void __user*)img->cmap.green, (img->cmap.len << 1)) ||
++		    copy_from_user(tmp + (img->cmap.len << 2),
++			    	   (void __user*)img->cmap.blue, (img->cmap.len << 1))) {
++			vfree(tmp);
++			return -EFAULT;
++		}
++
++		img->cmap.transp = NULL;
++		img->cmap.red = (u16*)tmp;
++		img->cmap.green = img->cmap.red + img->cmap.len;
++		img->cmap.blue = img->cmap.green + img->cmap.len;
++	} else {
++		img->cmap.red = NULL;
++	}
++
++	len = ((img->depth + 7) >> 3) * img->width * img->height;
++
++	/*
++	 * Allocate an additional byte so that we never go outside of the
++	 * buffer boundaries in the rendering functions in a 24 bpp mode.
++	 */
++	tmp = vmalloc(len + 1);
++
++	if (!tmp)
++		goto out;
++
++	if (copy_from_user(tmp, (void __user*)img->data, len))
++		goto out;
++
++	img->data = tmp;
++
++	/* If this ioctl is a response to a request from kernel, the console sem
++	 * is already held. */
++//	if (origin == FBCON_DECOR_IO_ORIG_USER)
++		console_lock();
++
++	if (info->bgdecor.data)
++		vfree((u8*)info->bgdecor.data);
++	if (info->bgdecor.cmap.red)
++		vfree(info->bgdecor.cmap.red);
++
++	info->bgdecor = *img;
++
++	if (fbcon_decor_active_vc(vc) && fg_console == vc->vc_num) {
++		redraw_screen(vc, 0);
++		update_region(vc, vc->vc_origin +
++			      vc->vc_size_row * vc->vc_top,
++			      vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
++		fbcon_decor_clear_margins(vc, info, 0);
++	}
++
++//	if (origin == FBCON_DECOR_IO_ORIG_USER)
++		console_unlock();
++
++	return 0;
++
++out:	if (img->cmap.red)
++		vfree(img->cmap.red);
++
++	if (tmp)
++		vfree(tmp);
++	return -ENOMEM;
++}
++
++static long fbcon_decor_ioctl(struct file *filp, u_int cmd, u_long arg)
++{
++	struct fbcon_decor_iowrapper __user *wrapper = (void __user*) arg;
++	struct vc_data *vc = NULL;
++	unsigned short vc_num = 0;
++	unsigned char origin = 0;
++	void __user *data = NULL;
++
++	if (!access_ok(VERIFY_READ, wrapper,
++			sizeof(struct fbcon_decor_iowrapper)))
++		return -EFAULT;
++
++	__get_user(vc_num, &wrapper->vc);
++	__get_user(origin, &wrapper->origin);
++	__get_user(data, &wrapper->data);
++
++	if (!vc_cons_allocated(vc_num))
++		return -EINVAL;
++
++	vc = vc_cons[vc_num].d;
++
++	switch (cmd) {
++	case FBIOCONDECOR_SETPIC:
++	{
++		struct fb_image img;
++		if (copy_from_user(&img, (struct fb_image __user *)data, sizeof(struct fb_image)))
++			return -EFAULT;
++
++		return fbcon_decor_ioctl_dosetpic(vc, &img, origin);
++	}
++	case FBIOCONDECOR_SETCFG:
++	{
++		struct vc_decor cfg;
++		if (copy_from_user(&cfg, (struct vc_decor __user *)data, sizeof(struct vc_decor)))
++			return -EFAULT;
++
++		return fbcon_decor_ioctl_dosetcfg(vc, &cfg, origin);
++	}
++	case FBIOCONDECOR_GETCFG:
++	{
++		int rval;
++		struct vc_decor cfg;
++
++		if (copy_from_user(&cfg, (struct vc_decor __user *)data, sizeof(struct vc_decor)))
++			return -EFAULT;
++
++		rval = fbcon_decor_ioctl_dogetcfg(vc, &cfg);
++
++		if (copy_to_user(data, &cfg, sizeof(struct vc_decor)))
++			return -EFAULT;
++		return rval;
++	}
++	case FBIOCONDECOR_SETSTATE:
++	{
++		unsigned int state = 0;
++		if (get_user(state, (unsigned int __user *)data))
++			return -EFAULT;
++		return fbcon_decor_ioctl_dosetstate(vc, state, origin);
++	}
++	case FBIOCONDECOR_GETSTATE:
++	{
++		unsigned int state = 0;
++		fbcon_decor_ioctl_dogetstate(vc, &state);
++		return put_user(state, (unsigned int __user *)data);
++	}
++
++	default:
++		return -ENOIOCTLCMD;
++	}
++}
++
++#ifdef CONFIG_COMPAT
++
++static long fbcon_decor_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) {
++
++	struct fbcon_decor_iowrapper32 __user *wrapper = (void __user *)arg;
++	struct vc_data *vc = NULL;
++	unsigned short vc_num = 0;
++	unsigned char origin = 0;
++	compat_uptr_t data_compat = 0;
++	void __user *data = NULL;
++
++	if (!access_ok(VERIFY_READ, wrapper,
++                       sizeof(struct fbcon_decor_iowrapper32)))
++		return -EFAULT;
++
++	__get_user(vc_num, &wrapper->vc);
++	__get_user(origin, &wrapper->origin);
++	__get_user(data_compat, &wrapper->data);
++	data = compat_ptr(data_compat);
++
++	if (!vc_cons_allocated(vc_num))
++		return -EINVAL;
++
++	vc = vc_cons[vc_num].d;
++
++	switch (cmd) {
++	case FBIOCONDECOR_SETPIC32:
++	{
++		struct fb_image32 img_compat;
++		struct fb_image img;
++
++		if (copy_from_user(&img_compat, (struct fb_image32 __user *)data, sizeof(struct fb_image32)))
++			return -EFAULT;
++
++		fb_image_from_compat(img, img_compat);
++
++		return fbcon_decor_ioctl_dosetpic(vc, &img, origin);
++	}
++
++	case FBIOCONDECOR_SETCFG32:
++	{
++		struct vc_decor32 cfg_compat;
++		struct vc_decor cfg;
++
++		if (copy_from_user(&cfg_compat, (struct vc_decor32 __user *)data, sizeof(struct vc_decor32)))
++			return -EFAULT;
++
++		vc_decor_from_compat(cfg, cfg_compat);
++
++		return fbcon_decor_ioctl_dosetcfg(vc, &cfg, origin);
++	}
++
++	case FBIOCONDECOR_GETCFG32:
++	{
++		int rval;
++		struct vc_decor32 cfg_compat;
++		struct vc_decor cfg;
++
++		if (copy_from_user(&cfg_compat, (struct vc_decor32 __user *)data, sizeof(struct vc_decor32)))
++			return -EFAULT;
++		cfg.theme = compat_ptr(cfg_compat.theme);
++
++		rval = fbcon_decor_ioctl_dogetcfg(vc, &cfg);
++
++		vc_decor_to_compat(cfg_compat, cfg);
++
++		if (copy_to_user((struct vc_decor32 __user *)data, &cfg_compat, sizeof(struct vc_decor32)))
++			return -EFAULT;
++		return rval;
++	}
++
++	case FBIOCONDECOR_SETSTATE32:
++	{
++		compat_uint_t state_compat = 0;
++		unsigned int state = 0;
++
++		if (get_user(state_compat, (compat_uint_t __user *)data))
++			return -EFAULT;
++
++		state = (unsigned int)state_compat;
++
++		return fbcon_decor_ioctl_dosetstate(vc, state, origin);
++	}
++
++	case FBIOCONDECOR_GETSTATE32:
++	{
++		compat_uint_t state_compat = 0;
++		unsigned int state = 0;
++
++		fbcon_decor_ioctl_dogetstate(vc, &state);
++		state_compat = (compat_uint_t)state;
++
++		return put_user(state_compat, (compat_uint_t __user *)data);
++	}
++
++	default:
++		return -ENOIOCTLCMD;
++	}
++}
++#else
++  #define fbcon_decor_compat_ioctl NULL
++#endif
++
++static struct file_operations fbcon_decor_ops = {
++	.owner = THIS_MODULE,
++	.unlocked_ioctl = fbcon_decor_ioctl,
++	.compat_ioctl = fbcon_decor_compat_ioctl
++};
++
++static struct miscdevice fbcon_decor_dev = {
++	.minor = MISC_DYNAMIC_MINOR,
++	.name = "fbcondecor",
++	.fops = &fbcon_decor_ops
++};
++
++void fbcon_decor_reset(void)
++{
++	int i;
++
++	for (i = 0; i < num_registered_fb; i++) {
++		registered_fb[i]->bgdecor.data = NULL;
++		registered_fb[i]->bgdecor.cmap.red = NULL;
++	}
++
++	for (i = 0; i < MAX_NR_CONSOLES && vc_cons[i].d; i++) {
++		vc_cons[i].d->vc_decor.state = vc_cons[i].d->vc_decor.twidth =
++						vc_cons[i].d->vc_decor.theight = 0;
++		vc_cons[i].d->vc_decor.theme = NULL;
++	}
++
++	return;
++}
++
++int fbcon_decor_init(void)
++{
++	int i;
++
++	fbcon_decor_reset();
++
++	if (initialized)
++		return 0;
++
++	i = misc_register(&fbcon_decor_dev);
++	if (i) {
++		printk(KERN_ERR "fbcondecor: failed to register device\n");
++		return i;
++	}
++
++	fbcon_decor_call_helper("init", 0);
++	initialized = 1;
++	return 0;
++}
++
++int fbcon_decor_exit(void)
++{
++	fbcon_decor_reset();
++	return 0;
++}
++
++EXPORT_SYMBOL(fbcon_decor_path);
+diff --git a/drivers/video/console/fbcondecor.h b/drivers/video/console/fbcondecor.h
+new file mode 100644
+index 0000000..3b3724b
+--- /dev/null
++++ b/drivers/video/console/fbcondecor.h
+@@ -0,0 +1,78 @@
++/* 
++ *  linux/drivers/video/console/fbcondecor.h -- Framebuffer Console Decoration headers
++ *
++ *  Copyright (C) 2004 Michal Januszewski <michalj+fbcondecor@gmail.com>
++ *
++ */
++
++#ifndef __FBCON_DECOR_H
++#define __FBCON_DECOR_H
++
++#ifndef _LINUX_FB_H
++#include <linux/fb.h>
++#endif
++
++/* This is needed for vc_cons in fbcmap.c */
++#include <linux/vt_kern.h>
++
++struct fb_cursor;
++struct fb_info;
++struct vc_data;
++
++#ifdef CONFIG_FB_CON_DECOR
++/* fbcondecor.c */
++int fbcon_decor_init(void);
++int fbcon_decor_exit(void);
++int fbcon_decor_call_helper(char* cmd, unsigned short cons);
++int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw);
++
++/* cfbcondecor.c */
++void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info, const unsigned short *s, int count, int yy, int xx);
++void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor);
++void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width);
++void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info, int bottom_only);
++void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank);
++void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, int sx, int dx, int width);
++void fbcon_decor_copy(u8 *dst, u8 *src, int height, int width, int linebytes, int srclinesbytes, int bpp);
++void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc);
++
++/* vt.c */
++void acquire_console_sem(void);
++void release_console_sem(void);
++void do_unblank_screen(int entering_gfx);
++
++/* struct vc_data *y */
++#define fbcon_decor_active_vc(y) (y->vc_decor.state && y->vc_decor.theme) 
++
++/* struct fb_info *x, struct vc_data *y */
++#define fbcon_decor_active_nores(x,y) (x->bgdecor.data && fbcon_decor_active_vc(y))
++
++/* struct fb_info *x, struct vc_data *y */
++#define fbcon_decor_active(x,y) (fbcon_decor_active_nores(x,y) &&		\
++			      x->bgdecor.width == x->var.xres && 	\
++			      x->bgdecor.height == x->var.yres &&	\
++			      x->bgdecor.depth == x->var.bits_per_pixel)
++
++
++#else /* CONFIG_FB_CON_DECOR */
++
++static inline void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info, const unsigned short *s, int count, int yy, int xx) {}
++static inline void fbcon_decor_putc(struct vc_data *vc, struct fb_info *info, int c, int ypos, int xpos) {}
++static inline void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor) {}
++static inline void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) {}
++static inline void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info, int bottom_only) {}
++static inline void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank) {}
++static inline void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, int sx, int dx, int width) {}
++static inline void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc) {}
++static inline int fbcon_decor_call_helper(char* cmd, unsigned short cons) { return 0; }
++static inline int fbcon_decor_init(void) { return 0; }
++static inline int fbcon_decor_exit(void) { return 0; }
++static inline int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw) { return 0; }
++
++#define fbcon_decor_active_vc(y) (0)
++#define fbcon_decor_active_nores(x,y) (0)
++#define fbcon_decor_active(x,y) (0)
++
++#endif /* CONFIG_FB_CON_DECOR */
++
++#endif /* __FBCON_DECOR_H */
+diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
+index e1f4727..2952e33 100644
+--- a/drivers/video/fbdev/Kconfig
++++ b/drivers/video/fbdev/Kconfig
+@@ -1204,7 +1204,6 @@ config FB_MATROX
+ 	select FB_CFB_FILLRECT
+ 	select FB_CFB_COPYAREA
+ 	select FB_CFB_IMAGEBLIT
+-	select FB_TILEBLITTING
+ 	select FB_MACMODES if PPC_PMAC
+ 	---help---
+ 	  Say Y here if you have a Matrox Millennium, Matrox Millennium II,
+diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
+index f89245b..05e036c 100644
+--- a/drivers/video/fbdev/core/fbcmap.c
++++ b/drivers/video/fbdev/core/fbcmap.c
+@@ -17,6 +17,8 @@
+ #include <linux/slab.h>
+ #include <linux/uaccess.h>
+ 
++#include "../../console/fbcondecor.h"
++
+ static u16 red2[] __read_mostly = {
+     0x0000, 0xaaaa
+ };
+@@ -249,14 +251,17 @@ int fb_set_cmap(struct fb_cmap *cmap, struct fb_info *info)
+ 			if (transp)
+ 				htransp = *transp++;
+ 			if (info->fbops->fb_setcolreg(start++,
+-						      hred, hgreen, hblue,
++						      hred, hgreen, hblue, 
+ 						      htransp, info))
+ 				break;
+ 		}
+ 	}
+-	if (rc == 0)
++	if (rc == 0) {
+ 		fb_copy_cmap(cmap, &info->cmap);
+-
++		if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
++		    info->fix.visual == FB_VISUAL_DIRECTCOLOR)
++			fbcon_decor_fix_pseudo_pal(info, vc_cons[fg_console].d);
++	}
+ 	return rc;
+ }
+ 
+diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
+index b6d5008..d6703f2 100644
+--- a/drivers/video/fbdev/core/fbmem.c
++++ b/drivers/video/fbdev/core/fbmem.c
+@@ -1250,15 +1250,6 @@ struct fb_fix_screeninfo32 {
+ 	u16			reserved[3];
+ };
+ 
+-struct fb_cmap32 {
+-	u32			start;
+-	u32			len;
+-	compat_caddr_t	red;
+-	compat_caddr_t	green;
+-	compat_caddr_t	blue;
+-	compat_caddr_t	transp;
+-};
+-
+ static int fb_getput_cmap(struct fb_info *info, unsigned int cmd,
+ 			  unsigned long arg)
+ {
+diff --git a/include/linux/console_decor.h b/include/linux/console_decor.h
+new file mode 100644
+index 0000000..04b8d80
+--- /dev/null
++++ b/include/linux/console_decor.h
+@@ -0,0 +1,46 @@
++#ifndef _LINUX_CONSOLE_DECOR_H_
++#define _LINUX_CONSOLE_DECOR_H_ 1
++
++/* A structure used by the framebuffer console decorations (drivers/video/console/fbcondecor.c) */
++struct vc_decor {
++	__u8 bg_color;				/* The color that is to be treated as transparent */
++	__u8 state;				/* Current decor state: 0 = off, 1 = on */
++	__u16 tx, ty;				/* Top left corner coordinates of the text field */
++	__u16 twidth, theight;			/* Width and height of the text field */
++	char* theme;
++};
++
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++#include <linux/compat.h>
++
++struct vc_decor32 {
++	__u8 bg_color;				/* The color that is to be treated as transparent */
++	__u8 state;				/* Current decor state: 0 = off, 1 = on */
++	__u16 tx, ty;				/* Top left corner coordinates of the text field */
++	__u16 twidth, theight;			/* Width and height of the text field */
++	compat_uptr_t theme;
++};
++
++#define vc_decor_from_compat(to, from) \
++	(to).bg_color = (from).bg_color; \
++	(to).state    = (from).state; \
++	(to).tx       = (from).tx; \
++	(to).ty       = (from).ty; \
++	(to).twidth   = (from).twidth; \
++	(to).theight  = (from).theight; \
++	(to).theme    = compat_ptr((from).theme)
++
++#define vc_decor_to_compat(to, from) \
++	(to).bg_color = (from).bg_color; \
++	(to).state    = (from).state; \
++	(to).tx       = (from).tx; \
++	(to).ty       = (from).ty; \
++	(to).twidth   = (from).twidth; \
++	(to).theight  = (from).theight; \
++	(to).theme    = ptr_to_compat((from).theme)
++
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
++#endif
+diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
+index 7f0c329..98f5d60 100644
+--- a/include/linux/console_struct.h
++++ b/include/linux/console_struct.h
+@@ -19,6 +19,7 @@
+ struct vt_struct;
+ 
+ #define NPAR 16
++#include <linux/console_decor.h>
+ 
+ struct vc_data {
+ 	struct tty_port port;			/* Upper level data */
+@@ -107,6 +108,8 @@ struct vc_data {
+ 	unsigned long	vc_uni_pagedir;
+ 	unsigned long	*vc_uni_pagedir_loc;  /* [!] Location of uni_pagedir variable for this console */
+ 	bool vc_panic_force_write; /* when oops/panic this VC can accept forced output/blanking */
++
++	struct vc_decor vc_decor;
+ 	/* additional information is in vt_kern.h */
+ };
+ 
+diff --git a/include/linux/fb.h b/include/linux/fb.h
+index fe6ac95..1e36b03 100644
+--- a/include/linux/fb.h
++++ b/include/linux/fb.h
+@@ -219,6 +219,34 @@ struct fb_deferred_io {
+ };
+ #endif
+ 
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++struct fb_image32 {
++	__u32 dx;			/* Where to place image */
++	__u32 dy;
++	__u32 width;			/* Size of image */
++	__u32 height;
++	__u32 fg_color;			/* Only used when a mono bitmap */
++	__u32 bg_color;
++	__u8  depth;			/* Depth of the image */
++	const compat_uptr_t data;	/* Pointer to image data */
++	struct fb_cmap32 cmap;		/* color map info */
++};
++
++#define fb_image_from_compat(to, from) \
++	(to).dx       = (from).dx; \
++	(to).dy       = (from).dy; \
++	(to).width    = (from).width; \
++	(to).height   = (from).height; \
++	(to).fg_color = (from).fg_color; \
++	(to).bg_color = (from).bg_color; \
++	(to).depth    = (from).depth; \
++	(to).data     = compat_ptr((from).data); \
++	fb_cmap_from_compat((to).cmap, (from).cmap)
++
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
+ /*
+  * Frame buffer operations
+  *
+@@ -489,6 +517,9 @@ struct fb_info {
+ #define FBINFO_STATE_SUSPENDED	1
+ 	u32 state;			/* Hardware state i.e suspend */
+ 	void *fbcon_par;                /* fbcon use-only private area */
++
++	struct fb_image bgdecor;
++
+ 	/* From here on everything is device dependent */
+ 	void *par;
+ 	/* we need the PCI or similar aperture base/size not
+diff --git a/include/uapi/linux/fb.h b/include/uapi/linux/fb.h
+index fb795c3..dc77a03 100644
+--- a/include/uapi/linux/fb.h
++++ b/include/uapi/linux/fb.h
+@@ -8,6 +8,25 @@
+ 
+ #define FB_MAX			32	/* sufficient for now */
+ 
++struct fbcon_decor_iowrapper
++{
++	unsigned short vc;		/* Virtual console */
++	unsigned char origin;		/* Point of origin of the request */
++	void *data;
++};
++
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++#include <linux/compat.h>
++struct fbcon_decor_iowrapper32
++{
++	unsigned short vc;		/* Virtual console */
++	unsigned char origin;		/* Point of origin of the request */
++	compat_uptr_t data;
++};
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
+ /* ioctls
+    0x46 is 'F'								*/
+ #define FBIOGET_VSCREENINFO	0x4600
+@@ -35,6 +54,25 @@
+ #define FBIOGET_DISPINFO        0x4618
+ #define FBIO_WAITFORVSYNC	_IOW('F', 0x20, __u32)
+ 
++#define FBIOCONDECOR_SETCFG	_IOWR('F', 0x19, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_GETCFG	_IOR('F', 0x1A, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_SETSTATE	_IOWR('F', 0x1B, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_GETSTATE	_IOR('F', 0x1C, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_SETPIC 	_IOWR('F', 0x1D, struct fbcon_decor_iowrapper)
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++#define FBIOCONDECOR_SETCFG32	_IOWR('F', 0x19, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_GETCFG32	_IOR('F', 0x1A, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_SETSTATE32	_IOWR('F', 0x1B, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_GETSTATE32	_IOR('F', 0x1C, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_SETPIC32	_IOWR('F', 0x1D, struct fbcon_decor_iowrapper32)
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
++#define FBCON_DECOR_THEME_LEN		128	/* Maximum lenght of a theme name */
++#define FBCON_DECOR_IO_ORIG_KERNEL	0	/* Kernel ioctl origin */
++#define FBCON_DECOR_IO_ORIG_USER	1	/* User ioctl origin */
++ 
+ #define FB_TYPE_PACKED_PIXELS		0	/* Packed Pixels	*/
+ #define FB_TYPE_PLANES			1	/* Non interleaved planes */
+ #define FB_TYPE_INTERLEAVED_PLANES	2	/* Interleaved planes	*/
+@@ -277,6 +315,29 @@ struct fb_var_screeninfo {
+ 	__u32 reserved[4];		/* Reserved for future compatibility */
+ };
+ 
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++struct fb_cmap32 {
++	__u32 start;
++	__u32 len;			/* Number of entries */
++	compat_uptr_t red;		/* Red values	*/
++	compat_uptr_t green;
++	compat_uptr_t blue;
++	compat_uptr_t transp;		/* transparency, can be NULL */
++};
++
++#define fb_cmap_from_compat(to, from) \
++	(to).start  = (from).start; \
++	(to).len    = (from).len; \
++	(to).red    = compat_ptr((from).red); \
++	(to).green  = compat_ptr((from).green); \
++	(to).blue   = compat_ptr((from).blue); \
++	(to).transp = compat_ptr((from).transp)
++
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
++
+ struct fb_cmap {
+ 	__u32 start;			/* First entry	*/
+ 	__u32 len;			/* Number of entries */
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 74f5b58..6386ab0 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -146,6 +146,10 @@ static const int cap_last_cap = CAP_LAST_CAP;
+ static unsigned long hung_task_timeout_max = (LONG_MAX/HZ);
+ #endif
+ 
++#ifdef CONFIG_FB_CON_DECOR
++extern char fbcon_decor_path[];
++#endif
++
+ #ifdef CONFIG_INOTIFY_USER
+ #include <linux/inotify.h>
+ #endif
+@@ -255,6 +259,15 @@ static struct ctl_table sysctl_base_table[] = {
+ 		.mode		= 0555,
+ 		.child		= dev_table,
+ 	},
++#ifdef CONFIG_FB_CON_DECOR
++	{
++		.procname	= "fbcondecor",
++		.data		= &fbcon_decor_path,
++		.maxlen		= KMOD_PATH_LEN,
++		.mode		= 0644,
++		.proc_handler	= &proc_dostring,
++	},
++#endif
+ 	{ }
+ };
+ 
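The compat ioctl handler earlier in this patch follows the usual 64-bit kernel
convention: the 32-bit variant of each wrapper struct carries its pointer as a
compat_uptr_t, which the handler widens back to a native pointer with
compat_ptr() before dispatching. As a minimal standalone sketch of that
widening pattern -- with stand-in types, since the real compat_uptr_t and
compat_ptr() live in kernel headers -- consider:

/* Stand-ins for the kernel compat types (assumption: on a 64-bit
 * kernel, compat_uptr_t is a 32-bit integer holding a user pointer). */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t compat_uptr_t;

struct iowrapper32 {            /* 32-bit layout: 4-byte pointer field */
	unsigned short vc;
	unsigned char  origin;
	compat_uptr_t  data;
};

struct iowrapper {              /* native layout: 8-byte pointer field */
	unsigned short vc;
	unsigned char  origin;
	void          *data;
};

/* Mimics the kernel's compat_ptr(): zero-extend the 32-bit value. */
static void *mock_compat_ptr(compat_uptr_t uptr)
{
	return (void *)(uintptr_t)uptr;
}

int main(void)
{
	struct iowrapper32 w32 = { .vc = 1, .origin = 0, .data = 0x1000u };
	struct iowrapper w;

	/* Copy the narrow fields and widen the pointer, field by field,
	 * exactly as the compat handler in the patch does. */
	w.vc     = w32.vc;
	w.origin = w32.origin;
	w.data   = mock_compat_ptr(w32.data);

	printf("32-bit wrapper: %zu bytes, native wrapper: %zu bytes\n",
	       sizeof(w32), sizeof(w));
	return 0;
}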

diff --git a/5000_enable-additional-cpu-optimizations-for-gcc.patch b/5000_enable-additional-cpu-optimizations-for-gcc.patch
new file mode 100644
index 0000000..f7ab6f0
--- /dev/null
+++ b/5000_enable-additional-cpu-optimizations-for-gcc.patch
@@ -0,0 +1,327 @@
+This patch has been tested on and known to work with kernel versions from 3.2
+up to the latest git version (pulled on 12/14/2013).
+
+This patch expands the set of selectable microarchitectures to include newer
+processors: AMD K10-family, AMD Family 10h (Barcelona), AMD Family
+14h (Bobcat), AMD Family 15h (Bulldozer), AMD Family 15h (Piledriver), AMD
+Family 16h (Jaguar), Intel 1st Gen Core i3/i5/i7 (Nehalem), Intel 2nd Gen Core
+i3/i5/i7 (Sandy Bridge), Intel 3rd Gen Core i3/i5/i7 (Ivy Bridge), and Intel
+4th Gen Core i3/i5/i7 (Haswell). It also offers the compiler the 'native' flag.
+
+Small but real speed increases are measurable when using a 'make' benchmark,
+comparing a generic kernel to one built with one of these microarchitectures.
+
+See the following experimental evidence supporting this statement:
+https://github.com/graysky2/kernel_gcc_patch
+
+REQUIREMENTS
+linux version >=3.15
+gcc version <4.9
+
+---
+diff -uprN a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
+--- a/arch/x86/include/asm/module.h	2013-11-03 18:41:51.000000000 -0500
++++ b/arch/x86/include/asm/module.h	2013-12-15 06:21:24.351122516 -0500
+@@ -15,6 +15,16 @@
+ #define MODULE_PROC_FAMILY "586MMX "
+ #elif defined CONFIG_MCORE2
+ #define MODULE_PROC_FAMILY "CORE2 "
++#elif defined CONFIG_MNATIVE
++#define MODULE_PROC_FAMILY "NATIVE "
++#elif defined CONFIG_MCOREI7
++#define MODULE_PROC_FAMILY "COREI7 "
++#elif defined CONFIG_MCOREI7AVX
++#define MODULE_PROC_FAMILY "COREI7AVX "
++#elif defined CONFIG_MCOREAVXI
++#define MODULE_PROC_FAMILY "COREAVXI "
++#elif defined CONFIG_MCOREAVX2
++#define MODULE_PROC_FAMILY "COREAVX2 "
+ #elif defined CONFIG_MATOM
+ #define MODULE_PROC_FAMILY "ATOM "
+ #elif defined CONFIG_M686
+@@ -33,6 +43,18 @@
+ #define MODULE_PROC_FAMILY "K7 "
+ #elif defined CONFIG_MK8
+ #define MODULE_PROC_FAMILY "K8 "
++#elif defined CONFIG_MK10
++#define MODULE_PROC_FAMILY "K10 "
++#elif defined CONFIG_MBARCELONA
++#define MODULE_PROC_FAMILY "BARCELONA "
++#elif defined CONFIG_MBOBCAT
++#define MODULE_PROC_FAMILY "BOBCAT "
++#elif defined CONFIG_MBULLDOZER
++#define MODULE_PROC_FAMILY "BULLDOZER "
++#elif defined CONFIG_MPILEDRIVER
++#define MODULE_PROC_FAMILY "PILEDRIVER "
++#elif defined CONFIG_MJAGUAR
++#define MODULE_PROC_FAMILY "JAGUAR "
+ #elif defined CONFIG_MELAN
+ #define MODULE_PROC_FAMILY "ELAN "
+ #elif defined CONFIG_MCRUSOE
+diff -uprN a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
+--- a/arch/x86/Kconfig.cpu	2013-11-03 18:41:51.000000000 -0500
++++ b/arch/x86/Kconfig.cpu	2013-12-15 06:21:24.351122516 -0500
+@@ -139,7 +139,7 @@ config MPENTIUM4
+ 
+ 
+ config MK6
+-	bool "K6/K6-II/K6-III"
++	bool "AMD K6/K6-II/K6-III"
+ 	depends on X86_32
+ 	---help---
+ 	  Select this for an AMD K6-family processor.  Enables use of
+@@ -147,7 +147,7 @@ config MK6
+ 	  flags to GCC.
+ 
+ config MK7
+-	bool "Athlon/Duron/K7"
++	bool "AMD Athlon/Duron/K7"
+ 	depends on X86_32
+ 	---help---
+ 	  Select this for an AMD Athlon K7-family processor.  Enables use of
+@@ -155,12 +155,55 @@ config MK7
+ 	  flags to GCC.
+ 
+ config MK8
+-	bool "Opteron/Athlon64/Hammer/K8"
++	bool "AMD Opteron/Athlon64/Hammer/K8"
+ 	---help---
+ 	  Select this for an AMD Opteron or Athlon64 Hammer-family processor.
+ 	  Enables use of some extended instructions, and passes appropriate
+ 	  optimization flags to GCC.
+ 
++config MK10
++	bool "AMD 61xx/7x50/PhenomX3/X4/II/K10"
++	---help---
++	  Select this for an AMD 61xx Eight-Core Magny-Cours, Athlon X2 7x50,
++	  Phenom X3/X4/II, Athlon II X2/X3/X4, or Turion II-family processor.
++	  Enables use of some extended instructions, and passes appropriate
++	  optimization flags to GCC.
++
++config MBARCELONA
++	bool "AMD Barcelona"
++	---help---
++	  Select this for AMD Barcelona and newer processors.
++
++	  Enables -march=barcelona
++
++config MBOBCAT
++	bool "AMD Bobcat"
++	---help---
++	  Select this for AMD Bobcat processors.
++
++	  Enables -march=btver1
++
++config MBULLDOZER
++	bool "AMD Bulldozer"
++	---help---
++	  Select this for AMD Bulldozer processors.
++
++	  Enables -march=bdver1
++
++config MPILEDRIVER
++	bool "AMD Piledriver"
++	---help---
++	  Select this for AMD Piledriver processors.
++
++	  Enables -march=bdver2
++
++config MJAGUAR
++	bool "AMD Jaguar"
++	---help---
++	  Select this for AMD Jaguar processors.
++
++	  Enables -march=btver2
++
+ config MCRUSOE
+ 	bool "Crusoe"
+ 	depends on X86_32
+@@ -251,8 +294,17 @@ config MPSC
+ 	  using the cpu family field
+ 	  in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
+ 
++config MATOM
++	bool "Intel Atom"
++	---help---
++
++	  Select this for the Intel Atom platform. Intel Atom CPUs have an
++	  in-order pipelining architecture and thus can benefit from
++	  accordingly optimized code. Use a recent GCC with specific Atom
++	  support in order to fully benefit from selecting this option.
++
+ config MCORE2
+-	bool "Core 2/newer Xeon"
++	bool "Intel Core 2"
+ 	---help---
+ 
+ 	  Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
+@@ -260,14 +312,40 @@ config MCORE2
+ 	  family in /proc/cpuinfo. Newer ones have 6 and older ones 15
+ 	  (not a typo)
+ 
+-config MATOM
+-	bool "Intel Atom"
++	  Enables -march=core2
++
++config MCOREI7
++	bool "Intel Core i7"
+ 	---help---
+ 
+-	  Select this for the Intel Atom platform. Intel Atom CPUs have an
+-	  in-order pipelining architecture and thus can benefit from
+-	  accordingly optimized code. Use a recent GCC with specific Atom
+-	  support in order to fully benefit from selecting this option.
++	  Select this for the Intel Nehalem platform. Intel Nehalem processors
++	  include Core i3/i5/i7 and Xeon 34xx/35xx/55xx/56xx/75xx processors.
++
++	  Enables -march=corei7
++
++config MCOREI7AVX
++	bool "Intel Core 2nd Gen AVX"
++	---help---
++
++	  Select this for 2nd Gen Core processors including Sandy Bridge.
++
++	  Enables -march=corei7-avx
++
++config MCOREAVXI
++	bool "Intel Core 3rd Gen AVX"
++	---help---
++
++	  Select this for 3rd Gen Core processors including Ivy Bridge.
++
++	  Enables -march=core-avx-i
++
++config MCOREAVX2
++	bool "Intel Core AVX2"
++	---help---
++
++	  Select this for AVX2 enabled processors including Haswell.
++
++	  Enables -march=core-avx2
+ 
+ config GENERIC_CPU
+ 	bool "Generic-x86-64"
+@@ -276,6 +354,19 @@ config GENERIC_CPU
+ 	  Generic x86-64 CPU.
+ 	  Run equally well on all x86-64 CPUs.
+ 
++config MNATIVE
++ bool "Native optimizations autodetected by GCC"
++ ---help---
++
++   GCC 4.2 and above support -march=native, which automatically detects
++   the optimum settings to use based on your processor. -march=native
++   also detects and applies additional settings beyond -march specific
++   to your CPU, (eg. -msse4). Unless you have a specific reason not to
++   (e.g. distcc cross-compiling), you should probably be using
++   -march=native rather than anything listed below.
++
++   Enables -march=native
++
+ endchoice
+ 
+ config X86_GENERIC
+@@ -300,7 +391,7 @@ config X86_INTERNODE_CACHE_SHIFT
+ config X86_L1_CACHE_SHIFT
+ 	int
+ 	default "7" if MPENTIUM4 || MPSC
+-	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
++	default "6" if MK7 || MK8 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MPENTIUMM || MCORE2 || MCOREI7 || MCOREI7AVX || MCOREAVXI || MCOREAVX2 || MATOM || MVIAC7 || X86_GENERIC || MNATIVE || GENERIC_CPU
+ 	default "4" if MELAN || M486 || MGEODEGX1
+ 	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
+ 
+@@ -331,11 +422,11 @@ config X86_ALIGNMENT_16
+ 
+ config X86_INTEL_USERCOPY
+ 	def_bool y
+-	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
++	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || MNATIVE || X86_GENERIC || MK8 || MK7 || MK10 || MBARCELONA || MEFFICEON || MCORE2 || MCOREI7 || MCOREI7AVX || MCOREAVXI || MCOREAVX2
+ 
+ config X86_USE_PPRO_CHECKSUM
+ 	def_bool y
+-	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
++	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MK10 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MCOREI7 || MCOREI7AVX || MCOREAVXI || MCOREAVX2 || MATOM || MNATIVE
+ 
+ config X86_USE_3DNOW
+ 	def_bool y
+@@ -363,17 +454,17 @@ config X86_P6_NOP
+ 
+ config X86_TSC
+ 	def_bool y
+-	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
++	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MCOREI7 || MCOREI7AVX || MATOM) || X86_64 || MNATIVE
+ 
+ config X86_CMPXCHG64
+ 	def_bool y
+-	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM
++	depends on X86_PAE || X86_64 || MCORE2 || MCOREI7 || MCOREI7AVX || MCOREAVXI || MCOREAVX2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM || MNATIVE
+ 
+ # this should be set for all -march=.. options where the compiler
+ # generates cmov.
+ config X86_CMOV
+ 	def_bool y
+-	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
++	depends on (MK8 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MK7 || MCORE2 || MCOREI7 || MCOREI7AVX || MCOREAVXI || MCOREAVX2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MNATIVE || MATOM || MGEODE_LX)
+ 
+ config X86_MINIMUM_CPU_FAMILY
+ 	int
+diff -uprN a/arch/x86/Makefile b/arch/x86/Makefile
+--- a/arch/x86/Makefile	2013-11-03 18:41:51.000000000 -0500
++++ b/arch/x86/Makefile	2013-12-15 06:21:24.354455723 -0500
+@@ -61,11 +61,26 @@ else
+ 	KBUILD_CFLAGS += $(call cc-option,-mno-sse -mpreferred-stack-boundary=3)
+ 
+         # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
++        cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
+         cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
++        cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10)
++        cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona)
++        cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1)
++        cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1)
++        cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2)
++        cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2)
+         cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
+ 
+         cflags-$(CONFIG_MCORE2) += \
+-                $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
++                $(call cc-option,-march=core2,$(call cc-option,-mtune=core2))
++        cflags-$(CONFIG_MCOREI7) += \
++                $(call cc-option,-march=corei7,$(call cc-option,-mtune=corei7))
++        cflags-$(CONFIG_MCOREI7AVX) += \
++                $(call cc-option,-march=corei7-avx,$(call cc-option,-mtune=corei7-avx))
++        cflags-$(CONFIG_MCOREAVXI) += \
++                $(call cc-option,-march=core-avx-i,$(call cc-option,-mtune=core-avx-i))
++        cflags-$(CONFIG_MCOREAVX2) += \
++                $(call cc-option,-march=core-avx2,$(call cc-option,-mtune=core-avx2))
+ 	cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
+ 		$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
+         cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
+diff -uprN a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu
+--- a/arch/x86/Makefile_32.cpu	2013-11-03 18:41:51.000000000 -0500
++++ b/arch/x86/Makefile_32.cpu	2013-12-15 06:21:24.354455723 -0500
+@@ -23,7 +23,14 @@ cflags-$(CONFIG_MK6)		+= -march=k6
+ # Please note, that patches that add -march=athlon-xp and friends are pointless.
+ # They make zero difference whatsosever to performance at this time.
+ cflags-$(CONFIG_MK7)		+= -march=athlon
++cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
+ cflags-$(CONFIG_MK8)		+= $(call cc-option,-march=k8,-march=athlon)
++cflags-$(CONFIG_MK10)	+= $(call cc-option,-march=amdfam10,-march=athlon)
++cflags-$(CONFIG_MBARCELONA)	+= $(call cc-option,-march=barcelona,-march=athlon)
++cflags-$(CONFIG_MBOBCAT)	+= $(call cc-option,-march=btver1,-march=athlon)
++cflags-$(CONFIG_MBULLDOZER)	+= $(call cc-option,-march=bdver1,-march=athlon)
++cflags-$(CONFIG_MPILEDRIVER)	+= $(call cc-option,-march=bdver2,-march=athlon)
++cflags-$(CONFIG_MJAGUAR)	+= $(call cc-option,-march=btver2,-march=athlon)
+ cflags-$(CONFIG_MCRUSOE)	+= -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
+ cflags-$(CONFIG_MEFFICEON)	+= -march=i686 $(call tune,pentium3) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
+ cflags-$(CONFIG_MWINCHIPC6)	+= $(call cc-option,-march=winchip-c6,-march=i586)
+@@ -32,6 +39,10 @@ cflags-$(CONFIG_MCYRIXIII)	+= $(call cc-
+ cflags-$(CONFIG_MVIAC3_2)	+= $(call cc-option,-march=c3-2,-march=i686)
+ cflags-$(CONFIG_MVIAC7)		+= -march=i686
+ cflags-$(CONFIG_MCORE2)		+= -march=i686 $(call tune,core2)
++cflags-$(CONFIG_MCOREI7)	+= -march=i686 $(call tune,corei7)
++cflags-$(CONFIG_MCOREI7AVX)	+= -march=i686 $(call tune,corei7-avx)
++cflags-$(CONFIG_MCOREAVXI)	+= -march=i686 $(call tune,core-avx-i)
++cflags-$(CONFIG_MCOREAVX2)	+= -march=i686 $(call tune,core-avx2)
+ cflags-$(CONFIG_MATOM)		+= $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \
+ 	$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
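The Kconfig choices added above map directly onto GCC -march targets
(e.g. MCOREI7 -> corei7, MCOREI7AVX -> corei7-avx, MCOREAVX2 -> core-avx2).
For readers who want to check which of these targets their own CPU can run,
GCC >= 4.8 exposes the relevant ISA probes via __builtin_cpu_supports(); a
small sketch (the option-to-feature mapping in the comments is an
approximation, not taken from the patch):

/* Probe a few of the ISA features behind the -march targets the patch
 * adds: corei7 implies SSE4.2, corei7-avx implies AVX, core-avx2
 * implies AVX2. Build with: gcc -o cpuprobe cpuprobe.c (GCC >= 4.8). */
#include <stdio.h>

int main(void)
{
	__builtin_cpu_init();
	printf("sse4.2 (needed by -march=corei7):     %d\n",
	       __builtin_cpu_supports("sse4.2"));
	printf("avx    (needed by -march=corei7-avx): %d\n",
	       __builtin_cpu_supports("avx"));
	printf("avx2   (needed by -march=core-avx2):  %d\n",
	       __builtin_cpu_supports("avx2"));
	return 0;
}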


^ permalink raw reply related	[flat|nested] 19+ messages in thread

* [gentoo-commits] proj/linux-patches:3.19 commit in: /
@ 2015-01-02 19:08 Mike Pagano
  0 siblings, 0 replies; 19+ messages in thread
From: Mike Pagano @ 2015-01-02 19:08 UTC (permalink / raw
  To: gentoo-commits

commit:     b1dbfed9f92d5977f58ffa78c44e7024f6b2a14b
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jan  2 19:08:35 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jan  2 19:08:35 2015 +0000
URL:        http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=b1dbfed9

Add DEVPTS_MULTIPLE_INSTANCES when GENTOO_LINUX_INIT_SYSTEMD is selected. See bug #534216

---
 4567_distro-Gentoo-Kconfig.patch | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/4567_distro-Gentoo-Kconfig.patch b/4567_distro-Gentoo-Kconfig.patch
index 652e2a7..690454a 100644
--- a/4567_distro-Gentoo-Kconfig.patch
+++ b/4567_distro-Gentoo-Kconfig.patch
@@ -7,9 +7,9 @@
 +source "distro/Kconfig"
 +
  source "arch/$SRCARCH/Kconfig"
---- 	1969-12-31 19:00:00.000000000 -0500
-+++ b/distro/Kconfig	2014-04-02 09:57:03.539218861 -0400
-@@ -0,0 +1,108 @@
+--- a/distro/Kconfig	1969-12-31 19:00:00.000000000 -0500
++++ b/distro/Kconfig	2015-01-02 13:54:45.589830665 -0500
+@@ -0,0 +1,109 @@
 +menu "Gentoo Linux"
 +
 +config GENTOO_LINUX
@@ -87,6 +87,7 @@
 +	select AUTOFS4_FS
 +	select BLK_DEV_BSG
 +	select CGROUPS
++	select DEVPTS_MULTIPLE_INSTANCES
 +	select EPOLL
 +	select FANOTIFY
 +	select FHANDLE


^ permalink raw reply related	[flat|nested] 19+ messages in thread

* [gentoo-commits] proj/linux-patches:3.19 commit in: /
@ 2015-02-09 11:12 Mike Pagano
  0 siblings, 0 replies; 19+ messages in thread
From: Mike Pagano @ 2015-02-09 11:12 UTC (permalink / raw
  To: gentoo-commits

commit:     660f7e02c537cb3413d947fd8aa57970acf707eb
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Feb  9 11:12:45 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Feb  9 11:12:45 2015 +0000
URL:        http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=660f7e02

Remove broken patch for fbcondecor

---
 4200_fbcondecor-3.16.patch | 2119 --------------------------------------------
 1 file changed, 2119 deletions(-)

diff --git a/4200_fbcondecor-3.16.patch b/4200_fbcondecor-3.16.patch
deleted file mode 100644
index c96e5dc..0000000
--- a/4200_fbcondecor-3.16.patch
+++ /dev/null
@@ -1,2119 +0,0 @@
-diff --git a/Documentation/fb/00-INDEX b/Documentation/fb/00-INDEX
-index fe85e7c..2230930 100644
---- a/Documentation/fb/00-INDEX
-+++ b/Documentation/fb/00-INDEX
-@@ -23,6 +23,8 @@ ep93xx-fb.txt
- 	- info on the driver for EP93xx LCD controller.
- fbcon.txt
- 	- intro to and usage guide for the framebuffer console (fbcon).
-+fbcondecor.txt
-+	- info on the Framebuffer Console Decoration
- framebuffer.txt
- 	- introduction to frame buffer devices.
- gxfb.txt
-diff --git a/Documentation/fb/fbcondecor.txt b/Documentation/fb/fbcondecor.txt
-new file mode 100644
-index 0000000..3388c61
---- /dev/null
-+++ b/Documentation/fb/fbcondecor.txt
-@@ -0,0 +1,207 @@
-+What is it?
-+-----------
-+
-+The framebuffer console decorations are a kernel feature that allows a
-+background picture to be displayed on selected consoles.
-+
-+What do I need to get it to work?
-+---------------------------------
-+
-+To get fbcondecor up-and-running you will have to:
-+ 1) get a copy of splashutils [1] or a similar program
-+ 2) get some fbcondecor themes
-+ 3) build the kernel helper program
-+ 4) build your kernel with the FB_CON_DECOR option enabled.
-+
-+To get fbcondecor operational right after fbcon initialization is finished, you
-+will have to include a theme and the kernel helper into your initramfs image.
-+Please refer to splashutils documentation for instructions on how to do that.
-+
-+[1] The splashutils package can be downloaded from:
-+    http://github.com/alanhaggai/fbsplash
-+
-+The userspace helper
-+--------------------
-+
-+The userspace fbcondecor helper (by default: /sbin/fbcondecor_helper) is called
-+by the kernel whenever an important event occurs and a task needs to be
-+carried out in userspace. Important events include console switches and video
-+mode switches (the kernel requests background images and configuration
-+parameters for the current console). The fbcondecor helper must be accessible at
-+all times. If it's not, fbcondecor will be switched off automatically.
-+
-+It's possible to set the path to the fbcondecor helper by writing it to
-+/proc/sys/kernel/fbcondecor.
-+
-+*****************************************************************************
-+
-+The information below is mostly technical stuff. There's probably no need to
-+read it unless you plan to develop a userspace helper.
-+
-+The fbcondecor protocol
-+-----------------------
-+
-+The fbcondecor protocol defines a communication interface between the kernel and
-+the userspace fbcondecor helper.
-+
-+The kernel side is responsible for:
-+
-+ * rendering console text, using an image as a background (instead of the
-+   standard solid color that fbcon uses),
-+ * accepting commands from the user via ioctls on the fbcondecor device,
-+ * calling the userspace helper to set things up as soon as the fb subsystem 
-+   is initialized.
-+
-+The userspace helper is responsible for everything else, including parsing
-+configuration files, decompressing the image files whenever the kernel needs
-+it, and communicating with the kernel if necessary.
-+
-+The fbcondecor protocol specifies how communication is done in both directions:
-+kernel->userspace and userspace->kernel.
-+  
-+Kernel -> Userspace
-+-------------------
-+
-+The kernel communicates with the userspace helper by calling it and specifying
-+the task to be done in a series of arguments.
-+
-+The arguments follow the pattern:
-+<fbcondecor protocol version> <command> <parameters>
-+
-+All commands defined in fbcondecor protocol v2 have the following parameters:
-+ virtual console
-+ framebuffer number
-+ theme
-+
-+Fbcondecor protocol v1 specified an additional 'fbcondecor mode' after the
-+framebuffer number. Fbcondecor protocol v1 is deprecated and should not be used.
-+
-+Fbcondecor protocol v2 specifies the following commands:
-+
-+getpic
-+------
-+ The kernel issues this command to request image data. It's up to the 
-+ userspace  helper to find a background image appropriate for the specified 
-+ theme and the current resolution. The userspace helper should respond by 
-+ issuing the FBIOCONDECOR_SETPIC ioctl.
-+
-+init
-+----
-+ The kernel issues this command after the fbcondecor device is created and
-+ the fbcondecor interface is initialized. Upon receiving 'init', the userspace
-+ helper should parse the kernel command line (/proc/cmdline) or otherwise
-+ decide whether fbcondecor is to be activated.
-+
-+ To activate fbcondecor on the first console the helper should issue the
-+ FBIOCONDECOR_SETCFG, FBIOCONDECOR_SETPIC and FBIOCONDECOR_SETSTATE commands,
-+ in the above-mentioned order.
-+
-+ When the userspace helper is called in an early phase of the boot process
-+ (right after the initialization of fbcon), no filesystems will be mounted.
-+ The helper program should mount sysfs and then create the appropriate
-+ framebuffer, fbcondecor and tty0 devices (if they don't already exist) to get
-+ current display settings and to be able to communicate with the kernel side.
-+ It should probably also mount the procfs to be able to parse the kernel
-+ command line parameters.
-+
-+ Note that the console sem is not held when the kernel calls fbcondecor_helper
-+ with the 'init' command. The fbcondecor helper should perform all ioctls with
-+ origin set to FBCON_DECOR_IO_ORIG_USER.
-+
-+modechange
-+----------
-+ The kernel issues this command on a mode change. The helper's response should
-+ be similar to the response to the 'init' command. Note that this time the
-+ console sem is held and all ioctls must be performed with origin set to
-+ FBCON_DECOR_IO_ORIG_KERNEL.
-+
-+
-+Userspace -> Kernel
-+-------------------
-+
-+Userspace programs can communicate with fbcondecor via ioctls on the
-+fbcondecor device. These ioctls are to be used by both the userspace helper
-+(called only by the kernel) and userspace configuration tools (run by the users).
-+
-+The fbcondecor helper should set the origin field to FBCON_DECOR_IO_ORIG_KERNEL
-+when doing the appropriate ioctls. All userspace configuration tools should
-+use FBCON_DECOR_IO_ORIG_USER. Failure to set the appropriate value in the origin
-+field when performing ioctls from the kernel helper will most likely result
-+in a console deadlock.
-+
-+FBCON_DECOR_IO_ORIG_KERNEL instructs fbcondecor not to try to acquire the console
-+semaphore. Not surprisingly, FBCON_DECOR_IO_ORIG_USER instructs it to acquire
-+the console sem.
-+
-+The framebuffer console decoration provides the following ioctls (all defined in 
-+linux/fb.h):
-+
-+FBIOCONDECOR_SETPIC
-+description: loads a background picture for a virtual console
-+argument: struct fbcon_decor_iowrapper*; data: struct fb_image*
-+notes: 
-+If called for consoles other than the current foreground one, the picture data
-+will be ignored.
-+
-+If the current virtual console is running in an 8-bpp mode, the cmap substruct
-+of fb_image has to be filled appropriately: start should be set to 16 (the
-+first 16 colors are reserved for fbcon), len to a value <= 240, and red, green
-+and blue should point to valid cmap data. The transp field is ignored. The fields
-+dx, dy, bg_color, fg_color in fb_image are ignored as well.
-+
-+FBIOCONDECOR_SETCFG
-+description: sets the fbcondecor config for a virtual console
-+argument: struct fbcon_decor_iowrapper*; data: struct vc_decor*
-+notes: The structure has to be filled with valid data.
-+
-+FBIOCONDECOR_GETCFG
-+description: gets the fbcondecor config for a virtual console
-+argument: struct fbcon_decor_iowrapper*; data: struct vc_decor*
-+
-+FBIOCONDECOR_SETSTATE
-+description: sets the fbcondecor state for a virtual console
-+argument: struct fbcon_decor_iowrapper*; data: unsigned int*
-+          values: 0 = disabled, 1 = enabled.
-+
-+FBIOCONDECOR_GETSTATE
-+description: gets the fbcondecor state for a virtual console
-+argument: struct fbcon_decor_iowrapper*; data: unsigned int*
-+          values: as in FBIOCONDECOR_SETSTATE
-+
-+Info on used structures:
-+
-+Definition of struct vc_decor can be found in linux/console_decor.h. It's
-+heavily commented. Note that the 'theme' field should point to a string
-+no longer than FBCON_DECOR_THEME_LEN. When an FBIOCONDECOR_GETCFG call is
-+performed, the theme field should point to a char buffer of length
-+FBCON_DECOR_THEME_LEN.
-+
-+Definition of struct fbcon_decor_iowrapper can be found in linux/fb.h.
-+The fields in this struct have the following meaning:
-+
-+vc: 
-+Virtual console number.
-+
-+origin: 
-+Specifies if the ioctl is performed as a response to a kernel request. The
-+fbcondecor helper should set this field to FBCON_DECOR_IO_ORIG_KERNEL, userspace
-+programs should set it to FBCON_DECOR_IO_ORIG_USER. This field is necessary to
-+avoid console semaphore deadlocks.
-+
-+data: 
-+Pointer to a data structure appropriate for the performed ioctl. The type of
-+the data struct is specified in the descriptions of the individual ioctls.
-+
-+*****************************************************************************
-+
-+Credit
-+------
-+
-+Original 'bootsplash' project & implementation by:
-+  Volker Poplawski <volker@poplawski.de>, Stefan Reinauer <stepan@suse.de>,
-+  Steffen Winterfeldt <snwint@suse.de>, Michael Schroeder <mls@suse.de>,
-+  Ken Wimer <wimer@suse.de>.
-+
-+Fbcondecor, fbcondecor protocol design, current implementation & docs by:
-+  Michal Januszewski <michalj+fbcondecor@gmail.com>
-+
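To make the wrapper convention described above concrete, here is a minimal,
hypothetical userspace sketch issuing FBIOCONDECOR_GETSTATE the way this
documentation prescribes. The struct layout and ioctl number are copied from
the (now removed) patch; the /dev/fbcondecor device path is an assumption
based on the misc device name, so treat the whole program as illustrative:

/* Query the fbcondecor state of virtual console 0: fill the iowrapper,
 * point 'data' at an unsigned int, set origin to the userspace value. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <unistd.h>

struct fbcon_decor_iowrapper {	/* illustrative copy of the uapi struct */
	unsigned short vc;	/* Virtual console */
	unsigned char origin;	/* Point of origin of the request */
	void *data;
};

#define FBCON_DECOR_IO_ORIG_USER 1
#define FBIOCONDECOR_GETSTATE	_IOR('F', 0x1C, struct fbcon_decor_iowrapper)

int main(void)
{
	unsigned int state = 0;
	struct fbcon_decor_iowrapper w = {
		.vc = 0, .origin = FBCON_DECOR_IO_ORIG_USER, .data = &state,
	};
	int fd = open("/dev/fbcondecor", O_RDONLY);	/* assumed node path */

	if (fd < 0) {
		perror("open /dev/fbcondecor");
		return 1;
	}
	if (ioctl(fd, FBIOCONDECOR_GETSTATE, &w) < 0)
		perror("FBIOCONDECOR_GETSTATE");
	else
		printf("decor state on vc0: %u (0 = off, 1 = on)\n", state);
	close(fd);
	return 0;
}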
-diff --git a/drivers/Makefile b/drivers/Makefile
-index 7183b6a..d576148 100644
---- a/drivers/Makefile
-+++ b/drivers/Makefile
-@@ -17,6 +17,10 @@ obj-y				+= pwm/
- obj-$(CONFIG_PCI)		+= pci/
- obj-$(CONFIG_PARISC)		+= parisc/
- obj-$(CONFIG_RAPIDIO)		+= rapidio/
-+# tty/ comes before char/ so that the VT console is the boot-time
-+# default.
-+obj-y				+= tty/
-+obj-y				+= char/
- obj-y				+= video/
- obj-y				+= idle/
- 
-@@ -42,11 +46,6 @@ obj-$(CONFIG_REGULATOR)		+= regulator/
- # reset controllers early, since gpu drivers might rely on them to initialize
- obj-$(CONFIG_RESET_CONTROLLER)	+= reset/
- 
--# tty/ comes before char/ so that the VT console is the boot-time
--# default.
--obj-y				+= tty/
--obj-y				+= char/
--
- # gpu/ comes after char for AGP vs DRM startup
- obj-y				+= gpu/
- 
-diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
-index fe1cd01..6d2e87a 100644
---- a/drivers/video/console/Kconfig
-+++ b/drivers/video/console/Kconfig
-@@ -126,6 +126,19 @@ config FRAMEBUFFER_CONSOLE_ROTATION
-          such that other users of the framebuffer will remain normally
-          oriented.
- 
-+config FB_CON_DECOR
-+	bool "Support for the Framebuffer Console Decorations"
-+	depends on FRAMEBUFFER_CONSOLE=y && !FB_TILEBLITTING
-+	default n
-+	---help---
-+	  This option enables support for framebuffer console decorations which
-+	  makes it possible to display images in the background of the system
-+	  consoles.  Note that userspace utilities are necessary in order to take 
-+	  advantage of these features. Refer to Documentation/fb/fbcondecor.txt 
-+	  for more information.
-+
-+	  If unsure, say N.
-+
- config STI_CONSOLE
-         bool "STI text console"
-         depends on PARISC
-diff --git a/drivers/video/console/Makefile b/drivers/video/console/Makefile
-index 43bfa48..cc104b6 100644
---- a/drivers/video/console/Makefile
-+++ b/drivers/video/console/Makefile
-@@ -16,4 +16,5 @@ obj-$(CONFIG_FRAMEBUFFER_CONSOLE)     += fbcon_rotate.o fbcon_cw.o fbcon_ud.o \
-                                          fbcon_ccw.o
- endif
- 
-+obj-$(CONFIG_FB_CON_DECOR)     	  += fbcondecor.o cfbcondecor.o
- obj-$(CONFIG_FB_STI)              += sticore.o
-diff --git a/drivers/video/console/bitblit.c b/drivers/video/console/bitblit.c
-index 61b182b..984384b 100644
---- a/drivers/video/console/bitblit.c
-+++ b/drivers/video/console/bitblit.c
-@@ -18,6 +18,7 @@
- #include <linux/console.h>
- #include <asm/types.h>
- #include "fbcon.h"
-+#include "fbcondecor.h"
- 
- /*
-  * Accelerated handlers.
-@@ -55,6 +56,13 @@ static void bit_bmove(struct vc_data *vc, struct fb_info *info, int sy,
- 	area.height = height * vc->vc_font.height;
- 	area.width = width * vc->vc_font.width;
- 
-+	if (fbcon_decor_active(info, vc)) {
-+ 		area.sx += vc->vc_decor.tx;
-+ 		area.sy += vc->vc_decor.ty;
-+ 		area.dx += vc->vc_decor.tx;
-+ 		area.dy += vc->vc_decor.ty;
-+ 	}
-+
- 	info->fbops->fb_copyarea(info, &area);
- }
- 
-@@ -380,11 +388,15 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
- 	cursor.image.depth = 1;
- 	cursor.rop = ROP_XOR;
- 
--	if (info->fbops->fb_cursor)
--		err = info->fbops->fb_cursor(info, &cursor);
-+	if (fbcon_decor_active(info, vc)) {
-+		fbcon_decor_cursor(info, &cursor);
-+	} else {
-+		if (info->fbops->fb_cursor)
-+			err = info->fbops->fb_cursor(info, &cursor);
- 
--	if (err)
--		soft_cursor(info, &cursor);
-+		if (err)
-+			soft_cursor(info, &cursor);
-+	}
- 
- 	ops->cursor_reset = 0;
- }
-diff --git a/drivers/video/console/cfbcondecor.c b/drivers/video/console/cfbcondecor.c
-new file mode 100644
-index 0000000..a2b4497
---- /dev/null
-+++ b/drivers/video/console/cfbcondecor.c
-@@ -0,0 +1,471 @@
-+/*
-+ *  linux/drivers/video/cfbcon_decor.c -- Framebuffer decor render functions
-+ *
-+ *  Copyright (C) 2004 Michal Januszewski <michalj+fbcondecor@gmail.com>
-+ *
-+ *  Code based upon "Bootdecor" (C) 2001-2003
-+ *       Volker Poplawski <volker@poplawski.de>,
-+ *       Stefan Reinauer <stepan@suse.de>,
-+ *       Steffen Winterfeldt <snwint@suse.de>,
-+ *       Michael Schroeder <mls@suse.de>,
-+ *       Ken Wimer <wimer@suse.de>.
-+ *
-+ *  This file is subject to the terms and conditions of the GNU General Public
-+ *  License.  See the file COPYING in the main directory of this archive for
-+ *  more details.
-+ */
-+#include <linux/module.h>
-+#include <linux/types.h>
-+#include <linux/fb.h>
-+#include <linux/selection.h>
-+#include <linux/slab.h>
-+#include <linux/vt_kern.h>
-+#include <asm/irq.h>
-+
-+#include "fbcon.h"
-+#include "fbcondecor.h"
-+
-+#define parse_pixel(shift,bpp,type)						\
-+	do {									\
-+		if (d & (0x80 >> (shift)))					\
-+			dd2[(shift)] = fgx;					\
-+		else								\
-+			dd2[(shift)] = transparent ? *(type *)decor_src : bgx;	\
-+		decor_src += (bpp);						\
-+	} while (0)								\
-+
-+extern int get_color(struct vc_data *vc, struct fb_info *info,
-+		     u16 c, int is_fg);
-+
-+void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc)
-+{
-+	int i, j, k;
-+	int minlen = min(min(info->var.red.length, info->var.green.length),
-+			     info->var.blue.length);
-+	u32 col;
-+
-+	for (j = i = 0; i < 16; i++) {
-+		k = color_table[i];
-+
-+		col = ((vc->vc_palette[j++]  >> (8-minlen))
-+			<< info->var.red.offset);
-+		col |= ((vc->vc_palette[j++] >> (8-minlen))
-+			<< info->var.green.offset);
-+		col |= ((vc->vc_palette[j++] >> (8-minlen))
-+			<< info->var.blue.offset);
-+			((u32 *)info->pseudo_palette)[k] = col;
-+	}
-+}
-+
-+void fbcon_decor_renderc(struct fb_info *info, int ypos, int xpos, int height,
-+		      int width, u8* src, u32 fgx, u32 bgx, u8 transparent)
-+{
-+	unsigned int x, y;
-+	u32 dd;
-+	int bytespp = ((info->var.bits_per_pixel + 7) >> 3);
-+	unsigned int d = ypos * info->fix.line_length + xpos * bytespp;
-+	unsigned int ds = (ypos * info->var.xres + xpos) * bytespp;
-+	u16 dd2[4];
-+
-+	u8* decor_src = (u8 *)(info->bgdecor.data + ds);
-+	u8* dst = (u8 *)(info->screen_base + d);
-+
-+	if ((ypos + height) > info->var.yres || (xpos + width) > info->var.xres)
-+		return;
-+
-+	for (y = 0; y < height; y++) {
-+		switch (info->var.bits_per_pixel) {
-+
-+		case 32:
-+			for (x = 0; x < width; x++) {
-+
-+				if ((x & 7) == 0)
-+					d = *src++;
-+				if (d & 0x80)
-+					dd = fgx;
-+				else
-+					dd = transparent ?
-+					     *(u32 *)decor_src : bgx;
-+
-+				d <<= 1;
-+				decor_src += 4;
-+				fb_writel(dd, dst);
-+				dst += 4;
-+			}
-+			break;
-+		case 24:
-+			for (x = 0; x < width; x++) {
-+
-+				if ((x & 7) == 0)
-+					d = *src++;
-+				if (d & 0x80)
-+					dd = fgx;
-+				else
-+					dd = transparent ?
-+					     (*(u32 *)decor_src & 0xffffff) : bgx;
-+
-+				d <<= 1;
-+				decor_src += 3;
-+#ifdef __LITTLE_ENDIAN
-+				fb_writew(dd & 0xffff, dst);
-+				dst += 2;
-+				fb_writeb((dd >> 16), dst);
-+#else
-+				fb_writew(dd >> 8, dst);
-+				dst += 2;
-+				fb_writeb(dd & 0xff, dst);
-+#endif
-+				dst++;
-+			}
-+			break;
-+		case 16:
-+			for (x = 0; x < width; x += 2) {
-+				if ((x & 7) == 0)
-+					d = *src++;
-+
-+				parse_pixel(0, 2, u16);
-+				parse_pixel(1, 2, u16);
-+#ifdef __LITTLE_ENDIAN
-+				dd = dd2[0] | (dd2[1] << 16);
-+#else
-+				dd = dd2[1] | (dd2[0] << 16);
-+#endif
-+				d <<= 2;
-+				fb_writel(dd, dst);
-+				dst += 4;
-+			}
-+			break;
-+
-+		case 8:
-+			for (x = 0; x < width; x += 4) {
-+				if ((x & 7) == 0)
-+					d = *src++;
-+
-+				parse_pixel(0, 1, u8);
-+				parse_pixel(1, 1, u8);
-+				parse_pixel(2, 1, u8);
-+				parse_pixel(3, 1, u8);
-+
-+#ifdef __LITTLE_ENDIAN
-+				dd = dd2[0] | (dd2[1] << 8) | (dd2[2] << 16) | (dd2[3] << 24);
-+#else
-+				dd = dd2[3] | (dd2[2] << 8) | (dd2[1] << 16) | (dd2[0] << 24);
-+#endif
-+				d <<= 4;
-+				fb_writel(dd, dst);
-+				dst += 4;
-+			}
-+		}
-+
-+		dst += info->fix.line_length - width * bytespp;
-+		decor_src += (info->var.xres - width) * bytespp;
-+	}
-+}
-+
-+#define cc2cx(a) 						\
-+	((info->fix.visual == FB_VISUAL_TRUECOLOR || 		\
-+	  info->fix.visual == FB_VISUAL_DIRECTCOLOR) ? 		\
-+	 ((u32*)info->pseudo_palette)[a] : a)
-+
-+void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info,
-+		   const unsigned short *s, int count, int yy, int xx)
-+{
-+	unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
-+	struct fbcon_ops *ops = info->fbcon_par;
-+	int fg_color, bg_color, transparent;
-+	u8 *src;
-+	u32 bgx, fgx;
-+	u16 c = scr_readw(s);
-+
-+	fg_color = get_color(vc, info, c, 1);
-+        bg_color = get_color(vc, info, c, 0);
-+
-+	/* Don't paint the background image if console is blanked */
-+	transparent = ops->blank_state ? 0 :
-+		(vc->vc_decor.bg_color == bg_color);
-+
-+	xx = xx * vc->vc_font.width + vc->vc_decor.tx;
-+	yy = yy * vc->vc_font.height + vc->vc_decor.ty;
-+
-+	fgx = cc2cx(fg_color);
-+	bgx = cc2cx(bg_color);
-+
-+	while (count--) {
-+		c = scr_readw(s++);
-+		src = vc->vc_font.data + (c & charmask) * vc->vc_font.height *
-+		      ((vc->vc_font.width + 7) >> 3);
-+
-+		fbcon_decor_renderc(info, yy, xx, vc->vc_font.height,
-+			       vc->vc_font.width, src, fgx, bgx, transparent);
-+		xx += vc->vc_font.width;
-+	}
-+}
-+
-+void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor)
-+{
-+	int i;
-+	unsigned int dsize, s_pitch;
-+	struct fbcon_ops *ops = info->fbcon_par;
-+	struct vc_data* vc;
-+	u8 *src;
-+
-+	/* we really don't need any cursors while the console is blanked */
-+	if (info->state != FBINFO_STATE_RUNNING || ops->blank_state)
-+		return;
-+
-+	vc = vc_cons[ops->currcon].d;
-+
-+	src = kmalloc(64 + sizeof(struct fb_image), GFP_ATOMIC);
-+	if (!src)
-+		return;
-+
-+	s_pitch = (cursor->image.width + 7) >> 3;
-+	dsize = s_pitch * cursor->image.height;
-+	if (cursor->enable) {
-+		switch (cursor->rop) {
-+		case ROP_XOR:
-+			for (i = 0; i < dsize; i++)
-+				src[i] = cursor->image.data[i] ^ cursor->mask[i];
-+                        break;
-+		case ROP_COPY:
-+		default:
-+			for (i = 0; i < dsize; i++)
-+				src[i] = cursor->image.data[i] & cursor->mask[i];
-+			break;
-+		}
-+	} else
-+		memcpy(src, cursor->image.data, dsize);
-+
-+	fbcon_decor_renderc(info,
-+			cursor->image.dy + vc->vc_decor.ty,
-+			cursor->image.dx + vc->vc_decor.tx,
-+			cursor->image.height,
-+			cursor->image.width,
-+			(u8*)src,
-+			cc2cx(cursor->image.fg_color),
-+			cc2cx(cursor->image.bg_color),
-+			cursor->image.bg_color == vc->vc_decor.bg_color);
-+
-+	kfree(src);
-+}
-+
-+static void decorset(u8 *dst, int height, int width, int dstbytes,
-+		        u32 bgx, int bpp)
-+{
-+	int i;
-+
-+	if (bpp == 8)
-+		bgx |= bgx << 8;
-+	if (bpp == 16 || bpp == 8)
-+		bgx |= bgx << 16;
-+
-+	while (height-- > 0) {
-+		u8 *p = dst;
-+
-+		switch (bpp) {
-+
-+		case 32:
-+			for (i=0; i < width; i++) {
-+				fb_writel(bgx, p); p += 4;
-+			}
-+			break;
-+		case 24:
-+			for (i=0; i < width; i++) {
-+#ifdef __LITTLE_ENDIAN
-+				fb_writew((bgx & 0xffff),(u16*)p); p += 2;
-+				fb_writeb((bgx >> 16),p++);
-+#else
-+				fb_writew((bgx >> 8),(u16*)p); p += 2;
-+				fb_writeb((bgx & 0xff),p++);
-+#endif
-+			}
-+			break;
-+		case 16:
-+			for (i=0; i < width/4; i++) {
-+				fb_writel(bgx,p); p += 4;
-+				fb_writel(bgx,p); p += 4;
-+			}
-+			if (width & 2) {
-+				fb_writel(bgx,p); p += 4;
-+			}
-+			if (width & 1)
-+				fb_writew(bgx,(u16*)p);
-+			break;
-+		case 8:
-+			for (i=0; i < width/4; i++) {
-+				fb_writel(bgx,p); p += 4;
-+			}
-+
-+			if (width & 2) {
-+				fb_writew(bgx,p); p += 2;
-+			}
-+			if (width & 1)
-+				fb_writeb(bgx,(u8*)p);
-+			break;
-+
-+		}
-+		dst += dstbytes;
-+	}
-+}
-+
-+void fbcon_decor_copy(u8 *dst, u8 *src, int height, int width, int linebytes,
-+		   int srclinebytes, int bpp)
-+{
-+	int i;
-+
-+	while (height-- > 0) {
-+		u32 *p = (u32 *)dst;
-+		u32 *q = (u32 *)src;
-+
-+		switch (bpp) {
-+
-+		case 32:
-+			for (i=0; i < width; i++)
-+				fb_writel(*q++, p++);
-+			break;
-+		case 24:
-+			for (i=0; i < (width*3/4); i++)
-+				fb_writel(*q++, p++);
-+			if ((width*3) % 4) {
-+				if (width & 2) {
-+					fb_writeb(*(u8*)q, (u8*)p);
-+				} else if (width & 1) {
-+					fb_writew(*(u16*)q, (u16*)p);
-+					fb_writeb(*(u8*)((u16*)q+1),(u8*)((u16*)p+2));
-+				}
-+			}
-+			break;
-+		case 16:
-+			for (i=0; i < width/4; i++) {
-+				fb_writel(*q++, p++);
-+				fb_writel(*q++, p++);
-+			}
-+			if (width & 2)
-+				fb_writel(*q++, p++);
-+			if (width & 1)
-+				fb_writew(*(u16*)q, (u16*)p);
-+			break;
-+		case 8:
-+			for (i=0; i < width/4; i++)
-+				fb_writel(*q++, p++);
-+
-+			if (width & 2) {
-+				fb_writew(*(u16*)q, (u16*)p);
-+				q = (u32*) ((u16*)q + 1);
-+				p = (u32*) ((u16*)p + 1);
-+			}
-+			if (width & 1)
-+				fb_writeb(*(u8*)q, (u8*)p);
-+			break;
-+		}
-+
-+		dst += linebytes;
-+		src += srclinebytes;
-+	}
-+}
-+
-+static void decorfill(struct fb_info *info, int sy, int sx, int height,
-+		       int width)
-+{
-+	int bytespp = ((info->var.bits_per_pixel + 7) >> 3);
-+	int d  = sy * info->fix.line_length + sx * bytespp;
-+	int ds = (sy * info->var.xres + sx) * bytespp;
-+
-+	fbcon_decor_copy((u8 *)(info->screen_base + d), (u8 *)(info->bgdecor.data + ds),
-+		    height, width, info->fix.line_length, info->var.xres * bytespp,
-+		    info->var.bits_per_pixel);
-+}
-+
-+void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx,
-+		    int height, int width)
-+{
-+	int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
-+	struct fbcon_ops *ops = info->fbcon_par;
-+	u8 *dst;
-+	int transparent, bg_color = attr_bgcol_ec(bgshift, vc, info);
-+
-+	transparent = (vc->vc_decor.bg_color == bg_color);
-+	sy = sy * vc->vc_font.height + vc->vc_decor.ty;
-+	sx = sx * vc->vc_font.width + vc->vc_decor.tx;
-+	height *= vc->vc_font.height;
-+	width *= vc->vc_font.width;
-+
-+	/* Don't paint the background image if console is blanked */
-+	if (transparent && !ops->blank_state) {
-+		decorfill(info, sy, sx, height, width);
-+	} else {
-+		dst = (u8 *)(info->screen_base + sy * info->fix.line_length +
-+			     sx * ((info->var.bits_per_pixel + 7) >> 3));
-+		decorset(dst, height, width, info->fix.line_length, cc2cx(bg_color),
-+			  info->var.bits_per_pixel);
-+	}
-+}
-+
-+void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info,
-+			    int bottom_only)
-+{
-+	unsigned int tw = vc->vc_cols*vc->vc_font.width;
-+	unsigned int th = vc->vc_rows*vc->vc_font.height;
-+
-+	if (!bottom_only) {
-+		/* top margin */
-+		decorfill(info, 0, 0, vc->vc_decor.ty, info->var.xres);
-+		/* left margin */
-+		decorfill(info, vc->vc_decor.ty, 0, th, vc->vc_decor.tx);
-+		/* right margin */
-+		decorfill(info, vc->vc_decor.ty, vc->vc_decor.tx + tw, th, 
-+			   info->var.xres - vc->vc_decor.tx - tw);
-+	}
-+	decorfill(info, vc->vc_decor.ty + th, 0, 
-+		   info->var.yres - vc->vc_decor.ty - th, info->var.xres);
-+}
-+
-+void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, 
-+			   int sx, int dx, int width)
-+{
-+	u16 *d = (u16 *) (vc->vc_origin + vc->vc_size_row * y + dx * 2);
-+	u16 *s = d + (dx - sx);
-+	u16 *start = d;
-+	u16 *ls = d;
-+	u16 *le = d + width;
-+	u16 c;
-+	int x = dx;
-+	u16 attr = 1;
-+
-+	do {
-+		c = scr_readw(d);
-+		if (attr != (c & 0xff00)) {
-+			attr = c & 0xff00;
-+			if (d > start) {
-+				fbcon_decor_putcs(vc, info, start, d - start, y, x);
-+				x += d - start;
-+				start = d;
-+			}
-+		}
-+		if (s >= ls && s < le && c == scr_readw(s)) {
-+			if (d > start) {
-+				fbcon_decor_putcs(vc, info, start, d - start, y, x);
-+				x += d - start + 1;
-+				start = d + 1;
-+			} else {
-+				x++;
-+				start++;
-+			}
-+		}
-+		s++;
-+		d++;
-+	} while (d < le);
-+	if (d > start)
-+		fbcon_decor_putcs(vc, info, start, d - start, y, x);
-+}
-+
-+void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank)
-+{
-+	if (blank) {
-+		decorset((u8 *)info->screen_base, info->var.yres, info->var.xres,
-+			  info->fix.line_length, 0, info->var.bits_per_pixel);
-+	} else {
-+		update_screen(vc);
-+		fbcon_decor_clear_margins(vc, info, 0);
-+	}
-+}
-+
-diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
-index f447734..1a840c2 100644
---- a/drivers/video/console/fbcon.c
-+++ b/drivers/video/console/fbcon.c
-@@ -79,6 +79,7 @@
- #include <asm/irq.h>
- 
- #include "fbcon.h"
-+#include "fbcondecor.h"
- 
- #ifdef FBCONDEBUG
- #  define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
-@@ -94,7 +95,7 @@ enum {
- 
- static struct display fb_display[MAX_NR_CONSOLES];
- 
--static signed char con2fb_map[MAX_NR_CONSOLES];
-+signed char con2fb_map[MAX_NR_CONSOLES];
- static signed char con2fb_map_boot[MAX_NR_CONSOLES];
- 
- static int logo_lines;
-@@ -286,7 +287,7 @@ static inline int fbcon_is_inactive(struct vc_data *vc, struct fb_info *info)
- 		!vt_force_oops_output(vc);
- }
- 
--static int get_color(struct vc_data *vc, struct fb_info *info,
-+int get_color(struct vc_data *vc, struct fb_info *info,
- 	      u16 c, int is_fg)
- {
- 	int depth = fb_get_color_depth(&info->var, &info->fix);
-@@ -551,6 +552,9 @@ static int do_fbcon_takeover(int show_logo)
- 		info_idx = -1;
- 	} else {
- 		fbcon_has_console_bind = 1;
-+#ifdef CONFIG_FB_CON_DECOR
-+		fbcon_decor_init();
-+#endif
- 	}
- 
- 	return err;
-@@ -1007,6 +1011,12 @@ static const char *fbcon_startup(void)
- 	rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
- 	cols /= vc->vc_font.width;
- 	rows /= vc->vc_font.height;
-+
-+	if (fbcon_decor_active(info, vc)) {
-+		cols = vc->vc_decor.twidth / vc->vc_font.width;
-+		rows = vc->vc_decor.theight / vc->vc_font.height;
-+	}
-+
- 	vc_resize(vc, cols, rows);
- 
- 	DPRINTK("mode:   %s\n", info->fix.id);
-@@ -1036,7 +1046,7 @@ static void fbcon_init(struct vc_data *vc, int init)
- 	cap = info->flags;
- 
- 	if (vc != svc || logo_shown == FBCON_LOGO_DONTSHOW ||
--	    (info->fix.type == FB_TYPE_TEXT))
-+	    (info->fix.type == FB_TYPE_TEXT) || fbcon_decor_active(info, vc))
- 		logo = 0;
- 
- 	if (var_to_display(p, &info->var, info))
-@@ -1260,6 +1270,11 @@ static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height,
- 		fbcon_clear_margins(vc, 0);
- 	}
- 
-+ 	if (fbcon_decor_active(info, vc)) {
-+ 		fbcon_decor_clear(vc, info, sy, sx, height, width);
-+ 		return;
-+ 	}
-+
- 	/* Split blits that cross physical y_wrap boundary */
- 
- 	y_break = p->vrows - p->yscroll;
-@@ -1279,10 +1294,15 @@ static void fbcon_putcs(struct vc_data *vc, const unsigned short *s,
- 	struct display *p = &fb_display[vc->vc_num];
- 	struct fbcon_ops *ops = info->fbcon_par;
- 
--	if (!fbcon_is_inactive(vc, info))
--		ops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
--			   get_color(vc, info, scr_readw(s), 1),
--			   get_color(vc, info, scr_readw(s), 0));
-+	if (!fbcon_is_inactive(vc, info)) {
-+
-+		if (fbcon_decor_active(info, vc))
-+			fbcon_decor_putcs(vc, info, s, count, ypos, xpos);
-+		else
-+			ops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
-+				   get_color(vc, info, scr_readw(s), 1),
-+				   get_color(vc, info, scr_readw(s), 0));
-+	}
- }
- 
- static void fbcon_putc(struct vc_data *vc, int c, int ypos, int xpos)
-@@ -1298,8 +1318,13 @@ static void fbcon_clear_margins(struct vc_data *vc, int bottom_only)
- 	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
- 	struct fbcon_ops *ops = info->fbcon_par;
- 
--	if (!fbcon_is_inactive(vc, info))
--		ops->clear_margins(vc, info, bottom_only);
-+	if (!fbcon_is_inactive(vc, info)) {
-+	 	if (fbcon_decor_active(info, vc)) {
-+	 		fbcon_decor_clear_margins(vc, info, bottom_only);
-+ 		} else {
-+			ops->clear_margins(vc, info, bottom_only);
-+		}
-+	}
- }
- 
- static void fbcon_cursor(struct vc_data *vc, int mode)
-@@ -1819,7 +1844,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
- 			count = vc->vc_rows;
- 		if (softback_top)
- 			fbcon_softback_note(vc, t, count);
--		if (logo_shown >= 0)
-+		if (logo_shown >= 0 || fbcon_decor_active(info, vc))
- 			goto redraw_up;
- 		switch (p->scrollmode) {
- 		case SCROLL_MOVE:
-@@ -1912,6 +1937,8 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
- 			count = vc->vc_rows;
- 		if (logo_shown >= 0)
- 			goto redraw_down;
-+		if (fbcon_decor_active(info, vc))
-+			goto redraw_down;
- 		switch (p->scrollmode) {
- 		case SCROLL_MOVE:
- 			fbcon_redraw_blit(vc, info, p, b - 1, b - t - count,
-@@ -2060,6 +2087,13 @@ static void fbcon_bmove_rec(struct vc_data *vc, struct display *p, int sy, int s
- 		}
- 		return;
- 	}
-+
-+	if (fbcon_decor_active(info, vc) && sy == dy && height == 1) {
-+ 		/* must use slower redraw bmove to keep background pic intact */
-+ 		fbcon_decor_bmove_redraw(vc, info, sy, sx, dx, width);
-+ 		return;
-+ 	}
-+
- 	ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx,
- 		   height, width);
- }
-@@ -2130,8 +2164,8 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
- 	var.yres = virt_h * virt_fh;
- 	x_diff = info->var.xres - var.xres;
- 	y_diff = info->var.yres - var.yres;
--	if (x_diff < 0 || x_diff > virt_fw ||
--	    y_diff < 0 || y_diff > virt_fh) {
-+	if ((x_diff < 0 || x_diff > virt_fw ||
-+		y_diff < 0 || y_diff > virt_fh) && !vc->vc_decor.state) {
- 		const struct fb_videomode *mode;
- 
- 		DPRINTK("attempting resize %ix%i\n", var.xres, var.yres);
-@@ -2167,6 +2201,21 @@ static int fbcon_switch(struct vc_data *vc)
- 
- 	info = registered_fb[con2fb_map[vc->vc_num]];
- 	ops = info->fbcon_par;
-+	prev_console = ops->currcon;
-+	if (prev_console != -1)
-+		old_info = registered_fb[con2fb_map[prev_console]];
-+
-+#ifdef CONFIG_FB_CON_DECOR
-+	if (!fbcon_decor_active_vc(vc) && info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
-+		struct vc_data *vc_curr = vc_cons[prev_console].d;
-+		if (vc_curr && fbcon_decor_active_vc(vc_curr)) {
-+			/* Clear the screen to avoid displaying funky colors during
-+			 * palette updates. */
-+			memset((u8*)info->screen_base + info->fix.line_length * info->var.yoffset,
-+			       0, info->var.yres * info->fix.line_length);
-+		}
-+	}
-+#endif
- 
- 	if (softback_top) {
- 		if (softback_lines)
-@@ -2185,9 +2234,6 @@ static int fbcon_switch(struct vc_data *vc)
- 		logo_shown = FBCON_LOGO_CANSHOW;
- 	}
- 
--	prev_console = ops->currcon;
--	if (prev_console != -1)
--		old_info = registered_fb[con2fb_map[prev_console]];
- 	/*
- 	 * FIXME: If we have multiple fbdev's loaded, we need to
- 	 * update all info->currcon.  Perhaps, we can place this
-@@ -2231,6 +2277,18 @@ static int fbcon_switch(struct vc_data *vc)
- 			fbcon_del_cursor_timer(old_info);
- 	}
- 
-+	if (fbcon_decor_active_vc(vc)) {
-+		struct vc_data *vc_curr = vc_cons[prev_console].d;
-+
-+		if (!vc_curr->vc_decor.theme ||
-+			strcmp(vc->vc_decor.theme, vc_curr->vc_decor.theme) ||
-+			(fbcon_decor_active_nores(info, vc_curr) &&
-+			 !fbcon_decor_active(info, vc_curr))) {
-+			fbcon_decor_disable(vc, 0);
-+			fbcon_decor_call_helper("modechange", vc->vc_num);
-+		}
-+	}
-+
- 	if (fbcon_is_inactive(vc, info) ||
- 	    ops->blank_state != FB_BLANK_UNBLANK)
- 		fbcon_del_cursor_timer(info);
-@@ -2339,15 +2397,20 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
- 		}
- 	}
- 
-- 	if (!fbcon_is_inactive(vc, info)) {
-+	if (!fbcon_is_inactive(vc, info)) {
- 		if (ops->blank_state != blank) {
- 			ops->blank_state = blank;
- 			fbcon_cursor(vc, blank ? CM_ERASE : CM_DRAW);
- 			ops->cursor_flash = (!blank);
- 
--			if (!(info->flags & FBINFO_MISC_USEREVENT))
--				if (fb_blank(info, blank))
--					fbcon_generic_blank(vc, info, blank);
-+			if (!(info->flags & FBINFO_MISC_USEREVENT)) {
-+				if (fb_blank(info, blank)) {
-+					if (fbcon_decor_active(info, vc))
-+						fbcon_decor_blank(vc, info, blank);
-+					else
-+						fbcon_generic_blank(vc, info, blank);
-+				}
-+			}
- 		}
- 
- 		if (!blank)
-@@ -2522,13 +2585,22 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
- 	}
- 
- 	if (resize) {
-+		/* reset wrap/pan */
- 		int cols, rows;
- 
- 		cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
- 		rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
-+
-+		if (fbcon_decor_active(info, vc)) {
-+			info->var.xoffset = info->var.yoffset = p->yscroll = 0;
-+			cols = vc->vc_decor.twidth;
-+			rows = vc->vc_decor.theight;
-+		}
- 		cols /= w;
- 		rows /= h;
-+
- 		vc_resize(vc, cols, rows);
-+
- 		if (CON_IS_VISIBLE(vc) && softback_buf)
- 			fbcon_update_softback(vc);
- 	} else if (CON_IS_VISIBLE(vc)
-@@ -2657,7 +2729,11 @@ static int fbcon_set_palette(struct vc_data *vc, unsigned char *table)
- 	int i, j, k, depth;
- 	u8 val;
- 
--	if (fbcon_is_inactive(vc, info))
-+	if (fbcon_is_inactive(vc, info)
-+#ifdef CONFIG_FB_CON_DECOR
-+			|| vc->vc_num != fg_console
-+#endif
-+		)
- 		return -EINVAL;
- 
- 	if (!CON_IS_VISIBLE(vc))
-@@ -2683,14 +2759,56 @@ static int fbcon_set_palette(struct vc_data *vc, unsigned char *table)
- 	} else
- 		fb_copy_cmap(fb_default_cmap(1 << depth), &palette_cmap);
- 
--	return fb_set_cmap(&palette_cmap, info);
-+	if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
-+	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
-+
-+		u16 *red, *green, *blue;
-+		int minlen = min(min(info->var.red.length, info->var.green.length),
-+				     info->var.blue.length);
-+		int h;
-+
-+		struct fb_cmap cmap = {
-+			.start = 0,
-+			.len = (1 << minlen),
-+			.red = NULL,
-+			.green = NULL,
-+			.blue = NULL,
-+			.transp = NULL
-+		};
-+
-+		red = kmalloc(256 * sizeof(u16) * 3, GFP_KERNEL);
-+
-+		if (!red)
-+			goto out;
-+
-+		green = red + 256;
-+		blue = green + 256;
-+		cmap.red = red;
-+		cmap.green = green;
-+		cmap.blue = blue;
-+
-+		for (i = 0; i < cmap.len; i++) {
-+			red[i] = green[i] = blue[i] = (0xffff * i)/(cmap.len-1);
-+		}
-+
-+		h = fb_set_cmap(&cmap, info);
-+		fbcon_decor_fix_pseudo_pal(info, vc_cons[fg_console].d);
-+		kfree(red);
-+
-+		return h;
-+
-+	} else if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
-+		   info->var.bits_per_pixel == 8 && info->bgdecor.cmap.red != NULL)
-+		fb_set_cmap(&info->bgdecor.cmap, info);
-+
-+out:	return fb_set_cmap(&palette_cmap, info);
- }
- 
- static u16 *fbcon_screen_pos(struct vc_data *vc, int offset)
- {
- 	unsigned long p;
- 	int line;
--	
-+
- 	if (vc->vc_num != fg_console || !softback_lines)
- 		return (u16 *) (vc->vc_origin + offset);
- 	line = offset / vc->vc_size_row;
-@@ -2909,7 +3027,14 @@ static void fbcon_modechanged(struct fb_info *info)
- 		rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
- 		cols /= vc->vc_font.width;
- 		rows /= vc->vc_font.height;
--		vc_resize(vc, cols, rows);
-+
-+		if (!fbcon_decor_active_nores(info, vc)) {
-+			vc_resize(vc, cols, rows);
-+		} else {
-+			fbcon_decor_disable(vc, 0);
-+			fbcon_decor_call_helper("modechange", vc->vc_num);
-+		}
-+
- 		updatescrollmode(p, info, vc);
- 		scrollback_max = 0;
- 		scrollback_current = 0;
-@@ -2954,7 +3079,9 @@ static void fbcon_set_all_vcs(struct fb_info *info)
- 		rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
- 		cols /= vc->vc_font.width;
- 		rows /= vc->vc_font.height;
--		vc_resize(vc, cols, rows);
-+		if (!fbcon_decor_active_nores(info, vc)) {
-+			vc_resize(vc, cols, rows);
-+		}
- 	}
- 
- 	if (fg != -1)
-@@ -3596,6 +3723,7 @@ static void fbcon_exit(void)
- 		}
- 	}
- 
-+	fbcon_decor_exit();
- 	fbcon_has_exited = 1;
- }
- 
-diff --git a/drivers/video/console/fbcondecor.c b/drivers/video/console/fbcondecor.c
-new file mode 100644
-index 0000000..babc8c5
---- /dev/null
-+++ b/drivers/video/console/fbcondecor.c
-@@ -0,0 +1,555 @@
-+/*
-+ *  linux/drivers/video/console/fbcondecor.c -- Framebuffer console decorations
-+ *
-+ *  Copyright (C) 2004-2009 Michal Januszewski <michalj+fbcondecor@gmail.com>
-+ *
-+ *  Code based upon "Bootsplash" (C) 2001-2003
-+ *       Volker Poplawski <volker@poplawski.de>,
-+ *       Stefan Reinauer <stepan@suse.de>,
-+ *       Steffen Winterfeldt <snwint@suse.de>,
-+ *       Michael Schroeder <mls@suse.de>,
-+ *       Ken Wimer <wimer@suse.de>.
-+ *
-+ *  Compat ioctl support by Thorsten Klein <TK@Thorsten-Klein.de>.
-+ *
-+ *  This file is subject to the terms and conditions of the GNU General Public
-+ *  License.  See the file COPYING in the main directory of this archive for
-+ *  more details.
-+ *
-+ */
-+#include <linux/module.h>
-+#include <linux/kernel.h>
-+#include <linux/string.h>
-+#include <linux/types.h>
-+#include <linux/fb.h>
-+#include <linux/vt_kern.h>
-+#include <linux/vmalloc.h>
-+#include <linux/unistd.h>
-+#include <linux/syscalls.h>
-+#include <linux/init.h>
-+#include <linux/proc_fs.h>
-+#include <linux/workqueue.h>
-+#include <linux/kmod.h>
-+#include <linux/miscdevice.h>
-+#include <linux/device.h>
-+#include <linux/fs.h>
-+#include <linux/compat.h>
-+#include <linux/console.h>
-+
-+#include <asm/uaccess.h>
-+#include <asm/irq.h>
-+
-+#include "fbcon.h"
-+#include "fbcondecor.h"
-+
-+extern signed char con2fb_map[];
-+static int fbcon_decor_enable(struct vc_data *vc);
-+char fbcon_decor_path[KMOD_PATH_LEN] = "/sbin/fbcondecor_helper";
-+static int initialized = 0;
-+
-+int fbcon_decor_call_helper(char* cmd, unsigned short vc)
-+{
-+	char *envp[] = {
-+		"HOME=/",
-+		"PATH=/sbin:/bin",
-+		NULL
-+	};
-+
-+	char tfb[5];
-+	char tcons[5];
-+	unsigned char fb = (int) con2fb_map[vc];
-+
-+	char *argv[] = {
-+		fbcon_decor_path,
-+		"2",
-+		cmd,
-+		tcons,
-+		tfb,
-+		vc_cons[vc].d->vc_decor.theme,
-+		NULL
-+	};
-+
-+	snprintf(tfb,5,"%d",fb);
-+	snprintf(tcons,5,"%d",vc);
-+
-+	return call_usermodehelper(fbcon_decor_path, argv, envp, UMH_WAIT_EXEC);
-+}
-+
-+/* Disables fbcondecor on a virtual console; called with console sem held. */
-+int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw)
-+{
-+	struct fb_info* info;
-+
-+	if (!vc->vc_decor.state)
-+		return -EINVAL;
-+
-+	info = registered_fb[(int) con2fb_map[vc->vc_num]];
-+
-+	if (info == NULL)
-+		return -EINVAL;
-+
-+	vc->vc_decor.state = 0;
-+	vc_resize(vc, info->var.xres / vc->vc_font.width,
-+		  info->var.yres / vc->vc_font.height);
-+
-+	if (fg_console == vc->vc_num && redraw) {
-+		redraw_screen(vc, 0);
-+		update_region(vc, vc->vc_origin +
-+			      vc->vc_size_row * vc->vc_top,
-+			      vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
-+	}
-+
-+	printk(KERN_INFO "fbcondecor: switched decor state to 'off' on console %d\n",
-+			 vc->vc_num);
-+
-+	return 0;
-+}
-+
-+/* Enables fbcondecor on a virtual console; called with console sem held. */
-+static int fbcon_decor_enable(struct vc_data *vc)
-+{
-+	struct fb_info* info;
-+
-+	info = registered_fb[(int) con2fb_map[vc->vc_num]];
-+
-+	if (vc->vc_decor.twidth == 0 || vc->vc_decor.theight == 0 ||
-+	    info == NULL || vc->vc_decor.state || (!info->bgdecor.data &&
-+	    vc->vc_num == fg_console))
-+		return -EINVAL;
-+
-+	vc->vc_decor.state = 1;
-+	vc_resize(vc, vc->vc_decor.twidth / vc->vc_font.width,
-+		  vc->vc_decor.theight / vc->vc_font.height);
-+
-+	if (fg_console == vc->vc_num) {
-+		redraw_screen(vc, 0);
-+		update_region(vc, vc->vc_origin +
-+			      vc->vc_size_row * vc->vc_top,
-+			      vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
-+		fbcon_decor_clear_margins(vc, info, 0);
-+	}
-+
-+	printk(KERN_INFO "fbcondecor: switched decor state to 'on' on console %d\n",
-+			 vc->vc_num);
-+
-+	return 0;
-+}
-+
-+static inline int fbcon_decor_ioctl_dosetstate(struct vc_data *vc, unsigned int state, unsigned char origin)
-+{
-+	int ret;
-+
-+//	if (origin == FBCON_DECOR_IO_ORIG_USER)
-+		console_lock();
-+	if (!state)
-+		ret = fbcon_decor_disable(vc, 1);
-+	else
-+		ret = fbcon_decor_enable(vc);
-+//	if (origin == FBCON_DECOR_IO_ORIG_USER)
-+		console_unlock();
-+
-+	return ret;
-+}
-+
-+static inline void fbcon_decor_ioctl_dogetstate(struct vc_data *vc, unsigned int *state)
-+{
-+	*state = vc->vc_decor.state;
-+}
-+
-+static int fbcon_decor_ioctl_dosetcfg(struct vc_data *vc, struct vc_decor *cfg, unsigned char origin)
-+{
-+	struct fb_info *info;
-+	int len;
-+	char *tmp;
-+
-+	info = registered_fb[(int) con2fb_map[vc->vc_num]];
-+
-+	if (info == NULL || !cfg->twidth || !cfg->theight ||
-+	    cfg->tx + cfg->twidth  > info->var.xres ||
-+	    cfg->ty + cfg->theight > info->var.yres)
-+		return -EINVAL;
-+
-+	len = strlen_user(cfg->theme);
-+	if (!len || len > FBCON_DECOR_THEME_LEN)
-+		return -EINVAL;
-+	tmp = kmalloc(len, GFP_KERNEL);
-+	if (!tmp)
-+		return -ENOMEM;
-+	if (copy_from_user(tmp, (void __user *)cfg->theme, len))
-+		return -EFAULT;
-+	cfg->theme = tmp;
-+	cfg->state = 0;
-+
-+	/* If this ioctl is a response to a request from kernel, the console sem
-+	 * is already held; we also don't need to disable decor because either the
-+	 * new config and background picture will be successfully loaded, and the
-+	 * decor will stay on, or in case of a failure it'll be turned off in fbcon. */
-+//	if (origin == FBCON_DECOR_IO_ORIG_USER) {
-+		console_lock();
-+		if (vc->vc_decor.state)
-+			fbcon_decor_disable(vc, 1);
-+//	}
-+
-+	if (vc->vc_decor.theme)
-+		kfree(vc->vc_decor.theme);
-+
-+	vc->vc_decor = *cfg;
-+
-+//	if (origin == FBCON_DECOR_IO_ORIG_USER)
-+		console_unlock();
-+
-+	printk(KERN_INFO "fbcondecor: console %d using theme '%s'\n",
-+			 vc->vc_num, vc->vc_decor.theme);
-+	return 0;
-+}
-+
-+static int fbcon_decor_ioctl_dogetcfg(struct vc_data *vc, struct vc_decor *decor)
-+{
-+	char __user *tmp;
-+
-+	tmp = decor->theme;
-+	*decor = vc->vc_decor;
-+	decor->theme = tmp;
-+
-+	if (vc->vc_decor.theme) {
-+		if (copy_to_user(tmp, vc->vc_decor.theme, strlen(vc->vc_decor.theme) + 1))
-+			return -EFAULT;
-+	} else
-+		if (put_user(0, tmp))
-+			return -EFAULT;
-+
-+	return 0;
-+}
-+
-+static int fbcon_decor_ioctl_dosetpic(struct vc_data *vc, struct fb_image *img, unsigned char origin)
-+{
-+	struct fb_info *info;
-+	int len;
-+	u8 *tmp;
-+
-+	if (vc->vc_num != fg_console)
-+		return -EINVAL;
-+
-+	info = registered_fb[(int) con2fb_map[vc->vc_num]];
-+
-+	if (info == NULL)
-+		return -EINVAL;
-+
-+	if (img->width != info->var.xres || img->height != info->var.yres) {
-+		printk(KERN_ERR "fbcondecor: picture dimensions mismatch\n");
-+		printk(KERN_ERR "%dx%d vs %dx%d\n", img->width, img->height, info->var.xres, info->var.yres);
-+		return -EINVAL;
-+	}
-+
-+	if (img->depth != info->var.bits_per_pixel) {
-+		printk(KERN_ERR "fbcondecor: picture depth mismatch\n");
-+		return -EINVAL;
-+	}
-+
-+	if (img->depth == 8) {
-+		if (!img->cmap.len || !img->cmap.red || !img->cmap.green ||
-+		    !img->cmap.blue)
-+			return -EINVAL;
-+
-+		tmp = vmalloc(img->cmap.len * 3 * 2);
-+		if (!tmp)
-+			return -ENOMEM;
-+
-+		if (copy_from_user(tmp,
-+			    	   (void __user*)img->cmap.red, (img->cmap.len << 1)) ||
-+		    copy_from_user(tmp + (img->cmap.len << 1),
-+			    	   (void __user*)img->cmap.green, (img->cmap.len << 1)) ||
-+		    copy_from_user(tmp + (img->cmap.len << 2),
-+			    	   (void __user*)img->cmap.blue, (img->cmap.len << 1))) {
-+			vfree(tmp);
-+			return -EFAULT;
-+		}
-+
-+		img->cmap.transp = NULL;
-+		img->cmap.red = (u16*)tmp;
-+		img->cmap.green = img->cmap.red + img->cmap.len;
-+		img->cmap.blue = img->cmap.green + img->cmap.len;
-+	} else {
-+		img->cmap.red = NULL;
-+	}
-+
-+	len = ((img->depth + 7) >> 3) * img->width * img->height;
-+
-+	/*
-+	 * Allocate an additional byte so that we never go outside of the
-+	 * buffer boundaries in the rendering functions in a 24 bpp mode.
-+	 */
-+	tmp = vmalloc(len + 1);
-+
-+	if (!tmp)
-+		goto out;
-+
-+	if (copy_from_user(tmp, (void __user*)img->data, len))
-+		goto out;
-+
-+	img->data = tmp;
-+
-+	/* If this ioctl is a response to a request from kernel, the console sem
-+	 * is already held. */
-+//	if (origin == FBCON_DECOR_IO_ORIG_USER)
-+		console_lock();
-+
-+	if (info->bgdecor.data)
-+		vfree((u8*)info->bgdecor.data);
-+	if (info->bgdecor.cmap.red)
-+		vfree(info->bgdecor.cmap.red);
-+
-+	info->bgdecor = *img;
-+
-+	if (fbcon_decor_active_vc(vc) && fg_console == vc->vc_num) {
-+		redraw_screen(vc, 0);
-+		update_region(vc, vc->vc_origin +
-+			      vc->vc_size_row * vc->vc_top,
-+			      vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
-+		fbcon_decor_clear_margins(vc, info, 0);
-+	}
-+
-+//	if (origin == FBCON_DECOR_IO_ORIG_USER)
-+		console_unlock();
-+
-+	return 0;
-+
-+out:	if (img->cmap.red)
-+		vfree(img->cmap.red);
-+
-+	if (tmp)
-+		vfree(tmp);
-+	return -ENOMEM;
-+}
-+
-+static long fbcon_decor_ioctl(struct file *filp, u_int cmd, u_long arg)
-+{
-+	struct fbcon_decor_iowrapper __user *wrapper = (void __user*) arg;
-+	struct vc_data *vc = NULL;
-+	unsigned short vc_num = 0;
-+	unsigned char origin = 0;
-+	void __user *data = NULL;
-+
-+	if (!access_ok(VERIFY_READ, wrapper,
-+			sizeof(struct fbcon_decor_iowrapper)))
-+		return -EFAULT;
-+
-+	__get_user(vc_num, &wrapper->vc);
-+	__get_user(origin, &wrapper->origin);
-+	__get_user(data, &wrapper->data);
-+
-+	if (!vc_cons_allocated(vc_num))
-+		return -EINVAL;
-+
-+	vc = vc_cons[vc_num].d;
-+
-+	switch (cmd) {
-+	case FBIOCONDECOR_SETPIC:
-+	{
-+		struct fb_image img;
-+		if (copy_from_user(&img, (struct fb_image __user *)data, sizeof(struct fb_image)))
-+			return -EFAULT;
-+
-+		return fbcon_decor_ioctl_dosetpic(vc, &img, origin);
-+	}
-+	case FBIOCONDECOR_SETCFG:
-+	{
-+		struct vc_decor cfg;
-+		if (copy_from_user(&cfg, (struct vc_decor __user *)data, sizeof(struct vc_decor)))
-+			return -EFAULT;
-+
-+		return fbcon_decor_ioctl_dosetcfg(vc, &cfg, origin);
-+	}
-+	case FBIOCONDECOR_GETCFG:
-+	{
-+		int rval;
-+		struct vc_decor cfg;
-+
-+		if (copy_from_user(&cfg, (struct vc_decor __user *)data, sizeof(struct vc_decor)))
-+			return -EFAULT;
-+
-+		rval = fbcon_decor_ioctl_dogetcfg(vc, &cfg);
-+
-+		if (copy_to_user(data, &cfg, sizeof(struct vc_decor)))
-+			return -EFAULT;
-+		return rval;
-+	}
-+	case FBIOCONDECOR_SETSTATE:
-+	{
-+		unsigned int state = 0;
-+		if (get_user(state, (unsigned int __user *)data))
-+			return -EFAULT;
-+		return fbcon_decor_ioctl_dosetstate(vc, state, origin);
-+	}
-+	case FBIOCONDECOR_GETSTATE:
-+	{
-+		unsigned int state = 0;
-+		fbcon_decor_ioctl_dogetstate(vc, &state);
-+		return put_user(state, (unsigned int __user *)data);
-+	}
-+
-+	default:
-+		return -ENOIOCTLCMD;
-+	}
-+}
-+
-+#ifdef CONFIG_COMPAT
-+
-+static long fbcon_decor_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) {
-+
-+	struct fbcon_decor_iowrapper32 __user *wrapper = (void __user *)arg;
-+	struct vc_data *vc = NULL;
-+	unsigned short vc_num = 0;
-+	unsigned char origin = 0;
-+	compat_uptr_t data_compat = 0;
-+	void __user *data = NULL;
-+
-+	if (!access_ok(VERIFY_READ, wrapper,
-+                       sizeof(struct fbcon_decor_iowrapper32)))
-+		return -EFAULT;
-+
-+	__get_user(vc_num, &wrapper->vc);
-+	__get_user(origin, &wrapper->origin);
-+	__get_user(data_compat, &wrapper->data);
-+	data = compat_ptr(data_compat);
-+
-+	if (!vc_cons_allocated(vc_num))
-+		return -EINVAL;
-+
-+	vc = vc_cons[vc_num].d;
-+
-+	switch (cmd) {
-+	case FBIOCONDECOR_SETPIC32:
-+	{
-+		struct fb_image32 img_compat;
-+		struct fb_image img;
-+
-+		if (copy_from_user(&img_compat, (struct fb_image32 __user *)data, sizeof(struct fb_image32)))
-+			return -EFAULT;
-+
-+		fb_image_from_compat(img, img_compat);
-+
-+		return fbcon_decor_ioctl_dosetpic(vc, &img, origin);
-+	}
-+
-+	case FBIOCONDECOR_SETCFG32:
-+	{
-+		struct vc_decor32 cfg_compat;
-+		struct vc_decor cfg;
-+
-+		if (copy_from_user(&cfg_compat, (struct vc_decor32 __user *)data, sizeof(struct vc_decor32)))
-+			return -EFAULT;
-+
-+		vc_decor_from_compat(cfg, cfg_compat);
-+
-+		return fbcon_decor_ioctl_dosetcfg(vc, &cfg, origin);
-+	}
-+
-+	case FBIOCONDECOR_GETCFG32:
-+	{
-+		int rval;
-+		struct vc_decor32 cfg_compat;
-+		struct vc_decor cfg;
-+
-+		if (copy_from_user(&cfg_compat, (struct vc_decor32 __user *)data, sizeof(struct vc_decor32)))
-+			return -EFAULT;
-+		cfg.theme = compat_ptr(cfg_compat.theme);
-+
-+		rval = fbcon_decor_ioctl_dogetcfg(vc, &cfg);
-+
-+		vc_decor_to_compat(cfg_compat, cfg);
-+
-+		if (copy_to_user((struct vc_decor32 __user *)data, &cfg_compat, sizeof(struct vc_decor32)))
-+			return -EFAULT;
-+		return rval;
-+	}
-+
-+	case FBIOCONDECOR_SETSTATE32:
-+	{
-+		compat_uint_t state_compat = 0;
-+		unsigned int state = 0;
-+
-+		if (get_user(state_compat, (compat_uint_t __user *)data))
-+			return -EFAULT;
-+
-+		state = (unsigned int)state_compat;
-+
-+		return fbcon_decor_ioctl_dosetstate(vc, state, origin);
-+	}
-+
-+	case FBIOCONDECOR_GETSTATE32:
-+	{
-+		compat_uint_t state_compat = 0;
-+		unsigned int state = 0;
-+
-+		fbcon_decor_ioctl_dogetstate(vc, &state);
-+		state_compat = (compat_uint_t)state;
-+
-+		return put_user(state_compat, (compat_uint_t __user *)data);
-+	}
-+
-+	default:
-+		return -ENOIOCTLCMD;
-+	}
-+}
-+#else
-+  #define fbcon_decor_compat_ioctl NULL
-+#endif
-+
-+static struct file_operations fbcon_decor_ops = {
-+	.owner = THIS_MODULE,
-+	.unlocked_ioctl = fbcon_decor_ioctl,
-+	.compat_ioctl = fbcon_decor_compat_ioctl
-+};
-+
-+static struct miscdevice fbcon_decor_dev = {
-+	.minor = MISC_DYNAMIC_MINOR,
-+	.name = "fbcondecor",
-+	.fops = &fbcon_decor_ops
-+};
-+
-+void fbcon_decor_reset(void)
-+{
-+	int i;
-+
-+	for (i = 0; i < num_registered_fb; i++) {
-+		registered_fb[i]->bgdecor.data = NULL;
-+		registered_fb[i]->bgdecor.cmap.red = NULL;
-+	}
-+
-+	for (i = 0; i < MAX_NR_CONSOLES && vc_cons[i].d; i++) {
-+		vc_cons[i].d->vc_decor.state = vc_cons[i].d->vc_decor.twidth =
-+						vc_cons[i].d->vc_decor.theight = 0;
-+		vc_cons[i].d->vc_decor.theme = NULL;
-+	}
-+
-+	return;
-+}
-+
-+int fbcon_decor_init(void)
-+{
-+	int i;
-+
-+	fbcon_decor_reset();
-+
-+	if (initialized)
-+		return 0;
-+
-+	i = misc_register(&fbcon_decor_dev);
-+	if (i) {
-+		printk(KERN_ERR "fbcondecor: failed to register device\n");
-+		return i;
-+	}
-+
-+	fbcon_decor_call_helper("init", 0);
-+	initialized = 1;
-+	return 0;
-+}
-+
-+int fbcon_decor_exit(void)
-+{
-+	fbcon_decor_reset();
-+	return 0;
-+}
-+
-+EXPORT_SYMBOL(fbcon_decor_path);
-diff --git a/drivers/video/console/fbcondecor.h b/drivers/video/console/fbcondecor.h
-new file mode 100644
-index 0000000..3b3724b
---- /dev/null
-+++ b/drivers/video/console/fbcondecor.h
-@@ -0,0 +1,78 @@
-+/* 
-+ *  linux/drivers/video/console/fbcondecor.h -- Framebuffer Console Decoration headers
-+ *
-+ *  Copyright (C) 2004 Michal Januszewski <michalj+fbcondecor@gmail.com>
-+ *
-+ */
-+
-+#ifndef __FBCON_DECOR_H
-+#define __FBCON_DECOR_H
-+
-+#ifndef _LINUX_FB_H
-+#include <linux/fb.h>
-+#endif
-+
-+/* This is needed for vc_cons in fbcmap.c */
-+#include <linux/vt_kern.h>
-+
-+struct fb_cursor;
-+struct fb_info;
-+struct vc_data;
-+
-+#ifdef CONFIG_FB_CON_DECOR
-+/* fbcondecor.c */
-+int fbcon_decor_init(void);
-+int fbcon_decor_exit(void);
-+int fbcon_decor_call_helper(char* cmd, unsigned short cons);
-+int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw);
-+
-+/* cfbcondecor.c */
-+void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info, const unsigned short *s, int count, int yy, int xx);
-+void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor);
-+void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width);
-+void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info, int bottom_only);
-+void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank);
-+void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, int sx, int dx, int width);
-+void fbcon_decor_copy(u8 *dst, u8 *src, int height, int width, int linebytes, int srclinesbytes, int bpp);
-+void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc);
-+
-+/* vt.c */
-+void acquire_console_sem(void);
-+void release_console_sem(void);
-+void do_unblank_screen(int entering_gfx);
-+
-+/* struct vc_data *y */
-+#define fbcon_decor_active_vc(y) (y->vc_decor.state && y->vc_decor.theme) 
-+
-+/* struct fb_info *x, struct vc_data *y */
-+#define fbcon_decor_active_nores(x,y) (x->bgdecor.data && fbcon_decor_active_vc(y))
-+
-+/* struct fb_info *x, struct vc_data *y */
-+#define fbcon_decor_active(x,y) (fbcon_decor_active_nores(x,y) &&		\
-+			      x->bgdecor.width == x->var.xres && 	\
-+			      x->bgdecor.height == x->var.yres &&	\
-+			      x->bgdecor.depth == x->var.bits_per_pixel)
-+
-+
-+#else /* CONFIG_FB_CON_DECOR */
-+
-+static inline void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info, const unsigned short *s, int count, int yy, int xx) {}
-+static inline void fbcon_decor_putc(struct vc_data *vc, struct fb_info *info, int c, int ypos, int xpos) {}
-+static inline void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor) {}
-+static inline void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) {}
-+static inline void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info, int bottom_only) {}
-+static inline void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank) {}
-+static inline void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, int sx, int dx, int width) {}
-+static inline void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc) {}
-+static inline int fbcon_decor_call_helper(char* cmd, unsigned short cons) { return 0; }
-+static inline int fbcon_decor_init(void) { return 0; }
-+static inline int fbcon_decor_exit(void) { return 0; }
-+static inline int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw) { return 0; }
-+
-+#define fbcon_decor_active_vc(y) (0)
-+#define fbcon_decor_active_nores(x,y) (0)
-+#define fbcon_decor_active(x,y) (0)
-+
-+#endif /* CONFIG_FB_CON_DECOR */
-+
-+#endif /* __FBCON_DECOR_H */
-diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
-index e1f4727..2952e33 100644
---- a/drivers/video/fbdev/Kconfig
-+++ b/drivers/video/fbdev/Kconfig
-@@ -1204,7 +1204,6 @@ config FB_MATROX
- 	select FB_CFB_FILLRECT
- 	select FB_CFB_COPYAREA
- 	select FB_CFB_IMAGEBLIT
--	select FB_TILEBLITTING
- 	select FB_MACMODES if PPC_PMAC
- 	---help---
- 	  Say Y here if you have a Matrox Millennium, Matrox Millennium II,
-diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
-index f89245b..05e036c 100644
---- a/drivers/video/fbdev/core/fbcmap.c
-+++ b/drivers/video/fbdev/core/fbcmap.c
-@@ -17,6 +17,8 @@
- #include <linux/slab.h>
- #include <linux/uaccess.h>
- 
-+#include "../../console/fbcondecor.h"
-+
- static u16 red2[] __read_mostly = {
-     0x0000, 0xaaaa
- };
-@@ -249,14 +251,17 @@ int fb_set_cmap(struct fb_cmap *cmap, struct fb_info *info)
- 			if (transp)
- 				htransp = *transp++;
- 			if (info->fbops->fb_setcolreg(start++,
--						      hred, hgreen, hblue,
-+						      hred, hgreen, hblue, 
- 						      htransp, info))
- 				break;
- 		}
- 	}
--	if (rc == 0)
-+	if (rc == 0) {
- 		fb_copy_cmap(cmap, &info->cmap);
--
-+		if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
-+		    info->fix.visual == FB_VISUAL_DIRECTCOLOR)
-+			fbcon_decor_fix_pseudo_pal(info, vc_cons[fg_console].d);
-+	}
- 	return rc;
- }
- 
-diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
-index b6d5008..d6703f2 100644
---- a/drivers/video/fbdev/core/fbmem.c
-+++ b/drivers/video/fbdev/core/fbmem.c
-@@ -1250,15 +1250,6 @@ struct fb_fix_screeninfo32 {
- 	u16			reserved[3];
- };
- 
--struct fb_cmap32 {
--	u32			start;
--	u32			len;
--	compat_caddr_t	red;
--	compat_caddr_t	green;
--	compat_caddr_t	blue;
--	compat_caddr_t	transp;
--};
--
- static int fb_getput_cmap(struct fb_info *info, unsigned int cmd,
- 			  unsigned long arg)
- {
-diff --git a/include/linux/console_decor.h b/include/linux/console_decor.h
-new file mode 100644
-index 0000000..04b8d80
---- /dev/null
-+++ b/include/linux/console_decor.h
-@@ -0,0 +1,46 @@
-+#ifndef _LINUX_CONSOLE_DECOR_H_
-+#define _LINUX_CONSOLE_DECOR_H_ 1
-+
-+/* A structure used by the framebuffer console decorations (drivers/video/console/fbcondecor.c) */
-+struct vc_decor {
-+	__u8 bg_color;				/* The color that is to be treated as transparent */
-+	__u8 state;				/* Current decor state: 0 = off, 1 = on */
-+	__u16 tx, ty;				/* Top left corner coordinates of the text field */
-+	__u16 twidth, theight;			/* Width and height of the text field */
-+	char* theme;
-+};
-+
-+#ifdef __KERNEL__
-+#ifdef CONFIG_COMPAT
-+#include <linux/compat.h>
-+
-+struct vc_decor32 {
-+	__u8 bg_color;				/* The color that is to be treated as transparent */
-+	__u8 state;				/* Current decor state: 0 = off, 1 = on */
-+	__u16 tx, ty;				/* Top left corner coordinates of the text field */
-+	__u16 twidth, theight;			/* Width and height of the text field */
-+	compat_uptr_t theme;
-+};
-+
-+#define vc_decor_from_compat(to, from) \
-+	(to).bg_color = (from).bg_color; \
-+	(to).state    = (from).state; \
-+	(to).tx       = (from).tx; \
-+	(to).ty       = (from).ty; \
-+	(to).twidth   = (from).twidth; \
-+	(to).theight  = (from).theight; \
-+	(to).theme    = compat_ptr((from).theme)
-+
-+#define vc_decor_to_compat(to, from) \
-+	(to).bg_color = (from).bg_color; \
-+	(to).state    = (from).state; \
-+	(to).tx       = (from).tx; \
-+	(to).ty       = (from).ty; \
-+	(to).twidth   = (from).twidth; \
-+	(to).theight  = (from).theight; \
-+	(to).theme    = ptr_to_compat((from).theme)
-+
-+#endif /* CONFIG_COMPAT */
-+#endif /* __KERNEL__ */
-+
-+#endif
-diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
-index 7f0c329..98f5d60 100644
---- a/include/linux/console_struct.h
-+++ b/include/linux/console_struct.h
-@@ -19,6 +19,7 @@
- struct vt_struct;
- 
- #define NPAR 16
-+#include <linux/console_decor.h>
- 
- struct vc_data {
- 	struct tty_port port;			/* Upper level data */
-@@ -107,6 +108,8 @@ struct vc_data {
- 	unsigned long	vc_uni_pagedir;
- 	unsigned long	*vc_uni_pagedir_loc;  /* [!] Location of uni_pagedir variable for this console */
- 	bool vc_panic_force_write; /* when oops/panic this VC can accept forced output/blanking */
-+
-+	struct vc_decor vc_decor;
- 	/* additional information is in vt_kern.h */
- };
- 
-diff --git a/include/linux/fb.h b/include/linux/fb.h
-index fe6ac95..1e36b03 100644
---- a/include/linux/fb.h
-+++ b/include/linux/fb.h
-@@ -219,6 +219,34 @@ struct fb_deferred_io {
- };
- #endif
- 
-+#ifdef __KERNEL__
-+#ifdef CONFIG_COMPAT
-+struct fb_image32 {
-+	__u32 dx;			/* Where to place image */
-+	__u32 dy;
-+	__u32 width;			/* Size of image */
-+	__u32 height;
-+	__u32 fg_color;			/* Only used when a mono bitmap */
-+	__u32 bg_color;
-+	__u8  depth;			/* Depth of the image */
-+	const compat_uptr_t data;	/* Pointer to image data */
-+	struct fb_cmap32 cmap;		/* color map info */
-+};
-+
-+#define fb_image_from_compat(to, from) \
-+	(to).dx       = (from).dx; \
-+	(to).dy       = (from).dy; \
-+	(to).width    = (from).width; \
-+	(to).height   = (from).height; \
-+	(to).fg_color = (from).fg_color; \
-+	(to).bg_color = (from).bg_color; \
-+	(to).depth    = (from).depth; \
-+	(to).data     = compat_ptr((from).data); \
-+	fb_cmap_from_compat((to).cmap, (from).cmap)
-+
-+#endif /* CONFIG_COMPAT */
-+#endif /* __KERNEL__ */
-+
- /*
-  * Frame buffer operations
-  *
-@@ -489,6 +517,9 @@ struct fb_info {
- #define FBINFO_STATE_SUSPENDED	1
- 	u32 state;			/* Hardware state i.e suspend */
- 	void *fbcon_par;                /* fbcon use-only private area */
-+
-+	struct fb_image bgdecor;
-+
- 	/* From here on everything is device dependent */
- 	void *par;
- 	/* we need the PCI or similar aperture base/size not
-diff --git a/include/uapi/linux/fb.h b/include/uapi/linux/fb.h
-index fb795c3..dc77a03 100644
---- a/include/uapi/linux/fb.h
-+++ b/include/uapi/linux/fb.h
-@@ -8,6 +8,25 @@
- 
- #define FB_MAX			32	/* sufficient for now */
- 
-+struct fbcon_decor_iowrapper
-+{
-+	unsigned short vc;		/* Virtual console */
-+	unsigned char origin;		/* Point of origin of the request */
-+	void *data;
-+};
-+
-+#ifdef __KERNEL__
-+#ifdef CONFIG_COMPAT
-+#include <linux/compat.h>
-+struct fbcon_decor_iowrapper32
-+{
-+	unsigned short vc;		/* Virtual console */
-+	unsigned char origin;		/* Point of origin of the request */
-+	compat_uptr_t data;
-+};
-+#endif /* CONFIG_COMPAT */
-+#endif /* __KERNEL__ */
-+
- /* ioctls
-    0x46 is 'F'								*/
- #define FBIOGET_VSCREENINFO	0x4600
-@@ -35,6 +54,25 @@
- #define FBIOGET_DISPINFO        0x4618
- #define FBIO_WAITFORVSYNC	_IOW('F', 0x20, __u32)
- 
-+#define FBIOCONDECOR_SETCFG	_IOWR('F', 0x19, struct fbcon_decor_iowrapper)
-+#define FBIOCONDECOR_GETCFG	_IOR('F', 0x1A, struct fbcon_decor_iowrapper)
-+#define FBIOCONDECOR_SETSTATE	_IOWR('F', 0x1B, struct fbcon_decor_iowrapper)
-+#define FBIOCONDECOR_GETSTATE	_IOR('F', 0x1C, struct fbcon_decor_iowrapper)
-+#define FBIOCONDECOR_SETPIC 	_IOWR('F', 0x1D, struct fbcon_decor_iowrapper)
-+#ifdef __KERNEL__
-+#ifdef CONFIG_COMPAT
-+#define FBIOCONDECOR_SETCFG32	_IOWR('F', 0x19, struct fbcon_decor_iowrapper32)
-+#define FBIOCONDECOR_GETCFG32	_IOR('F', 0x1A, struct fbcon_decor_iowrapper32)
-+#define FBIOCONDECOR_SETSTATE32	_IOWR('F', 0x1B, struct fbcon_decor_iowrapper32)
-+#define FBIOCONDECOR_GETSTATE32	_IOR('F', 0x1C, struct fbcon_decor_iowrapper32)
-+#define FBIOCONDECOR_SETPIC32	_IOWR('F', 0x1D, struct fbcon_decor_iowrapper32)
-+#endif /* CONFIG_COMPAT */
-+#endif /* __KERNEL__ */
-+
-+#define FBCON_DECOR_THEME_LEN		128	/* Maximum length of a theme name */
-+#define FBCON_DECOR_IO_ORIG_KERNEL	0	/* Kernel ioctl origin */
-+#define FBCON_DECOR_IO_ORIG_USER	1	/* User ioctl origin */
-+ 
- #define FB_TYPE_PACKED_PIXELS		0	/* Packed Pixels	*/
- #define FB_TYPE_PLANES			1	/* Non interleaved planes */
- #define FB_TYPE_INTERLEAVED_PLANES	2	/* Interleaved planes	*/
-@@ -277,6 +315,29 @@ struct fb_var_screeninfo {
- 	__u32 reserved[4];		/* Reserved for future compatibility */
- };
- 
-+#ifdef __KERNEL__
-+#ifdef CONFIG_COMPAT
-+struct fb_cmap32 {
-+	__u32 start;
-+	__u32 len;			/* Number of entries */
-+	compat_uptr_t red;		/* Red values	*/
-+	compat_uptr_t green;
-+	compat_uptr_t blue;
-+	compat_uptr_t transp;		/* transparency, can be NULL */
-+};
-+
-+#define fb_cmap_from_compat(to, from) \
-+	(to).start  = (from).start; \
-+	(to).len    = (from).len; \
-+	(to).red    = compat_ptr((from).red); \
-+	(to).green  = compat_ptr((from).green); \
-+	(to).blue   = compat_ptr((from).blue); \
-+	(to).transp = compat_ptr((from).transp)
-+
-+#endif /* CONFIG_COMPAT */
-+#endif /* __KERNEL__ */
-+
-+
- struct fb_cmap {
- 	__u32 start;			/* First entry	*/
- 	__u32 len;			/* Number of entries */
-diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index 74f5b58..6386ab0 100644
---- a/kernel/sysctl.c
-+++ b/kernel/sysctl.c
-@@ -146,6 +146,10 @@ static const int cap_last_cap = CAP_LAST_CAP;
- static unsigned long hung_task_timeout_max = (LONG_MAX/HZ);
- #endif
- 
-+#ifdef CONFIG_FB_CON_DECOR
-+extern char fbcon_decor_path[];
-+#endif
-+
- #ifdef CONFIG_INOTIFY_USER
- #include <linux/inotify.h>
- #endif
-@@ -255,6 +259,15 @@ static struct ctl_table sysctl_base_table[] = {
- 		.mode		= 0555,
- 		.child		= dev_table,
- 	},
-+#ifdef CONFIG_FB_CON_DECOR
-+	{
-+		.procname	= "fbcondecor",
-+		.data		= &fbcon_decor_path,
-+		.maxlen		= KMOD_PATH_LEN,
-+		.mode		= 0644,
-+		.proc_handler	= &proc_dostring,
-+	},
-+#endif
- 	{ }
- };
- 



* [gentoo-commits] proj/linux-patches:3.19 commit in: /
@ 2015-02-09 11:54 Mike Pagano
  0 siblings, 0 replies; 19+ messages in thread
From: Mike Pagano @ 2015-02-09 11:54 UTC (permalink / raw
  To: gentoo-commits

commit:     7016604fd038de37963c5bcd01d41004a1375915
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Feb  9 11:54:26 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Feb  9 11:54:26 2015 +0000
URL:        http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=7016604f

Remove reference to fbcondecor

---
 0000_README | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/0000_README b/0000_README
index 7a25c41..3e1d0ab 100644
--- a/0000_README
+++ b/0000_README
@@ -59,10 +59,6 @@ Patch:  2905_s2disk-resume-image-fix.patch
 From:   Al Viro <viro <at> ZenIV.linux.org.uk>
 Desc:   Do not lock when UMH is waiting on current thread spawned by linuxrc. (bug #481344)
 
-Patch:  4200_fbcondecor-3.16.patch
-From:   http://www.mepiscommunity.org/fbcondecor
-Desc:   Bootsplash ported by Uladzimir Bely (bug #513334)
-
 Patch:  4567_distro-Gentoo-Kconfig.patch
 From:   Tom Wijsman <TomWij@gentoo.org>
 Desc:   Add Gentoo Linux support config settings and defaults.



* [gentoo-commits] proj/linux-patches:3.19 commit in: /
@ 2015-02-13  1:33 Mike Pagano
  0 siblings, 0 replies; 19+ messages in thread
From: Mike Pagano @ 2015-02-13  1:33 UTC (permalink / raw
  To: gentoo-commits

commit:     d40b86c655e52a66f63863f89fccffdfea33d381
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Feb 13 01:32:53 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Feb 13 01:32:53 2015 +0000
URL:        http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=d40b86c6

Add optimization patch for gcc version >= 4.9

---
 0000_README                                        |   6 +-
 ...-additional-cpu-optimizations-for-gcc-4.9.patch | 387 +++++++++++++++++++++
 2 files changed, 392 insertions(+), 1 deletion(-)

diff --git a/0000_README b/0000_README
index 3e1d0ab..f8e5c36 100644
--- a/0000_README
+++ b/0000_README
@@ -65,4 +65,8 @@ Desc:   Add Gentoo Linux support config settings and defaults.
 
 Patch:  5000_enable-additional-cpu-optimizations-for-gcc.patch
 From:   https://github.com/graysky2/kernel_gcc_patch/
-Desc:   Kernel patch enables gcc optimizations for additional CPUs.
+Desc:   Kernel patch enables gcc < v4.9 optimizations for additional CPUs.
+
+Patch:  5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
+From:   https://github.com/graysky2/kernel_gcc_patch/
+Desc:   Kernel patch enables gcc >= v4.9 optimizations for additional CPUs.

diff --git a/5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch b/5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
new file mode 100644
index 0000000..f931f75
--- /dev/null
+++ b/5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
@@ -0,0 +1,387 @@
+WARNING - this version of the patch works with version 4.9+ of gcc and with
+kernel version 3.15.x+ and should NOT be applied when compiling on older
+versions due to name changes of the flags with the 4.9 release of gcc.
+Use the older version of this patch hosted on the same github for older
+versions of gcc. For example:
+
+corei7 --> nehalem
+corei7-avx --> sandybridge
+core-avx-i --> ivybridge
+core-avx2 --> haswell
+
+For more, see: https://gcc.gnu.org/gcc-4.9/changes.html
+
+It also changes 'atom' to 'bonnell' in accordance with the gcc v4.9 changes.
+Note that upstream is using the deprecated 'match=atom' flags when I believe it
+should use the newer 'march=bonnell' flag for atom processors.
+
+I have made that change to this patch set as well.  See the following kernel
+bug report to see if I'm right: https://bugzilla.kernel.org/show_bug.cgi?id=77461
+
+This patch will expand the number of microarchitectures to include new
+processors including: AMD K10-family, AMD Family 10h (Barcelona), AMD Family
+14h (Bobcat), AMD Family 15h (Bulldozer), AMD Family 15h (Piledriver), AMD
+Family 16h (Jaguar), Intel 1st Gen Core i3/i5/i7 (Nehalem), Intel 1.5 Gen Core
+i3/i5/i7 (Westmere), Intel 2nd Gen Core i3/i5/i7 (Sandybridge), Intel 3rd Gen
+Core i3/i5/i7 (Ivybridge), Intel 4th Gen Core i3/i5/i7 (Haswell), and Intel 5th
+Gen Core i3/i5/i7 (Broadwell). It also offers the compiler the 'native' flag.
+
+Small but real speed increases are measurable using a make endpoint comparing
+a generic kernel to one built with one of the respective microarchs.
+
+See the following experimental evidence supporting this statement:
+https://github.com/graysky2/kernel_gcc_patch
+
+REQUIREMENTS
+linux version >=3.15
+gcc version >=4.9
+
+--- a/arch/x86/include/asm/module.h	2014-08-03 18:25:02.000000000 -0400
++++ b/arch/x86/include/asm/module.h	2014-09-13 09:37:16.721385247 -0400
+@@ -15,6 +15,20 @@
+ #define MODULE_PROC_FAMILY "586MMX "
+ #elif defined CONFIG_MCORE2
+ #define MODULE_PROC_FAMILY "CORE2 "
++#elif defined CONFIG_MNATIVE
++#define MODULE_PROC_FAMILY "NATIVE "
++#elif defined CONFIG_MNEHALEM
++#define MODULE_PROC_FAMILY "NEHALEM "
++#elif defined CONFIG_MWESTMERE
++#define MODULE_PROC_FAMILY "WESTMERE "
++#elif defined CONFIG_MSANDYBRIDGE
++#define MODULE_PROC_FAMILY "SANDYBRIDGE "
++#elif defined CONFIG_MIVYBRIDGE
++#define MODULE_PROC_FAMILY "IVYBRIDGE "
++#elif defined CONFIG_MHASWELL
++#define MODULE_PROC_FAMILY "HASWELL "
++#elif defined CONFIG_MBROADWELL
++#define MODULE_PROC_FAMILY "BROADWELL "
+ #elif defined CONFIG_MATOM
+ #define MODULE_PROC_FAMILY "ATOM "
+ #elif defined CONFIG_M686
+@@ -33,6 +47,20 @@
+ #define MODULE_PROC_FAMILY "K7 "
+ #elif defined CONFIG_MK8
+ #define MODULE_PROC_FAMILY "K8 "
++#elif defined CONFIG_MK8SSE3
++#define MODULE_PROC_FAMILY "K8SSE3 "
++#elif defined CONFIG_MK10
++#define MODULE_PROC_FAMILY "K10 "
++#elif defined CONFIG_MBARCELONA
++#define MODULE_PROC_FAMILY "BARCELONA "
++#elif defined CONFIG_MBOBCAT
++#define MODULE_PROC_FAMILY "BOBCAT "
++#elif defined CONFIG_MBULLDOZER
++#define MODULE_PROC_FAMILY "BULLDOZER "
++#elif defined CONFIG_MPILEDRIVER
++#define MODULE_PROC_FAMILY "PILEDRIVER "
++#elif defined CONFIG_MJAGUAR
++#define MODULE_PROC_FAMILY "JAGUAR "
+ #elif defined CONFIG_MELAN
+ #define MODULE_PROC_FAMILY "ELAN "
+ #elif defined CONFIG_MCRUSOE
+--- a/arch/x86/Kconfig.cpu	2014-08-03 18:25:02.000000000 -0400
++++ b/arch/x86/Kconfig.cpu	2014-09-13 09:37:16.721385247 -0400
+@@ -137,9 +137,8 @@ config MPENTIUM4
+ 		-Paxville
+ 		-Dempsey
+ 
+-
+ config MK6
+-	bool "K6/K6-II/K6-III"
++	bool "AMD K6/K6-II/K6-III"
+ 	depends on X86_32
+ 	---help---
+ 	  Select this for an AMD K6-family processor.  Enables use of
+@@ -147,7 +146,7 @@ config MK6
+ 	  flags to GCC.
+ 
+ config MK7
+-	bool "Athlon/Duron/K7"
++	bool "AMD Athlon/Duron/K7"
+ 	depends on X86_32
+ 	---help---
+ 	  Select this for an AMD Athlon K7-family processor.  Enables use of
+@@ -155,12 +154,62 @@ config MK7
+ 	  flags to GCC.
+ 
+ config MK8
+-	bool "Opteron/Athlon64/Hammer/K8"
++	bool "AMD Opteron/Athlon64/Hammer/K8"
+ 	---help---
+ 	  Select this for an AMD Opteron or Athlon64 Hammer-family processor.
+ 	  Enables use of some extended instructions, and passes appropriate
+ 	  optimization flags to GCC.
+ 
++config MK8SSE3
++	bool "AMD Opteron/Athlon64/Hammer/K8 with SSE3"
++	---help---
++	  Select this for improved AMD Opteron or Athlon64 Hammer-family processors.
++	  Enables use of some extended instructions, and passes appropriate
++	  optimization flags to GCC.
++
++config MK10
++	bool "AMD 61xx/7x50/PhenomX3/X4/II/K10"
++	---help---
++	  Select this for an AMD 61xx Eight-Core Magny-Cours, Athlon X2 7x50,
++		Phenom X3/X4/II, Athlon II X2/X3/X4, or Turion II-family processor.
++	  Enables use of some extended instructions, and passes appropriate
++	  optimization flags to GCC.
++
++config MBARCELONA
++	bool "AMD Barcelona"
++	---help---
++	  Select this for AMD Barcelona and newer processors.
++
++	  Enables -march=barcelona
++
++config MBOBCAT
++	bool "AMD Bobcat"
++	---help---
++	  Select this for AMD Bobcat processors.
++
++	  Enables -march=btver1
++
++config MBULLDOZER
++	bool "AMD Bulldozer"
++	---help---
++	  Select this for AMD Bulldozer processors.
++
++	  Enables -march=bdver1
++
++config MPILEDRIVER
++	bool "AMD Piledriver"
++	---help---
++	  Select this for AMD Piledriver processors.
++
++	  Enables -march=bdver2
++
++config MJAGUAR
++	bool "AMD Jaguar"
++	---help---
++	  Select this for AMD Jaguar processors.
++
++	  Enables -march=btver2
++
+ config MCRUSOE
+ 	bool "Crusoe"
+ 	depends on X86_32
+@@ -251,8 +300,17 @@ config MPSC
+ 	  using the cpu family field
+ 	  in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
+ 
++config MATOM
++	bool "Intel Atom"
++	---help---
++
++	  Select this for the Intel Atom platform. Intel Atom CPUs have an
++	  in-order pipelining architecture and thus can benefit from
++	  accordingly optimized code. Use a recent GCC with specific Atom
++	  support in order to fully benefit from selecting this option.
++
+ config MCORE2
+-	bool "Core 2/newer Xeon"
++	bool "Intel Core 2"
+ 	---help---
+ 
+ 	  Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
+@@ -260,14 +318,55 @@ config MCORE2
+ 	  family in /proc/cpuinfo. Newer ones have 6 and older ones 15
+ 	  (not a typo)
+ 
+-config MATOM
+-	bool "Intel Atom"
++	  Enables -march=core2
++
++config MNEHALEM
++	bool "Intel Nehalem"
+ 	---help---
+ 
+-	  Select this for the Intel Atom platform. Intel Atom CPUs have an
+-	  in-order pipelining architecture and thus can benefit from
+-	  accordingly optimized code. Use a recent GCC with specific Atom
+-	  support in order to fully benefit from selecting this option.
++	  Select this for 1st Gen Core processors in the Nehalem family.
++
++	  Enables -march=nehalem
++
++config MWESTMERE
++	bool "Intel Westmere"
++	---help---
++
++	  Select this for the Intel Westmere formerly Nehalem-C family.
++
++	  Enables -march=westmere
++
++config MSANDYBRIDGE
++	bool "Intel Sandy Bridge"
++	---help---
++
++	  Select this for 2nd Gen Core processors in the Sandy Bridge family.
++
++	  Enables -march=sandybridge
++
++config MIVYBRIDGE
++	bool "Intel Ivy Bridge"
++	---help---
++
++	  Select this for 3rd Gen Core processors in the Ivy Bridge family.
++
++	  Enables -march=ivybridge
++
++config MHASWELL
++	bool "Intel Haswell"
++	---help---
++
++	  Select this for 4th Gen Core processors in the Haswell family.
++
++	  Enables -march=haswell
++
++config MBROADWELL
++	bool "Intel Broadwell"
++	---help---
++
++	  Select this for 5th Gen Core processors in the Broadwell family.
++
++	  Enables -march=broadwell
+ 
+ config GENERIC_CPU
+ 	bool "Generic-x86-64"
+@@ -276,6 +375,19 @@ config GENERIC_CPU
+ 	  Generic x86-64 CPU.
+ 	  Run equally well on all x86-64 CPUs.
+ 
++config MNATIVE
++ bool "Native optimizations autodetected by GCC"
++ ---help---
++
++   GCC 4.2 and above support -march=native, which automatically detects
++   the optimum settings to use based on your processor. -march=native 
++   also detects and applies additional settings beyond -march specific
++   to your CPU, (eg. -msse4). Unless you have a specific reason not to
++   (e.g. distcc cross-compiling), you should probably be using
++   -march=native rather than anything listed below.
++
++   Enables -march=native
++
+ endchoice
+ 
+ config X86_GENERIC
+@@ -300,7 +412,7 @@ config X86_INTERNODE_CACHE_SHIFT
+ config X86_L1_CACHE_SHIFT
+ 	int
+ 	default "7" if MPENTIUM4 || MPSC
+-	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
++	default "6" if MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MPENTIUMM || MCORE2 || MNEHALEM || MWESTMERE || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || BROADWELL || MNATIVE || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
+ 	default "4" if MELAN || M486 || MGEODEGX1
+ 	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
+ 
+@@ -331,11 +443,11 @@ config X86_ALIGNMENT_16
+ 
+ config X86_INTEL_USERCOPY
+ 	def_bool y
+-	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
++	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK8SSE3 || MK7 || MEFFICEON || MCORE2 || MK10 || MBARCELONA || MNEHALEM || MWESTMERE || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MNATIVE
+ 
+ config X86_USE_PPRO_CHECKSUM
+ 	def_bool y
+-	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
++	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MK10 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MATOM || MNATIVE
+ 
+ config X86_USE_3DNOW
+ 	def_bool y
+@@ -359,17 +471,17 @@ config X86_P6_NOP
+ 
+ config X86_TSC
+ 	def_bool y
+-	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
++	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MNATIVE || MATOM) || X86_64
+ 
+ config X86_CMPXCHG64
+ 	def_bool y
+-	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM
++	depends on X86_PAE || X86_64 || MCORE2 || MNEHALEM || MWESTMERE || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM || MNATIVE
+ 
+ # this should be set for all -march=.. options where the compiler
+ # generates cmov.
+ config X86_CMOV
+ 	def_bool y
+-	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
++	depends on (MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MK7 || MCORE2 || MNEHALEM || MWESTMERE || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MNATIVE || MATOM || MGEODE_LX)
+ 
+ config X86_MINIMUM_CPU_FAMILY
+ 	int
+--- a/arch/x86/Makefile	2014-08-03 18:25:02.000000000 -0400
++++ b/arch/x86/Makefile	2014-09-13 09:37:16.721385247 -0400
+@@ -92,13 +92,33 @@ else
+ 	KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
+ 
+         # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
++        cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
+         cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
++        cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3,-mtune=k8)
++        cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10)
++        cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona)
++        cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1)
++        cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1)
++        cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2)
++        cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2)
+         cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
+ 
+         cflags-$(CONFIG_MCORE2) += \
+-                $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
+-	cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
+-		$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
++                $(call cc-option,-march=core2,$(call cc-option,-mtune=core2))
++        cflags-$(CONFIG_MNEHALEM) += \
++                $(call cc-option,-march=nehalem,$(call cc-option,-mtune=nehalem))
++        cflags-$(CONFIG_MWESTMERE) += \
++                $(call cc-option,-march=westmere,$(call cc-option,-mtune=westmere))
++        cflags-$(CONFIG_MSANDYBRIDGE) += \
++                $(call cc-option,-march=sandybridge,$(call cc-option,-mtune=sandybridge))
++        cflags-$(CONFIG_MIVYBRIDGE) += \
++                $(call cc-option,-march=ivybridge,$(call cc-option,-mtune=ivybridge))
++        cflags-$(CONFIG_MHASWELL) += \
++                $(call cc-option,-march=haswell,$(call cc-option,-mtune=haswell))
++        cflags-$(CONFIG_MBROADWELL) += \
++                $(call cc-option,-march=broadwell,$(call cc-option,-mtune=broadwell))
++        cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell) \
++                $(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic))
+         cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
+         KBUILD_CFLAGS += $(cflags-y)
+ 
+--- a/arch/x86/Makefile_32.cpu	2014-08-03 18:25:02.000000000 -0400
++++ b/arch/x86/Makefile_32.cpu	2014-09-13 09:37:16.721385247 -0400
+@@ -23,7 +23,15 @@ cflags-$(CONFIG_MK6)		+= -march=k6
+ # Please note, that patches that add -march=athlon-xp and friends are pointless.
+ # They make zero difference whatsosever to performance at this time.
+ cflags-$(CONFIG_MK7)		+= -march=athlon
++cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
+ cflags-$(CONFIG_MK8)		+= $(call cc-option,-march=k8,-march=athlon)
++cflags-$(CONFIG_MK8SSE3)		+= $(call cc-option,-march=k8-sse3,-march=athlon)
++cflags-$(CONFIG_MK10)	+= $(call cc-option,-march=amdfam10,-march=athlon)
++cflags-$(CONFIG_MBARCELONA)	+= $(call cc-option,-march=barcelona,-march=athlon)
++cflags-$(CONFIG_MBOBCAT)	+= $(call cc-option,-march=btver1,-march=athlon)
++cflags-$(CONFIG_MBULLDOZER)	+= $(call cc-option,-march=bdver1,-march=athlon)
++cflags-$(CONFIG_MPILEDRIVER)	+= $(call cc-option,-march=bdver2,-march=athlon)
++cflags-$(CONFIG_MJAGUAR)	+= $(call cc-option,-march=btver2,-march=athlon)
+ cflags-$(CONFIG_MCRUSOE)	+= -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
+ cflags-$(CONFIG_MEFFICEON)	+= -march=i686 $(call tune,pentium3) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
+ cflags-$(CONFIG_MWINCHIPC6)	+= $(call cc-option,-march=winchip-c6,-march=i586)
+@@ -32,8 +40,14 @@ cflags-$(CONFIG_MCYRIXIII)	+= $(call cc-
+ cflags-$(CONFIG_MVIAC3_2)	+= $(call cc-option,-march=c3-2,-march=i686)
+ cflags-$(CONFIG_MVIAC7)		+= -march=i686
+ cflags-$(CONFIG_MCORE2)		+= -march=i686 $(call tune,core2)
+-cflags-$(CONFIG_MATOM)		+= $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \
+-	$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
++cflags-$(CONFIG_MNEHALEM)	+= -march=i686 $(call tune,nehalem)
++cflags-$(CONFIG_MWESTMERE)	+= -march=i686 $(call tune,westmere)
++cflags-$(CONFIG_MSANDYBRIDGE)	+= -march=i686 $(call tune,sandybridge)
++cflags-$(CONFIG_MIVYBRIDGE)	+= -march=i686 $(call tune,ivybridge)
++cflags-$(CONFIG_MHASWELL)	+= -march=i686 $(call tune,haswell)
++cflags-$(CONFIG_MBROADWELL)	+= -march=i686 $(call tune,broadwell)
++cflags-$(CONFIG_MATOM)		+= $(call cc-option,-march=bonnell,$(call cc-option,-march=core2,-march=i686)) \
++	$(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic))
+ 
+ # AMD Elan support
+ cflags-$(CONFIG_MELAN)		+= -march=i486


^ permalink raw reply related	[flat|nested] 19+ messages in thread

* [gentoo-commits] proj/linux-patches:3.19 commit in: /
@ 2015-02-13  1:48 Mike Pagano
  0 siblings, 0 replies; 19+ messages in thread
From: Mike Pagano @ 2015-02-13  1:48 UTC (permalink / raw
  To: gentoo-commits

commit:     323c782c4e1726ca1aa855cccd5e7d8730d62ba8
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Feb 13 01:47:53 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Feb 13 01:47:53 2015 +0000
URL:        http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=323c782c

Bootsplash ported by Marco. (Bug #539616)

---
 0000_README                |    4 +
 4200_fbcondecor-3.19.patch | 2119 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2123 insertions(+)

diff --git a/0000_README b/0000_README
index f8e5c36..3fa77db 100644
--- a/0000_README
+++ b/0000_README
@@ -59,6 +59,10 @@ Patch:  2905_s2disk-resume-image-fix.patch
 From:   Al Viro <viro <at> ZenIV.linux.org.uk>
 Desc:   Do not lock when UMH is waiting on current thread spawned by linuxrc. (bug #481344)
 
+Patch:  4200_fbcondecor-3.19.patch
+From:   http://www.mepiscommunity.org/fbcondecor
+Desc:   Bootsplash ported by Marco. (Bug #539616)
+
 Patch:  4567_distro-Gentoo-Kconfig.patch
 From:   Tom Wijsman <TomWij@gentoo.org>
 Desc:   Add Gentoo Linux support config settings and defaults.

diff --git a/4200_fbcondecor-3.19.patch b/4200_fbcondecor-3.19.patch
new file mode 100644
index 0000000..29c379f
--- /dev/null
+++ b/4200_fbcondecor-3.19.patch
@@ -0,0 +1,2119 @@
+diff --git a/Documentation/fb/00-INDEX b/Documentation/fb/00-INDEX
+index fe85e7c..2230930 100644
+--- a/Documentation/fb/00-INDEX
++++ b/Documentation/fb/00-INDEX
+@@ -23,6 +23,8 @@ ep93xx-fb.txt
+ 	- info on the driver for EP93xx LCD controller.
+ fbcon.txt
+ 	- intro to and usage guide for the framebuffer console (fbcon).
++fbcondecor.txt
++	- info on the Framebuffer Console Decoration
+ framebuffer.txt
+ 	- introduction to frame buffer devices.
+ gxfb.txt
+diff --git a/Documentation/fb/fbcondecor.txt b/Documentation/fb/fbcondecor.txt
+new file mode 100644
+index 0000000..3388c61
+--- /dev/null
++++ b/Documentation/fb/fbcondecor.txt
+@@ -0,0 +1,207 @@
++What is it?
++-----------
++
++The framebuffer decorations are a kernel feature that allows a background
++picture to be displayed on selected consoles.
++
++What do I need to get it to work?
++---------------------------------
++
++To get fbcondecor up-and-running you will have to:
++ 1) get a copy of splashutils [1] or a similar program
++ 2) get some fbcondecor themes
++ 3) build the kernel helper program
++ 4) build your kernel with the FB_CON_DECOR option enabled.
++
++To get fbcondecor operational right after fbcon initialization is finished, you
++will have to include a theme and the kernel helper in your initramfs image.
++Please refer to the splashutils documentation for instructions on how to do that.
++
++[1] The splashutils package can be downloaded from:
++    http://github.com/alanhaggai/fbsplash
++
++The userspace helper
++--------------------
++
++The userspace fbcondecor helper (by default: /sbin/fbcondecor_helper) is
++called by the kernel whenever an important event occurs and a job needs to
++be carried out in userspace. Important events include console switches and
++video mode switches, on which the kernel requests background images and
++configuration parameters for the current console. The fbcondecor helper must
++be accessible at all times; if it's not, fbcondecor will be switched off
++automatically.
++
++It's possible to set the path to the fbcondecor helper by writing it to
++/proc/sys/kernel/fbcondecor.
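++
++For example, to point the kernel at a helper installed in the default
++location:
++
++  echo /sbin/fbcondecor_helper > /proc/sys/kernel/fbcondecor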
++
++*****************************************************************************
++
++The information below is mostly technical stuff. There's probably no need to
++read it unless you plan to develop a userspace helper.
++
++The fbcondecor protocol
++-----------------------
++
++The fbcondecor protocol defines a communication interface between the kernel and
++the userspace fbcondecor helper.
++
++The kernel side is responsible for:
++
++ * rendering console text, using an image as a background (instead of a
++   standard solid color fbcon uses),
++ * accepting commands from the user via ioctls on the fbcondecor device,
++ * calling the userspace helper to set things up as soon as the fb subsystem 
++   is initialized.
++
++The userspace helper is responsible for everything else, including parsing
++configuration files, decompressing the image files whenever the kernel needs
++it, and communicating with the kernel if necessary.
++
++The fbcondecor protocol specifies how communication is done both ways:
++kernel->userspace and userspace->kernel.
++
++Kernel -> Userspace
++-------------------
++
++The kernel communicates with the userspace helper by calling it and specifying
++the task to be done in a series of arguments.
++
++The arguments follow the pattern:
++<fbcondecor protocol version> <command> <parameters>
++
++All commands defined in fbcondecor protocol v2 have the following parameters:
++ virtual console
++ framebuffer number
++ theme
++
++Fbcondecor protocol v1 specified an additional 'fbcondecor mode' after the
++framebuffer number. Fbcondecor protocol v1 is deprecated and should not be used.
++
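++For example, a 'getpic' request for virtual console 1 on framebuffer 0 with
++a theme named 'default' (the theme name here is just an illustration)
++results in a helper call along the lines of:
++
++  /sbin/fbcondecor_helper 2 getpic 1 0 default
++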
++Fbcondecor protocol v2 specifies the following commands:
++
++getpic
++------
++ The kernel issues this command to request image data. It's up to the
++ userspace helper to find a background image appropriate for the specified
++ theme and the current resolution. The userspace helper should respond by
++ issuing the FBIOCONDECOR_SETPIC ioctl.
++
++init
++----
++ The kernel issues this command after the fbcondecor device is created and
++ the fbcondecor interface is initialized. Upon receiving 'init', the userspace
++ helper should parse the kernel command line (/proc/cmdline) or otherwise
++ decide whether fbcondecor is to be activated.
++
++ To activate fbcondecor on the first console the helper should issue the
++ FBIOCONDECOR_SETCFG, FBIOCONDECOR_SETPIC and FBIOCONDECOR_SETSTATE commands,
++ in the above-mentioned order.
++
++ When the userspace helper is called in an early phase of the boot process
++ (right after the initialization of fbcon), no filesystems will be mounted.
++ The helper program should mount sysfs and then create the appropriate
++ framebuffer, fbcondecor and tty0 devices (if they don't already exist) to get
++ current display settings and to be able to communicate with the kernel side.
++ It should probably also mount the procfs to be able to parse the kernel
++ command line parameters.
++
++ Note that the console sem is not held when the kernel calls fbcondecor_helper
++ with the 'init' command. The fbcondecor helper should perform all ioctls with
++ origin set to FBCON_DECOR_IO_ORIG_USER.
++
++modechange
++----------
++ The kernel issues this command on a mode change. The helper's response should
++ be similar to the response to the 'init' command. Note that this time the
++ console sem is held and all ioctls must be performed with origin set to
++ FBCON_DECOR_IO_ORIG_KERNEL.
++
++
++Userspace -> Kernel
++-------------------
++
++Userspace programs can communicate with fbcondecor via ioctls on the
++fbcondecor device. These ioctls are to be used by both the userspace helper
++(called only by the kernel) and userspace configuration tools (run by users).
++
++The fbcondecor helper should set the origin field to FBCON_DECOR_IO_ORIG_KERNEL
++when doing the appropriate ioctls. All userspace configuration tools should
++use FBCON_DECOR_IO_ORIG_USER. Failure to set the appropriate value in the origin
++field when performing ioctls from the kernel helper will most likely result
++in a console deadlock.
++
++FBCON_DECOR_IO_ORIG_KERNEL instructs fbcondecor not to try to acquire the console
++semaphore. Not surprisingly, FBCON_DECOR_IO_ORIG_USER instructs it to acquire
++the console sem.
++
++The framebuffer console decoration provides the following ioctls (all defined in 
++linux/fb.h):
++
++FBIOCONDECOR_SETPIC
++description: loads a background picture for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: struct fb_image*
++notes: 
++If called for consoles other than the current foreground one, the picture data
++will be ignored.
++
++If the current virtual console is running in an 8-bpp mode, the cmap substruct
++of fb_image has to be filled appropriately: start should be set to 16 (the first
++16 colors are reserved for fbcon), len to a value <= 240, and red, green and
++blue should point to valid cmap data. The transp field is ignored. The fields
++dx, dy, bg_color, fg_color in fb_image are ignored as well.
++
++FBIOCONDECOR_SETCFG
++description: sets the fbcondecor config for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: struct vc_decor*
++notes: The structure has to be filled with valid data.
++
++FBIOCONDECOR_GETCFG
++description: gets the fbcondecor config for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: struct vc_decor*
++
++FBIOCONDECOR_SETSTATE
++description: sets the fbcondecor state for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: unsigned int*
++          values: 0 = disabled, 1 = enabled.
++
++FBIOCONDECOR_GETSTATE
++description: gets the fbcondecor state for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: unsigned int*
++          values: as in FBIOCONDECOR_SETSTATE
++
++Info on used structures:
++
++Definition of struct vc_decor can be found in linux/console_decor.h. It's
++heavily commented. Note that the 'theme' field should point to a string
++no longer than FBCON_DECOR_THEME_LEN. When a FBIOCONDECOR_GETCFG call is
++performed, the theme field should point to a char buffer of length
++FBCON_DECOR_THEME_LEN.
++
++Definition of struct fbcon_decor_iowrapper can be found in linux/fb.h.
++The fields in this struct have the following meaning:
++
++vc: 
++Virtual console number.
++
++origin: 
++Specifies if the ioctl is performed as a response to a kernel request. The
++fbcondecor helper should set this field to FBCON_DECOR_IO_ORIG_KERNEL, userspace
++programs should set it to FBCON_DECOR_IO_ORIG_USER. This field is necessary to
++avoid console semaphore deadlocks.
++
++data: 
++Pointer to a data structure appropriate for the performed ioctl. Type of
++the data struct is specified in the ioctls description.
++
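++A minimal sketch of a userspace configuration tool using this interface
++(assuming the misc device node shows up as /dev/fbcondecor and that the
++patched linux/fb.h is in the include path):
++
++  #include <fcntl.h>
++  #include <unistd.h>
++  #include <sys/ioctl.h>
++  #include <linux/fb.h>
++
++  /* Enable (state = 1) or disable (state = 0) the decor on console 'vc'. */
++  int set_decor_state(unsigned short vc, unsigned int state)
++  {
++          struct fbcon_decor_iowrapper wrapper;
++          int err, fd = open("/dev/fbcondecor", O_RDWR);
++
++          if (fd < 0)
++                  return -1;
++          wrapper.vc = vc;                           /* virtual console number */
++          wrapper.origin = FBCON_DECOR_IO_ORIG_USER; /* not the kernel helper */
++          wrapper.data = &state;                     /* 0 = disabled, 1 = enabled */
++          err = ioctl(fd, FBIOCONDECOR_SETSTATE, &wrapper);
++          close(fd);
++          return err;
++  }
++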
++*****************************************************************************
++
++Credit
++------
++
++Original 'bootsplash' project & implementation by:
++  Volker Poplawski <volker@poplawski.de>, Stefan Reinauer <stepan@suse.de>,
++  Steffen Winterfeldt <snwint@suse.de>, Michael Schroeder <mls@suse.de>,
++  Ken Wimer <wimer@suse.de>.
++
++Fbcondecor, fbcondecor protocol design, current implementation & docs by:
++  Michal Januszewski <michalj+fbcondecor@gmail.com>
++
+diff --git a/drivers/Makefile b/drivers/Makefile
+index 7183b6a..d576148 100644
+--- a/drivers/Makefile
++++ b/drivers/Makefile
+@@ -17,6 +17,10 @@ obj-y				+= pwm/
+ obj-$(CONFIG_PCI)		+= pci/
+ obj-$(CONFIG_PARISC)		+= parisc/
+ obj-$(CONFIG_RAPIDIO)		+= rapidio/
++# tty/ comes before char/ so that the VT console is the boot-time
++# default.
++obj-y				+= tty/
++obj-y				+= char/
+ obj-y				+= video/
+ obj-y				+= idle/
+ 
+@@ -42,11 +46,6 @@ obj-$(CONFIG_REGULATOR)		+= regulator/
+ # reset controllers early, since gpu drivers might rely on them to initialize
+ obj-$(CONFIG_RESET_CONTROLLER)	+= reset/
+ 
+-# tty/ comes before char/ so that the VT console is the boot-time
+-# default.
+-obj-y				+= tty/
+-obj-y				+= char/
+-
+ # iommu/ comes before gpu as gpu are using iommu controllers
+ obj-$(CONFIG_IOMMU_SUPPORT) += iommu/
+
+diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
+index fe1cd01..6d2e87a 100644
+--- a/drivers/video/console/Kconfig
++++ b/drivers/video/console/Kconfig
+@@ -126,6 +126,19 @@ config FRAMEBUFFER_CONSOLE_ROTATION
+          such that other users of the framebuffer will remain normally
+          oriented.
+ 
++config FB_CON_DECOR
++	bool "Support for the Framebuffer Console Decorations"
++	depends on FRAMEBUFFER_CONSOLE=y && !FB_TILEBLITTING
++	default n
++	---help---
++	  This option enables support for framebuffer console decorations which
++	  makes it possible to display images in the background of the system
++	  consoles.  Note that userspace utilities are necessary in order to take 
++	  advantage of these features. Refer to Documentation/fb/fbcondecor.txt 
++	  for more information.
++
++	  If unsure, say N.
++
+ config STI_CONSOLE
+         bool "STI text console"
+         depends on PARISC
+diff --git a/drivers/video/console/Makefile b/drivers/video/console/Makefile
+index 43bfa48..cc104b6f 100644
+--- a/drivers/video/console/Makefile
++++ b/drivers/video/console/Makefile
+@@ -16,4 +16,5 @@ obj-$(CONFIG_FRAMEBUFFER_CONSOLE)     += fbcon_rotate.o fbcon_cw.o fbcon_ud.o \
+                                          fbcon_ccw.o
+ endif
+ 
++obj-$(CONFIG_FB_CON_DECOR)     	  += fbcondecor.o cfbcondecor.o
+ obj-$(CONFIG_FB_STI)              += sticore.o
+diff --git a/drivers/video/console/bitblit.c b/drivers/video/console/bitblit.c
+index 61b182b..984384b 100644
+--- a/drivers/video/console/bitblit.c
++++ b/drivers/video/console/bitblit.c
+@@ -18,6 +18,7 @@
+ #include <linux/console.h>
+ #include <asm/types.h>
+ #include "fbcon.h"
++#include "fbcondecor.h"
+ 
+ /*
+  * Accelerated handlers.
+@@ -55,6 +56,13 @@ static void bit_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ 	area.height = height * vc->vc_font.height;
+ 	area.width = width * vc->vc_font.width;
+ 
++	if (fbcon_decor_active(info, vc)) {
++ 		area.sx += vc->vc_decor.tx;
++ 		area.sy += vc->vc_decor.ty;
++ 		area.dx += vc->vc_decor.tx;
++ 		area.dy += vc->vc_decor.ty;
++ 	}
++
+ 	info->fbops->fb_copyarea(info, &area);
+ }
+ 
+@@ -380,11 +388,15 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
+ 	cursor.image.depth = 1;
+ 	cursor.rop = ROP_XOR;
+ 
+-	if (info->fbops->fb_cursor)
+-		err = info->fbops->fb_cursor(info, &cursor);
++	if (fbcon_decor_active(info, vc)) {
++		fbcon_decor_cursor(info, &cursor);
++	} else {
++		if (info->fbops->fb_cursor)
++			err = info->fbops->fb_cursor(info, &cursor);
+ 
+-	if (err)
+-		soft_cursor(info, &cursor);
++		if (err)
++			soft_cursor(info, &cursor);
++	}
+ 
+ 	ops->cursor_reset = 0;
+ }
+diff --git a/drivers/video/console/cfbcondecor.c b/drivers/video/console/cfbcondecor.c
+new file mode 100644
+index 0000000..a2b4497
+--- /dev/null
++++ b/drivers/video/console/cfbcondecor.c
+@@ -0,0 +1,471 @@
++/*
++ *  linux/drivers/video/cfbcon_decor.c -- Framebuffer decor render functions
++ *
++ *  Copyright (C) 2004 Michal Januszewski <michalj+fbcondecor@gmail.com>
++ *
++ *  Code based upon "Bootdecor" (C) 2001-2003
++ *       Volker Poplawski <volker@poplawski.de>,
++ *       Stefan Reinauer <stepan@suse.de>,
++ *       Steffen Winterfeldt <snwint@suse.de>,
++ *       Michael Schroeder <mls@suse.de>,
++ *       Ken Wimer <wimer@suse.de>.
++ *
++ *  This file is subject to the terms and conditions of the GNU General Public
++ *  License.  See the file COPYING in the main directory of this archive for
++ *  more details.
++ */
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/fb.h>
++#include <linux/selection.h>
++#include <linux/slab.h>
++#include <linux/vt_kern.h>
++#include <asm/irq.h>
++
++#include "fbcon.h"
++#include "fbcondecor.h"
++
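++/*
++ * Expand one bit of an 8-pixel glyph slice: a set bit becomes the foreground
++ * color, a clear bit becomes either the matching background picture pixel
++ * (transparent mode) or the solid background color.
++ */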
++#define parse_pixel(shift,bpp,type)						\
++	do {									\
++		if (d & (0x80 >> (shift)))					\
++			dd2[(shift)] = fgx;					\
++		else								\
++			dd2[(shift)] = transparent ? *(type *)decor_src : bgx;	\
++		decor_src += (bpp);						\
++	} while (0)
++
++extern int get_color(struct vc_data *vc, struct fb_info *info,
++		     u16 c, int is_fg);
++
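++/*
++ * Rebuild the 16 fbcon entries of the pseudo palette from the console
++ * palette, scaling each component down to the shortest color field length.
++ */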
++void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc)
++{
++	int i, j, k;
++	int minlen = min(min(info->var.red.length, info->var.green.length),
++			     info->var.blue.length);
++	u32 col;
++
++	for (j = i = 0; i < 16; i++) {
++		k = color_table[i];
++
++		col = ((vc->vc_palette[j++]  >> (8-minlen))
++			<< info->var.red.offset);
++		col |= ((vc->vc_palette[j++] >> (8-minlen))
++			<< info->var.green.offset);
++		col |= ((vc->vc_palette[j++] >> (8-minlen))
++			<< info->var.blue.offset);
++		((u32 *)info->pseudo_palette)[k] = col;
++	}
++}
++
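++/*
++ * Render a 1 bpp character bitmap at (xpos, ypos). Clear bits are filled
++ * with the background picture when 'transparent' is set, with the solid
++ * background color otherwise.
++ */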
++void fbcon_decor_renderc(struct fb_info *info, int ypos, int xpos, int height,
++		      int width, u8* src, u32 fgx, u32 bgx, u8 transparent)
++{
++	unsigned int x, y;
++	u32 dd;
++	int bytespp = ((info->var.bits_per_pixel + 7) >> 3);
++	unsigned int d = ypos * info->fix.line_length + xpos * bytespp;
++	unsigned int ds = (ypos * info->var.xres + xpos) * bytespp;
++	u16 dd2[4];
++
++	u8* decor_src = (u8 *)(info->bgdecor.data + ds);
++	u8* dst = (u8 *)(info->screen_base + d);
++
++	if ((ypos + height) > info->var.yres || (xpos + width) > info->var.xres)
++		return;
++
++	for (y = 0; y < height; y++) {
++		switch (info->var.bits_per_pixel) {
++
++		case 32:
++			for (x = 0; x < width; x++) {
++
++				if ((x & 7) == 0)
++					d = *src++;
++				if (d & 0x80)
++					dd = fgx;
++				else
++					dd = transparent ?
++					     *(u32 *)decor_src : bgx;
++
++				d <<= 1;
++				decor_src += 4;
++				fb_writel(dd, dst);
++				dst += 4;
++			}
++			break;
++		case 24:
++			for (x = 0; x < width; x++) {
++
++				if ((x & 7) == 0)
++					d = *src++;
++				if (d & 0x80)
++					dd = fgx;
++				else
++					dd = transparent ?
++					     (*(u32 *)decor_src & 0xffffff) : bgx;
++
++				d <<= 1;
++				decor_src += 3;
++#ifdef __LITTLE_ENDIAN
++				fb_writew(dd & 0xffff, dst);
++				dst += 2;
++				fb_writeb((dd >> 16), dst);
++#else
++				fb_writew(dd >> 8, dst);
++				dst += 2;
++				fb_writeb(dd & 0xff, dst);
++#endif
++				dst++;
++			}
++			break;
++		case 16:
++			for (x = 0; x < width; x += 2) {
++				if ((x & 7) == 0)
++					d = *src++;
++
++				parse_pixel(0, 2, u16);
++				parse_pixel(1, 2, u16);
++#ifdef __LITTLE_ENDIAN
++				dd = dd2[0] | (dd2[1] << 16);
++#else
++				dd = dd2[1] | (dd2[0] << 16);
++#endif
++				d <<= 2;
++				fb_writel(dd, dst);
++				dst += 4;
++			}
++			break;
++
++		case 8:
++			for (x = 0; x < width; x += 4) {
++				if ((x & 7) == 0)
++					d = *src++;
++
++				parse_pixel(0, 1, u8);
++				parse_pixel(1, 1, u8);
++				parse_pixel(2, 1, u8);
++				parse_pixel(3, 1, u8);
++
++#ifdef __LITTLE_ENDIAN
++				dd = dd2[0] | (dd2[1] << 8) | (dd2[2] << 16) | (dd2[3] << 24);
++#else
++				dd = dd2[3] | (dd2[2] << 8) | (dd2[1] << 16) | (dd2[0] << 24);
++#endif
++				d <<= 4;
++				fb_writel(dd, dst);
++				dst += 4;
++			}
++		}
++
++		dst += info->fix.line_length - width * bytespp;
++		decor_src += (info->var.xres - width) * bytespp;
++	}
++}
++
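++/* Map a console color index to a pixel value: go through the pseudo palette
++ * for truecolor/directcolor visuals, pass the index through otherwise. */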
++#define cc2cx(a) 						\
++	((info->fix.visual == FB_VISUAL_TRUECOLOR || 		\
++	  info->fix.visual == FB_VISUAL_DIRECTCOLOR) ? 		\
++	 ((u32*)info->pseudo_palette)[a] : a)
++
++void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info,
++		   const unsigned short *s, int count, int yy, int xx)
++{
++	unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
++	struct fbcon_ops *ops = info->fbcon_par;
++	int fg_color, bg_color, transparent;
++	u8 *src;
++	u32 bgx, fgx;
++	u16 c = scr_readw(s);
++
++	fg_color = get_color(vc, info, c, 1);
++        bg_color = get_color(vc, info, c, 0);
++
++	/* Don't paint the background image if console is blanked */
++	transparent = ops->blank_state ? 0 :
++		(vc->vc_decor.bg_color == bg_color);
++
++	xx = xx * vc->vc_font.width + vc->vc_decor.tx;
++	yy = yy * vc->vc_font.height + vc->vc_decor.ty;
++
++	fgx = cc2cx(fg_color);
++	bgx = cc2cx(bg_color);
++
++	while (count--) {
++		c = scr_readw(s++);
++		src = vc->vc_font.data + (c & charmask) * vc->vc_font.height *
++		      ((vc->vc_font.width + 7) >> 3);
++
++		fbcon_decor_renderc(info, yy, xx, vc->vc_font.height,
++			       vc->vc_font.width, src, fgx, bgx, transparent);
++		xx += vc->vc_font.width;
++	}
++}
++
++void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor)
++{
++	int i;
++	unsigned int dsize, s_pitch;
++	struct fbcon_ops *ops = info->fbcon_par;
++	struct vc_data* vc;
++	u8 *src;
++
++	/* we really don't need any cursors while the console is blanked */
++	if (info->state != FBINFO_STATE_RUNNING || ops->blank_state)
++		return;
++
++	vc = vc_cons[ops->currcon].d;
++
++	src = kmalloc(64 + sizeof(struct fb_image), GFP_ATOMIC);
++	if (!src)
++		return;
++
++	s_pitch = (cursor->image.width + 7) >> 3;
++	dsize = s_pitch * cursor->image.height;
++	if (cursor->enable) {
++		switch (cursor->rop) {
++		case ROP_XOR:
++			for (i = 0; i < dsize; i++)
++				src[i] = cursor->image.data[i] ^ cursor->mask[i];
++                        break;
++		case ROP_COPY:
++		default:
++			for (i = 0; i < dsize; i++)
++				src[i] = cursor->image.data[i] & cursor->mask[i];
++			break;
++		}
++	} else
++		memcpy(src, cursor->image.data, dsize);
++
++	fbcon_decor_renderc(info,
++			cursor->image.dy + vc->vc_decor.ty,
++			cursor->image.dx + vc->vc_decor.tx,
++			cursor->image.height,
++			cursor->image.width,
++			(u8*)src,
++			cc2cx(cursor->image.fg_color),
++			cc2cx(cursor->image.bg_color),
++			cursor->image.bg_color == vc->vc_decor.bg_color);
++
++	kfree(src);
++}
++
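++/* Fill a screen rectangle with a solid background color, replicating the
++ * color across a 32-bit word for the 8 and 16 bpp cases. */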
++static void decorset(u8 *dst, int height, int width, int dstbytes,
++		        u32 bgx, int bpp)
++{
++	int i;
++
++	if (bpp == 8)
++		bgx |= bgx << 8;
++	if (bpp == 16 || bpp == 8)
++		bgx |= bgx << 16;
++
++	while (height-- > 0) {
++		u8 *p = dst;
++
++		switch (bpp) {
++
++		case 32:
++			for (i=0; i < width; i++) {
++				fb_writel(bgx, p); p += 4;
++			}
++			break;
++		case 24:
++			for (i=0; i < width; i++) {
++#ifdef __LITTLE_ENDIAN
++				fb_writew((bgx & 0xffff),(u16*)p); p += 2;
++				fb_writeb((bgx >> 16),p++);
++#else
++				fb_writew((bgx >> 8),(u16*)p); p += 2;
++				fb_writeb((bgx & 0xff),p++);
++#endif
++			}
++			break;
++		case 16:
++			for (i=0; i < width/4; i++) {
++				fb_writel(bgx,p); p += 4;
++				fb_writel(bgx,p); p += 4;
++			}
++			if (width & 2) {
++				fb_writel(bgx,p); p += 4;
++			}
++			if (width & 1)
++				fb_writew(bgx,(u16*)p);
++			break;
++		case 8:
++			for (i=0; i < width/4; i++) {
++				fb_writel(bgx,p); p += 4;
++			}
++
++			if (width & 2) {
++				fb_writew(bgx,p); p += 2;
++			}
++			if (width & 1)
++				fb_writeb(bgx,(u8*)p);
++			break;
++
++		}
++		dst += dstbytes;
++	}
++}
++
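++/* Copy a width x height pixel rectangle from 'src' to 'dst', handling the
++ * sub-word tail of each row for every supported bpp. */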
++void fbcon_decor_copy(u8 *dst, u8 *src, int height, int width, int linebytes,
++		   int srclinebytes, int bpp)
++{
++	int i;
++
++	while (height-- > 0) {
++		u32 *p = (u32 *)dst;
++		u32 *q = (u32 *)src;
++
++		switch (bpp) {
++
++		case 32:
++			for (i=0; i < width; i++)
++				fb_writel(*q++, p++);
++			break;
++		case 24:
++			for (i=0; i < (width*3/4); i++)
++				fb_writel(*q++, p++);
++			if ((width*3) % 4) {
++				if (width & 2) {
++					fb_writeb(*(u8*)q, (u8*)p);
++				} else if (width & 1) {
++					fb_writew(*(u16*)q, (u16*)p);
++					fb_writeb(*(u8*)((u16*)q+1),(u8*)((u16*)p+2));
++				}
++			}
++			break;
++		case 16:
++			for (i=0; i < width/4; i++) {
++				fb_writel(*q++, p++);
++				fb_writel(*q++, p++);
++			}
++			if (width & 2)
++				fb_writel(*q++, p++);
++			if (width & 1)
++				fb_writew(*(u16*)q, (u16*)p);
++			break;
++		case 8:
++			for (i=0; i < width/4; i++)
++				fb_writel(*q++, p++);
++
++			if (width & 2) {
++				fb_writew(*(u16*)q, (u16*)p);
++				q = (u32*) ((u16*)q + 1);
++				p = (u32*) ((u16*)p + 1);
++			}
++			if (width & 1)
++				fb_writeb(*(u8*)q, (u8*)p);
++			break;
++		}
++
++		dst += linebytes;
++		src += srclinebytes;
++	}
++}
++
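++/* Copy the matching rectangle of the background picture onto the screen. */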
++static void decorfill(struct fb_info *info, int sy, int sx, int height,
++		       int width)
++{
++	int bytespp = ((info->var.bits_per_pixel + 7) >> 3);
++	int d  = sy * info->fix.line_length + sx * bytespp;
++	int ds = (sy * info->var.xres + sx) * bytespp;
++
++	fbcon_decor_copy((u8 *)(info->screen_base + d), (u8 *)(info->bgdecor.data + ds),
++		    height, width, info->fix.line_length, info->var.xres * bytespp,
++		    info->var.bits_per_pixel);
++}
++
++void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx,
++		    int height, int width)
++{
++	int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
++	struct fbcon_ops *ops = info->fbcon_par;
++	u8 *dst;
++	int transparent, bg_color = attr_bgcol_ec(bgshift, vc, info);
++
++	transparent = (vc->vc_decor.bg_color == bg_color);
++	sy = sy * vc->vc_font.height + vc->vc_decor.ty;
++	sx = sx * vc->vc_font.width + vc->vc_decor.tx;
++	height *= vc->vc_font.height;
++	width *= vc->vc_font.width;
++
++	/* Don't paint the background image if console is blanked */
++	if (transparent && !ops->blank_state) {
++		decorfill(info, sy, sx, height, width);
++	} else {
++		dst = (u8 *)(info->screen_base + sy * info->fix.line_length +
++			     sx * ((info->var.bits_per_pixel + 7) >> 3));
++		decorset(dst, height, width, info->fix.line_length, cc2cx(bg_color),
++			  info->var.bits_per_pixel);
++	}
++}
++
++void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info,
++			    int bottom_only)
++{
++	unsigned int tw = vc->vc_cols*vc->vc_font.width;
++	unsigned int th = vc->vc_rows*vc->vc_font.height;
++
++	if (!bottom_only) {
++		/* top margin */
++		decorfill(info, 0, 0, vc->vc_decor.ty, info->var.xres);
++		/* left margin */
++		decorfill(info, vc->vc_decor.ty, 0, th, vc->vc_decor.tx);
++		/* right margin */
++		decorfill(info, vc->vc_decor.ty, vc->vc_decor.tx + tw, th, 
++			   info->var.xres - vc->vc_decor.tx - tw);
++	}
++	decorfill(info, vc->vc_decor.ty + th, 0, 
++		   info->var.yres - vc->vc_decor.ty - th, info->var.xres);
++}
++
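++/* Redraw-based replacement for bmove: re-render the moved characters so
++ * that the background picture stays intact. */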
++void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, 
++			   int sx, int dx, int width)
++{
++	u16 *d = (u16 *) (vc->vc_origin + vc->vc_size_row * y + dx * 2);
++	u16 *s = d + (dx - sx);
++	u16 *start = d;
++	u16 *ls = d;
++	u16 *le = d + width;
++	u16 c;
++	int x = dx;
++	u16 attr = 1;
++
++	do {
++		c = scr_readw(d);
++		if (attr != (c & 0xff00)) {
++			attr = c & 0xff00;
++			if (d > start) {
++				fbcon_decor_putcs(vc, info, start, d - start, y, x);
++				x += d - start;
++				start = d;
++			}
++		}
++		if (s >= ls && s < le && c == scr_readw(s)) {
++			if (d > start) {
++				fbcon_decor_putcs(vc, info, start, d - start, y, x);
++				x += d - start + 1;
++				start = d + 1;
++			} else {
++				x++;
++				start++;
++			}
++		}
++		s++;
++		d++;
++	} while (d < le);
++	if (d > start)
++		fbcon_decor_putcs(vc, info, start, d - start, y, x);
++}
++
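++/* On blanking, fill the framebuffer with color 0; on unblanking, redraw the
++ * screen contents and repaint the decor margins. */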
++void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank)
++{
++	if (blank) {
++		decorset((u8 *)info->screen_base, info->var.yres, info->var.xres,
++			  info->fix.line_length, 0, info->var.bits_per_pixel);
++	} else {
++		update_screen(vc);
++		fbcon_decor_clear_margins(vc, info, 0);
++	}
++}
++
+diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
+index f447734..da50d61 100644
+--- a/drivers/video/console/fbcon.c
++++ b/drivers/video/console/fbcon.c
+@@ -79,6 +79,7 @@
+ #include <asm/irq.h>
+ 
+ #include "fbcon.h"
++#include "../console/fbcondecor.h"
+ 
+ #ifdef FBCONDEBUG
+ #  define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
+@@ -94,7 +95,7 @@ enum {
+ 
+ static struct display fb_display[MAX_NR_CONSOLES];
+ 
+-static signed char con2fb_map[MAX_NR_CONSOLES];
++signed char con2fb_map[MAX_NR_CONSOLES];
+ static signed char con2fb_map_boot[MAX_NR_CONSOLES];
+ 
+ static int logo_lines;
+@@ -286,7 +287,7 @@ static inline int fbcon_is_inactive(struct vc_data *vc, struct fb_info *info)
+ 		!vt_force_oops_output(vc);
+ }
+ 
+-static int get_color(struct vc_data *vc, struct fb_info *info,
++int get_color(struct vc_data *vc, struct fb_info *info,
+ 	      u16 c, int is_fg)
+ {
+ 	int depth = fb_get_color_depth(&info->var, &info->fix);
+@@ -551,6 +552,9 @@ static int do_fbcon_takeover(int show_logo)
+ 		info_idx = -1;
+ 	} else {
+ 		fbcon_has_console_bind = 1;
++#ifdef CONFIG_FB_CON_DECOR
++		fbcon_decor_init();
++#endif
+ 	}
+ 
+ 	return err;
+@@ -1007,6 +1011,12 @@ static const char *fbcon_startup(void)
+ 	rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ 	cols /= vc->vc_font.width;
+ 	rows /= vc->vc_font.height;
++
++	if (fbcon_decor_active(info, vc)) {
++		cols = vc->vc_decor.twidth / vc->vc_font.width;
++		rows = vc->vc_decor.theight / vc->vc_font.height;
++	}
++
+ 	vc_resize(vc, cols, rows);
+ 
+ 	DPRINTK("mode:   %s\n", info->fix.id);
+@@ -1036,7 +1046,7 @@ static void fbcon_init(struct vc_data *vc, int init)
+ 	cap = info->flags;
+ 
+ 	if (vc != svc || logo_shown == FBCON_LOGO_DONTSHOW ||
+-	    (info->fix.type == FB_TYPE_TEXT))
++	    (info->fix.type == FB_TYPE_TEXT) || fbcon_decor_active(info, vc))
+ 		logo = 0;
+ 
+ 	if (var_to_display(p, &info->var, info))
+@@ -1260,6 +1270,11 @@ static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height,
+ 		fbcon_clear_margins(vc, 0);
+ 	}
+ 
++ 	if (fbcon_decor_active(info, vc)) {
++ 		fbcon_decor_clear(vc, info, sy, sx, height, width);
++ 		return;
++ 	}
++
+ 	/* Split blits that cross physical y_wrap boundary */
+ 
+ 	y_break = p->vrows - p->yscroll;
+@@ -1279,10 +1294,15 @@ static void fbcon_putcs(struct vc_data *vc, const unsigned short *s,
+ 	struct display *p = &fb_display[vc->vc_num];
+ 	struct fbcon_ops *ops = info->fbcon_par;
+ 
+-	if (!fbcon_is_inactive(vc, info))
+-		ops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
+-			   get_color(vc, info, scr_readw(s), 1),
+-			   get_color(vc, info, scr_readw(s), 0));
++	if (!fbcon_is_inactive(vc, info)) {
++
++		if (fbcon_decor_active(info, vc))
++			fbcon_decor_putcs(vc, info, s, count, ypos, xpos);
++		else
++			ops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
++				   get_color(vc, info, scr_readw(s), 1),
++				   get_color(vc, info, scr_readw(s), 0));
++	}
+ }
+ 
+ static void fbcon_putc(struct vc_data *vc, int c, int ypos, int xpos)
+@@ -1298,8 +1318,13 @@ static void fbcon_clear_margins(struct vc_data *vc, int bottom_only)
+ 	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ 	struct fbcon_ops *ops = info->fbcon_par;
+ 
+-	if (!fbcon_is_inactive(vc, info))
+-		ops->clear_margins(vc, info, bottom_only);
++	if (!fbcon_is_inactive(vc, info)) {
++	 	if (fbcon_decor_active(info, vc)) {
++	 		fbcon_decor_clear_margins(vc, info, bottom_only);
++ 		} else {
++			ops->clear_margins(vc, info, bottom_only);
++		}
++	}
+ }
+ 
+ static void fbcon_cursor(struct vc_data *vc, int mode)
+@@ -1819,7 +1844,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
+ 			count = vc->vc_rows;
+ 		if (softback_top)
+ 			fbcon_softback_note(vc, t, count);
+-		if (logo_shown >= 0)
++		if (logo_shown >= 0 || fbcon_decor_active(info, vc))
+ 			goto redraw_up;
+ 		switch (p->scrollmode) {
+ 		case SCROLL_MOVE:
+@@ -1912,6 +1937,8 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
+ 			count = vc->vc_rows;
+ 		if (logo_shown >= 0)
+ 			goto redraw_down;
++		if (fbcon_decor_active(info, vc))
++			goto redraw_down;
+ 		switch (p->scrollmode) {
+ 		case SCROLL_MOVE:
+ 			fbcon_redraw_blit(vc, info, p, b - 1, b - t - count,
+@@ -2060,6 +2087,13 @@ static void fbcon_bmove_rec(struct vc_data *vc, struct display *p, int sy, int s
+ 		}
+ 		return;
+ 	}
++
++	if (fbcon_decor_active(info, vc) && sy == dy && height == 1) {
++ 		/* must use slower redraw bmove to keep background pic intact */
++ 		fbcon_decor_bmove_redraw(vc, info, sy, sx, dx, width);
++ 		return;
++ 	}
++
+ 	ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx,
+ 		   height, width);
+ }
+@@ -2130,8 +2164,8 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
+ 	var.yres = virt_h * virt_fh;
+ 	x_diff = info->var.xres - var.xres;
+ 	y_diff = info->var.yres - var.yres;
+-	if (x_diff < 0 || x_diff > virt_fw ||
+-	    y_diff < 0 || y_diff > virt_fh) {
++	if ((x_diff < 0 || x_diff > virt_fw ||
++		y_diff < 0 || y_diff > virt_fh) && !vc->vc_decor.state) {
+ 		const struct fb_videomode *mode;
+ 
+ 		DPRINTK("attempting resize %ix%i\n", var.xres, var.yres);
+@@ -2167,6 +2201,21 @@ static int fbcon_switch(struct vc_data *vc)
+ 
+ 	info = registered_fb[con2fb_map[vc->vc_num]];
+ 	ops = info->fbcon_par;
++	prev_console = ops->currcon;
++	if (prev_console != -1)
++		old_info = registered_fb[con2fb_map[prev_console]];
++
++#ifdef CONFIG_FB_CON_DECOR
++	if (!fbcon_decor_active_vc(vc) && info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
++		struct vc_data *vc_curr = vc_cons[prev_console].d;
++		if (vc_curr && fbcon_decor_active_vc(vc_curr)) {
++			/* Clear the screen to avoid displaying funky colors during
++			 * palette updates. */
++			memset((u8*)info->screen_base + info->fix.line_length * info->var.yoffset,
++			       0, info->var.yres * info->fix.line_length);
++		}
++	}
++#endif
+ 
+ 	if (softback_top) {
+ 		if (softback_lines)
+@@ -2185,9 +2234,6 @@ static int fbcon_switch(struct vc_data *vc)
+ 		logo_shown = FBCON_LOGO_CANSHOW;
+ 	}
+ 
+-	prev_console = ops->currcon;
+-	if (prev_console != -1)
+-		old_info = registered_fb[con2fb_map[prev_console]];
+ 	/*
+ 	 * FIXME: If we have multiple fbdev's loaded, we need to
+ 	 * update all info->currcon.  Perhaps, we can place this
+@@ -2231,6 +2277,18 @@ static int fbcon_switch(struct vc_data *vc)
+ 			fbcon_del_cursor_timer(old_info);
+ 	}
+ 
++	if (fbcon_decor_active_vc(vc)) {
++		struct vc_data *vc_curr = vc_cons[prev_console].d;
++
++		if (!vc_curr->vc_decor.theme ||
++			strcmp(vc->vc_decor.theme, vc_curr->vc_decor.theme) ||
++			(fbcon_decor_active_nores(info, vc_curr) &&
++			 !fbcon_decor_active(info, vc_curr))) {
++			fbcon_decor_disable(vc, 0);
++			fbcon_decor_call_helper("modechange", vc->vc_num);
++		}
++	}
++
+ 	if (fbcon_is_inactive(vc, info) ||
+ 	    ops->blank_state != FB_BLANK_UNBLANK)
+ 		fbcon_del_cursor_timer(info);
+@@ -2339,15 +2397,20 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
+ 		}
+ 	}
+ 
+- 	if (!fbcon_is_inactive(vc, info)) {
++	if (!fbcon_is_inactive(vc, info)) {
+ 		if (ops->blank_state != blank) {
+ 			ops->blank_state = blank;
+ 			fbcon_cursor(vc, blank ? CM_ERASE : CM_DRAW);
+ 			ops->cursor_flash = (!blank);
+ 
+-			if (!(info->flags & FBINFO_MISC_USEREVENT))
+-				if (fb_blank(info, blank))
+-					fbcon_generic_blank(vc, info, blank);
++			if (!(info->flags & FBINFO_MISC_USEREVENT)) {
++				if (fb_blank(info, blank)) {
++					if (fbcon_decor_active(info, vc))
++						fbcon_decor_blank(vc, info, blank);
++					else
++						fbcon_generic_blank(vc, info, blank);
++				}
++			}
+ 		}
+ 
+ 		if (!blank)
+@@ -2522,13 +2585,22 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
+ 	}
+ 
+ 	if (resize) {
++		/* reset wrap/pan */
+ 		int cols, rows;
+ 
+ 		cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
+ 		rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
++
++		if (fbcon_decor_active(info, vc)) {
++			info->var.xoffset = info->var.yoffset = p->yscroll = 0;
++			cols = vc->vc_decor.twidth;
++			rows = vc->vc_decor.theight;
++		}
+ 		cols /= w;
+ 		rows /= h;
++
+ 		vc_resize(vc, cols, rows);
++
+ 		if (CON_IS_VISIBLE(vc) && softback_buf)
+ 			fbcon_update_softback(vc);
+ 	} else if (CON_IS_VISIBLE(vc)
+@@ -2657,7 +2729,11 @@ static int fbcon_set_palette(struct vc_data *vc, unsigned char *table)
+ 	int i, j, k, depth;
+ 	u8 val;
+ 
+-	if (fbcon_is_inactive(vc, info))
++	if (fbcon_is_inactive(vc, info)
++#ifdef CONFIG_FB_CON_DECOR
++			|| vc->vc_num != fg_console
++#endif
++		)
+ 		return -EINVAL;
+ 
+ 	if (!CON_IS_VISIBLE(vc))
+@@ -2683,14 +2759,56 @@ static int fbcon_set_palette(struct vc_data *vc, unsigned char *table)
+ 	} else
+ 		fb_copy_cmap(fb_default_cmap(1 << depth), &palette_cmap);
+ 
+-	return fb_set_cmap(&palette_cmap, info);
++	if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
++	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
++
++		u16 *red, *green, *blue;
++		int minlen = min(min(info->var.red.length, info->var.green.length),
++				     info->var.blue.length);
++		int h;
++
++		struct fb_cmap cmap = {
++			.start = 0,
++			.len = (1 << minlen),
++			.red = NULL,
++			.green = NULL,
++			.blue = NULL,
++			.transp = NULL
++		};
++
++		red = kmalloc(256 * sizeof(u16) * 3, GFP_KERNEL);
++
++		if (!red)
++			goto out;
++
++		green = red + 256;
++		blue = green + 256;
++		cmap.red = red;
++		cmap.green = green;
++		cmap.blue = blue;
++
++		for (i = 0; i < cmap.len; i++) {
++			red[i] = green[i] = blue[i] = (0xffff * i)/(cmap.len-1);
++		}
++
++		h = fb_set_cmap(&cmap, info);
++		fbcon_decor_fix_pseudo_pal(info, vc_cons[fg_console].d);
++		kfree(red);
++
++		return h;
++
++	} else if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
++		   info->var.bits_per_pixel == 8 && info->bgdecor.cmap.red != NULL)
++		fb_set_cmap(&info->bgdecor.cmap, info);
++
++out:	return fb_set_cmap(&palette_cmap, info);
+ }
+ 
+ static u16 *fbcon_screen_pos(struct vc_data *vc, int offset)
+ {
+ 	unsigned long p;
+ 	int line;
+-	
++
+ 	if (vc->vc_num != fg_console || !softback_lines)
+ 		return (u16 *) (vc->vc_origin + offset);
+ 	line = offset / vc->vc_size_row;
+@@ -2909,7 +3027,14 @@ static void fbcon_modechanged(struct fb_info *info)
+ 		rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ 		cols /= vc->vc_font.width;
+ 		rows /= vc->vc_font.height;
+-		vc_resize(vc, cols, rows);
++
++		if (!fbcon_decor_active_nores(info, vc)) {
++			vc_resize(vc, cols, rows);
++		} else {
++			fbcon_decor_disable(vc, 0);
++			fbcon_decor_call_helper("modechange", vc->vc_num);
++		}
++
+ 		updatescrollmode(p, info, vc);
+ 		scrollback_max = 0;
+ 		scrollback_current = 0;
+@@ -2954,7 +3079,9 @@ static void fbcon_set_all_vcs(struct fb_info *info)
+ 		rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ 		cols /= vc->vc_font.width;
+ 		rows /= vc->vc_font.height;
+-		vc_resize(vc, cols, rows);
++		if (!fbcon_decor_active_nores(info, vc)) {
++			vc_resize(vc, cols, rows);
++		}
+ 	}
+ 
+ 	if (fg != -1)
+@@ -3596,6 +3723,7 @@ static void fbcon_exit(void)
+ 		}
+ 	}
+ 
++	fbcon_decor_exit();
+ 	fbcon_has_exited = 1;
+ }
+ 
+diff --git a/drivers/video/console/fbcondecor.c b/drivers/video/console/fbcondecor.c
+new file mode 100644
+index 0000000..babc8c5
+--- /dev/null
++++ b/drivers/video/console/fbcondecor.c
+@@ -0,0 +1,555 @@
++/*
++ *  linux/drivers/video/console/fbcondecor.c -- Framebuffer console decorations
++ *
++ *  Copyright (C) 2004-2009 Michal Januszewski <michalj+fbcondecor@gmail.com>
++ *
++ *  Code based upon "Bootsplash" (C) 2001-2003
++ *       Volker Poplawski <volker@poplawski.de>,
++ *       Stefan Reinauer <stepan@suse.de>,
++ *       Steffen Winterfeldt <snwint@suse.de>,
++ *       Michael Schroeder <mls@suse.de>,
++ *       Ken Wimer <wimer@suse.de>.
++ *
++ *  Compat ioctl support by Thorsten Klein <TK@Thorsten-Klein.de>.
++ *
++ *  This file is subject to the terms and conditions of the GNU General Public
++ *  License.  See the file COPYING in the main directory of this archive for
++ *  more details.
++ *
++ */
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/fb.h>
++#include <linux/vt_kern.h>
++#include <linux/vmalloc.h>
++#include <linux/unistd.h>
++#include <linux/syscalls.h>
++#include <linux/init.h>
++#include <linux/proc_fs.h>
++#include <linux/workqueue.h>
++#include <linux/kmod.h>
++#include <linux/miscdevice.h>
++#include <linux/device.h>
++#include <linux/fs.h>
++#include <linux/compat.h>
++#include <linux/console.h>
++
++#include <asm/uaccess.h>
++#include <asm/irq.h>
++
++#include "fbcon.h"
++#include "fbcondecor.h"
++
++extern signed char con2fb_map[];
++static int fbcon_decor_enable(struct vc_data *vc);
++char fbcon_decor_path[KMOD_PATH_LEN] = "/sbin/fbcondecor_helper";
++static int initialized = 0;
++
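++/*
++ * Spawn the userspace helper as:
++ *   <helper path> 2 <cmd> <console> <fb> <theme>
++ * matching the protocol v2 calling convention described in
++ * Documentation/fb/fbcondecor.txt.
++ */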
++int fbcon_decor_call_helper(char* cmd, unsigned short vc)
++{
++	char *envp[] = {
++		"HOME=/",
++		"PATH=/sbin:/bin",
++		NULL
++	};
++
++	char tfb[5];
++	char tcons[5];
++	unsigned char fb = (int) con2fb_map[vc];
++
++	char *argv[] = {
++		fbcon_decor_path,
++		"2",
++		cmd,
++		tcons,
++		tfb,
++		vc_cons[vc].d->vc_decor.theme,
++		NULL
++	};
++
++	snprintf(tfb,5,"%d",fb);
++	snprintf(tcons,5,"%d",vc);
++
++	return call_usermodehelper(fbcon_decor_path, argv, envp, UMH_WAIT_EXEC);
++}
++
++/* Disables fbcondecor on a virtual console; called with console sem held. */
++int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw)
++{
++	struct fb_info* info;
++
++	if (!vc->vc_decor.state)
++		return -EINVAL;
++
++	info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++	if (info == NULL)
++		return -EINVAL;
++
++	vc->vc_decor.state = 0;
++	vc_resize(vc, info->var.xres / vc->vc_font.width,
++		  info->var.yres / vc->vc_font.height);
++
++	if (fg_console == vc->vc_num && redraw) {
++		redraw_screen(vc, 0);
++		update_region(vc, vc->vc_origin +
++			      vc->vc_size_row * vc->vc_top,
++			      vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
++	}
++
++	printk(KERN_INFO "fbcondecor: switched decor state to 'off' on console %d\n",
++			 vc->vc_num);
++
++	return 0;
++}
++
++/* Enables fbcondecor on a virtual console; called with console sem held. */
++static int fbcon_decor_enable(struct vc_data *vc)
++{
++	struct fb_info* info;
++
++	info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++	if (vc->vc_decor.twidth == 0 || vc->vc_decor.theight == 0 ||
++	    info == NULL || vc->vc_decor.state || (!info->bgdecor.data &&
++	    vc->vc_num == fg_console))
++		return -EINVAL;
++
++	vc->vc_decor.state = 1;
++	vc_resize(vc, vc->vc_decor.twidth / vc->vc_font.width,
++		  vc->vc_decor.theight / vc->vc_font.height);
++
++	if (fg_console == vc->vc_num) {
++		redraw_screen(vc, 0);
++		update_region(vc, vc->vc_origin +
++			      vc->vc_size_row * vc->vc_top,
++			      vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
++		fbcon_decor_clear_margins(vc, info, 0);
++	}
++
++	printk(KERN_INFO "fbcondecor: switched decor state to 'on' on console %d\n",
++			 vc->vc_num);
++
++	return 0;
++}
++
++static inline int fbcon_decor_ioctl_dosetstate(struct vc_data *vc, unsigned int state, unsigned char origin)
++{
++	int ret;
++
++//	if (origin == FBCON_DECOR_IO_ORIG_USER)
++		console_lock();
++	if (!state)
++		ret = fbcon_decor_disable(vc, 1);
++	else
++		ret = fbcon_decor_enable(vc);
++//	if (origin == FBCON_DECOR_IO_ORIG_USER)
++		console_unlock();
++
++	return ret;
++}
++
++static inline void fbcon_decor_ioctl_dogetstate(struct vc_data *vc, unsigned int *state)
++{
++	*state = vc->vc_decor.state;
++}
++
++static int fbcon_decor_ioctl_dosetcfg(struct vc_data *vc, struct vc_decor *cfg, unsigned char origin)
++{
++	struct fb_info *info;
++	int len;
++	char *tmp;
++
++	info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++	if (info == NULL || !cfg->twidth || !cfg->theight ||
++	    cfg->tx + cfg->twidth  > info->var.xres ||
++	    cfg->ty + cfg->theight > info->var.yres)
++		return -EINVAL;
++
++	len = strlen_user(cfg->theme);
++	if (!len || len > FBCON_DECOR_THEME_LEN)
++		return -EINVAL;
++	tmp = kmalloc(len, GFP_KERNEL);
++	if (!tmp)
++		return -ENOMEM;
++	if (copy_from_user(tmp, (void __user *)cfg->theme, len)) {
++		kfree(tmp);
++		return -EFAULT;
++	}
++	cfg->theme = tmp;
++	cfg->state = 0;
++
++	/* If this ioctl is a response to a request from kernel, the console sem
++	 * is already held; we also don't need to disable decor because either the
++	 * new config and background picture will be successfully loaded, and the
++	 * decor will stay on, or in case of a failure it'll be turned off in fbcon. */
++//	if (origin == FBCON_DECOR_IO_ORIG_USER) {
++		console_lock();
++		if (vc->vc_decor.state)
++			fbcon_decor_disable(vc, 1);
++//	}
++
++	if (vc->vc_decor.theme)
++		kfree(vc->vc_decor.theme);
++
++	vc->vc_decor = *cfg;
++
++//	if (origin == FBCON_DECOR_IO_ORIG_USER)
++		console_unlock();
++
++	printk(KERN_INFO "fbcondecor: console %d using theme '%s'\n",
++			 vc->vc_num, vc->vc_decor.theme);
++	return 0;
++}
++
++static int fbcon_decor_ioctl_dogetcfg(struct vc_data *vc, struct vc_decor *decor)
++{
++	char __user *tmp;
++
++	tmp = decor->theme;
++	*decor = vc->vc_decor;
++	decor->theme = tmp;
++
++	if (vc->vc_decor.theme) {
++		if (copy_to_user(tmp, vc->vc_decor.theme, strlen(vc->vc_decor.theme) + 1))
++			return -EFAULT;
++	} else
++		if (put_user(0, tmp))
++			return -EFAULT;
++
++	return 0;
++}
++
++static int fbcon_decor_ioctl_dosetpic(struct vc_data *vc, struct fb_image *img, unsigned char origin)
++{
++	struct fb_info *info;
++	int len;
++	u8 *tmp;
++
++	if (vc->vc_num != fg_console)
++		return -EINVAL;
++
++	info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++	if (info == NULL)
++		return -EINVAL;
++
++	if (img->width != info->var.xres || img->height != info->var.yres) {
++		printk(KERN_ERR "fbcondecor: picture dimensions mismatch\n");
++		printk(KERN_ERR "%dx%d vs %dx%d\n", img->width, img->height, info->var.xres, info->var.yres);
++		return -EINVAL;
++	}
++
++	if (img->depth != info->var.bits_per_pixel) {
++		printk(KERN_ERR "fbcondecor: picture depth mismatch\n");
++		return -EINVAL;
++	}
++
++	if (img->depth == 8) {
++		if (!img->cmap.len || !img->cmap.red || !img->cmap.green ||
++		    !img->cmap.blue)
++			return -EINVAL;
++
++		tmp = vmalloc(img->cmap.len * 3 * 2);
++		if (!tmp)
++			return -ENOMEM;
++
++		if (copy_from_user(tmp,
++			    	   (void __user*)img->cmap.red, (img->cmap.len << 1)) ||
++		    copy_from_user(tmp + (img->cmap.len << 1),
++			    	   (void __user*)img->cmap.green, (img->cmap.len << 1)) ||
++		    copy_from_user(tmp + (img->cmap.len << 2),
++			    	   (void __user*)img->cmap.blue, (img->cmap.len << 1))) {
++			vfree(tmp);
++			return -EFAULT;
++		}
++
++		img->cmap.transp = NULL;
++		img->cmap.red = (u16*)tmp;
++		img->cmap.green = img->cmap.red + img->cmap.len;
++		img->cmap.blue = img->cmap.green + img->cmap.len;
++	} else {
++		img->cmap.red = NULL;
++	}
++
++	len = ((img->depth + 7) >> 3) * img->width * img->height;
++
++	/*
++	 * Allocate an additional byte so that we never go outside of the
++	 * buffer boundaries in the rendering functions in a 24 bpp mode.
++	 */
++	tmp = vmalloc(len + 1);
++
++	if (!tmp)
++		goto out;
++
++	if (copy_from_user(tmp, (void __user*)img->data, len))
++		goto out;
++
++	img->data = tmp;
++
++	/* If this ioctl is a response to a request from kernel, the console sem
++	 * is already held. */
++//	if (origin == FBCON_DECOR_IO_ORIG_USER)
++		console_lock();
++
++	if (info->bgdecor.data)
++		vfree((u8*)info->bgdecor.data);
++	if (info->bgdecor.cmap.red)
++		vfree(info->bgdecor.cmap.red);
++
++	info->bgdecor = *img;
++
++	if (fbcon_decor_active_vc(vc) && fg_console == vc->vc_num) {
++		redraw_screen(vc, 0);
++		update_region(vc, vc->vc_origin +
++			      vc->vc_size_row * vc->vc_top,
++			      vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
++		fbcon_decor_clear_margins(vc, info, 0);
++	}
++
++//	if (origin == FBCON_DECOR_IO_ORIG_USER)
++		console_unlock();
++
++	return 0;
++
++out:	if (img->cmap.red)
++		vfree(img->cmap.red);
++
++	if (tmp)
++		vfree(tmp);
++	return -ENOMEM;
++}
++
++static long fbcon_decor_ioctl(struct file *filp, u_int cmd, u_long arg)
++{
++	struct fbcon_decor_iowrapper __user *wrapper = (void __user*) arg;
++	struct vc_data *vc = NULL;
++	unsigned short vc_num = 0;
++	unsigned char origin = 0;
++	void __user *data = NULL;
++
++	if (!access_ok(VERIFY_READ, wrapper,
++			sizeof(struct fbcon_decor_iowrapper)))
++		return -EFAULT;
++
++	__get_user(vc_num, &wrapper->vc);
++	__get_user(origin, &wrapper->origin);
++	__get_user(data, &wrapper->data);
++
++	if (!vc_cons_allocated(vc_num))
++		return -EINVAL;
++
++	vc = vc_cons[vc_num].d;
++
++	switch (cmd) {
++	case FBIOCONDECOR_SETPIC:
++	{
++		struct fb_image img;
++		if (copy_from_user(&img, (struct fb_image __user *)data, sizeof(struct fb_image)))
++			return -EFAULT;
++
++		return fbcon_decor_ioctl_dosetpic(vc, &img, origin);
++	}
++	case FBIOCONDECOR_SETCFG:
++	{
++		struct vc_decor cfg;
++		if (copy_from_user(&cfg, (struct vc_decor __user *)data, sizeof(struct vc_decor)))
++			return -EFAULT;
++
++		return fbcon_decor_ioctl_dosetcfg(vc, &cfg, origin);
++	}
++	case FBIOCONDECOR_GETCFG:
++	{
++		int rval;
++		struct vc_decor cfg;
++
++		if (copy_from_user(&cfg, (struct vc_decor __user *)data, sizeof(struct vc_decor)))
++			return -EFAULT;
++
++		rval = fbcon_decor_ioctl_dogetcfg(vc, &cfg);
++
++		if (copy_to_user(data, &cfg, sizeof(struct vc_decor)))
++			return -EFAULT;
++		return rval;
++	}
++	case FBIOCONDECOR_SETSTATE:
++	{
++		unsigned int state = 0;
++		if (get_user(state, (unsigned int __user *)data))
++			return -EFAULT;
++		return fbcon_decor_ioctl_dosetstate(vc, state, origin);
++	}
++	case FBIOCONDECOR_GETSTATE:
++	{
++		unsigned int state = 0;
++		fbcon_decor_ioctl_dogetstate(vc, &state);
++		return put_user(state, (unsigned int __user *)data);
++	}
++
++	default:
++		return -ENOIOCTLCMD;
++	}
++}
++
++#ifdef CONFIG_COMPAT
++
++static long fbcon_decor_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) {
++
++	struct fbcon_decor_iowrapper32 __user *wrapper = (void __user *)arg;
++	struct vc_data *vc = NULL;
++	unsigned short vc_num = 0;
++	unsigned char origin = 0;
++	compat_uptr_t data_compat = 0;
++	void __user *data = NULL;
++
++	if (!access_ok(VERIFY_READ, wrapper,
++                       sizeof(struct fbcon_decor_iowrapper32)))
++		return -EFAULT;
++
++	__get_user(vc_num, &wrapper->vc);
++	__get_user(origin, &wrapper->origin);
++	__get_user(data_compat, &wrapper->data);
++	data = compat_ptr(data_compat);
++
++	if (!vc_cons_allocated(vc_num))
++		return -EINVAL;
++
++	vc = vc_cons[vc_num].d;
++
++	switch (cmd) {
++	case FBIOCONDECOR_SETPIC32:
++	{
++		struct fb_image32 img_compat;
++		struct fb_image img;
++
++		if (copy_from_user(&img_compat, (struct fb_image32 __user *)data, sizeof(struct fb_image32)))
++			return -EFAULT;
++
++		fb_image_from_compat(img, img_compat);
++
++		return fbcon_decor_ioctl_dosetpic(vc, &img, origin);
++	}
++
++	case FBIOCONDECOR_SETCFG32:
++	{
++		struct vc_decor32 cfg_compat;
++		struct vc_decor cfg;
++
++		if (copy_from_user(&cfg_compat, (struct vc_decor32 __user *)data, sizeof(struct vc_decor32)))
++			return -EFAULT;
++
++		vc_decor_from_compat(cfg, cfg_compat);
++
++		return fbcon_decor_ioctl_dosetcfg(vc, &cfg, origin);
++	}
++
++	case FBIOCONDECOR_GETCFG32:
++	{
++		int rval;
++		struct vc_decor32 cfg_compat;
++		struct vc_decor cfg;
++
++		if (copy_from_user(&cfg_compat, (struct vc_decor32 __user *)data, sizeof(struct vc_decor32)))
++			return -EFAULT;
++		cfg.theme = compat_ptr(cfg_compat.theme);
++
++		rval = fbcon_decor_ioctl_dogetcfg(vc, &cfg);
++
++		vc_decor_to_compat(cfg_compat, cfg);
++
++		if (copy_to_user((struct vc_decor32 __user *)data, &cfg_compat, sizeof(struct vc_decor32)))
++			return -EFAULT;
++		return rval;
++	}
++
++	case FBIOCONDECOR_SETSTATE32:
++	{
++		compat_uint_t state_compat = 0;
++		unsigned int state = 0;
++
++		if (get_user(state_compat, (compat_uint_t __user *)data))
++			return -EFAULT;
++
++		state = (unsigned int)state_compat;
++
++		return fbcon_decor_ioctl_dosetstate(vc, state, origin);
++	}
++
++	case FBIOCONDECOR_GETSTATE32:
++	{
++		compat_uint_t state_compat = 0;
++		unsigned int state = 0;
++
++		fbcon_decor_ioctl_dogetstate(vc, &state);
++		state_compat = (compat_uint_t)state;
++
++		return put_user(state_compat, (compat_uint_t __user *)data);
++	}
++
++	default:
++		return -ENOIOCTLCMD;
++	}
++}
++#else
++  #define fbcon_decor_compat_ioctl NULL
++#endif
++
++static struct file_operations fbcon_decor_ops = {
++	.owner = THIS_MODULE,
++	.unlocked_ioctl = fbcon_decor_ioctl,
++	.compat_ioctl = fbcon_decor_compat_ioctl
++};
++
++static struct miscdevice fbcon_decor_dev = {
++	.minor = MISC_DYNAMIC_MINOR,
++	.name = "fbcondecor",
++	.fops = &fbcon_decor_ops
++};
++
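++/* Drop all background picture references and reset per-console decor state. */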
++void fbcon_decor_reset(void)
++{
++	int i;
++
++	for (i = 0; i < num_registered_fb; i++) {
++		registered_fb[i]->bgdecor.data = NULL;
++		registered_fb[i]->bgdecor.cmap.red = NULL;
++	}
++
++	for (i = 0; i < MAX_NR_CONSOLES && vc_cons[i].d; i++) {
++		vc_cons[i].d->vc_decor.state = vc_cons[i].d->vc_decor.twidth =
++						vc_cons[i].d->vc_decor.theight = 0;
++		vc_cons[i].d->vc_decor.theme = NULL;
++	}
++
++	return;
++}
++
++int fbcon_decor_init(void)
++{
++	int i;
++
++	fbcon_decor_reset();
++
++	if (initialized)
++		return 0;
++
++	i = misc_register(&fbcon_decor_dev);
++	if (i) {
++		printk(KERN_ERR "fbcondecor: failed to register device\n");
++		return i;
++	}
++
++	fbcon_decor_call_helper("init", 0);
++	initialized = 1;
++	return 0;
++}
++
++int fbcon_decor_exit(void)
++{
++	fbcon_decor_reset();
++	return 0;
++}
++
++EXPORT_SYMBOL(fbcon_decor_path);
+diff --git a/drivers/video/console/fbcondecor.h b/drivers/video/console/fbcondecor.h
+new file mode 100644
+index 0000000..3b3724b
+--- /dev/null
++++ b/drivers/video/console/fbcondecor.h
+@@ -0,0 +1,78 @@
++/* 
++ *  linux/drivers/video/console/fbcondecor.h -- Framebuffer Console Decoration headers
++ *
++ *  Copyright (C) 2004 Michal Januszewski <michalj+fbcondecor@gmail.com>
++ *
++ */
++
++#ifndef __FBCON_DECOR_H
++#define __FBCON_DECOR_H
++
++#ifndef _LINUX_FB_H
++#include <linux/fb.h>
++#endif
++
++/* This is needed for vc_cons in fbcmap.c */
++#include <linux/vt_kern.h>
++
++struct fb_cursor;
++struct fb_info;
++struct vc_data;
++
++#ifdef CONFIG_FB_CON_DECOR
++/* fbcondecor.c */
++int fbcon_decor_init(void);
++int fbcon_decor_exit(void);
++int fbcon_decor_call_helper(char* cmd, unsigned short cons);
++int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw);
++
++/* cfbcondecor.c */
++void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info, const unsigned short *s, int count, int yy, int xx);
++void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor);
++void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width);
++void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info, int bottom_only);
++void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank);
++void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, int sx, int dx, int width);
++void fbcon_decor_copy(u8 *dst, u8 *src, int height, int width, int linebytes, int srclinesbytes, int bpp);
++void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc);
++
++/* vt.c */
++void acquire_console_sem(void);
++void release_console_sem(void);
++void do_unblank_screen(int entering_gfx);
++
++/* struct vc_data *y */
++#define fbcon_decor_active_vc(y) (y->vc_decor.state && y->vc_decor.theme)
++
++/* struct fb_info *x, struct vc_data *y */
++#define fbcon_decor_active_nores(x,y) (x->bgdecor.data && fbcon_decor_active_vc(y))
++
++/* struct fb_info *x, struct vc_data *y */
++#define fbcon_decor_active(x,y) (fbcon_decor_active_nores(x,y) &&		\
++			      x->bgdecor.width == x->var.xres && 	\
++			      x->bgdecor.height == x->var.yres &&	\
++			      x->bgdecor.depth == x->var.bits_per_pixel)
++
++
++#else /* CONFIG_FB_CON_DECOR */
++
++static inline void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info, const unsigned short *s, int count, int yy, int xx) {}
++static inline void fbcon_decor_putc(struct vc_data *vc, struct fb_info *info, int c, int ypos, int xpos) {}
++static inline void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor) {}
++static inline void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) {}
++static inline void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info, int bottom_only) {}
++static inline void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank) {}
++static inline void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, int sx, int dx, int width) {}
++static inline void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc) {}
++static inline int fbcon_decor_call_helper(char* cmd, unsigned short cons) { return 0; }
++static inline int fbcon_decor_init(void) { return 0; }
++static inline int fbcon_decor_exit(void) { return 0; }
++static inline int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw) { return 0; }
++
++#define fbcon_decor_active_vc(y) (0)
++#define fbcon_decor_active_nores(x,y) (0)
++#define fbcon_decor_active(x,y) (0)
++
++#endif /* CONFIG_FB_CON_DECOR */
++
++#endif /* __FBCON_DECOR_H */
+diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
+index e1f4727..2952e33 100644
+--- a/drivers/video/fbdev/Kconfig
++++ b/drivers/video/fbdev/Kconfig
+@@ -1204,7 +1204,6 @@ config FB_MATROX
+ 	select FB_CFB_FILLRECT
+ 	select FB_CFB_COPYAREA
+ 	select FB_CFB_IMAGEBLIT
+-	select FB_TILEBLITTING
+ 	select FB_MACMODES if PPC_PMAC
+ 	---help---
+ 	  Say Y here if you have a Matrox Millennium, Matrox Millennium II,
+diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
+index f89245b..05e036c 100644
+--- a/drivers/video/fbdev/core/fbcmap.c
++++ b/drivers/video/fbdev/core/fbcmap.c
+@@ -17,6 +17,8 @@
+ #include <linux/slab.h>
+ #include <linux/uaccess.h>
+ 
++#include "../../console/fbcondecor.h"
++
+ static u16 red2[] __read_mostly = {
+     0x0000, 0xaaaa
+ };
+@@ -249,14 +251,17 @@ int fb_set_cmap(struct fb_cmap *cmap, struct fb_info *info)
+ 			if (transp)
+ 				htransp = *transp++;
+ 			if (info->fbops->fb_setcolreg(start++,
+-						      hred, hgreen, hblue,
++						      hred, hgreen, hblue, 
+ 						      htransp, info))
+ 				break;
+ 		}
+ 	}
+-	if (rc == 0)
++	if (rc == 0) {
+ 		fb_copy_cmap(cmap, &info->cmap);
+-
++		if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
++		    info->fix.visual == FB_VISUAL_DIRECTCOLOR)
++			fbcon_decor_fix_pseudo_pal(info, vc_cons[fg_console].d);
++	}
+ 	return rc;
+ }
+ 
+diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
+index b6d5008..d6703f2 100644
+--- a/drivers/video/fbdev/core/fbmem.c
++++ b/drivers/video/fbdev/core/fbmem.c
+@@ -1250,15 +1250,6 @@ struct fb_fix_screeninfo32 {
+ 	u16			reserved[3];
+ };
+ 
+-struct fb_cmap32 {
+-	u32			start;
+-	u32			len;
+-	compat_caddr_t	red;
+-	compat_caddr_t	green;
+-	compat_caddr_t	blue;
+-	compat_caddr_t	transp;
+-};
+-
+ static int fb_getput_cmap(struct fb_info *info, unsigned int cmd,
+ 			  unsigned long arg)
+ {
+diff --git a/include/linux/console_decor.h b/include/linux/console_decor.h
+new file mode 100644
+index 0000000..04b8d80
+--- /dev/null
++++ b/include/linux/console_decor.h
+@@ -0,0 +1,46 @@
++#ifndef _LINUX_CONSOLE_DECOR_H_
++#define _LINUX_CONSOLE_DECOR_H_ 1
++
++/* A structure used by the framebuffer console decorations (drivers/video/console/fbcondecor.c) */
++struct vc_decor {
++	__u8 bg_color;				/* The color that is to be treated as transparent */
++	__u8 state;				/* Current decor state: 0 = off, 1 = on */
++	__u16 tx, ty;				/* Top left corner coordinates of the text field */
++	__u16 twidth, theight;			/* Width and height of the text field */
++	char* theme;
++};
++
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++#include <linux/compat.h>
++
++struct vc_decor32 {
++	__u8 bg_color;				/* The color that is to be treated as transparent */
++	__u8 state;				/* Current decor state: 0 = off, 1 = on */
++	__u16 tx, ty;				/* Top left corner coordinates of the text field */
++	__u16 twidth, theight;			/* Width and height of the text field */
++	compat_uptr_t theme;
++};
++
++#define vc_decor_from_compat(to, from) \
++	(to).bg_color = (from).bg_color; \
++	(to).state    = (from).state; \
++	(to).tx       = (from).tx; \
++	(to).ty       = (from).ty; \
++	(to).twidth   = (from).twidth; \
++	(to).theight  = (from).theight; \
++	(to).theme    = compat_ptr((from).theme)
++
++#define vc_decor_to_compat(to, from) \
++	(to).bg_color = (from).bg_color; \
++	(to).state    = (from).state; \
++	(to).tx       = (from).tx; \
++	(to).ty       = (from).ty; \
++	(to).twidth   = (from).twidth; \
++	(to).theight  = (from).theight; \
++	(to).theme    = ptr_to_compat((from).theme)
++
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
++#endif
+diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
+index 7f0c329..98f5d60 100644
+--- a/include/linux/console_struct.h
++++ b/include/linux/console_struct.h
+@@ -19,6 +19,7 @@
+ struct vt_struct;
+ 
+ #define NPAR 16
++#include <linux/console_decor.h>
+ 
+ struct vc_data {
+ 	struct tty_port port;			/* Upper level data */
+@@ -107,6 +108,8 @@ struct vc_data {
+ 	unsigned long	vc_uni_pagedir;
+ 	unsigned long	*vc_uni_pagedir_loc;  /* [!] Location of uni_pagedir variable for this console */
+ 	bool vc_panic_force_write; /* when oops/panic this VC can accept forced output/blanking */
++
++	struct vc_decor vc_decor;
+ 	/* additional information is in vt_kern.h */
+ };
+ 
+diff --git a/include/linux/fb.h b/include/linux/fb.h
+index fe6ac95..1e36b03 100644
+--- a/include/linux/fb.h
++++ b/include/linux/fb.h
+@@ -219,6 +219,34 @@ struct fb_deferred_io {
+ };
+ #endif
+ 
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++struct fb_image32 {
++	__u32 dx;			/* Where to place image */
++	__u32 dy;
++	__u32 width;			/* Size of image */
++	__u32 height;
++	__u32 fg_color;			/* Only used when a mono bitmap */
++	__u32 bg_color;
++	__u8  depth;			/* Depth of the image */
++	const compat_uptr_t data;	/* Pointer to image data */
++	struct fb_cmap32 cmap;		/* color map info */
++};
++
++#define fb_image_from_compat(to, from) \
++	(to).dx       = (from).dx; \
++	(to).dy       = (from).dy; \
++	(to).width    = (from).width; \
++	(to).height   = (from).height; \
++	(to).fg_color = (from).fg_color; \
++	(to).bg_color = (from).bg_color; \
++	(to).depth    = (from).depth; \
++	(to).data     = compat_ptr((from).data); \
++	fb_cmap_from_compat((to).cmap, (from).cmap)
++
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
+ /*
+  * Frame buffer operations
+  *
+@@ -489,6 +517,9 @@ struct fb_info {
+ #define FBINFO_STATE_SUSPENDED	1
+ 	u32 state;			/* Hardware state i.e suspend */
+ 	void *fbcon_par;                /* fbcon use-only private area */
++
++	struct fb_image bgdecor;
++
+ 	/* From here on everything is device dependent */
+ 	void *par;
+ 	/* we need the PCI or similar aperture base/size not
+diff --git a/include/uapi/linux/fb.h b/include/uapi/linux/fb.h
+index fb795c3..dc77a03 100644
+--- a/include/uapi/linux/fb.h
++++ b/include/uapi/linux/fb.h
+@@ -8,6 +8,25 @@
+ 
+ #define FB_MAX			32	/* sufficient for now */
+ 
++struct fbcon_decor_iowrapper
++{
++	unsigned short vc;		/* Virtual console */
++	unsigned char origin;		/* Point of origin of the request */
++	void *data;
++};
++
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++#include <linux/compat.h>
++struct fbcon_decor_iowrapper32
++{
++	unsigned short vc;		/* Virtual console */
++	unsigned char origin;		/* Point of origin of the request */
++	compat_uptr_t data;
++};
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
+ /* ioctls
+    0x46 is 'F'								*/
+ #define FBIOGET_VSCREENINFO	0x4600
+@@ -35,6 +54,25 @@
+ #define FBIOGET_DISPINFO        0x4618
+ #define FBIO_WAITFORVSYNC	_IOW('F', 0x20, __u32)
+ 
++#define FBIOCONDECOR_SETCFG	_IOWR('F', 0x19, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_GETCFG	_IOR('F', 0x1A, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_SETSTATE	_IOWR('F', 0x1B, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_GETSTATE	_IOR('F', 0x1C, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_SETPIC 	_IOWR('F', 0x1D, struct fbcon_decor_iowrapper)
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++#define FBIOCONDECOR_SETCFG32	_IOWR('F', 0x19, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_GETCFG32	_IOR('F', 0x1A, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_SETSTATE32	_IOWR('F', 0x1B, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_GETSTATE32	_IOR('F', 0x1C, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_SETPIC32	_IOWR('F', 0x1D, struct fbcon_decor_iowrapper32)
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
++#define FBCON_DECOR_THEME_LEN		128	/* Maximum length of a theme name */
++#define FBCON_DECOR_IO_ORIG_KERNEL	0	/* Kernel ioctl origin */
++#define FBCON_DECOR_IO_ORIG_USER	1	/* User ioctl origin */
++ 
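++/*
++ * Illustrative userspace call (a hedged sketch, not part of the original
++ * patch; the device node name follows the "fbcondecor" misc device
++ * registered in fbcondecor.c and is an assumption here):
++ *
++ *	unsigned int state = 1;
++ *	struct fbcon_decor_iowrapper wrap = {
++ *		.vc	= 0,				/* first console */
++ *		.origin	= FBCON_DECOR_IO_ORIG_USER,
++ *		.data	= &state,
++ *	};
++ *	int fd = open("/dev/fbcondecor", O_RDWR);
++ *	ioctl(fd, FBIOCONDECOR_SETSTATE, &wrap);	/* enable the decor */
++ */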
+ #define FB_TYPE_PACKED_PIXELS		0	/* Packed Pixels	*/
+ #define FB_TYPE_PLANES			1	/* Non interleaved planes */
+ #define FB_TYPE_INTERLEAVED_PLANES	2	/* Interleaved planes	*/
+@@ -277,6 +315,29 @@ struct fb_var_screeninfo {
+ 	__u32 reserved[4];		/* Reserved for future compatibility */
+ };
+ 
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++struct fb_cmap32 {
++	__u32 start;
++	__u32 len;			/* Number of entries */
++	compat_uptr_t red;		/* Red values	*/
++	compat_uptr_t green;
++	compat_uptr_t blue;
++	compat_uptr_t transp;		/* transparency, can be NULL */
++};
++
++#define fb_cmap_from_compat(to, from) \
++	(to).start  = (from).start; \
++	(to).len    = (from).len; \
++	(to).red    = compat_ptr((from).red); \
++	(to).green  = compat_ptr((from).green); \
++	(to).blue   = compat_ptr((from).blue); \
++	(to).transp = compat_ptr((from).transp)
++
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
++
+ struct fb_cmap {
+ 	__u32 start;			/* First entry	*/
+ 	__u32 len;			/* Number of entries */
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 74f5b58..6386ab0 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -146,6 +146,10 @@ static const int cap_last_cap = CAP_LAST_CAP;
+ static unsigned long hung_task_timeout_max = (LONG_MAX/HZ);
+ #endif
+ 
++#ifdef CONFIG_FB_CON_DECOR
++extern char fbcon_decor_path[];
++#endif
++
+ #ifdef CONFIG_INOTIFY_USER
+ #include <linux/inotify.h>
+ #endif
+@@ -255,6 +259,15 @@ static struct ctl_table sysctl_base_table[] = {
+ 		.mode		= 0555,
+ 		.child		= dev_table,
+ 	},
++#ifdef CONFIG_FB_CON_DECOR
++	{
++		.procname	= "fbcondecor",
++		.data		= &fbcon_decor_path,
++		.maxlen		= KMOD_PATH_LEN,
++		.mode		= 0644,
++		.proc_handler	= &proc_dostring,
++	},
++#endif
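++	/*
++	 * An illustrative note (not part of the original patch): being
++	 * registered in sysctl_base_table, this entry should show up as
++	 * /proc/sys/fbcondecor, so the helper path can be set with e.g.
++	 *
++	 *	echo /sbin/fbcondecor_helper > /proc/sys/fbcondecor
++	 *
++	 * where the helper location is an assumption.
++	 */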
+ 	{ }
+ };
+ 


^ permalink raw reply related	[flat|nested] 19+ messages in thread

* [gentoo-commits] proj/linux-patches:3.19 commit in: /
@ 2015-02-14  0:12 Mike Pagano
  0 siblings, 0 replies; 19+ messages in thread
From: Mike Pagano @ 2015-02-14  0:12 UTC (permalink / raw
  To: gentoo-commits

commit:     349dbcf6e1be928dd8bd03acf7d6d388b2232aee
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Feb 14 00:12:45 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Feb 14 00:12:45 2015 +0000
URL:        http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=349dbcf6

BFQ v7r7 patches for 3.19

---
 0000_README                                        |   12 +
 ...oups-kconfig-build-bits-for-BFQ-v7r7-3.19.patch |  104 +
 ...ntroduce-the-BFQ-v7r7-I-O-sched-for-3.19.patch1 | 6966 ++++++++++++++++++++
 ...ly-Queue-Merge-EQM-to-BFQ-v7r7-for-3.19.0.patch | 1222 ++++
 4 files changed, 8304 insertions(+)

diff --git a/0000_README b/0000_README
index 3fa77db..35025ab 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,18 @@ Patch:  5000_enable-additional-cpu-optimizations-for-gcc.patch
 From:   https://github.com/graysky2/kernel_gcc_patch/
 Desc:   Kernel patch enables gcc < v4.9 optimizations for additional CPUs.
 
+Patch:  5001_block-cgroups-kconfig-build-bits-for-BFQ-v7r7-3.19.patch
+From:   http://algo.ing.unimo.it/people/paolo/disk_sched/
+Desc:   BFQ v7r7 patch 1 for 3.19: Build, cgroups and kconfig bits
+
+Patch:  5002_block-introduce-the-BFQ-v7r7-I-O-sched-for-3.19.patch1
+From:   http://algo.ing.unimo.it/people/paolo/disk_sched/
+Desc:   BFQ v7r7 patch 2 for 3.19: BFQ Scheduler
+
+Patch:  5003_block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r7-for-3.19.0.patch
+From:   http://algo.ing.unimo.it/people/paolo/disk_sched/
+Desc:   BFQ v7r7 patch 3 for 3.19: Early Queue Merge (EQM)
+
 Patch:  5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
 From:   https://github.com/graysky2/kernel_gcc_patch/
 Desc:   Kernel patch enables gcc >= v4.9 optimizations for additional CPUs.

diff --git a/5001_block-cgroups-kconfig-build-bits-for-BFQ-v7r7-3.19.patch b/5001_block-cgroups-kconfig-build-bits-for-BFQ-v7r7-3.19.patch
new file mode 100644
index 0000000..b36d572
--- /dev/null
+++ b/5001_block-cgroups-kconfig-build-bits-for-BFQ-v7r7-3.19.patch
@@ -0,0 +1,104 @@
+From a828d56aebc0735676be23734cccf31e279c0d1b Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@unimore.it>
+Date: Mon, 8 Dec 2014 16:04:25 +0100
+Subject: [PATCH 1/3] block: cgroups, kconfig, build bits for BFQ-v7r7-3.19
+
+Update Kconfig.iosched and do the related Makefile changes to include
+kernel configuration options for BFQ. Also add the bfqio controller
+to the cgroups subsystem.
+
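+As a quick orientation (an illustrative fragment, not part of this
+patch): a kernel built with all the options below enabled would carry
+a .config along these lines:
+
+    CONFIG_IOSCHED_BFQ=y
+    CONFIG_CGROUP_BFQIO=y
+    CONFIG_DEFAULT_BFQ=y
+    CONFIG_DEFAULT_IOSCHED="bfq"
+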
+Signed-off-by: Paolo Valente <paolo.valente@unimore.it>
+Signed-off-by: Arianna Avanzini <avanzini.arianna@gmail.com>
+---
+ block/Kconfig.iosched         | 32 ++++++++++++++++++++++++++++++++
+ block/Makefile                |  1 +
+ include/linux/cgroup_subsys.h |  4 ++++
+ 3 files changed, 37 insertions(+)
+
+diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
+index 421bef9..0ee5f0f 100644
+--- a/block/Kconfig.iosched
++++ b/block/Kconfig.iosched
+@@ -39,6 +39,27 @@ config CFQ_GROUP_IOSCHED
+ 	---help---
+ 	  Enable group IO scheduling in CFQ.
+ 
++config IOSCHED_BFQ
++	tristate "BFQ I/O scheduler"
++	default n
++	---help---
++	  The BFQ I/O scheduler tries to distribute bandwidth among
++	  all processes according to their weights.
++	  It aims at distributing the bandwidth as desired, independently of
++	  the disk parameters and with any workload. It also tries to
++	  guarantee low latency to interactive and soft real-time
++	  applications. If compiled built-in (saying Y here), BFQ can
++	  be configured to support hierarchical scheduling.
++
++config CGROUP_BFQIO
++	bool "BFQ hierarchical scheduling support"
++	depends on CGROUPS && IOSCHED_BFQ=y
++	default n
++	---help---
++	  Enable hierarchical scheduling in BFQ, using the cgroups
++	  filesystem interface.  The name of the subsystem will be
++	  bfqio.
++
+ choice
+ 	prompt "Default I/O scheduler"
+ 	default DEFAULT_CFQ
+@@ -52,6 +73,16 @@ choice
+ 	config DEFAULT_CFQ
+ 		bool "CFQ" if IOSCHED_CFQ=y
+ 
++	config DEFAULT_BFQ
++		bool "BFQ" if IOSCHED_BFQ=y
++		help
++		  Selects BFQ as the I/O scheduler which will be used
++		  by default for all block devices.
++		  The BFQ I/O scheduler aims at distributing the bandwidth
++		  as desired, independently of the disk parameters and with
++		  any workload. It also tries to guarantee low latency to
++		  interactive and soft real-time applications.
++
+ 	config DEFAULT_NOOP
+ 		bool "No-op"
+ 
+@@ -61,6 +92,7 @@ config DEFAULT_IOSCHED
+ 	string
+ 	default "deadline" if DEFAULT_DEADLINE
+ 	default "cfq" if DEFAULT_CFQ
++	default "bfq" if DEFAULT_BFQ
+ 	default "noop" if DEFAULT_NOOP
+ 
+ endmenu
+diff --git a/block/Makefile b/block/Makefile
+index 00ecc97..1ed86d5 100644
+--- a/block/Makefile
++++ b/block/Makefile
+@@ -18,6 +18,7 @@ obj-$(CONFIG_BLK_DEV_THROTTLING)	+= blk-throttle.o
+ obj-$(CONFIG_IOSCHED_NOOP)	+= noop-iosched.o
+ obj-$(CONFIG_IOSCHED_DEADLINE)	+= deadline-iosched.o
+ obj-$(CONFIG_IOSCHED_CFQ)	+= cfq-iosched.o
++obj-$(CONFIG_IOSCHED_BFQ)	+= bfq-iosched.o
+ 
+ obj-$(CONFIG_BLOCK_COMPAT)	+= compat_ioctl.o
+ obj-$(CONFIG_BLK_CMDLINE_PARSER)	+= cmdline-parser.o
+diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
+index 98c4f9b..13b010d 100644
+--- a/include/linux/cgroup_subsys.h
++++ b/include/linux/cgroup_subsys.h
+@@ -35,6 +35,10 @@ SUBSYS(net_cls)
+ SUBSYS(blkio)
+ #endif
+ 
++#if IS_ENABLED(CONFIG_CGROUP_BFQIO)
++SUBSYS(bfqio)
++#endif
++
+ #if IS_ENABLED(CONFIG_CGROUP_PERF)
+ SUBSYS(perf_event)
+ #endif
+-- 
+2.3.0
+

diff --git a/5002_block-introduce-the-BFQ-v7r7-I-O-sched-for-3.19.patch1 b/5002_block-introduce-the-BFQ-v7r7-I-O-sched-for-3.19.patch1
new file mode 100644
index 0000000..fb96ff2
--- /dev/null
+++ b/5002_block-introduce-the-BFQ-v7r7-I-O-sched-for-3.19.patch1
@@ -0,0 +1,6966 @@
+From 5bfaaa91a51940efaccb66bc5cd562ba3b9d4053 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@unimore.it>
+Date: Thu, 9 May 2013 19:10:02 +0200
+Subject: [PATCH 2/3] block: introduce the BFQ-v7r7 I/O sched for 3.19
+
+Add the BFQ-v7r7 I/O scheduler to 3.19.
+The general structure is borrowed from CFQ, as is much of the code for
+handling I/O contexts. Over time, several useful features have been
+ported from CFQ as well (details in the changelog in README.BFQ). A
+(bfq_)queue is associated to each task doing I/O on a device, and each
+time a scheduling decision has to be made a queue is selected and served
+until it expires.
+
+    - Slices are given in the service domain: tasks are assigned
+      budgets, measured in number of sectors. Once granted the disk, a task
+      must however consume its assigned budget within a configurable
+      maximum time (by default, the maximum possible value of the
+      budgets is automatically computed to comply with this timeout).
+      This allows the desired latency vs "throughput boosting" tradeoff
+      to be set.
+
+    - Budgets are scheduled according to a variant of WF2Q+, implemented
+      using an augmented rb-tree to take eligibility into account while
+      preserving an O(log N) overall complexity.
+
+    - A low-latency tunable is provided; if enabled, both interactive
+      and soft real-time applications are guaranteed a very low latency.
+
+    - Latency guarantees are also preserved in the presence of NCQ.
+
+    - Also with flash-based devices, a high throughput is achieved
+      while still preserving latency guarantees.
+
+    - BFQ features Early Queue Merge (EQM), a sort of fusion of the
+      cooperating-queue-merging and the preemption mechanisms present
+      in CFQ. EQM is in fact a unified mechanism that tries to get a
+      sequential read pattern, and hence a high throughput, with any
+      set of processes performing interleaved I/O over a contiguous
+      sequence of sectors.
+
+    - BFQ supports full hierarchical scheduling, exporting a cgroups
+      interface.  Since each node has a full scheduler, each group can
+      be assigned its own weight.
+
+    - If the cgroups interface is not used, only I/O priorities can be
+      assigned to processes, with ioprio values mapped to weights
+      with the relation weight = IOPRIO_BE_NR - ioprio (a worked sketch
+      follows this list).
+
+    - ioprio classes are served in strict priority order, i.e., lower
+      priority queues are not served as long as there are higher
+      priority queues.  Among queues in the same class the bandwidth is
+      distributed in proportion to the weight of each queue. A very
+      thin extra bandwidth is however guaranteed to the Idle class, to
+      prevent it from starving.
+
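+A minimal worked sketch of the ioprio-to-weight mapping above (assuming
+the usual IOPRIO_BE_NR == 8 of the Linux ioprio interface):
+
+        weight = IOPRIO_BE_NR - ioprio
+        ioprio 0 (highest) -> weight 8
+        ioprio 4 (default) -> weight 4
+        ioprio 7 (lowest)  -> weight 1
+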
+Signed-off-by: Paolo Valente <paolo.valente@unimore.it>
+Signed-off-by: Arianna Avanzini <avanzini.arianna@gmail.com>
+---
+ block/bfq-cgroup.c  |  936 ++++++++++++
+ block/bfq-ioc.c     |   36 +
+ block/bfq-iosched.c | 3902 +++++++++++++++++++++++++++++++++++++++++++++++++++
+ block/bfq-sched.c   | 1214 ++++++++++++++++
+ block/bfq.h         |  775 ++++++++++
+ 5 files changed, 6863 insertions(+)
+ create mode 100644 block/bfq-cgroup.c
+ create mode 100644 block/bfq-ioc.c
+ create mode 100644 block/bfq-iosched.c
+ create mode 100644 block/bfq-sched.c
+ create mode 100644 block/bfq.h
+
+diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
+new file mode 100644
+index 0000000..11e2f1d
+--- /dev/null
++++ b/block/bfq-cgroup.c
+@@ -0,0 +1,936 @@
++/*
++ * BFQ: CGROUPS support.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ *		      Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ
++ * file.
++ */
++
++#ifdef CONFIG_CGROUP_BFQIO
++
++static DEFINE_MUTEX(bfqio_mutex);
++
++static bool bfqio_is_removed(struct bfqio_cgroup *bgrp)
++{
++	return bgrp ? !bgrp->online : false;
++}
++
++static struct bfqio_cgroup bfqio_root_cgroup = {
++	.weight = BFQ_DEFAULT_GRP_WEIGHT,
++	.ioprio = BFQ_DEFAULT_GRP_IOPRIO,
++	.ioprio_class = BFQ_DEFAULT_GRP_CLASS,
++};
++
++static inline void bfq_init_entity(struct bfq_entity *entity,
++				   struct bfq_group *bfqg)
++{
++	entity->weight = entity->new_weight;
++	entity->orig_weight = entity->new_weight;
++	entity->ioprio = entity->new_ioprio;
++	entity->ioprio_class = entity->new_ioprio_class;
++	entity->parent = bfqg->my_entity;
++	entity->sched_data = &bfqg->sched_data;
++}
++
++static struct bfqio_cgroup *css_to_bfqio(struct cgroup_subsys_state *css)
++{
++	return css ? container_of(css, struct bfqio_cgroup, css) : NULL;
++}
++
++/*
++ * Search for the bfq_group of bfqd in the hash table (for now only a list)
++ * of bgrp.  Must be called under rcu_read_lock().
++ */
++static struct bfq_group *bfqio_lookup_group(struct bfqio_cgroup *bgrp,
++					    struct bfq_data *bfqd)
++{
++	struct bfq_group *bfqg;
++	void *key;
++
++	hlist_for_each_entry_rcu(bfqg, &bgrp->group_data, group_node) {
++		key = rcu_dereference(bfqg->bfqd);
++		if (key == bfqd)
++			return bfqg;
++	}
++
++	return NULL;
++}
++
++static inline void bfq_group_init_entity(struct bfqio_cgroup *bgrp,
++					 struct bfq_group *bfqg)
++{
++	struct bfq_entity *entity = &bfqg->entity;
++
++	/*
++	 * If the weight of the entity has never been set via the sysfs
++	 * interface, then bgrp->weight == 0. In this case we initialize
++	 * the weight from the current ioprio value. Otherwise, the group
++	 * weight, if set, has priority over the ioprio value.
++	 */
++	if (bgrp->weight == 0) {
++		entity->new_weight = bfq_ioprio_to_weight(bgrp->ioprio);
++		entity->new_ioprio = bgrp->ioprio;
++	} else {
++		if (bgrp->weight < BFQ_MIN_WEIGHT ||
++		    bgrp->weight > BFQ_MAX_WEIGHT) {
++			printk(KERN_CRIT "bfq_group_init_entity: "
++					 "bgrp->weight %d\n", bgrp->weight);
++			BUG();
++		}
++		entity->new_weight = bgrp->weight;
++		entity->new_ioprio = bfq_weight_to_ioprio(bgrp->weight);
++	}
++	entity->orig_weight = entity->weight = entity->new_weight;
++	entity->ioprio = entity->new_ioprio;
++	entity->ioprio_class = entity->new_ioprio_class = bgrp->ioprio_class;
++	entity->my_sched_data = &bfqg->sched_data;
++	bfqg->active_entities = 0;
++}
++
++static inline void bfq_group_set_parent(struct bfq_group *bfqg,
++					struct bfq_group *parent)
++{
++	struct bfq_entity *entity;
++
++	BUG_ON(parent == NULL);
++	BUG_ON(bfqg == NULL);
++
++	entity = &bfqg->entity;
++	entity->parent = parent->my_entity;
++	entity->sched_data = &parent->sched_data;
++}
++
++/**
++ * bfq_group_chain_alloc - allocate a chain of groups.
++ * @bfqd: queue descriptor.
++ * @css: the leaf cgroup_subsys_state this chain starts from.
++ *
++ * Allocate a chain of groups starting from the one belonging to
++ * @cgroup up to the root cgroup.  Stop if a cgroup on the chain
++ * to the root already has an allocated group on @bfqd.
++ */
++static struct bfq_group *bfq_group_chain_alloc(struct bfq_data *bfqd,
++					       struct cgroup_subsys_state *css)
++{
++	struct bfqio_cgroup *bgrp;
++	struct bfq_group *bfqg, *prev = NULL, *leaf = NULL;
++
++	for (; css != NULL; css = css->parent) {
++		bgrp = css_to_bfqio(css);
++
++		bfqg = bfqio_lookup_group(bgrp, bfqd);
++		if (bfqg != NULL) {
++			/*
++			 * All the cgroups in the path from there to the
++			 * root must have a bfq_group for bfqd, so we don't
++			 * need any more allocations.
++			 */
++			break;
++		}
++
++		bfqg = kzalloc(sizeof(*bfqg), GFP_ATOMIC);
++		if (bfqg == NULL)
++			goto cleanup;
++
++		bfq_group_init_entity(bgrp, bfqg);
++		bfqg->my_entity = &bfqg->entity;
++
++		if (leaf == NULL) {
++			leaf = bfqg;
++			prev = leaf;
++		} else {
++			bfq_group_set_parent(prev, bfqg);
++			/*
++			 * Build a list of allocated nodes using the bfqd
++			 * field, which is still unused and will be
++			 * initialized only after the node is
++			 * connected.
++			 */
++			prev->bfqd = bfqg;
++			prev = bfqg;
++		}
++	}
++
++	return leaf;
++
++cleanup:
++	while (leaf != NULL) {
++		prev = leaf;
++		leaf = leaf->bfqd;
++		kfree(prev);
++	}
++
++	return NULL;
++}
++
++/**
++ * bfq_group_chain_link - link an allocated group chain to a cgroup
++ *                        hierarchy.
++ * @bfqd: the queue descriptor.
++ * @css: the leaf cgroup_subsys_state to start from.
++ * @leaf: the leaf group (to be associated to @cgroup).
++ *
++ * Try to link a chain of groups to a cgroup hierarchy, connecting the
++ * nodes bottom-up, so we can be sure that when we find a cgroup in the
++ * hierarchy that already has a group associated to @bfqd all the nodes
++ * in the path to the root cgroup have one too.
++ *
++ * On locking: the queue lock protects the hierarchy (there is a hierarchy
++ * per device) while the bfqio_cgroup lock protects the list of groups
++ * belonging to the same cgroup.
++ */
++static void bfq_group_chain_link(struct bfq_data *bfqd,
++				 struct cgroup_subsys_state *css,
++				 struct bfq_group *leaf)
++{
++	struct bfqio_cgroup *bgrp;
++	struct bfq_group *bfqg, *next, *prev = NULL;
++	unsigned long flags;
++
++	assert_spin_locked(bfqd->queue->queue_lock);
++
++	for (; css != NULL && leaf != NULL; css = css->parent) {
++		bgrp = css_to_bfqio(css);
++		next = leaf->bfqd;
++
++		bfqg = bfqio_lookup_group(bgrp, bfqd);
++		BUG_ON(bfqg != NULL);
++
++		spin_lock_irqsave(&bgrp->lock, flags);
++
++		rcu_assign_pointer(leaf->bfqd, bfqd);
++		hlist_add_head_rcu(&leaf->group_node, &bgrp->group_data);
++		hlist_add_head(&leaf->bfqd_node, &bfqd->group_list);
++
++		spin_unlock_irqrestore(&bgrp->lock, flags);
++
++		prev = leaf;
++		leaf = next;
++	}
++
++	BUG_ON(css == NULL && leaf != NULL);
++	if (css != NULL && prev != NULL) {
++		bgrp = css_to_bfqio(css);
++		bfqg = bfqio_lookup_group(bgrp, bfqd);
++		bfq_group_set_parent(prev, bfqg);
++	}
++}
++
++/**
++ * bfq_find_alloc_group - return the group associated to @bfqd in @cgroup.
++ * @bfqd: queue descriptor.
++ * @cgroup: cgroup being searched for.
++ *
++ * Return a group associated to @bfqd in @cgroup, allocating one if
++ * necessary.  When a group is returned all the cgroups in the path
++ * to the root have a group associated to @bfqd.
++ *
++ * If the allocation fails, return the root group: this breaks guarantees
++ * but is a safe fallback.  If this loss becomes a problem it can be
++ * mitigated using the equivalent weight (given by the product of the
++ * weights of the groups in the path from @group to the root) in the
++ * root scheduler.
++ *
++ * We allocate all the missing nodes in the path from the leaf cgroup
++ * to the root and we connect the nodes only after all the allocations
++ * have been successful.
++ */
++static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd,
++					      struct cgroup_subsys_state *css)
++{
++	struct bfqio_cgroup *bgrp = css_to_bfqio(css);
++	struct bfq_group *bfqg;
++
++	bfqg = bfqio_lookup_group(bgrp, bfqd);
++	if (bfqg != NULL)
++		return bfqg;
++
++	bfqg = bfq_group_chain_alloc(bfqd, css);
++	if (bfqg != NULL)
++		bfq_group_chain_link(bfqd, css, bfqg);
++	else
++		bfqg = bfqd->root_group;
++
++	return bfqg;
++}
++
++/**
++ * bfq_bfqq_move - migrate @bfqq to @bfqg.
++ * @bfqd: queue descriptor.
++ * @bfqq: the queue to move.
++ * @entity: @bfqq's entity.
++ * @bfqg: the group to move to.
++ *
++ * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
++ * it on the new one.  Avoid putting the entity on the old group idle tree.
++ *
++ * Must be called under the queue lock; the cgroup owning @bfqg must
++ * not disappear (by now this just means that we are called under
++ * rcu_read_lock()).
++ */
++static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++			  struct bfq_entity *entity, struct bfq_group *bfqg)
++{
++	int busy, resume;
++
++	busy = bfq_bfqq_busy(bfqq);
++	resume = !RB_EMPTY_ROOT(&bfqq->sort_list);
++
++	BUG_ON(resume && !entity->on_st);
++	BUG_ON(busy && !resume && entity->on_st &&
++	       bfqq != bfqd->in_service_queue);
++
++	if (busy) {
++		BUG_ON(atomic_read(&bfqq->ref) < 2);
++
++		if (!resume)
++			bfq_del_bfqq_busy(bfqd, bfqq, 0);
++		else
++			bfq_deactivate_bfqq(bfqd, bfqq, 0);
++	} else if (entity->on_st)
++		bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
++
++	/*
++	 * Here we use a reference to bfqg.  We don't need a refcounter
++	 * as the cgroup reference will not be dropped, so that its
++	 * destroy() callback will not be invoked.
++	 */
++	entity->parent = bfqg->my_entity;
++	entity->sched_data = &bfqg->sched_data;
++
++	if (busy && resume)
++		bfq_activate_bfqq(bfqd, bfqq);
++
++	if (bfqd->in_service_queue == NULL && !bfqd->rq_in_driver)
++		bfq_schedule_dispatch(bfqd);
++}
++
++/**
++ * __bfq_bic_change_cgroup - move @bic to @cgroup.
++ * @bfqd: the queue descriptor.
++ * @bic: the bic to move.
++ * @cgroup: the cgroup to move to.
++ *
++ * Move bic to cgroup, assuming that bfqd->queue is locked; the caller
++ * has to make sure that the reference to cgroup is valid across the call.
++ *
++ * NOTE: an alternative approach might have been to store the current
++ * cgroup in bfqq and get a reference to it, reducing the lookup
++ * time here, at the price of slightly more complex code.
++ */
++static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
++						struct bfq_io_cq *bic,
++						struct cgroup_subsys_state *css)
++{
++	struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
++	struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
++	struct bfq_entity *entity;
++	struct bfq_group *bfqg;
++	struct bfqio_cgroup *bgrp;
++
++	bgrp = css_to_bfqio(css);
++
++	bfqg = bfq_find_alloc_group(bfqd, css);
++	if (async_bfqq != NULL) {
++		entity = &async_bfqq->entity;
++
++		if (entity->sched_data != &bfqg->sched_data) {
++			bic_set_bfqq(bic, NULL, 0);
++			bfq_log_bfqq(bfqd, async_bfqq,
++				     "bic_change_group: %p %d",
++				     async_bfqq, atomic_read(&async_bfqq->ref));
++			bfq_put_queue(async_bfqq);
++		}
++	}
++
++	if (sync_bfqq != NULL) {
++		entity = &sync_bfqq->entity;
++		if (entity->sched_data != &bfqg->sched_data)
++			bfq_bfqq_move(bfqd, sync_bfqq, entity, bfqg);
++	}
++
++	return bfqg;
++}
++
++/**
++ * bfq_bic_change_cgroup - move @bic to @cgroup.
++ * @bic: the bic being migrated.
++ * @cgroup: the destination cgroup.
++ *
++ * When the task owning @bic is moved to @cgroup, @bic is immediately
++ * moved into its new parent group.
++ */
++static void bfq_bic_change_cgroup(struct bfq_io_cq *bic,
++				  struct cgroup_subsys_state *css)
++{
++	struct bfq_data *bfqd;
++	unsigned long uninitialized_var(flags);
++
++	bfqd = bfq_get_bfqd_locked(&(bic->icq.q->elevator->elevator_data),
++				   &flags);
++	if (bfqd != NULL) {
++		__bfq_bic_change_cgroup(bfqd, bic, css);
++		bfq_put_bfqd_unlock(bfqd, &flags);
++	}
++}
++
++/**
++ * bfq_bic_update_cgroup - update the cgroup of @bic.
++ * @bic: the @bic to update.
++ *
++ * Make sure that @bic is enqueued in the cgroup of the current task.
++ * We need this in addition to moving bics during the cgroup attach
++ * phase because the task owning @bic could be at its first disk
++ * access or we may end up in the root cgroup as the result of a
++ * memory allocation failure and here we try to move to the right
++ * group.
++ *
++ * Must be called under the queue lock.  It is safe to use the returned
++ * value even after the rcu_read_unlock() as the migration/destruction
++ * paths act under the queue lock too.  IOW it is impossible to race with
++ * group migration/destruction and end up with an invalid group as:
++ *   a) here cgroup has not yet been destroyed, nor its destroy callback
++ *      has started execution, as current holds a reference to it,
++ *   b) if it is destroyed after rcu_read_unlock() [after current is
++ *      migrated to a different cgroup] its attach() callback will have
++ *      taken care of removing all the references to the old cgroup data.
++ */
++static struct bfq_group *bfq_bic_update_cgroup(struct bfq_io_cq *bic)
++{
++	struct bfq_data *bfqd = bic_to_bfqd(bic);
++	struct bfq_group *bfqg;
++	struct cgroup_subsys_state *css;
++
++	BUG_ON(bfqd == NULL);
++
++	rcu_read_lock();
++	css = task_css(current, bfqio_cgrp_id);
++	bfqg = __bfq_bic_change_cgroup(bfqd, bic, css);
++	rcu_read_unlock();
++
++	return bfqg;
++}
++
++/**
++ * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
++ * @st: the service tree being flushed.
++ */
++static inline void bfq_flush_idle_tree(struct bfq_service_tree *st)
++{
++	struct bfq_entity *entity = st->first_idle;
++
++	for (; entity != NULL; entity = st->first_idle)
++		__bfq_deactivate_entity(entity, 0);
++}
++
++/**
++ * bfq_reparent_leaf_entity - move leaf entity to the root_group.
++ * @bfqd: the device data structure with the root group.
++ * @entity: the entity to move.
++ */
++static inline void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
++					    struct bfq_entity *entity)
++{
++	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++	BUG_ON(bfqq == NULL);
++	bfq_bfqq_move(bfqd, bfqq, entity, bfqd->root_group);
++	return;
++}
++
++/**
++ * bfq_reparent_active_entities - move to the root group all active
++ *                                entities.
++ * @bfqd: the device data structure with the root group.
++ * @bfqg: the group to move from.
++ * @st: the service tree with the entities.
++ *
++ * Needs queue_lock to be taken and reference to be valid over the call.
++ */
++static inline void bfq_reparent_active_entities(struct bfq_data *bfqd,
++						struct bfq_group *bfqg,
++						struct bfq_service_tree *st)
++{
++	struct rb_root *active = &st->active;
++	struct bfq_entity *entity = NULL;
++
++	if (!RB_EMPTY_ROOT(&st->active))
++		entity = bfq_entity_of(rb_first(active));
++
++	for (; entity != NULL; entity = bfq_entity_of(rb_first(active)))
++		bfq_reparent_leaf_entity(bfqd, entity);
++
++	if (bfqg->sched_data.in_service_entity != NULL)
++		bfq_reparent_leaf_entity(bfqd,
++			bfqg->sched_data.in_service_entity);
++
++	return;
++}
++
++/**
++ * bfq_destroy_group - destroy @bfqg.
++ * @bgrp: the bfqio_cgroup containing @bfqg.
++ * @bfqg: the group being destroyed.
++ *
++ * Destroy @bfqg, making sure that it is not referenced from its parent.
++ */
++static void bfq_destroy_group(struct bfqio_cgroup *bgrp, struct bfq_group *bfqg)
++{
++	struct bfq_data *bfqd;
++	struct bfq_service_tree *st;
++	struct bfq_entity *entity = bfqg->my_entity;
++	unsigned long uninitialized_var(flags);
++	int i;
++
++	hlist_del(&bfqg->group_node);
++
++	/*
++	 * Empty all service_trees belonging to this group before
++	 * deactivating the group itself.
++	 */
++	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
++		st = bfqg->sched_data.service_tree + i;
++
++		/*
++		 * The idle tree may still contain bfq_queues belonging
++		 * to exited tasks because they never migrated to a different
++		 * cgroup from the one being destroyed now.  No one else
++		 * can access them so it's safe to act without any lock.
++		 */
++		bfq_flush_idle_tree(st);
++
++		/*
++		 * It may happen that some queues are still active
++		 * (busy) upon group destruction (if the corresponding
++		 * processes have been forced to terminate). We move
++		 * all the leaf entities corresponding to these queues
++		 * to the root_group.
++		 * Also, it may happen that the group has an entity
++		 * in service, which is disconnected from the active
++		 * tree: it must be moved, too.
++		 * There is no need to put the sync queues, as the
++		 * scheduler has taken no reference.
++		 */
++		bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags);
++		if (bfqd != NULL) {
++			bfq_reparent_active_entities(bfqd, bfqg, st);
++			bfq_put_bfqd_unlock(bfqd, &flags);
++		}
++		BUG_ON(!RB_EMPTY_ROOT(&st->active));
++		BUG_ON(!RB_EMPTY_ROOT(&st->idle));
++	}
++	BUG_ON(bfqg->sched_data.next_in_service != NULL);
++	BUG_ON(bfqg->sched_data.in_service_entity != NULL);
++
++	/*
++	 * We may race with device destruction, take extra care when
++	 * dereferencing bfqg->bfqd.
++	 */
++	bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags);
++	if (bfqd != NULL) {
++		hlist_del(&bfqg->bfqd_node);
++		__bfq_deactivate_entity(entity, 0);
++		bfq_put_async_queues(bfqd, bfqg);
++		bfq_put_bfqd_unlock(bfqd, &flags);
++	}
++	BUG_ON(entity->tree != NULL);
++
++	/*
++	 * No need to defer the kfree() to the end of the RCU grace
++	 * period: we are called from the destroy() callback of our
++	 * cgroup, so we can be sure that no one is a) still using
++	 * this cgroup or b) doing lookups in it.
++	 */
++	kfree(bfqg);
++}
++
++static void bfq_end_wr_async(struct bfq_data *bfqd)
++{
++	struct hlist_node *tmp;
++	struct bfq_group *bfqg;
++
++	hlist_for_each_entry_safe(bfqg, tmp, &bfqd->group_list, bfqd_node)
++		bfq_end_wr_async_queues(bfqd, bfqg);
++	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
++}
++
++/**
++ * bfq_disconnect_groups - disconnect @bfqd from all its groups.
++ * @bfqd: the device descriptor being exited.
++ *
++ * When the device exits we just make sure that no lookup can return
++ * the now unused group structures.  They will be deallocated on cgroup
++ * destruction.
++ */
++static void bfq_disconnect_groups(struct bfq_data *bfqd)
++{
++	struct hlist_node *tmp;
++	struct bfq_group *bfqg;
++
++	bfq_log(bfqd, "disconnect_groups beginning");
++	hlist_for_each_entry_safe(bfqg, tmp, &bfqd->group_list, bfqd_node) {
++		hlist_del(&bfqg->bfqd_node);
++
++		__bfq_deactivate_entity(bfqg->my_entity, 0);
++
++		/*
++		 * Don't remove from the group hash, just set an
++		 * invalid key.  No lookups can race with the
++		 * assignment as bfqd is being destroyed; this
++		 * implies also that new elements cannot be added
++		 * to the list.
++		 */
++		rcu_assign_pointer(bfqg->bfqd, NULL);
++
++		bfq_log(bfqd, "disconnect_groups: put async for group %p",
++			bfqg);
++		bfq_put_async_queues(bfqd, bfqg);
++	}
++}
++
++static inline void bfq_free_root_group(struct bfq_data *bfqd)
++{
++	struct bfqio_cgroup *bgrp = &bfqio_root_cgroup;
++	struct bfq_group *bfqg = bfqd->root_group;
++
++	bfq_put_async_queues(bfqd, bfqg);
++
++	spin_lock_irq(&bgrp->lock);
++	hlist_del_rcu(&bfqg->group_node);
++	spin_unlock_irq(&bgrp->lock);
++
++	/*
++	 * No need to synchronize_rcu() here: since the device is gone
++	 * there cannot be any read-side access to its root_group.
++	 */
++	kfree(bfqg);
++}
++
++static struct bfq_group *bfq_alloc_root_group(struct bfq_data *bfqd, int node)
++{
++	struct bfq_group *bfqg;
++	struct bfqio_cgroup *bgrp;
++	int i;
++
++	bfqg = kzalloc_node(sizeof(*bfqg), GFP_KERNEL, node);
++	if (bfqg == NULL)
++		return NULL;
++
++	bfqg->entity.parent = NULL;
++	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
++		bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
++
++	bgrp = &bfqio_root_cgroup;
++	spin_lock_irq(&bgrp->lock);
++	rcu_assign_pointer(bfqg->bfqd, bfqd);
++	hlist_add_head_rcu(&bfqg->group_node, &bgrp->group_data);
++	spin_unlock_irq(&bgrp->lock);
++
++	return bfqg;
++}
++
++#define SHOW_FUNCTION(__VAR)						\
++static u64 bfqio_cgroup_##__VAR##_read(struct cgroup_subsys_state *css, \
++				       struct cftype *cftype)		\
++{									\
++	struct bfqio_cgroup *bgrp = css_to_bfqio(css);			\
++	u64 ret = -ENODEV;						\
++									\
++	mutex_lock(&bfqio_mutex);					\
++	if (bfqio_is_removed(bgrp))					\
++		goto out_unlock;					\
++									\
++	spin_lock_irq(&bgrp->lock);					\
++	ret = bgrp->__VAR;						\
++	spin_unlock_irq(&bgrp->lock);					\
++									\
++out_unlock:								\
++	mutex_unlock(&bfqio_mutex);					\
++	return ret;							\
++}
++
++SHOW_FUNCTION(weight);
++SHOW_FUNCTION(ioprio);
++SHOW_FUNCTION(ioprio_class);
++#undef SHOW_FUNCTION
++
++#define STORE_FUNCTION(__VAR, __MIN, __MAX)				\
++static int bfqio_cgroup_##__VAR##_write(struct cgroup_subsys_state *css,\
++					struct cftype *cftype,		\
++					u64 val)			\
++{									\
++	struct bfqio_cgroup *bgrp = css_to_bfqio(css);			\
++	struct bfq_group *bfqg;						\
++	int ret = -EINVAL;						\
++									\
++	if (val < (__MIN) || val > (__MAX))				\
++		return ret;						\
++									\
++	ret = -ENODEV;							\
++	mutex_lock(&bfqio_mutex);					\
++	if (bfqio_is_removed(bgrp))					\
++		goto out_unlock;					\
++	ret = 0;							\
++									\
++	spin_lock_irq(&bgrp->lock);					\
++	bgrp->__VAR = (unsigned short)val;				\
++	hlist_for_each_entry(bfqg, &bgrp->group_data, group_node) {	\
++		/*							\
++		 * Setting the ioprio_changed flag of the entity        \
++		 * to 1 with new_##__VAR == ##__VAR would re-set        \
++		 * the value of the weight to its ioprio mapping.       \
++		 * Set the flag only if necessary.			\
++		 */							\
++		if ((unsigned short)val != bfqg->entity.new_##__VAR) {  \
++			bfqg->entity.new_##__VAR = (unsigned short)val; \
++			/*						\
++			 * Make sure that the above new value has been	\
++			 * stored in bfqg->entity.new_##__VAR before	\
++			 * setting the ioprio_changed flag. In fact,	\
++			 * this flag may be read asynchronously (in	\
++			 * critical sections protected by a different	\
++			 * lock than that held here), and finding this	\
++			 * flag set may cause the execution of the code	\
++			 * for updating parameters whose value may	\
++			 * depend also on bfqg->entity.new_##__VAR (in	\
++			 * __bfq_entity_update_weight_prio).		\
++			 * This barrier makes sure that the new value	\
++			 * of bfqg->entity.new_##__VAR is correctly	\
++			 * seen in that code.				\
++			 */						\
++			smp_wmb();                                      \
++			bfqg->entity.ioprio_changed = 1;                \
++		}							\
++	}								\
++	spin_unlock_irq(&bgrp->lock);					\
++									\
++out_unlock:								\
++	mutex_unlock(&bfqio_mutex);					\
++	return ret;							\
++}
++
++STORE_FUNCTION(weight, BFQ_MIN_WEIGHT, BFQ_MAX_WEIGHT);
++STORE_FUNCTION(ioprio, 0, IOPRIO_BE_NR - 1);
++STORE_FUNCTION(ioprio_class, IOPRIO_CLASS_RT, IOPRIO_CLASS_IDLE);
++#undef STORE_FUNCTION
++
++static struct cftype bfqio_files[] = {
++	{
++		.name = "weight",
++		.read_u64 = bfqio_cgroup_weight_read,
++		.write_u64 = bfqio_cgroup_weight_write,
++	},
++	{
++		.name = "ioprio",
++		.read_u64 = bfqio_cgroup_ioprio_read,
++		.write_u64 = bfqio_cgroup_ioprio_write,
++	},
++	{
++		.name = "ioprio_class",
++		.read_u64 = bfqio_cgroup_ioprio_class_read,
++		.write_u64 = bfqio_cgroup_ioprio_class_write,
++	},
++	{ },	/* terminate */
++};
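++/*
++ * Illustrative usage from userspace (a hedged sketch, not part of the
++ * original patch; mount point and group name are assumptions):
++ *
++ *	mount -t cgroup -o bfqio none /sys/fs/cgroup/bfqio
++ *	mkdir /sys/fs/cgroup/bfqio/grp1
++ *	echo 500 > /sys/fs/cgroup/bfqio/grp1/bfqio.weight
++ *	echo $$ > /sys/fs/cgroup/bfqio/grp1/tasks
++ */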
++
++static struct cgroup_subsys_state *bfqio_create(struct cgroup_subsys_state
++						*parent_css)
++{
++	struct bfqio_cgroup *bgrp;
++
++	if (parent_css != NULL) {
++		bgrp = kzalloc(sizeof(*bgrp), GFP_KERNEL);
++		if (bgrp == NULL)
++			return ERR_PTR(-ENOMEM);
++	} else
++		bgrp = &bfqio_root_cgroup;
++
++	spin_lock_init(&bgrp->lock);
++	INIT_HLIST_HEAD(&bgrp->group_data);
++	bgrp->ioprio = BFQ_DEFAULT_GRP_IOPRIO;
++	bgrp->ioprio_class = BFQ_DEFAULT_GRP_CLASS;
++
++	return &bgrp->css;
++}
++
++/*
++ * We cannot support shared io contexts, as we have no means to support
++ * two tasks with the same ioc in two different groups without major rework
++ * of the main bic/bfqq data structures.  For now we allow a task to change
++ * its cgroup only if it's the only owner of its ioc; the drawback of this
++ * behavior is that a group containing a task that forked using CLONE_IO
++ * will not be destroyed until the tasks sharing the ioc die.
++ */
++static int bfqio_can_attach(struct cgroup_subsys_state *css,
++			    struct cgroup_taskset *tset)
++{
++	struct task_struct *task;
++	struct io_context *ioc;
++	int ret = 0;
++
++	cgroup_taskset_for_each(task, tset) {
++		/*
++		 * task_lock() is needed to avoid races with
++		 * exit_io_context()
++		 */
++		task_lock(task);
++		ioc = task->io_context;
++		if (ioc != NULL && atomic_read(&ioc->nr_tasks) > 1)
++			/*
++			 * ioc == NULL means that the task is either too
++			 * young or exiting: if it still has no ioc, the
++			 * ioc can't be shared; if the task is exiting, the
++			 * attach will fail anyway, no matter what we
++			 * return here.
++			 */
++			ret = -EINVAL;
++		task_unlock(task);
++		if (ret)
++			break;
++	}
++
++	return ret;
++}
++
++static void bfqio_attach(struct cgroup_subsys_state *css,
++			 struct cgroup_taskset *tset)
++{
++	struct task_struct *task;
++	struct io_context *ioc;
++	struct io_cq *icq;
++
++	/*
++	 * IMPORTANT NOTE: The move of more than one process at a time to a
++	 * new group has not yet been tested.
++	 */
++	cgroup_taskset_for_each(task, tset) {
++		ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
++		if (ioc) {
++			/*
++			 * Handle cgroup change here.
++			 */
++			rcu_read_lock();
++			hlist_for_each_entry_rcu(icq, &ioc->icq_list, ioc_node)
++				if (!strncmp(
++					icq->q->elevator->type->elevator_name,
++					"bfq", ELV_NAME_MAX))
++					bfq_bic_change_cgroup(icq_to_bic(icq),
++							      css);
++			rcu_read_unlock();
++			put_io_context(ioc);
++		}
++	}
++}
++
++static void bfqio_destroy(struct cgroup_subsys_state *css)
++{
++	struct bfqio_cgroup *bgrp = css_to_bfqio(css);
++	struct hlist_node *tmp;
++	struct bfq_group *bfqg;
++
++	/*
++	 * Since we are destroying the cgroup, there are no more tasks
++	 * referencing it, and all the RCU grace periods that may have
++	 * referenced it are ended (as the destruction of the parent
++	 * cgroup is RCU-safe); bgrp->group_data will not be accessed by
++	 * anything else and we don't need any synchronization.
++	 */
++	hlist_for_each_entry_safe(bfqg, tmp, &bgrp->group_data, group_node)
++		bfq_destroy_group(bgrp, bfqg);
++
++	BUG_ON(!hlist_empty(&bgrp->group_data));
++
++	kfree(bgrp);
++}
++
++static int bfqio_css_online(struct cgroup_subsys_state *css)
++{
++	struct bfqio_cgroup *bgrp = css_to_bfqio(css);
++
++	mutex_lock(&bfqio_mutex);
++	bgrp->online = true;
++	mutex_unlock(&bfqio_mutex);
++
++	return 0;
++}
++
++static void bfqio_css_offline(struct cgroup_subsys_state *css)
++{
++	struct bfqio_cgroup *bgrp = css_to_bfqio(css);
++
++	mutex_lock(&bfqio_mutex);
++	bgrp->online = false;
++	mutex_unlock(&bfqio_mutex);
++}
++
++struct cgroup_subsys bfqio_cgrp_subsys = {
++	.css_alloc = bfqio_create,
++	.css_online = bfqio_css_online,
++	.css_offline = bfqio_css_offline,
++	.can_attach = bfqio_can_attach,
++	.attach = bfqio_attach,
++	.css_free = bfqio_destroy,
++	.legacy_cftypes = bfqio_files,
++};
++#else
++static inline void bfq_init_entity(struct bfq_entity *entity,
++				   struct bfq_group *bfqg)
++{
++	entity->weight = entity->new_weight;
++	entity->orig_weight = entity->new_weight;
++	entity->ioprio = entity->new_ioprio;
++	entity->ioprio_class = entity->new_ioprio_class;
++	entity->sched_data = &bfqg->sched_data;
++}
++
++static inline struct bfq_group *
++bfq_bic_update_cgroup(struct bfq_io_cq *bic)
++{
++	struct bfq_data *bfqd = bic_to_bfqd(bic);
++	return bfqd->root_group;
++}
++
++static inline void bfq_bfqq_move(struct bfq_data *bfqd,
++				 struct bfq_queue *bfqq,
++				 struct bfq_entity *entity,
++				 struct bfq_group *bfqg)
++{
++}
++
++static void bfq_end_wr_async(struct bfq_data *bfqd)
++{
++	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
++}
++
++static inline void bfq_disconnect_groups(struct bfq_data *bfqd)
++{
++	bfq_put_async_queues(bfqd, bfqd->root_group);
++}
++
++static inline void bfq_free_root_group(struct bfq_data *bfqd)
++{
++	kfree(bfqd->root_group);
++}
++
++static struct bfq_group *bfq_alloc_root_group(struct bfq_data *bfqd, int node)
++{
++	struct bfq_group *bfqg;
++	int i;
++
++	bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
++	if (bfqg == NULL)
++		return NULL;
++
++	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
++		bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
++
++	return bfqg;
++}
++#endif
+diff --git a/block/bfq-ioc.c b/block/bfq-ioc.c
+new file mode 100644
+index 0000000..7f6b000
+--- /dev/null
++++ b/block/bfq-ioc.c
+@@ -0,0 +1,36 @@
++/*
++ * BFQ: I/O context handling.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ *		      Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
++ */
++
++/**
++ * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
++ * @icq: the iocontext queue.
++ */
++static inline struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
++{
++	/* bic->icq is the first member, %NULL will convert to %NULL */
++	return container_of(icq, struct bfq_io_cq, icq);
++}
++
++/**
++ * bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
++ * @bfqd: the lookup key.
++ * @ioc: the io_context of the process doing I/O.
++ *
++ * Queue lock must be held.
++ */
++static inline struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
++					       struct io_context *ioc)
++{
++	if (ioc)
++		return icq_to_bic(ioc_lookup_icq(ioc, bfqd->queue));
++	return NULL;
++}
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+new file mode 100644
+index 0000000..97ee934
+--- /dev/null
++++ b/block/bfq-iosched.c
+@@ -0,0 +1,3902 @@
++/*
++ * Budget Fair Queueing (BFQ) disk scheduler.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ *		      Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ
++ * file.
++ *
++ * BFQ is a proportional-share storage-I/O scheduling algorithm based on
++ * the slice-by-slice service scheme of CFQ. But BFQ assigns budgets,
++ * measured in number of sectors, to processes instead of time slices. The
++ * device is not granted to the in-service process for a given time slice,
++ * but until it has exhausted its assigned budget. This change from the time
++ * to the service domain allows BFQ to distribute the device throughput
++ * among processes as desired, without any distortion due to ZBR, workload
++ * fluctuations or other factors. BFQ uses an ad hoc internal scheduler,
++ * called B-WF2Q+, to schedule processes according to their budgets. More
++ * precisely, BFQ schedules queues associated to processes. Thanks to the
++ * accurate policy of B-WF2Q+, BFQ can afford to assign high budgets to
++ * I/O-bound processes issuing sequential requests (to boost the
++ * throughput), and yet guarantee a low latency to interactive and soft
++ * real-time applications.
++ *
++ * BFQ is described in [1], where also a reference to the initial, more
++ * theoretical paper on BFQ can be found. The interested reader can find
++ * in the latter paper full details on the main algorithm, as well as
++ * formulas of the guarantees and formal proofs of all the properties.
++ * With respect to the version of BFQ presented in these papers, this
++ * implementation adds a few more heuristics, such as the one that
++ * guarantees a low latency to soft real-time applications, and a
++ * hierarchical extension based on H-WF2Q+.
++ *
++ * B-WF2Q+ is based on WF2Q+, that is described in [2], together with
++ * H-WF2Q+, while the augmented tree used to implement B-WF2Q+ with O(log N)
++ * complexity derives from the one introduced with EEVDF in [3].
++ *
++ * [1] P. Valente and M. Andreolini, ``Improving Application Responsiveness
++ *     with the BFQ Disk I/O Scheduler'',
++ *     Proceedings of the 5th Annual International Systems and Storage
++ *     Conference (SYSTOR '12), June 2012.
++ *
++ * http://algogroup.unimo.it/people/paolo/disk_sched/bf1-v1-suite-results.pdf
++ *
++ * [2] Jon C.R. Bennett and H. Zhang, ``Hierarchical Packet Fair Queueing
++ *     Algorithms,'' IEEE/ACM Transactions on Networking, 5(5):675-689,
++ *     Oct 1997.
++ *
++ * http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
++ *
++ * [3] I. Stoica and H. Abdel-Wahab, ``Earliest Eligible Virtual Deadline
++ *     First: A Flexible and Accurate Mechanism for Proportional Share
++ *     Resource Allocation,'' technical report.
++ *
++ * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
++ */
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/blkdev.h>
++#include <linux/cgroup.h>
++#include <linux/elevator.h>
++#include <linux/jiffies.h>
++#include <linux/rbtree.h>
++#include <linux/ioprio.h>
++#include "bfq.h"
++#include "blk.h"
++
++/* Max number of dispatches in one round of service. */
++static const int bfq_quantum = 4;
++
++/* Expiration time of sync (0) and async (1) requests, in jiffies. */
++static const int bfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
++
++/* Maximum backwards seek, in KiB. */
++static const int bfq_back_max = 16 * 1024;
++
++/* Penalty of a backwards seek, in number of sectors. */
++static const int bfq_back_penalty = 2;
++
++/* Idling period duration, in jiffies. */
++static int bfq_slice_idle = HZ / 125;
++
++/* Default maximum budget values, in sectors and number of requests. */
++static const int bfq_default_max_budget = 16 * 1024;
++static const int bfq_max_budget_async_rq = 4;
++
++/*
++ * Async to sync throughput distribution is controlled as follows:
++ * when an async request is served, the entity is charged the number
++ * of sectors of the request, multiplied by the factor below
++ */
++static const int bfq_async_charge_factor = 10;
++
++/* Default timeout values, in jiffies, approximating CFQ defaults. */
++static const int bfq_timeout_sync = HZ / 8;
++static int bfq_timeout_async = HZ / 25;
++
++struct kmem_cache *bfq_pool;
++
++/* Below this threshold (in ms), we consider thinktime immediate. */
++#define BFQ_MIN_TT		2
++
++/* hw_tag detection: parallel requests threshold and min samples needed. */
++#define BFQ_HW_QUEUE_THRESHOLD	4
++#define BFQ_HW_QUEUE_SAMPLES	32
++
++#define BFQQ_SEEK_THR	 (sector_t)(8 * 1024)
++#define BFQQ_SEEKY(bfqq) ((bfqq)->seek_mean > BFQQ_SEEK_THR)
++
++/* Min samples used for peak rate estimation (for autotuning). */
++#define BFQ_PEAK_RATE_SAMPLES	32
++
++/* Shift used for peak rate fixed precision calculations. */
++#define BFQ_RATE_SHIFT		16
++
++/*
++ * By default, BFQ computes the duration of the weight raising for
++ * interactive applications automatically, using the following formula:
++ * duration = (R / r) * T, where r is the peak rate of the device, and
++ * R and T are two reference parameters.
++ * In particular, R is the peak rate of the reference device (see below),
++ * and T is a reference time: given the systems that are likely to be
++ * installed on the reference device according to its speed class, T is
++ * about the maximum time needed, under BFQ and while reading two files in
++ * parallel, to load typical large applications on these systems.
++ * In practice, the slower/faster the device at hand is, the more/less it
++ * takes to load applications with respect to the reference device.
++ * Accordingly, the longer/shorter BFQ grants weight raising to interactive
++ * applications.
++ *
++ * BFQ uses four different reference pairs (R, T), depending on:
++ * . whether the device is rotational or non-rotational;
++ * . whether the device is slow, such as old or portable HDDs, as well as
++ *   SD cards, or fast, such as newer HDDs and SSDs.
++ *
++ * The device's speed class is dynamically (re)detected in
++ * bfq_update_peak_rate() every time the estimated peak rate is updated.
++ *
++ * In the following definitions, R_slow[0]/R_fast[0] and T_slow[0]/T_fast[0]
++ * are the reference values for a slow/fast rotational device, whereas
++ * R_slow[1]/R_fast[1] and T_slow[1]/T_fast[1] are the reference values for
++ * a slow/fast non-rotational device. Finally, device_speed_thresh are the
++ * thresholds used to switch between speed classes.
++ * Both the reference peak rates and the thresholds are measured in
++ * sectors/usec, left-shifted by BFQ_RATE_SHIFT.
++ */
++static int R_slow[2] = {1536, 10752};
++static int R_fast[2] = {17415, 34791};
++/*
++ * To improve readability, a conversion function is used to initialize the
++ * following arrays, which entails that they can be initialized only in a
++ * function.
++ */
++static int T_slow[2];
++static int T_fast[2];
++static int device_speed_thresh[2];
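++
++/*
++ * A minimal sketch of how such an initialization could look, assuming
++ * reference times expressed in milliseconds (all values below are
++ * hypothetical, chosen only to illustrate the conversions involved):
++ */
++static inline void bfq_example_init_speed_params(void)
++{
++	/* [0] = rotational device, [1] = non-rotational device */
++	T_slow[0] = msecs_to_jiffies(2600);
++	T_slow[1] = msecs_to_jiffies(1000);
++	T_fast[0] = msecs_to_jiffies(5500);
++	T_fast[1] = msecs_to_jiffies(2000);
++	/* switch speed class roughly halfway between the reference rates */
++	device_speed_thresh[0] = (R_fast[0] + R_slow[0]) / 2;
++	device_speed_thresh[1] = (R_fast[1] + R_slow[1]) / 2;
++}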
++
++#define BFQ_SERVICE_TREE_INIT	((struct bfq_service_tree)		\
++				{ RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })
++
++#define RQ_BIC(rq)		((struct bfq_io_cq *) (rq)->elv.priv[0])
++#define RQ_BFQQ(rq)		((rq)->elv.priv[1])
++
++static inline void bfq_schedule_dispatch(struct bfq_data *bfqd);
++
++#include "bfq-ioc.c"
++#include "bfq-sched.c"
++#include "bfq-cgroup.c"
++
++#define bfq_class_idle(bfqq)	((bfqq)->entity.ioprio_class ==\
++				 IOPRIO_CLASS_IDLE)
++#define bfq_class_rt(bfqq)	((bfqq)->entity.ioprio_class ==\
++				 IOPRIO_CLASS_RT)
++
++#define bfq_sample_valid(samples)	((samples) > 80)
++
++/*
++ * We regard a request as SYNC if it is either a read or has the SYNC bit
++ * set (in which case it could also be a direct WRITE).
++ */
++static inline int bfq_bio_sync(struct bio *bio)
++{
++	if (bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC))
++		return 1;
++
++	return 0;
++}
++
++/*
++ * Schedule a run of the queue if there are requests pending and nothing
++ * already in the driver that will restart queueing.
++ */
++static inline void bfq_schedule_dispatch(struct bfq_data *bfqd)
++{
++	if (bfqd->queued != 0) {
++		bfq_log(bfqd, "schedule dispatch");
++		kblockd_schedule_work(&bfqd->unplug_work);
++	}
++}
++
++/*
++ * Lifted from AS - choose which of rq1 and rq2 is best served now.
++ * We choose the request that is closest to the head right now.  Distance
++ * behind the head is penalized and only allowed to a certain extent.
++ */
++static struct request *bfq_choose_req(struct bfq_data *bfqd,
++				      struct request *rq1,
++				      struct request *rq2,
++				      sector_t last)
++{
++	sector_t s1, s2, d1 = 0, d2 = 0;
++	unsigned long back_max;
++#define BFQ_RQ1_WRAP	0x01 /* request 1 wraps */
++#define BFQ_RQ2_WRAP	0x02 /* request 2 wraps */
++	unsigned wrap = 0; /* bit mask: requests behind the disk head? */
++
++	if (rq1 == NULL || rq1 == rq2)
++		return rq2;
++	if (rq2 == NULL)
++		return rq1;
++
++	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
++		return rq1;
++	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
++		return rq2;
++	if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
++		return rq1;
++	else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
++		return rq2;
++
++	s1 = blk_rq_pos(rq1);
++	s2 = blk_rq_pos(rq2);
++
++	/*
++	 * By definition, 1KiB is 2 sectors.
++	 */
++	back_max = bfqd->bfq_back_max * 2;
++
++	/*
++	 * Strict one way elevator _except_ in the case where we allow
++	 * short backward seeks which are biased as twice the cost of a
++	 * similar forward seek.
++	 */
++	if (s1 >= last)
++		d1 = s1 - last;
++	else if (s1 + back_max >= last)
++		d1 = (last - s1) * bfqd->bfq_back_penalty;
++	else
++		wrap |= BFQ_RQ1_WRAP;
++
++	if (s2 >= last)
++		d2 = s2 - last;
++	else if (s2 + back_max >= last)
++		d2 = (last - s2) * bfqd->bfq_back_penalty;
++	else
++		wrap |= BFQ_RQ2_WRAP;
++
++	/* Found required data */
++
++	/*
++	 * By doing switch() on the bit mask "wrap" we avoid having to
++	 * check two variables for all permutations: --> faster!
++	 */
++	switch (wrap) {
++	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
++		if (d1 < d2)
++			return rq1;
++		else if (d2 < d1)
++			return rq2;
++		else {
++			if (s1 >= s2)
++				return rq1;
++			else
++				return rq2;
++		}
++
++	case BFQ_RQ2_WRAP:
++		return rq1;
++	case BFQ_RQ1_WRAP:
++		return rq2;
++	case (BFQ_RQ1_WRAP|BFQ_RQ2_WRAP): /* both rqs wrapped */
++	default:
++		/*
++		 * Since both rqs are wrapped,
++		 * start with the one that's further behind head
++		 * (--> only *one* back seek required),
++		 * since back seek takes more time than forward.
++		 */
++		if (s1 <= s2)
++			return rq1;
++		else
++			return rq2;
++	}
++}
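++
++/*
++ * Worked example for the distance logic above (hypothetical sector
++ * numbers, default bfq_back_penalty = 2): with last = 1000, a forward
++ * request at s1 = 1100 gets d1 = 100, while a backward request at
++ * s2 = 980 gets d2 = (1000 - 980) * 2 = 40. The backward request wins
++ * despite lying behind the head, because its penalized distance is
++ * still the shorter one.
++ */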
++
++static struct bfq_queue *
++bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
++		     sector_t sector, struct rb_node **ret_parent,
++		     struct rb_node ***rb_link)
++{
++	struct rb_node **p, *parent;
++	struct bfq_queue *bfqq = NULL;
++
++	parent = NULL;
++	p = &root->rb_node;
++	while (*p) {
++		struct rb_node **n;
++
++		parent = *p;
++		bfqq = rb_entry(parent, struct bfq_queue, pos_node);
++
++		/*
++		 * Sort strictly based on sector. Smallest to the left,
++		 * largest to the right.
++		 */
++		if (sector > blk_rq_pos(bfqq->next_rq))
++			n = &(*p)->rb_right;
++		else if (sector < blk_rq_pos(bfqq->next_rq))
++			n = &(*p)->rb_left;
++		else
++			break;
++		p = n;
++		bfqq = NULL;
++	}
++
++	*ret_parent = parent;
++	if (rb_link)
++		*rb_link = p;
++
++	bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
++		(long long unsigned)sector,
++		bfqq != NULL ? bfqq->pid : 0);
++
++	return bfqq;
++}
++
++static void bfq_rq_pos_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++	struct rb_node **p, *parent;
++	struct bfq_queue *__bfqq;
++
++	if (bfqq->pos_root != NULL) {
++		rb_erase(&bfqq->pos_node, bfqq->pos_root);
++		bfqq->pos_root = NULL;
++	}
++
++	if (bfq_class_idle(bfqq))
++		return;
++	if (!bfqq->next_rq)
++		return;
++
++	bfqq->pos_root = &bfqd->rq_pos_tree;
++	__bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
++			blk_rq_pos(bfqq->next_rq), &parent, &p);
++	if (__bfqq == NULL) {
++		rb_link_node(&bfqq->pos_node, parent, p);
++		rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
++	} else
++		bfqq->pos_root = NULL;
++}
++
++/*
++ * Tell whether there are active queues or groups with differentiated weights.
++ */
++static inline bool bfq_differentiated_weights(struct bfq_data *bfqd)
++{
++	BUG_ON(!bfqd->hw_tag);
++	/*
++	 * For weights to differ, at least one of the trees must contain
++	 * at least two nodes.
++	 */
++	return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
++		(bfqd->queue_weights_tree.rb_node->rb_left ||
++		 bfqd->queue_weights_tree.rb_node->rb_right)
++#ifdef CONFIG_CGROUP_BFQIO
++	       ) ||
++	       (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
++		(bfqd->group_weights_tree.rb_node->rb_left ||
++		 bfqd->group_weights_tree.rb_node->rb_right)
++#endif
++	       );
++}
++
++/*
++ * If the weight-counter tree passed as input contains no counter for
++ * the weight of the input entity, then add that counter; otherwise just
++ * increment the existing counter.
++ *
++ * Note that weight-counter trees contain few nodes in mostly symmetric
++ * scenarios. For example, if all queues have the same weight, then the
++ * weight-counter tree for the queues may contain at most one node.
++ * This holds even if low_latency is on, because weight-raised queues
++ * are not inserted in the tree.
++ * In most scenarios, the rate at which nodes are created/destroyed
++ * should be low too.
++ */
++static void bfq_weights_tree_add(struct bfq_data *bfqd,
++				 struct bfq_entity *entity,
++				 struct rb_root *root)
++{
++	struct rb_node **new = &(root->rb_node), *parent = NULL;
++
++	/*
++	 * Do not insert if:
++	 * - the device does not support queueing;
++	 * - the entity is already associated with a counter, which happens if:
++	 *   1) the entity is associated with a queue, 2) a request arrival
++	 *   has caused the queue to become both non-weight-raised (and hence
++	 *   to change its weight) and backlogged; each of the two events
++	 *   causes an invocation of this function, and 3) this is the
++	 *   invocation caused by the second event. This second invocation
++	 *   is useless, so we handle it by returning immediately. More
++	 *   efficient or clearer solutions might be possible.
++	 */
++	if (!bfqd->hw_tag || entity->weight_counter)
++		return;
++
++	while (*new) {
++		struct bfq_weight_counter *__counter = container_of(*new,
++						struct bfq_weight_counter,
++						weights_node);
++		parent = *new;
++
++		if (entity->weight == __counter->weight) {
++			entity->weight_counter = __counter;
++			goto inc_counter;
++		}
++		if (entity->weight < __counter->weight)
++			new = &((*new)->rb_left);
++		else
++			new = &((*new)->rb_right);
++	}
++
++	entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
++					 GFP_ATOMIC);
++	if (!entity->weight_counter)
++		return; /* atomic allocation may fail: skip the accounting */
++	entity->weight_counter->weight = entity->weight;
++	rb_link_node(&entity->weight_counter->weights_node, parent, new);
++	rb_insert_color(&entity->weight_counter->weights_node, root);
++
++inc_counter:
++	entity->weight_counter->num_active++;
++}
++
++/*
++ * Decrement the weight counter associated with the entity, and, if the
++ * counter reaches 0, remove the counter from the tree.
++ * See the comments to the function bfq_weights_tree_add() for considerations
++ * about overhead.
++ */
++static void bfq_weights_tree_remove(struct bfq_data *bfqd,
++				    struct bfq_entity *entity,
++				    struct rb_root *root)
++{
++	/*
++	 * Check whether the entity is actually associated with a counter.
++	 * In fact, the device may not be considered NCQ-capable for a while,
++	 * which implies that no insertion in the weight trees is performed,
++	 * after which the device may start to be deemed NCQ-capable, and hence
++	 * this function may start to be invoked. This may cause the function
++	 * to be invoked for entities that are not associated with any counter.
++	 */
++	if (!entity->weight_counter)
++		return;
++
++	BUG_ON(RB_EMPTY_ROOT(root));
++	BUG_ON(entity->weight_counter->weight != entity->weight);
++
++	BUG_ON(!entity->weight_counter->num_active);
++	entity->weight_counter->num_active--;
++	if (entity->weight_counter->num_active > 0)
++		goto reset_entity_pointer;
++
++	rb_erase(&entity->weight_counter->weights_node, root);
++	kfree(entity->weight_counter);
++
++reset_entity_pointer:
++	entity->weight_counter = NULL;
++}
++
++static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
++					struct bfq_queue *bfqq,
++					struct request *last)
++{
++	struct rb_node *rbnext = rb_next(&last->rb_node);
++	struct rb_node *rbprev = rb_prev(&last->rb_node);
++	struct request *next = NULL, *prev = NULL;
++
++	BUG_ON(RB_EMPTY_NODE(&last->rb_node));
++
++	if (rbprev != NULL)
++		prev = rb_entry_rq(rbprev);
++
++	if (rbnext != NULL)
++		next = rb_entry_rq(rbnext);
++	else {
++		rbnext = rb_first(&bfqq->sort_list);
++		if (rbnext && rbnext != &last->rb_node)
++			next = rb_entry_rq(rbnext);
++	}
++
++	return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
++}
++
++/* see the definition of bfq_async_charge_factor for details */
++static inline unsigned long bfq_serv_to_charge(struct request *rq,
++					       struct bfq_queue *bfqq)
++{
++	return blk_rq_sectors(rq) *
++		(1 + ((!bfq_bfqq_sync(bfqq)) * (bfqq->wr_coeff == 1) *
++		bfq_async_charge_factor));
++}
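++
++/*
++ * Example of the charging rule above (hypothetical request): a
++ * 64-sector async request from a non-weight-raised queue is charged
++ * 64 * (1 + 10) = 704 sectors, whereas the same request from a sync
++ * or weight-raised queue is charged its actual size, 64 sectors.
++ */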
++
++/**
++ * bfq_updated_next_req - update the queue after a new next_rq selection.
++ * @bfqd: the device data the queue belongs to.
++ * @bfqq: the queue to update.
++ *
++ * If the first request of a queue changes we make sure that the queue
++ * has enough budget to serve at least its first request (if the
++ * request has grown).  We do this because if the queue has not enough
++ * budget for its first request, it has to go through two dispatch
++ * rounds to actually get it dispatched.
++ */
++static void bfq_updated_next_req(struct bfq_data *bfqd,
++				 struct bfq_queue *bfqq)
++{
++	struct bfq_entity *entity = &bfqq->entity;
++	struct bfq_service_tree *st = bfq_entity_service_tree(entity);
++	struct request *next_rq = bfqq->next_rq;
++	unsigned long new_budget;
++
++	if (next_rq == NULL)
++		return;
++
++	if (bfqq == bfqd->in_service_queue)
++		/*
++		 * In order not to break guarantees, budgets cannot be
++		 * changed after an entity has been selected.
++		 */
++		return;
++
++	BUG_ON(entity->tree != &st->active);
++	BUG_ON(entity == entity->sched_data->in_service_entity);
++
++	new_budget = max_t(unsigned long, bfqq->max_budget,
++			   bfq_serv_to_charge(next_rq, bfqq));
++	if (entity->budget != new_budget) {
++		entity->budget = new_budget;
++		bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
++					 new_budget);
++		bfq_activate_bfqq(bfqd, bfqq);
++	}
++}
++
++static inline unsigned int bfq_wr_duration(struct bfq_data *bfqd)
++{
++	u64 dur;
++
++	if (bfqd->bfq_wr_max_time > 0)
++		return bfqd->bfq_wr_max_time;
++
++	dur = bfqd->RT_prod;
++	do_div(dur, bfqd->peak_rate);
++
++	return dur;
++}
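++
++/*
++ * Numerical sketch of the duration formula above (hypothetical
++ * numbers): since RT_prod = R * T, a device whose estimated peak rate
++ * is half the reference rate R gets dur = (R * T) / (R / 2) = 2 * T,
++ * i.e., a device twice as slow as the reference one grants interactive
++ * weight raising for twice the reference time.
++ */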
++
++/* Empty burst list and add just bfqq (see comments to bfq_handle_burst) */
++static inline void bfq_reset_burst_list(struct bfq_data *bfqd,
++					struct bfq_queue *bfqq)
++{
++	struct bfq_queue *item;
++	struct hlist_node *n;
++
++	hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
++		hlist_del_init(&item->burst_list_node);
++	hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
++	bfqd->burst_size = 1;
++}
++
++/* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */
++static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++	/* Increment burst size to also take bfqq into account */
++	bfqd->burst_size++;
++
++	if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) {
++		struct bfq_queue *pos, *bfqq_item;
++		struct hlist_node *n;
++
++		/*
++		 * Enough queues have been activated shortly after each
++		 * other to consider this burst as large.
++		 */
++		bfqd->large_burst = true;
++
++		/*
++		 * We can now mark all queues in the burst list as
++		 * belonging to a large burst.
++		 */
++		hlist_for_each_entry(bfqq_item, &bfqd->burst_list,
++				     burst_list_node)
++			bfq_mark_bfqq_in_large_burst(bfqq_item);
++		bfq_mark_bfqq_in_large_burst(bfqq);
++
++		/*
++		 * From now on, and until the current burst finishes, any
++		 * new queue being activated shortly after the last queue
++		 * was inserted in the burst can be immediately marked as
++		 * belonging to a large burst. So the burst list is not
++		 * needed any more. Remove it.
++		 */
++		hlist_for_each_entry_safe(pos, n, &bfqd->burst_list,
++					  burst_list_node)
++			hlist_del_init(&pos->burst_list_node);
++	} else /* burst not yet large: add bfqq to the burst list */
++		hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
++}
++
++/*
++ * If many queues happen to become active shortly after each other, then,
++ * to help the processes associated to these queues get their job done as
++ * soon as possible, it is usually better to not grant either weight-raising
++ * or device idling to these queues. In this comment we describe, firstly,
++ * the reasons why this fact holds, and, secondly, the next function, which
++ * implements the main steps needed to properly mark these queues so that
++ * they can then be treated in a different way.
++ *
++ * As for the terminology, we say that a queue becomes active, i.e.,
++ * switches from idle to backlogged, either when it is created (as a
++ * consequence of the arrival of an I/O request), or, if already existing,
++ * when a new request for the queue arrives while the queue is idle.
++ * Bursts of activations, i.e., activations of different queues occurring
++ * shortly after each other, are typically caused by services or applications
++ * that spawn or reactivate many parallel threads/processes. Examples are
++ * systemd during boot or git grep.
++ *
++ * These services or applications benefit mostly from a high throughput:
++ * the quicker the requests of the activated queues are cumulatively served,
++ * the sooner the target job of these queues gets completed. As a consequence,
++ * weight-raising any of these queues, which also implies idling the device
++ * for it, is almost always counterproductive: in most cases it just lowers
++ * throughput.
++ *
++ * On the other hand, a burst of activations may be also caused by the start
++ * of an application that does not consist of many parallel I/O-bound
++ * threads. In fact, with a complex application, the burst may be just a
++ * consequence of the fact that several processes need to be executed to
++ * start up the application. To start an application as quickly as possible,
++ * the best thing to do is to privilege the I/O related to the application
++ * with respect to all other I/O. Therefore, the best strategy to start as
++ * quickly as possible an application that causes a burst of activations is
++ * to weight-raise all the queues activated during the burst. This is the
++ * exact opposite of the best strategy for the other type of bursts.
++ *
++ * In the end, to take the best action for each of the two cases, the two
++ * types of bursts need to be distinguished. Fortunately, this seems
++ * relatively easy to do, by looking at the sizes of the bursts. In
++ * particular, we found a threshold such that bursts with a larger size
++ * than that threshold are apparently caused only by services or commands
++ * such as systemd or git grep. For brevity, hereafter we simply call these
++ * bursts 'large'. BFQ *does not* weight-raise queues whose activations occur
++ * in a large burst. In addition, for each of these queues BFQ performs or
++ * does not perform idling depending on which choice boosts the throughput
++ * most. The exact choice depends on the device and request pattern at
++ * hand.
++ *
++ * Turning back to the next function, it implements all the steps needed
++ * to detect the occurrence of a large burst and to properly mark all the
++ * queues belonging to it (so that they can then be treated in a different
++ * way). This goal is achieved by maintaining a special "burst list" that
++ * holds, temporarily, the queues that belong to the burst in progress. The
++ * list is then used to mark these queues as belonging to a large burst if
++ * the burst does become large. The main steps are the following.
++ *
++ * . when the very first queue is activated, the queue is inserted into the
++ *   list (as it could be the first queue in a possible burst)
++ *
++ * . if the current burst has not yet become large, and a queue Q that does
++ *   not yet belong to the burst is activated shortly after the last time
++ *   at which a new queue entered the burst list, then the function appends
++ *   Q to the burst list
++ *
++ * . if, as a consequence of the previous step, the burst size reaches
++ *   the large-burst threshold, then
++ *
++ *     . all the queues in the burst list are marked as belonging to a
++ *       large burst
++ *
++ *     . the burst list is deleted; in fact, the burst list already served
++ *       its purpose (keeping temporarily track of the queues in a burst,
++ *       so as to be able to mark them as belonging to a large burst in the
++ *       previous sub-step), and now is not needed any more
++ *
++ *     . the device enters a large-burst mode
++ *
++ * . if a queue Q that does not belong to the burst is activated while
++ *   the device is in large-burst mode and shortly after the last time
++ *   at which a queue either entered the burst list or was marked as
++ *   belonging to the current large burst, then Q is immediately marked
++ *   as belonging to a large burst.
++ *
++ * . if a queue Q that does not belong to the burst is activated only a
++ *   while after, i.e., not shortly after, the last time at which a queue
++ *   either entered the burst list or was marked as belonging to the
++ *   current large burst, then the current burst is deemed finished and:
++ *
++ *        . the large-burst mode is reset if set
++ *
++ *        . the burst list is emptied
++ *
++ *        . Q is inserted in the burst list, as Q may be the first queue
++ *          in a possible new burst (then the burst list contains just Q
++ *          after this step).
++ */
++static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++			     bool idle_for_long_time)
++{
++	/*
++	 * If bfqq happened to be activated in a burst, but has been idle
++	 * for at least as long as an interactive queue, then we assume
++	 * that, in the overall I/O initiated in the burst, the I/O
++	 * associated to bfqq is finished. So bfqq does not need to be
++	 * treated as a queue belonging to a burst anymore. Accordingly,
++	 * we reset bfqq's in_large_burst flag if set, and remove bfqq
++	 * from the burst list if it's there. We do not, however, decrement
++	 * burst_size, because the fact that bfqq does not need to belong
++	 * to the burst list any more does not invalidate the fact that
++	 * bfqq may have been activated during the current burst.
++	 */
++	if (idle_for_long_time) {
++		hlist_del_init(&bfqq->burst_list_node);
++		bfq_clear_bfqq_in_large_burst(bfqq);
++	}
++
++	/*
++	 * If bfqq is already in the burst list or is part of a large
++	 * burst, then there is nothing else to do.
++	 */
++	if (!hlist_unhashed(&bfqq->burst_list_node) ||
++	    bfq_bfqq_in_large_burst(bfqq))
++		return;
++
++	/*
++	 * If bfqq's activation happens late enough, then the current
++	 * burst is finished, and related data structures must be reset.
++	 *
++	 * In this respect, consider the special case where bfqq is the very
++	 * first queue being activated. In this case, last_ins_in_burst is
++	 * not yet significant when we get here. But it is easy to verify
++	 * that, whether or not the following condition is true, bfqq will
++	 * end up being inserted into the burst list. In particular the
++	 * list will happen to contain only bfqq. And this is exactly what
++	 * has to happen, as bfqq may be the first queue in a possible
++	 * burst.
++	 */
++	if (time_is_before_jiffies(bfqd->last_ins_in_burst +
++	    bfqd->bfq_burst_interval)) {
++		bfqd->large_burst = false;
++		bfq_reset_burst_list(bfqd, bfqq);
++		return;
++	}
++
++	/*
++	 * If we get here, then bfqq is being activated shortly after the
++	 * last queue. So, if the current burst is also large, we can mark
++	 * bfqq as belonging to this large burst immediately.
++	 */
++	if (bfqd->large_burst) {
++		bfq_mark_bfqq_in_large_burst(bfqq);
++		return;
++	}
++
++	/*
++	 * If we get here, then a large-burst state has not yet been
++	 * reached, but bfqq is being activated shortly after the last
++	 * queue. Then we add bfqq to the burst.
++	 */
++	bfq_add_to_burst(bfqd, bfqq);
++}
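++
++/*
++ * A minimal sketch of the detection policy above, stripped of all
++ * list handling (hypothetical helper, not used by the scheduler):
++ * activations closer to each other than 'interval' grow the current
++ * burst, later ones start a new burst of size one.
++ */
++static inline bool bfq_example_burst_step(unsigned long *last_ins,
++					  int *burst_size, int thresh,
++					  unsigned long interval)
++{
++	if (time_is_before_jiffies(*last_ins + interval)) {
++		*burst_size = 1; /* burst finished: a new one starts here */
++		*last_ins = jiffies;
++		return false;
++	}
++	*last_ins = jiffies;
++	return ++(*burst_size) >= thresh; /* true iff the burst is large */
++}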
++
++static void bfq_add_request(struct request *rq)
++{
++	struct bfq_queue *bfqq = RQ_BFQQ(rq);
++	struct bfq_entity *entity = &bfqq->entity;
++	struct bfq_data *bfqd = bfqq->bfqd;
++	struct request *next_rq, *prev;
++	unsigned long old_wr_coeff = bfqq->wr_coeff;
++	bool interactive = false;
++
++	bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
++	bfqq->queued[rq_is_sync(rq)]++;
++	bfqd->queued++;
++
++	elv_rb_add(&bfqq->sort_list, rq);
++
++	/*
++	 * Check if this request is a better next-serve candidate.
++	 */
++	prev = bfqq->next_rq;
++	next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
++	BUG_ON(next_rq == NULL);
++	bfqq->next_rq = next_rq;
++
++	/*
++	 * Adjust priority tree position, if next_rq changes.
++	 */
++	if (prev != bfqq->next_rq)
++		bfq_rq_pos_tree_add(bfqd, bfqq);
++
++	if (!bfq_bfqq_busy(bfqq)) {
++		bool soft_rt,
++		     idle_for_long_time = time_is_before_jiffies(
++						bfqq->budget_timeout +
++						bfqd->bfq_wr_min_idle_time);
++
++		if (bfq_bfqq_sync(bfqq)) {
++			bool already_in_burst =
++			   !hlist_unhashed(&bfqq->burst_list_node) ||
++			   bfq_bfqq_in_large_burst(bfqq);
++			bfq_handle_burst(bfqd, bfqq, idle_for_long_time);
++			/*
++			 * If bfqq was not already in the current burst,
++			 * then, at this point, bfqq either has been
++			 * added to the current burst or has caused the
++			 * current burst to terminate. In particular, in
++			 * the second case, bfqq has become the first
++			 * queue in a possible new burst.
++			 * In both cases last_ins_in_burst needs to be
++			 * moved forward.
++			 */
++			if (!already_in_burst)
++				bfqd->last_ins_in_burst = jiffies;
++		}
++
++		soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
++			!bfq_bfqq_in_large_burst(bfqq) &&
++			time_is_before_jiffies(bfqq->soft_rt_next_start);
++		interactive = !bfq_bfqq_in_large_burst(bfqq) &&
++			      idle_for_long_time;
++		entity->budget = max_t(unsigned long, bfqq->max_budget,
++				       bfq_serv_to_charge(next_rq, bfqq));
++
++		if (!bfq_bfqq_IO_bound(bfqq)) {
++			if (time_before(jiffies,
++					RQ_BIC(rq)->ttime.last_end_request +
++					bfqd->bfq_slice_idle)) {
++				bfqq->requests_within_timer++;
++				if (bfqq->requests_within_timer >=
++				    bfqd->bfq_requests_within_timer)
++					bfq_mark_bfqq_IO_bound(bfqq);
++			} else
++				bfqq->requests_within_timer = 0;
++		}
++
++		if (!bfqd->low_latency)
++			goto add_bfqq_busy;
++
++		/*
++		 * If the queue is not being boosted and has been idle
++		 * for enough time, start a weight-raising period
++		 */
++		if (old_wr_coeff == 1 && (interactive || soft_rt)) {
++			bfqq->wr_coeff = bfqd->bfq_wr_coeff;
++			if (interactive)
++				bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++			else
++				bfqq->wr_cur_max_time =
++					bfqd->bfq_wr_rt_max_time;
++			bfq_log_bfqq(bfqd, bfqq,
++				     "wrais starting at %lu, rais_max_time %u",
++				     jiffies,
++				     jiffies_to_msecs(bfqq->wr_cur_max_time));
++		} else if (old_wr_coeff > 1) {
++			if (interactive)
++				bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++			else if (bfq_bfqq_in_large_burst(bfqq) ||
++				 (bfqq->wr_cur_max_time ==
++				  bfqd->bfq_wr_rt_max_time &&
++				  !soft_rt)) {
++				bfqq->wr_coeff = 1;
++				bfq_log_bfqq(bfqd, bfqq,
++					"wrais ending at %lu, rais_max_time %u",
++					jiffies,
++					jiffies_to_msecs(bfqq->
++						wr_cur_max_time));
++			} else if (time_before(
++					bfqq->last_wr_start_finish +
++					bfqq->wr_cur_max_time,
++					jiffies +
++					bfqd->bfq_wr_rt_max_time) &&
++				   soft_rt) {
++				/*
++				 * The remaining weight-raising time is lower
++				 * than bfqd->bfq_wr_rt_max_time, which
++				 * means that the application is enjoying
++				 * weight raising either because deemed soft-
++				 * rt in the near past, or because deemed
++				 * interactive long ago. In both cases,
++				 * resetting now the current remaining weight-
++				 * raising time for the application to the
++				 * weight-raising duration for soft rt
++				 * applications would not cause any latency
++				 * increase for the application (as the new
++				 * duration would be higher than the remaining
++				 * time).
++				 *
++				 * In addition, the application is now meeting
++				 * the requirements for being deemed soft rt.
++				 * In the end we can correctly and safely
++				 * (re)charge the weight-raising duration for
++				 * the application with the weight-raising
++				 * duration for soft rt applications.
++				 *
++				 * In particular, doing this recharge now, i.e.,
++				 * before the weight-raising period for the
++				 * application finishes, reduces the probability
++				 * of the following negative scenario:
++				 * 1) the weight of a soft rt application is
++				 *    raised at startup (as for any newly
++				 *    created application),
++				 * 2) since the application is not interactive,
++				 *    at a certain time weight-raising is
++				 *    stopped for the application,
++				 * 3) at that time the application happens to
++				 *    still have pending requests, and hence
++				 *    is destined to not have a chance to be
++				 *    deemed soft rt before these requests are
++				 *    completed (see the comments to the
++				 *    function bfq_bfqq_softrt_next_start()
++				 *    for details on soft rt detection),
++				 * 4) these pending requests experience a high
++				 *    latency because the application is not
++				 *    weight-raised while they are pending.
++				 */
++				bfqq->last_wr_start_finish = jiffies;
++				bfqq->wr_cur_max_time =
++					bfqd->bfq_wr_rt_max_time;
++			}
++		}
++		if (old_wr_coeff != bfqq->wr_coeff)
++			entity->ioprio_changed = 1;
++add_bfqq_busy:
++		bfqq->last_idle_bklogged = jiffies;
++		bfqq->service_from_backlogged = 0;
++		bfq_clear_bfqq_softrt_update(bfqq);
++		bfq_add_bfqq_busy(bfqd, bfqq);
++	} else {
++		if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
++		    time_is_before_jiffies(
++				bfqq->last_wr_start_finish +
++				bfqd->bfq_wr_min_inter_arr_async)) {
++			bfqq->wr_coeff = bfqd->bfq_wr_coeff;
++			bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++
++			bfqd->wr_busy_queues++;
++			entity->ioprio_changed = 1;
++			bfq_log_bfqq(bfqd, bfqq,
++			    "non-idle wrais starting at %lu, rais_max_time %u",
++			    jiffies,
++			    jiffies_to_msecs(bfqq->wr_cur_max_time));
++		}
++		if (prev != bfqq->next_rq)
++			bfq_updated_next_req(bfqd, bfqq);
++	}
++
++	if (bfqd->low_latency &&
++		(old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive))
++		bfqq->last_wr_start_finish = jiffies;
++}
++
++static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
++					  struct bio *bio)
++{
++	struct task_struct *tsk = current;
++	struct bfq_io_cq *bic;
++	struct bfq_queue *bfqq;
++
++	bic = bfq_bic_lookup(bfqd, tsk->io_context);
++	if (bic == NULL)
++		return NULL;
++
++	bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio));
++	if (bfqq != NULL)
++		return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
++
++	return NULL;
++}
++
++static void bfq_activate_request(struct request_queue *q, struct request *rq)
++{
++	struct bfq_data *bfqd = q->elevator->elevator_data;
++
++	bfqd->rq_in_driver++;
++	bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
++	bfq_log(bfqd, "activate_request: new bfqd->last_position %llu",
++		(long long unsigned)bfqd->last_position);
++}
++
++static inline void bfq_deactivate_request(struct request_queue *q,
++					  struct request *rq)
++{
++	struct bfq_data *bfqd = q->elevator->elevator_data;
++
++	BUG_ON(bfqd->rq_in_driver == 0);
++	bfqd->rq_in_driver--;
++}
++
++static void bfq_remove_request(struct request *rq)
++{
++	struct bfq_queue *bfqq = RQ_BFQQ(rq);
++	struct bfq_data *bfqd = bfqq->bfqd;
++	const int sync = rq_is_sync(rq);
++
++	if (bfqq->next_rq == rq) {
++		bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
++		bfq_updated_next_req(bfqd, bfqq);
++	}
++
++	list_del_init(&rq->queuelist);
++	BUG_ON(bfqq->queued[sync] == 0);
++	bfqq->queued[sync]--;
++	bfqd->queued--;
++	elv_rb_del(&bfqq->sort_list, rq);
++
++	if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
++		if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue)
++			bfq_del_bfqq_busy(bfqd, bfqq, 1);
++		/*
++		 * Remove queue from request-position tree as it is empty.
++		 */
++		if (bfqq->pos_root != NULL) {
++			rb_erase(&bfqq->pos_node, bfqq->pos_root);
++			bfqq->pos_root = NULL;
++		}
++	}
++
++	if (rq->cmd_flags & REQ_META) {
++		BUG_ON(bfqq->meta_pending == 0);
++		bfqq->meta_pending--;
++	}
++}
++
++static int bfq_merge(struct request_queue *q, struct request **req,
++		     struct bio *bio)
++{
++	struct bfq_data *bfqd = q->elevator->elevator_data;
++	struct request *__rq;
++
++	__rq = bfq_find_rq_fmerge(bfqd, bio);
++	if (__rq != NULL && elv_rq_merge_ok(__rq, bio)) {
++		*req = __rq;
++		return ELEVATOR_FRONT_MERGE;
++	}
++
++	return ELEVATOR_NO_MERGE;
++}
++
++static void bfq_merged_request(struct request_queue *q, struct request *req,
++			       int type)
++{
++	if (type == ELEVATOR_FRONT_MERGE &&
++	    rb_prev(&req->rb_node) &&
++	    blk_rq_pos(req) <
++	    blk_rq_pos(container_of(rb_prev(&req->rb_node),
++				    struct request, rb_node))) {
++		struct bfq_queue *bfqq = RQ_BFQQ(req);
++		struct bfq_data *bfqd = bfqq->bfqd;
++		struct request *prev, *next_rq;
++
++		/* Reposition request in its sort_list */
++		elv_rb_del(&bfqq->sort_list, req);
++		elv_rb_add(&bfqq->sort_list, req);
++		/* Choose next request to be served for bfqq */
++		prev = bfqq->next_rq;
++		next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
++					 bfqd->last_position);
++		BUG_ON(next_rq == NULL);
++		bfqq->next_rq = next_rq;
++		/*
++		 * If next_rq changes, update both the queue's budget to
++		 * fit the new request and the queue's position in its
++		 * rq_pos_tree.
++		 */
++		if (prev != bfqq->next_rq) {
++			bfq_updated_next_req(bfqd, bfqq);
++			bfq_rq_pos_tree_add(bfqd, bfqq);
++		}
++	}
++}
++
++static void bfq_merged_requests(struct request_queue *q, struct request *rq,
++				struct request *next)
++{
++	struct bfq_queue *bfqq = RQ_BFQQ(rq);
++
++	/*
++	 * Reposition in fifo if next is older than rq.
++	 */
++	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
++	    time_before(next->fifo_time, rq->fifo_time)) {
++		list_move(&rq->queuelist, &next->queuelist);
++		rq->fifo_time = next->fifo_time;
++	}
++
++	if (bfqq->next_rq == next)
++		bfqq->next_rq = rq;
++
++	bfq_remove_request(next);
++}
++
++/* Must be called with bfqq != NULL */
++static inline void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
++{
++	BUG_ON(bfqq == NULL);
++	if (bfq_bfqq_busy(bfqq))
++		bfqq->bfqd->wr_busy_queues--;
++	bfqq->wr_coeff = 1;
++	bfqq->wr_cur_max_time = 0;
++	/* Trigger a weight change on the next activation of the queue */
++	bfqq->entity.ioprio_changed = 1;
++}
++
++static void bfq_end_wr_async_queues(struct bfq_data *bfqd,
++				    struct bfq_group *bfqg)
++{
++	int i, j;
++
++	for (i = 0; i < 2; i++)
++		for (j = 0; j < IOPRIO_BE_NR; j++)
++			if (bfqg->async_bfqq[i][j] != NULL)
++				bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]);
++	if (bfqg->async_idle_bfqq != NULL)
++		bfq_bfqq_end_wr(bfqg->async_idle_bfqq);
++}
++
++static void bfq_end_wr(struct bfq_data *bfqd)
++{
++	struct bfq_queue *bfqq;
++
++	spin_lock_irq(bfqd->queue->queue_lock);
++
++	list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
++		bfq_bfqq_end_wr(bfqq);
++	list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
++		bfq_bfqq_end_wr(bfqq);
++	bfq_end_wr_async(bfqd);
++
++	spin_unlock_irq(bfqd->queue->queue_lock);
++}
++
++static int bfq_allow_merge(struct request_queue *q, struct request *rq,
++			   struct bio *bio)
++{
++	struct bfq_data *bfqd = q->elevator->elevator_data;
++	struct bfq_io_cq *bic;
++	struct bfq_queue *bfqq;
++
++	/*
++	 * Disallow merge of a sync bio into an async request.
++	 */
++	if (bfq_bio_sync(bio) && !rq_is_sync(rq))
++		return 0;
++
++	/*
++	 * Lookup the bfqq that this bio will be queued with. Allow
++	 * merge only if rq is queued there.
++	 * Queue lock is held here.
++	 */
++	bic = bfq_bic_lookup(bfqd, current->io_context);
++	if (bic == NULL)
++		return 0;
++
++	bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio));
++	return bfqq == RQ_BFQQ(rq);
++}
++
++static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
++				       struct bfq_queue *bfqq)
++{
++	if (bfqq != NULL) {
++		bfq_mark_bfqq_must_alloc(bfqq);
++		bfq_mark_bfqq_budget_new(bfqq);
++		bfq_clear_bfqq_fifo_expire(bfqq);
++
++		bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8;
++
++		bfq_log_bfqq(bfqd, bfqq,
++			     "set_in_service_queue, cur-budget = %lu",
++			     bfqq->entity.budget);
++	}
++
++	bfqd->in_service_queue = bfqq;
++}
++
++/*
++ * Get and set a new queue for service.
++ */
++static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd,
++						  struct bfq_queue *bfqq)
++{
++	if (!bfqq)
++		bfqq = bfq_get_next_queue(bfqd);
++	else
++		bfq_get_next_queue_forced(bfqd, bfqq);
++
++	__bfq_set_in_service_queue(bfqd, bfqq);
++	return bfqq;
++}
++
++static inline sector_t bfq_dist_from_last(struct bfq_data *bfqd,
++					  struct request *rq)
++{
++	if (blk_rq_pos(rq) >= bfqd->last_position)
++		return blk_rq_pos(rq) - bfqd->last_position;
++	else
++		return bfqd->last_position - blk_rq_pos(rq);
++}
++
++/*
++ * Return true if rq is close enough to bfqd->last_position, i.e., no
++ * farther than BFQQ_SEEK_THR sectors away from it.
++ */
++static inline int bfq_rq_close(struct bfq_data *bfqd, struct request *rq)
++{
++	return bfq_dist_from_last(bfqd, rq) <= BFQQ_SEEK_THR;
++}
++
++static struct bfq_queue *bfqq_close(struct bfq_data *bfqd)
++{
++	struct rb_root *root = &bfqd->rq_pos_tree;
++	struct rb_node *parent, *node;
++	struct bfq_queue *__bfqq;
++	sector_t sector = bfqd->last_position;
++
++	if (RB_EMPTY_ROOT(root))
++		return NULL;
++
++	/*
++	 * First, if we find a request starting at the end of the last
++	 * request, choose it.
++	 */
++	__bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
++	if (__bfqq != NULL)
++		return __bfqq;
++
++	/*
++	 * If the exact sector wasn't found, the parent of the NULL leaf
++	 * will contain the closest sector (rq_pos_tree sorted by
++	 * next_request position).
++	 */
++	__bfqq = rb_entry(parent, struct bfq_queue, pos_node);
++	if (bfq_rq_close(bfqd, __bfqq->next_rq))
++		return __bfqq;
++
++	if (blk_rq_pos(__bfqq->next_rq) < sector)
++		node = rb_next(&__bfqq->pos_node);
++	else
++		node = rb_prev(&__bfqq->pos_node);
++	if (node == NULL)
++		return NULL;
++
++	__bfqq = rb_entry(node, struct bfq_queue, pos_node);
++	if (bfq_rq_close(bfqd, __bfqq->next_rq))
++		return __bfqq;
++
++	return NULL;
++}
++
++/*
++ * bfqd - obvious
++ * cur_bfqq - passed in so that we don't decide that the current queue
++ *            is closely cooperating with itself.
++ *
++ * We are assuming that cur_bfqq has dispatched at least one request,
++ * and that bfqd->last_position reflects a position on the disk associated
++ * with the I/O issued by cur_bfqq.
++ */
++static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd,
++					      struct bfq_queue *cur_bfqq)
++{
++	struct bfq_queue *bfqq;
++
++	if (bfq_class_idle(cur_bfqq))
++		return NULL;
++	if (!bfq_bfqq_sync(cur_bfqq))
++		return NULL;
++	if (BFQQ_SEEKY(cur_bfqq))
++		return NULL;
++
++	/* If device has only one backlogged bfq_queue, don't search. */
++	if (bfqd->busy_queues == 1)
++		return NULL;
++
++	/*
++	 * We should notice if some of the queues are cooperating, e.g.
++	 * working closely on the same area of the disk. In that case,
++	 * we can group them together and don't waste time idling.
++	 */
++	bfqq = bfqq_close(bfqd);
++	if (bfqq == NULL || bfqq == cur_bfqq)
++		return NULL;
++
++	/*
++	 * Do not merge queues from different bfq_groups.
++	 */
++	if (bfqq->entity.parent != cur_bfqq->entity.parent)
++		return NULL;
++
++	/*
++	 * It only makes sense to merge sync queues.
++	 */
++	if (!bfq_bfqq_sync(bfqq))
++		return NULL;
++	if (BFQQ_SEEKY(bfqq))
++		return NULL;
++
++	/*
++	 * Do not merge queues of different priority classes.
++	 */
++	if (bfq_class_rt(bfqq) != bfq_class_rt(cur_bfqq))
++		return NULL;
++
++	return bfqq;
++}
++
++/*
++ * If enough samples have been computed, return the current max budget
++ * stored in bfqd, which is dynamically updated according to the
++ * estimated disk peak rate; otherwise return the default max budget
++ */
++static inline unsigned long bfq_max_budget(struct bfq_data *bfqd)
++{
++	if (bfqd->budgets_assigned < 194)
++		return bfq_default_max_budget;
++	else
++		return bfqd->bfq_max_budget;
++}
++
++/*
++ * Return min budget, which is a fraction of the current or default
++ * max budget (trying with 1/32)
++ */
++static inline unsigned long bfq_min_budget(struct bfq_data *bfqd)
++{
++	if (bfqd->budgets_assigned < 194)
++		return bfq_default_max_budget / 32;
++	else
++		return bfqd->bfq_max_budget / 32;
++}
++
++static void bfq_arm_slice_timer(struct bfq_data *bfqd)
++{
++	struct bfq_queue *bfqq = bfqd->in_service_queue;
++	struct bfq_io_cq *bic;
++	unsigned long sl;
++
++	BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
++
++	/* Processes have exited, don't wait. */
++	bic = bfqd->in_service_bic;
++	if (bic == NULL || atomic_read(&bic->icq.ioc->active_ref) == 0)
++		return;
++
++	bfq_mark_bfqq_wait_request(bfqq);
++
++	/*
++	 * We don't want to idle for seeks, but we do want to allow
++	 * fair distribution of slice time for a process doing back-to-back
++	 * seeks. So allow a little bit of time for it to submit a new rq.
++	 *
++	 * To prevent processes with (partly) seeky workloads from
++	 * being too ill-treated, grant them a small fraction of the
++	 * assigned budget before reducing the waiting time to
++	 * BFQ_MIN_TT. This happened to help reduce latency.
++	 */
++	sl = bfqd->bfq_slice_idle;
++	/*
++	 * Unless the queue is being weight-raised, grant only minimum idle
++	 * time if the queue either has been seeky for long enough or has
++	 * already proved to be constantly seeky.
++	 */
++	if (bfq_sample_valid(bfqq->seek_samples) &&
++	    ((BFQQ_SEEKY(bfqq) && bfqq->entity.service >
++				  bfq_max_budget(bfqq->bfqd) / 8) ||
++	      bfq_bfqq_constantly_seeky(bfqq)) && bfqq->wr_coeff == 1)
++		sl = min(sl, msecs_to_jiffies(BFQ_MIN_TT));
++	else if (bfqq->wr_coeff > 1)
++		sl = sl * 3;
++	bfqd->last_idling_start = ktime_get();
++	mod_timer(&bfqd->idle_slice_timer, jiffies + sl);
++	bfq_log(bfqd, "arm idle: %u/%u ms",
++		jiffies_to_msecs(sl), jiffies_to_msecs(bfqd->bfq_slice_idle));
++}
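++
++/*
++ * Example of the resulting idle windows (assuming HZ = 1000, so that
++ * bfq_slice_idle = HZ / 125 = 8 jiffies = 8 ms): a constantly seeky,
++ * non-weight-raised queue idles for min(8, BFQ_MIN_TT) = 2 ms, while
++ * a weight-raised queue idles for 3 * 8 = 24 ms.
++ */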
++
++/*
++ * Set the maximum time for the in-service queue to consume its
++ * budget. This prevents seeky processes from lowering the disk
++ * throughput (always guaranteed with a time slice scheme as in CFQ).
++ */
++static void bfq_set_budget_timeout(struct bfq_data *bfqd)
++{
++	struct bfq_queue *bfqq = bfqd->in_service_queue;
++	unsigned int timeout_coeff;
++	if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
++		timeout_coeff = 1;
++	else
++		timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
++
++	bfqd->last_budget_start = ktime_get();
++
++	bfq_clear_bfqq_budget_new(bfqq);
++	bfqq->budget_timeout = jiffies +
++		bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] * timeout_coeff;
++
++	bfq_log_bfqq(bfqd, bfqq, "set budget_timeout %u",
++		jiffies_to_msecs(bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] *
++		timeout_coeff));
++}
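++
++/*
++ * Example of the timeout coefficient above (hypothetical weights): a
++ * queue whose weight has been raised to ten times its original weight
++ * gets timeout_coeff = 10, i.e., ten times the base budget timeout,
++ * unless it is in a soft real-time weight-raising period, in which
++ * case the coefficient stays 1.
++ */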
++
++/*
++ * Move request from internal lists to the request queue dispatch list.
++ */
++static void bfq_dispatch_insert(struct request_queue *q, struct request *rq)
++{
++	struct bfq_data *bfqd = q->elevator->elevator_data;
++	struct bfq_queue *bfqq = RQ_BFQQ(rq);
++
++	/*
++	 * For consistency, the next instruction should have been executed
++	 * after removing the request from the queue and dispatching it.
++	 * We execute this instruction before bfq_remove_request() instead
++	 * (and hence introduce a temporary inconsistency), for efficiency.
++	 * In fact, in a forced_dispatch, this prevents two counters related
++	 * to bfqq->dispatched from being uselessly decremented if bfqq
++	 * is not in service, and then incremented again after
++	 * incrementing bfqq->dispatched.
++	 */
++	bfqq->dispatched++;
++	bfq_remove_request(rq);
++	elv_dispatch_sort(q, rq);
++
++	if (bfq_bfqq_sync(bfqq))
++		bfqd->sync_flight++;
++}
++
++/*
++ * Return expired entry, or NULL to just start from scratch in rbtree.
++ */
++static struct request *bfq_check_fifo(struct bfq_queue *bfqq)
++{
++	struct request *rq = NULL;
++
++	if (bfq_bfqq_fifo_expire(bfqq))
++		return NULL;
++
++	bfq_mark_bfqq_fifo_expire(bfqq);
++
++	if (list_empty(&bfqq->fifo))
++		return NULL;
++
++	rq = rq_entry_fifo(bfqq->fifo.next);
++
++	if (time_before(jiffies, rq->fifo_time))
++		return NULL;
++
++	return rq;
++}
++
++/* Must be called with the queue_lock held. */
++static int bfqq_process_refs(struct bfq_queue *bfqq)
++{
++	int process_refs, io_refs;
++
++	io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE];
++	process_refs = atomic_read(&bfqq->ref) - io_refs - bfqq->entity.on_st;
++	BUG_ON(process_refs < 0);
++	return process_refs;
++}
++
++static void bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
++{
++	int process_refs, new_process_refs;
++	struct bfq_queue *__bfqq;
++
++	/*
++	 * If there are no process references on the new_bfqq, then it is
++	 * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
++	 * may have dropped their last reference (not just their last process
++	 * reference).
++	 */
++	if (!bfqq_process_refs(new_bfqq))
++		return;
++
++	/* Avoid a circular list and skip interim queue merges. */
++	while ((__bfqq = new_bfqq->new_bfqq)) {
++		if (__bfqq == bfqq)
++			return;
++		new_bfqq = __bfqq;
++	}
++
++	process_refs = bfqq_process_refs(bfqq);
++	new_process_refs = bfqq_process_refs(new_bfqq);
++	/*
++	 * If the process for the bfqq has gone away, there is no
++	 * sense in merging the queues.
++	 */
++	if (process_refs == 0 || new_process_refs == 0)
++		return;
++
++	/*
++	 * Merge in the direction of the lesser amount of work.
++	 */
++	if (new_process_refs >= process_refs) {
++		bfqq->new_bfqq = new_bfqq;
++		atomic_add(process_refs, &new_bfqq->ref);
++	} else {
++		new_bfqq->new_bfqq = bfqq;
++		atomic_add(new_process_refs, &bfqq->ref);
++	}
++	bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
++		new_bfqq->pid);
++}
++
++static inline unsigned long bfq_bfqq_budget_left(struct bfq_queue *bfqq)
++{
++	struct bfq_entity *entity = &bfqq->entity;
++	return entity->budget - entity->service;
++}
++
++static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++	BUG_ON(bfqq != bfqd->in_service_queue);
++
++	__bfq_bfqd_reset_in_service(bfqd);
++
++	/*
++	 * If this bfqq is shared between multiple processes, check
++	 * to make sure that those processes are still issuing I/Os
++	 * within the mean seek distance. If not, it may be time to
++	 * break the queues apart again.
++	 */
++	if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
++		bfq_mark_bfqq_split_coop(bfqq);
++
++	if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
++		/*
++		 * Overloading budget_timeout field to store the time
++		 * at which the queue remains with no backlog; used by
++		 * the weight-raising mechanism.
++		 */
++		bfqq->budget_timeout = jiffies;
++		bfq_del_bfqq_busy(bfqd, bfqq, 1);
++	} else {
++		bfq_activate_bfqq(bfqd, bfqq);
++		/*
++		 * Resort priority tree of potential close cooperators.
++		 */
++		bfq_rq_pos_tree_add(bfqd, bfqq);
++	}
++}
++
++/**
++ * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
++ * @bfqd: device data.
++ * @bfqq: queue to update.
++ * @reason: reason for expiration.
++ *
++ * Handle the feedback on @bfqq budget.  See the body for detailed
++ * comments.
++ */
++static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
++				     struct bfq_queue *bfqq,
++				     enum bfqq_expiration reason)
++{
++	struct request *next_rq;
++	unsigned long budget, min_budget;
++
++	budget = bfqq->max_budget;
++	min_budget = bfq_min_budget(bfqd);
++
++	BUG_ON(bfqq != bfqd->in_service_queue);
++
++	bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %lu, budg left %lu",
++		bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
++	bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %lu, min budg %lu",
++		budget, bfq_min_budget(bfqd));
++	bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
++		bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
++
++	if (bfq_bfqq_sync(bfqq)) {
++		switch (reason) {
++		/*
++		 * Caveat: in all the following cases we trade latency
++		 * for throughput.
++		 */
++		case BFQ_BFQQ_TOO_IDLE:
++			/*
++			 * This is the only case where we may reduce
++			 * the budget: if there is no request of the
++			 * process still waiting for completion, then
++			 * we assume (tentatively) that the timer has
++			 * expired because the batch of requests of
++			 * the process could have been served with a
++			 * smaller budget.  Hence, betting that the
++			 * process will behave in the same way when it
++			 * becomes backlogged again, we reduce its
++			 * next budget.  As long as we guess right,
++			 * this budget cut reduces the latency
++			 * experienced by the process.
++			 *
++			 * However, if there are still outstanding
++			 * requests, then the process may have not yet
++			 * issued its next request just because it is
++			 * still waiting for the completion of some of
++			 * the still outstanding ones.  So in this
++			 * subcase we do not reduce its budget, on the
++			 * contrary we increase it to possibly boost
++			 * the throughput, as discussed in the
++			 * comments to the BUDGET_TIMEOUT case.
++			 */
++			if (bfqq->dispatched > 0) /* still outstanding reqs */
++				budget = min(budget * 2, bfqd->bfq_max_budget);
++			else {
++				if (budget > 5 * min_budget)
++					budget -= 4 * min_budget;
++				else
++					budget = min_budget;
++			}
++			break;
++		case BFQ_BFQQ_BUDGET_TIMEOUT:
++			/*
++			 * We double the budget here because: 1) it
++			 * gives the chance to boost the throughput if
++			 * this is not a seeky process (which may have
++			 * bumped into this timeout because of, e.g.,
++			 * ZBR), 2) together with charge_full_budget
++			 * it helps give seeky processes higher
++			 * timestamps, and hence be served less
++			 * frequently.
++			 */
++			budget = min(budget * 2, bfqd->bfq_max_budget);
++			break;
++		case BFQ_BFQQ_BUDGET_EXHAUSTED:
++			/*
++			 * The process still has backlog, and did not
++			 * let either the budget timeout or the disk
++			 * idling timeout expire. Hence it is not
++			 * seeky, has a short thinktime and may be
++			 * happy with a higher budget too. So
++			 * definitely increase the budget of this good
++			 * candidate to boost the disk throughput.
++			 */
++			budget = min(budget * 4, bfqd->bfq_max_budget);
++			break;
++		case BFQ_BFQQ_NO_MORE_REQUESTS:
++		       /*
++			* Leave the budget unchanged.
++			*/
++		default:
++			return;
++		}
++	} else /* async queue */
++	    /* async queues always get the maximum possible budget
++	     * (their ability to dispatch is limited by
++	     * @bfqd->bfq_max_budget_async_rq).
++	     */
++		budget = bfqd->bfq_max_budget;
++
++	bfqq->max_budget = budget;
++
++	if (bfqd->budgets_assigned >= 194 && bfqd->bfq_user_max_budget == 0 &&
++	    bfqq->max_budget > bfqd->bfq_max_budget)
++		bfqq->max_budget = bfqd->bfq_max_budget;
++
++	/*
++	 * Make sure that we have enough budget for the next request.
++	 * Since the finish time of the bfqq must be kept in sync with
++	 * the budget, be sure to call __bfq_bfqq_expire() after the
++	 * update.
++	 */
++	next_rq = bfqq->next_rq;
++	if (next_rq != NULL)
++		bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
++					    bfq_serv_to_charge(next_rq, bfqq));
++	else
++		bfqq->entity.budget = bfqq->max_budget;
++
++	bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %lu",
++			next_rq != NULL ? blk_rq_sectors(next_rq) : 0,
++			bfqq->entity.budget);
++}
++
++static unsigned long bfq_calc_max_budget(u64 peak_rate, u64 timeout)
++{
++	unsigned long max_budget;
++
++	/*
++	 * The max_budget calculated when autotuning is equal to the
++	 * number of sectors transferred in timeout_sync at the
++	 * estimated peak rate.
++	 */
++	max_budget = (unsigned long)(peak_rate * 1000 *
++				     timeout >> BFQ_RATE_SHIFT);
++
++	return max_budget;
++}
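++
++/*
++ * Worked example (hypothetical numbers): with a peak rate of one
++ * sector per usec in fixed point, i.e., peak_rate = 1 << BFQ_RATE_SHIFT,
++ * and timeout = 125 ms, the autotuned budget is
++ * ((1 << 16) * 1000 * 125) >> 16 = 125000 sectors: exactly what the
++ * device can transfer in timeout_sync at the estimated rate.
++ */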
++
++/*
++ * In addition to updating the peak rate, checks whether the process
++ * is "slow", and returns 1 if so. This slow flag is used, in addition
++ * to the budget timeout, to reduce the amount of service provided to
++ * seeky processes, and hence reduce their chances to lower the
++ * throughput. See the code for more details.
++ */
++static int bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++				int compensate, enum bfqq_expiration reason)
++{
++	u64 bw, usecs, expected, timeout;
++	ktime_t delta;
++	int update = 0;
++
++	if (!bfq_bfqq_sync(bfqq) || bfq_bfqq_budget_new(bfqq))
++		return 0;
++
++	if (compensate)
++		delta = bfqd->last_idling_start;
++	else
++		delta = ktime_get();
++	delta = ktime_sub(delta, bfqd->last_budget_start);
++	usecs = ktime_to_us(delta);
++
++	/* Don't trust short/unrealistic values. */
++	if (usecs < 100 || usecs >= LONG_MAX)
++		return 0;
++
++	/*
++	 * Calculate the bandwidth for the last slice.  We use a 64 bit
++	 * value to store the peak rate, in sectors per usec in fixed
++	 * point math.  We do so to have enough precision in the estimate
++	 * and to avoid overflows.
++	 */
++	bw = (u64)bfqq->entity.service << BFQ_RATE_SHIFT;
++	do_div(bw, (unsigned long)usecs);
++
++	timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]);
++
++	/*
++	 * Use only long (> 20ms) intervals to filter out spikes for
++	 * the peak rate estimation.
++	 */
++	if (usecs > 20000) {
++		if (bw > bfqd->peak_rate ||
++		   (!BFQQ_SEEKY(bfqq) &&
++		    reason == BFQ_BFQQ_BUDGET_TIMEOUT)) {
++			bfq_log(bfqd, "measured bw =%llu", bw);
++			/*
++			 * To smooth oscillations use a low-pass filter with
++			 * alpha=7/8, i.e.,
++			 * new_rate = (7/8) * old_rate + (1/8) * bw
++			 */
++			do_div(bw, 8);
++			if (bw == 0)
++				return 0;
++			bfqd->peak_rate *= 7;
++			do_div(bfqd->peak_rate, 8);
++			bfqd->peak_rate += bw;
++			update = 1;
++			bfq_log(bfqd, "new peak_rate=%llu", bfqd->peak_rate);
++		}
++
++		update |= bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES - 1;
++
++		if (bfqd->peak_rate_samples < BFQ_PEAK_RATE_SAMPLES)
++			bfqd->peak_rate_samples++;
++
++		if (bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES &&
++		    update) {
++			int dev_type = blk_queue_nonrot(bfqd->queue);
++			if (bfqd->bfq_user_max_budget == 0) {
++				bfqd->bfq_max_budget =
++					bfq_calc_max_budget(bfqd->peak_rate,
++							    timeout);
++				bfq_log(bfqd, "new max_budget=%lu",
++					bfqd->bfq_max_budget);
++			}
++			if (bfqd->device_speed == BFQ_BFQD_FAST &&
++			    bfqd->peak_rate < device_speed_thresh[dev_type]) {
++				bfqd->device_speed = BFQ_BFQD_SLOW;
++				bfqd->RT_prod = R_slow[dev_type] *
++						T_slow[dev_type];
++			} else if (bfqd->device_speed == BFQ_BFQD_SLOW &&
++			    bfqd->peak_rate > device_speed_thresh[dev_type]) {
++				bfqd->device_speed = BFQ_BFQD_FAST;
++				bfqd->RT_prod = R_fast[dev_type] *
++						T_fast[dev_type];
++			}
++		}
++	}
++
++	/*
++	 * If the process has been served for too short a time
++	 * interval to let its possible sequential accesses prevail over
++	 * the initial seek time needed to move the disk head onto the
++	 * first sector it requested, then give the process a chance
++	 * and for the moment return false.
++	 */
++	if (bfqq->entity.budget <= bfq_max_budget(bfqd) / 8)
++		return 0;
++
++	/*
++	 * A process is considered ``slow'' (i.e., seeky, so that we
++	 * cannot treat it fairly in the service domain, as it would
++	 * slow down the other processes too much) if, when a slice
++	 * ends for whatever reason, it has received service at a
++	 * rate that would not be high enough to complete the budget
++	 * before the budget timeout expiration.
++	 */
++	expected = bw * 1000 * timeout >> BFQ_RATE_SHIFT;
++
++	/*
++	 * Caveat: processes doing IO in the slower disk zones will
++	 * tend to be slow(er) even if not seeky. And the estimated
++	 * peak rate will actually be an average over the disk
++	 * surface. Hence, to not be too harsh with unlucky processes,
++	 * we keep a budget/3 margin of safety before declaring a
++	 * process slow.
++	 */
++	return expected > (4 * bfqq->entity.budget) / 3;
++}
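++
++/*
++ * The low-pass filter above, isolated as a plain helper (illustrative
++ * only; the scheduler spells out the same arithmetic with do_div()):
++ * new_rate = (7/8) * old_rate + (1/8) * sample.
++ */
++static inline u64 bfq_example_lowpass(u64 old_rate, u64 sample)
++{
++	return (old_rate * 7 + sample) / 8;
++}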
++
++/*
++ * To be deemed as soft real-time, an application must meet two
++ * requirements. First, the application must not require an average
++ * bandwidth higher than the approximate bandwidth required to play back or
++ * record a compressed high-definition video.
++ * The next function is invoked on the completion of the last request of a
++ * batch, to compute the next-start time instant, soft_rt_next_start, such
++ * that, if the next request of the application does not arrive before
++ * soft_rt_next_start, then the above requirement on the bandwidth is met.
++ *
++ * The second requirement is that the request pattern of the application is
++ * isochronous, i.e., that, after issuing a request or a batch of requests,
++ * the application stops issuing new requests until all its pending requests
++ * have been completed. After that, the application may issue a new batch,
++ * and so on.
++ * For this reason the next function is invoked to compute
++ * soft_rt_next_start only for applications that meet this requirement,
++ * whereas soft_rt_next_start is set to infinity for applications that do
++ * not.
++ *
++ * Unfortunately, even a greedy application may happen to behave in an
++ * isochronous way if the CPU load is high. In fact, the application may
++ * stop issuing requests while the CPUs are busy serving other processes,
++ * then restart, then stop again for a while, and so on. In addition, if
++ * the disk achieves a low enough throughput with the request pattern
++ * issued by the application (e.g., because the request pattern is random
++ * and/or the device is slow), then the application may meet the above
++ * bandwidth requirement too. To prevent such a greedy application from
++ * being deemed soft real-time, a further rule is used in the computation
++ * of soft_rt_next_start: soft_rt_next_start must be higher than the
++ * current time plus the maximum time for which the arrival of a request
++ * is waited for after a sync queue becomes idle, namely
++ * bfqd->bfq_slice_idle.
++ * This filters out greedy applications, as the latter issue instead their
++ * next request as soon as possible after the last one has been completed
++ * (in contrast, when a batch of requests is completed, a soft real-time
++ * application spends some time processing data).
++ *
++ * Unfortunately, the last filter may easily generate false positives if
++ * only bfqd->bfq_slice_idle is used as a reference time interval and one
++ * or both the following cases occur:
++ * 1) HZ is so low that the duration of a jiffy is comparable to or higher
++ *    than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with
++ *    HZ=100.
++ * 2) jiffies, instead of increasing at a constant rate, may stop increasing
++ *    for a while, then suddenly 'jump' by several units to recover the lost
++ *    increments. This seems to happen, e.g., inside virtual machines.
++ * To address this issue, we do not use as a reference time interval just
++ * bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In
++ * particular we add the minimum number of jiffies for which the filter
++ * seems to be quite precise also in embedded systems and KVM/QEMU virtual
++ * machines.
++ */
++static inline unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
++						       struct bfq_queue *bfqq)
++{
++	return max(bfqq->last_idle_bklogged +
++		   HZ * bfqq->service_from_backlogged /
++		   bfqd->bfq_wr_max_softrt_rate,
++		   jiffies + bfqd->bfq_slice_idle + 4);
++}
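++
++/*
++ * Worked example for the formula above, with the default
++ * bfq_wr_max_softrt_rate of 7000 set in bfq_init_queue (units of
++ * service per second): if the queue has received 3500 units of
++ * service since it last became backlogged, the first operand of max()
++ * evaluates to last_idle_bklogged + HZ * 3500 / 7000, i.e., half a
++ * second after the queue last became backlogged; an earlier arrival
++ * would exceed the allowed bandwidth.
++ */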
++
++/*
++ * Return the largest possible time instant such that, for as long as
++ * possible, the current time will be lower than this time instant
++ * according to the macro time_is_before_jiffies().
++ */
++static inline unsigned long bfq_infinity_from_now(unsigned long now)
++{
++	return now + ULONG_MAX / 2;
++}
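++
++/*
++ * Rationale for the ULONG_MAX/2 offset above: the time_* macros
++ * compare jiffies values through a signed difference, so now +
++ * ULONG_MAX / 2 is the farthest instant still seen as in the future.
++ * For example, with 32-bit unsigned long and HZ=250, that is about
++ * 2^31 / 250 seconds, i.e., roughly 99 days, ahead of now.
++ */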
++
++/**
++ * bfq_bfqq_expire - expire a queue.
++ * @bfqd: device owning the queue.
++ * @bfqq: the queue to expire.
++ * @compensate: if true, compensate for the time spent idling.
++ * @reason: the reason causing the expiration.
++ *
++ * If the process associated with the queue is slow (i.e., seeky), or in
++ * case of budget timeout, or, finally, if it is async, we
++ * artificially charge it an entire budget (independently of the
++ * actual service it received). As a consequence, the queue will get
++ * higher timestamps than the correct ones upon reactivation, and
++ * hence it will be rescheduled as if it had received more service
++ * than what it actually received. In the end, this class of processes
++ * will receive less service in proportion to how slowly they consume
++ * their budgets (and hence how seriously they tend to lower the
++ * throughput).
++ *
++ * In contrast, when a queue expires because it has been idling for
++ * too long or because it exhausted its budget, we do not touch the
++ * amount of service it has received. Hence when the queue will be
++ * reactivated and its timestamps updated, the latter will be in sync
++ * with the actual service received by the queue until expiration.
++ *
++ * Charging a full budget to the first type of queues and the exact
++ * service to the others has the effect of using the WF2Q+ policy to
++ * schedule the former on a timeslice basis, without violating the
++ * service domain guarantees of the latter.
++ */
++static void bfq_bfqq_expire(struct bfq_data *bfqd,
++			    struct bfq_queue *bfqq,
++			    int compensate,
++			    enum bfqq_expiration reason)
++{
++	int slow;
++
++	BUG_ON(bfqq != bfqd->in_service_queue);
++
++	/*
++	 * Update disk peak rate for autotuning and check whether the
++	 * process is slow (see bfq_update_peak_rate).
++	 */
++	slow = bfq_update_peak_rate(bfqd, bfqq, compensate, reason);
++
++	/*
++	 * As explained above, 'punish' slow (i.e., seeky), timed-out
++	 * and async queues, to favor sequential sync workloads.
++	 *
++	 * Processes doing I/O in the slower disk zones will tend to be
++	 * slow(er) even if not seeky. Hence, since the estimated peak
++	 * rate is actually an average over the disk surface, these
++	 * processes may timeout just for bad luck. To avoid punishing
++	 * them we do not charge a full budget to a process that
++	 * succeeded in consuming at least 2/3 of its budget.
++	 */
++	if (slow || (reason == BFQ_BFQQ_BUDGET_TIMEOUT &&
++		     bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3))
++		bfq_bfqq_charge_full_budget(bfqq);
++
++	bfqq->service_from_backlogged += bfqq->entity.service;
++
++	if (BFQQ_SEEKY(bfqq) && reason == BFQ_BFQQ_BUDGET_TIMEOUT &&
++	    !bfq_bfqq_constantly_seeky(bfqq)) {
++		bfq_mark_bfqq_constantly_seeky(bfqq);
++		if (!blk_queue_nonrot(bfqd->queue))
++			bfqd->const_seeky_busy_in_flight_queues++;
++	}
++
++	if (reason == BFQ_BFQQ_TOO_IDLE &&
++	    bfqq->entity.service <= 2 * bfqq->entity.budget / 10)
++		bfq_clear_bfqq_IO_bound(bfqq);
++
++	if (bfqd->low_latency && bfqq->wr_coeff == 1)
++		bfqq->last_wr_start_finish = jiffies;
++
++	if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 &&
++	    RB_EMPTY_ROOT(&bfqq->sort_list)) {
++		/*
++		 * If we get here, and there are no outstanding requests,
++		 * then the request pattern is isochronous (see the comments
++		 * to the function bfq_bfqq_softrt_next_start()). Hence we
++		 * can compute soft_rt_next_start. If, instead, the queue
++		 * still has outstanding requests, then we have to wait
++		 * for the completion of all the outstanding requests to
++		 * discover whether the request pattern is actually
++		 * isochronous.
++		 */
++		if (bfqq->dispatched == 0)
++			bfqq->soft_rt_next_start =
++				bfq_bfqq_softrt_next_start(bfqd, bfqq);
++		else {
++			/*
++			 * The application is still waiting for the
++			 * completion of one or more requests:
++			 * prevent it from possibly being incorrectly
++			 * deemed as soft real-time by setting its
++			 * soft_rt_next_start to infinity. In fact,
++			 * without this assignment, the application
++			 * would be incorrectly deemed as soft
++			 * real-time if:
++			 * 1) it issued a new request before the
++			 *    completion of all its in-flight
++			 *    requests, and
++			 * 2) at that time, its soft_rt_next_start
++			 *    happened to be in the past.
++			 */
++			bfqq->soft_rt_next_start =
++				bfq_infinity_from_now(jiffies);
++			/*
++			 * Schedule an update of soft_rt_next_start to when
++			 * the task may be discovered to be isochronous.
++			 */
++			bfq_mark_bfqq_softrt_update(bfqq);
++		}
++	}
++
++	bfq_log_bfqq(bfqd, bfqq,
++		"expire (%d, slow %d, num_disp %d, idle_win %d)", reason,
++		slow, bfqq->dispatched, bfq_bfqq_idle_window(bfqq));
++
++	/*
++	 * Increase, decrease or leave budget unchanged according to
++	 * reason.
++	 */
++	__bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
++	__bfq_bfqq_expire(bfqd, bfqq);
++}
++
++/*
++ * Budget timeout is not implemented through a dedicated timer, but
++ * just checked on request arrivals and completions, as well as on
++ * idle timer expirations.
++ */
++static int bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
++{
++	if (bfq_bfqq_budget_new(bfqq) ||
++	    time_before(jiffies, bfqq->budget_timeout))
++		return 0;
++	return 1;
++}
++
++/*
++ * If we expire a queue that is waiting for the arrival of a new
++ * request, we may prevent the fictitious timestamp back-shifting that
++ * allows the guarantees of the queue to be preserved (see [1] for
++ * this tricky aspect). Hence we return true only if this condition
++ * does not hold, or if the queue is slow enough to deserve only to be
++ * kicked off for preserving a high throughput.
++ */
++static inline int bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
++{
++	bfq_log_bfqq(bfqq->bfqd, bfqq,
++		"may_budget_timeout: wait_request %d left %d timeout %d",
++		bfq_bfqq_wait_request(bfqq),
++		bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3,
++		bfq_bfqq_budget_timeout(bfqq));
++
++	return (!bfq_bfqq_wait_request(bfqq) ||
++		bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)
++		&&
++		bfq_bfqq_budget_timeout(bfqq);
++}
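++
++/*
++ * Equivalently: expiring for budget timeout is allowed if and only if
++ * the budget timeout has elapsed and, in addition, the queue either
++ * is not idling in wait for a new request or still has at least a
++ * third of its budget left.
++ */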
++
++/*
++ * Device idling is allowed only for the queues for which this function
++ * returns true. For this reason, the return value of this function plays a
++ * critical role for both throughput boosting and service guarantees. The
++ * return value is computed through a logical expression. In this rather
++ * long comment, we try to briefly describe all the details and motivations
++ * behind the components of this logical expression.
++ *
++ * First, the expression is false if bfqq is not sync, or if both of the
++ * following hold: bfqq became active during a large burst of queue
++ * activations, and the pattern of requests bfqq contains is such that
++ * expiring bfqq boosts the throughput. In fact, queues that became
++ * active during a large burst benefit
++ * only from throughput, as discussed in the comments to bfq_handle_burst.
++ * In this respect, expiring bfqq certainly boosts the throughput on NCQ-
++ * capable flash-based devices, whereas, on rotational devices, it boosts
++ * the throughput only if bfqq contains random requests.
++ *
++ * At the opposite end, if (a) bfqq is sync, (b) the above burst-related
++ * condition does not hold, and (c) bfqq is being weight-raised, then the
++ * expression always evaluates to true, as device idling is instrumental
++ * for preserving low-latency guarantees (see [1]). If, instead, conditions
++ * (a) and (b) do hold, but (c) does not, then the expression evaluates to
++ * true only if: (1) bfqq is I/O-bound and has a non-null idle window, and
++ * (2) at least one of the following two conditions holds.
++ * The first condition is that the device is not performing NCQ, because
++ * idling the device most certainly boosts the throughput if this condition
++ * holds and bfqq is I/O-bound and has been granted a non-null idle window.
++ * The second compound condition is made of the logical AND of two components.
++ *
++ * The first component is true only if there is no weight-raised busy
++ * queue. This guarantees that the device is not idled for a sync non-
++ * weight-raised queue when there are busy weight-raised queues. The former
++ * is then expired immediately if empty. Combined with the timestamping
++ * rules of BFQ (see [1] for details), this causes sync non-weight-raised
++ * queues to get a lower number of requests served, and hence to ask for a
++ * lower number of requests from the request pool, before the busy weight-
++ * raised queues get served again.
++ *
++ * This is beneficial for the processes associated with weight-raised
++ * queues, when the request pool is saturated (e.g., in the presence of
++ * write hogs). In fact, if the processes associated with the other queues
++ * ask for requests at a lower rate, then weight-raised processes have a
++ * higher probability to get a request from the pool immediately (or at
++ * least soon) when they need one. Hence they have a higher probability to
++ * actually get a fraction of the disk throughput proportional to their
++ * high weight. This is especially true with NCQ-capable drives, which
++ * enqueue several requests in advance and further reorder internally-
++ * queued requests.
++ *
++ * In the end, mistreating non-weight-raised queues when there are busy
++ * weight-raised queues seems to mitigate starvation problems in the
++ * presence of heavy write workloads and NCQ, and hence to guarantee a
++ * higher application and system responsiveness in these hostile scenarios.
++ *
++ * If the first component of the compound condition is instead true, i.e.,
++ * there is no weight-raised busy queue, then the second component of the
++ * compound condition takes into account service-guarantee and throughput
++ * issues related to NCQ (recall that the compound condition is evaluated
++ * only if the device is detected as supporting NCQ).
++ *
++ * As for service guarantees, allowing the drive to enqueue more than one
++ * request at a time, and hence delegating de facto final scheduling
++ * decisions to the drive's internal scheduler, causes loss of control on
++ * the actual request service order. In this respect, when the drive is
++ * allowed to enqueue more than one request at a time, the service
++ * distribution enforced by the drive's internal scheduler is likely to
++ * coincide with the desired device-throughput distribution only in the
++ * following, perfectly symmetric, scenario:
++ * 1) all active queues have the same weight,
++ * 2) all active groups at the same level in the groups tree have the same
++ *    weight,
++ * 3) all active groups at the same level in the groups tree have the same
++ *    number of children.
++ *
++ * Even in such a scenario, sequential I/O may still receive a preferential
++ * treatment, but this is not likely to be a big issue with flash-based
++ * devices, because of their non-dramatic loss of throughput with random
++ * I/O. Things do differ with HDDs, for which additional care is taken, as
++ * explained after completing the discussion for flash-based devices.
++ *
++ * Unfortunately, keeping the necessary state for evaluating exactly the
++ * above symmetry conditions would be quite complex and time-consuming.
++ * Therefore BFQ evaluates instead the following stronger sub-conditions,
++ * for which it is much easier to maintain the needed state:
++ * 1) all active queues have the same weight,
++ * 2) all active groups have the same weight,
++ * 3) all active groups have at most one active child each.
++ * In particular, the last two conditions are always true if hierarchical
++ * support and the cgroups interface are not enabled, hence no state needs
++ * to be maintained in this case.
++ *
++ * According to the above considerations, the second component of the
++ * compound condition evaluates to true if any of the above symmetry
++ * sub-conditions does not hold, or the device is not flash-based. Therefore,
++ * if also the first component is true, then idling is allowed for a sync
++ * queue. These are the only sub-conditions considered if the device is
++ * flash-based, as, for such a device, it is sensible to force idling only
++ * for service-guarantee issues. In fact, as for throughput, idling
++ * NCQ-capable flash-based devices would not boost the throughput even
++ * with sequential I/O; rather it would lower the throughput in proportion
++ * to how fast the device is. In the end, (only) if all three
++ * sub-conditions hold and the device is flash-based, the compound
++ * condition evaluates to false and therefore no idling is performed.
++ *
++ * As already said, things change with a rotational device, where idling
++ * boosts the throughput with sequential I/O (even with NCQ). Hence, for
++ * such a device the second component of the compound condition evaluates
++ * to true also if the following additional sub-condition does not hold:
++ * the queue is constantly seeky. Unfortunately, this different behavior
++ * with respect to flash-based devices causes an additional asymmetry: if
++ * some sync queues enjoy idling and some other sync queues do not, then
++ * the latter get a low share of the device throughput, simply because the
++ * former get many requests served after being set as in service, whereas
++ * the latter do not. As a consequence, to guarantee the desired throughput
++ * distribution, on HDDs the compound expression evaluates to true (and
++ * hence device idling is performed) also if the following last symmetry
++ * condition does not hold: no other queue is benefiting from idling. This
++ * last condition, too, is actually replaced with a simpler-to-maintain and
++ * stronger condition: there is no busy queue which is not constantly seeky
++ * (and hence may also benefit from idling).
++ *
++ * To sum up, when all the required symmetry and throughput-boosting
++ * sub-conditions hold, the second component of the compound condition
++ * evaluates to false, and hence no idling is performed. This helps to
++ * keep the drives' internal queues full on NCQ-capable devices, and hence
++ * to boost the throughput, without causing 'almost' any loss of service
++ * guarantees. The 'almost' follows from the fact that, if the internal
++ * queue of one such device is filled while all the sub-conditions hold,
++ * but at some point in time some sub-condition ceases to hold, then it may
++ * become impossible to let requests be served in the new desired order
++ * until all the requests already queued in the device have been served.
++ */
++static inline bool bfq_bfqq_must_not_expire(struct bfq_queue *bfqq)
++{
++	struct bfq_data *bfqd = bfqq->bfqd;
++#ifdef CONFIG_CGROUP_BFQIO
++#define symmetric_scenario	  (!bfqd->active_numerous_groups && \
++				   !bfq_differentiated_weights(bfqd))
++#else
++#define symmetric_scenario	  (!bfq_differentiated_weights(bfqd))
++#endif
++#define cond_for_seeky_on_ncq_hdd (bfq_bfqq_constantly_seeky(bfqq) && \
++				   bfqd->busy_in_flight_queues == \
++				   bfqd->const_seeky_busy_in_flight_queues)
++
++#define cond_for_expiring_in_burst	(bfq_bfqq_in_large_burst(bfqq) && \
++					 bfqd->hw_tag && \
++					 (blk_queue_nonrot(bfqd->queue) || \
++					  bfq_bfqq_constantly_seeky(bfqq)))
++
++/*
++ * Condition for expiring a non-weight-raised queue (and hence not idling
++ * the device).
++ */
++#define cond_for_expiring_non_wr  (bfqd->hw_tag && \
++				   (bfqd->wr_busy_queues > 0 || \
++				    (symmetric_scenario && \
++				     (blk_queue_nonrot(bfqd->queue) || \
++				      cond_for_seeky_on_ncq_hdd))))
++
++	return bfq_bfqq_sync(bfqq) &&
++		!cond_for_expiring_in_burst &&
++		(bfqq->wr_coeff > 1 ||
++		 (bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_idle_window(bfqq) &&
++		  !cond_for_expiring_non_wr)
++	);
++}
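++
++/*
++ * Compact restatement of the return value above: idling is allowed
++ * only for a sync queue that does not match the burst-related
++ * expiration condition and that either is being weight-raised
++ * (wr_coeff > 1) or is I/O-bound, has a non-null idle window and does
++ * not match the expiration condition for non-weight-raised queues.
++ */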
++
++/*
++ * If the in-service queue is empty but sync, and the function
++ * bfq_bfqq_must_not_expire returns true, then:
++ * 1) the queue must remain in service and cannot be expired, and
++ * 2) the disk must be idled to wait for the possible arrival of a new
++ *    request for the queue.
++ * See the comments to the function bfq_bfqq_must_not_expire for the reasons
++ * why performing device idling is the best choice to boost the throughput
++ * and preserve service guarantees when bfq_bfqq_must_not_expire itself
++ * returns true.
++ */
++static inline bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
++{
++	struct bfq_data *bfqd = bfqq->bfqd;
++
++	return RB_EMPTY_ROOT(&bfqq->sort_list) && bfqd->bfq_slice_idle != 0 &&
++	       bfq_bfqq_must_not_expire(bfqq);
++}
++
++/*
++ * Select a queue for service.  If we have a current queue in service,
++ * check whether to continue servicing it, or retrieve and set a new one.
++ */
++static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
++{
++	struct bfq_queue *bfqq, *new_bfqq = NULL;
++	struct request *next_rq;
++	enum bfqq_expiration reason = BFQ_BFQQ_BUDGET_TIMEOUT;
++
++	bfqq = bfqd->in_service_queue;
++	if (bfqq == NULL)
++		goto new_queue;
++
++	bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
++
++	/*
++	 * If another queue has a request waiting within our mean seek
++	 * distance, let it run. The expire code will check for close
++	 * cooperators and put the close queue at the front of the
++	 * service tree. If possible, merge the expiring queue with the
++	 * new bfqq.
++	 */
++	new_bfqq = bfq_close_cooperator(bfqd, bfqq);
++	if (new_bfqq != NULL && bfqq->new_bfqq == NULL)
++		bfq_setup_merge(bfqq, new_bfqq);
++
++	if (bfq_may_expire_for_budg_timeout(bfqq) &&
++	    !timer_pending(&bfqd->idle_slice_timer) &&
++	    !bfq_bfqq_must_idle(bfqq))
++		goto expire;
++
++	next_rq = bfqq->next_rq;
++	/*
++	 * If bfqq has requests queued and it has enough budget left to
++	 * serve them, keep the queue, otherwise expire it.
++	 */
++	if (next_rq != NULL) {
++		if (bfq_serv_to_charge(next_rq, bfqq) >
++			bfq_bfqq_budget_left(bfqq)) {
++			reason = BFQ_BFQQ_BUDGET_EXHAUSTED;
++			goto expire;
++		} else {
++			/*
++			 * The idle timer may be pending because we may
++			 * not disable disk idling even when a new request
++			 * arrives.
++			 */
++			if (timer_pending(&bfqd->idle_slice_timer)) {
++				/*
++				 * If we get here, then: 1) at least one new
++				 * request has arrived but we have not
++				 * disabled the timer because the request was
++				 * too small, and 2) the block layer has then
++				 * unplugged the device, causing dispatch to
++				 * be invoked.
++				 *
++				 * Since the device is unplugged, now the
++				 * requests are probably large enough to
++				 * provide a reasonable throughput.
++				 * So we disable idling.
++				 */
++				bfq_clear_bfqq_wait_request(bfqq);
++				del_timer(&bfqd->idle_slice_timer);
++			}
++			if (new_bfqq == NULL)
++				goto keep_queue;
++			else
++				goto expire;
++		}
++	}
++
++	/*
++	 * No requests pending.  If the in-service queue still has requests
++	 * in flight (possibly waiting for a completion) or is idling for a
++	 * new request, then keep it.
++	 */
++	if (new_bfqq == NULL && (timer_pending(&bfqd->idle_slice_timer) ||
++	    (bfqq->dispatched != 0 && bfq_bfqq_must_not_expire(bfqq)))) {
++		bfqq = NULL;
++		goto keep_queue;
++	} else if (new_bfqq != NULL && timer_pending(&bfqd->idle_slice_timer)) {
++		/*
++		 * Expiring the queue because there is a close cooperator,
++		 * cancel timer.
++		 */
++		bfq_clear_bfqq_wait_request(bfqq);
++		del_timer(&bfqd->idle_slice_timer);
++	}
++
++	reason = BFQ_BFQQ_NO_MORE_REQUESTS;
++expire:
++	bfq_bfqq_expire(bfqd, bfqq, 0, reason);
++new_queue:
++	bfqq = bfq_set_in_service_queue(bfqd, new_bfqq);
++	bfq_log(bfqd, "select_queue: new queue %d returned",
++		bfqq != NULL ? bfqq->pid : 0);
++keep_queue:
++	return bfqq;
++}
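++
++/*
++ * Summary of the outcomes above: the in-service queue is kept if it
++ * has queued requests and enough budget for the next one; NULL is
++ * returned while the queue is idling for a new request or still has
++ * dispatched requests in flight; otherwise the queue is expired, for
++ * budget timeout, budget exhaustion, lack of requests, or to switch
++ * to a close cooperator, and a new queue is put in service.
++ */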
++
++static void bfq_update_wr_data(struct bfq_data *bfqd,
++			       struct bfq_queue *bfqq)
++{
++	if (bfqq->wr_coeff > 1) { /* queue is being boosted */
++		struct bfq_entity *entity = &bfqq->entity;
++
++		bfq_log_bfqq(bfqd, bfqq,
++			"raising period dur %u/%u msec, old coeff %u, w %d(%d)",
++			jiffies_to_msecs(jiffies -
++				bfqq->last_wr_start_finish),
++			jiffies_to_msecs(bfqq->wr_cur_max_time),
++			bfqq->wr_coeff,
++			bfqq->entity.weight, bfqq->entity.orig_weight);
++
++		BUG_ON(bfqq != bfqd->in_service_queue && entity->weight !=
++		       entity->orig_weight * bfqq->wr_coeff);
++		if (entity->ioprio_changed)
++			bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
++		/*
++		 * If the queue was activated in a burst, or
++		 * too much time has elapsed from the beginning
++		 * of this weight-raising, then end weight raising.
++		 */
++		if (bfq_bfqq_in_large_burst(bfqq) ||
++		    time_is_before_jiffies(bfqq->last_wr_start_finish +
++					   bfqq->wr_cur_max_time)) {
++			bfqq->last_wr_start_finish = jiffies;
++			bfq_log_bfqq(bfqd, bfqq,
++				     "wrais ending at %lu, rais_max_time %u",
++				     bfqq->last_wr_start_finish,
++				     jiffies_to_msecs(bfqq->wr_cur_max_time));
++			bfq_bfqq_end_wr(bfqq);
++			__bfq_entity_update_weight_prio(
++				bfq_entity_service_tree(entity),
++				entity);
++		}
++	}
++}
++
++/*
++ * Dispatch one request from bfqq, moving it to the request queue
++ * dispatch list.
++ */
++static int bfq_dispatch_request(struct bfq_data *bfqd,
++				struct bfq_queue *bfqq)
++{
++	int dispatched = 0;
++	struct request *rq;
++	unsigned long service_to_charge;
++
++	BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list));
++
++	/* Follow expired path, else get first next available. */
++	rq = bfq_check_fifo(bfqq);
++	if (rq == NULL)
++		rq = bfqq->next_rq;
++	service_to_charge = bfq_serv_to_charge(rq, bfqq);
++
++	if (service_to_charge > bfq_bfqq_budget_left(bfqq)) {
++		/*
++		 * This may happen if the next rq is chosen in fifo order
++		 * instead of sector order. The budget is properly
++		 * dimensioned to be always sufficient to serve the next
++		 * request only if it is chosen in sector order. The reason
++		 * is that it would be quite inefficient and of little use
++		 * to always make sure that the budget is large enough to
++		 * serve even the possible next rq in fifo order.
++		 * In fact, requests are seldom served in fifo order.
++		 *
++		 * Expire the queue for budget exhaustion, and make sure
++		 * that the next act_budget is enough to serve the next
++		 * request, even if it comes from the fifo expired path.
++		 */
++		bfqq->next_rq = rq;
++		/*
++		 * Since this dispatch failed, make sure that
++		 * a new one will be performed.
++		 */
++		if (!bfqd->rq_in_driver)
++			bfq_schedule_dispatch(bfqd);
++		goto expire;
++	}
++
++	/* Finally, insert request into driver dispatch list. */
++	bfq_bfqq_served(bfqq, service_to_charge);
++	bfq_dispatch_insert(bfqd->queue, rq);
++
++	bfq_update_wr_data(bfqd, bfqq);
++
++	bfq_log_bfqq(bfqd, bfqq,
++			"dispatched %u sec req (%llu), budg left %lu",
++			blk_rq_sectors(rq),
++			(unsigned long long)blk_rq_pos(rq),
++			bfq_bfqq_budget_left(bfqq));
++
++	dispatched++;
++
++	if (bfqd->in_service_bic == NULL) {
++		atomic_long_inc(&RQ_BIC(rq)->icq.ioc->refcount);
++		bfqd->in_service_bic = RQ_BIC(rq);
++	}
++
++	if (bfqd->busy_queues > 1 && ((!bfq_bfqq_sync(bfqq) &&
++	    dispatched >= bfqd->bfq_max_budget_async_rq) ||
++	    bfq_class_idle(bfqq)))
++		goto expire;
++
++	return dispatched;
++
++expire:
++	bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_EXHAUSTED);
++	return dispatched;
++}
++
++static int __bfq_forced_dispatch_bfqq(struct bfq_queue *bfqq)
++{
++	int dispatched = 0;
++
++	while (bfqq->next_rq != NULL) {
++		bfq_dispatch_insert(bfqq->bfqd->queue, bfqq->next_rq);
++		dispatched++;
++	}
++
++	BUG_ON(!list_empty(&bfqq->fifo));
++	return dispatched;
++}
++
++/*
++ * Drain our current requests.
++ * Used for barriers and when switching io schedulers on-the-fly.
++ */
++static int bfq_forced_dispatch(struct bfq_data *bfqd)
++{
++	struct bfq_queue *bfqq, *n;
++	struct bfq_service_tree *st;
++	int dispatched = 0;
++
++	bfqq = bfqd->in_service_queue;
++	if (bfqq != NULL)
++		__bfq_bfqq_expire(bfqd, bfqq);
++
++	/*
++	 * Loop through classes, and be careful to leave the scheduler
++	 * in a consistent state, as feedback mechanisms and vtime
++	 * updates cannot be disabled during the process.
++	 */
++	list_for_each_entry_safe(bfqq, n, &bfqd->active_list, bfqq_list) {
++		st = bfq_entity_service_tree(&bfqq->entity);
++
++		dispatched += __bfq_forced_dispatch_bfqq(bfqq);
++		bfqq->max_budget = bfq_max_budget(bfqd);
++
++		bfq_forget_idle(st);
++	}
++
++	BUG_ON(bfqd->busy_queues != 0);
++
++	return dispatched;
++}
++
++static int bfq_dispatch_requests(struct request_queue *q, int force)
++{
++	struct bfq_data *bfqd = q->elevator->elevator_data;
++	struct bfq_queue *bfqq;
++	int max_dispatch;
++
++	bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
++	if (bfqd->busy_queues == 0)
++		return 0;
++
++	if (unlikely(force))
++		return bfq_forced_dispatch(bfqd);
++
++	bfqq = bfq_select_queue(bfqd);
++	if (bfqq == NULL)
++		return 0;
++
++	max_dispatch = bfqd->bfq_quantum;
++	if (bfq_class_idle(bfqq))
++		max_dispatch = 1;
++
++	if (!bfq_bfqq_sync(bfqq))
++		max_dispatch = bfqd->bfq_max_budget_async_rq;
++
++	if (bfqq->dispatched >= max_dispatch) {
++		if (bfqd->busy_queues > 1)
++			return 0;
++		if (bfqq->dispatched >= 4 * max_dispatch)
++			return 0;
++	}
++
++	if (bfqd->sync_flight != 0 && !bfq_bfqq_sync(bfqq))
++		return 0;
++
++	bfq_clear_bfqq_wait_request(bfqq);
++	BUG_ON(timer_pending(&bfqd->idle_slice_timer));
++
++	if (!bfq_dispatch_request(bfqd, bfqq))
++		return 0;
++
++	bfq_log_bfqq(bfqd, bfqq, "dispatched one request of %d (max_disp %d)",
++			bfqq->pid, max_dispatch);
++
++	return 1;
++}
++
++/*
++ * Task holds one reference to the queue, dropped when task exits.  Each rq
++ * in-flight on this queue also holds a reference, dropped when rq is freed.
++ *
++ * Queue lock must be held here.
++ */
++static void bfq_put_queue(struct bfq_queue *bfqq)
++{
++	struct bfq_data *bfqd = bfqq->bfqd;
++
++	BUG_ON(atomic_read(&bfqq->ref) <= 0);
++
++	bfq_log_bfqq(bfqd, bfqq, "put_queue: %p %d", bfqq,
++		     atomic_read(&bfqq->ref));
++	if (!atomic_dec_and_test(&bfqq->ref))
++		return;
++
++	BUG_ON(rb_first(&bfqq->sort_list) != NULL);
++	BUG_ON(bfqq->allocated[READ] + bfqq->allocated[WRITE] != 0);
++	BUG_ON(bfqq->entity.tree != NULL);
++	BUG_ON(bfq_bfqq_busy(bfqq));
++	BUG_ON(bfqd->in_service_queue == bfqq);
++
++	if (bfq_bfqq_sync(bfqq))
++		/*
++		 * The fact that this queue is being destroyed does not
++		 * invalidate the fact that this queue may have been
++		 * activated during the current burst. As a consequence,
++		 * although the queue does not exist anymore, and hence
++		 * needs to be removed from the burst list if present
++		 * there, the burst size must not be decremented.
++		 */
++		hlist_del_init(&bfqq->burst_list_node);
++
++	bfq_log_bfqq(bfqd, bfqq, "put_queue: %p freed", bfqq);
++
++	kmem_cache_free(bfq_pool, bfqq);
++}
++
++static void bfq_put_cooperator(struct bfq_queue *bfqq)
++{
++	struct bfq_queue *__bfqq, *next;
++
++	/*
++	 * If this queue was scheduled to merge with another queue, be
++	 * sure to drop the reference taken on that queue (and others in
++	 * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs.
++	 */
++	__bfqq = bfqq->new_bfqq;
++	while (__bfqq) {
++		if (__bfqq == bfqq)
++			break;
++		next = __bfqq->new_bfqq;
++		bfq_put_queue(__bfqq);
++		__bfqq = next;
++	}
++}
++
++static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++	if (bfqq == bfqd->in_service_queue) {
++		__bfq_bfqq_expire(bfqd, bfqq);
++		bfq_schedule_dispatch(bfqd);
++	}
++
++	bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq,
++		     atomic_read(&bfqq->ref));
++
++	bfq_put_cooperator(bfqq);
++
++	bfq_put_queue(bfqq);
++}
++
++static inline void bfq_init_icq(struct io_cq *icq)
++{
++	struct bfq_io_cq *bic = icq_to_bic(icq);
++
++	bic->ttime.last_end_request = jiffies;
++}
++
++static void bfq_exit_icq(struct io_cq *icq)
++{
++	struct bfq_io_cq *bic = icq_to_bic(icq);
++	struct bfq_data *bfqd = bic_to_bfqd(bic);
++
++	if (bic->bfqq[BLK_RW_ASYNC]) {
++		bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_ASYNC]);
++		bic->bfqq[BLK_RW_ASYNC] = NULL;
++	}
++
++	if (bic->bfqq[BLK_RW_SYNC]) {
++		bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_SYNC]);
++		bic->bfqq[BLK_RW_SYNC] = NULL;
++	}
++}
++
++/*
++ * Update the entity prio values; note that the new values will not
++ * be used until the next (re)activation.
++ */
++static void bfq_init_prio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
++{
++	struct task_struct *tsk = current;
++	int ioprio_class;
++
++	if (!bfq_bfqq_prio_changed(bfqq))
++		return;
++
++	ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
++	switch (ioprio_class) {
++	default:
++		dev_err(bfqq->bfqd->queue->backing_dev_info.dev,
++			"bfq: bad prio class %d\n", ioprio_class);
++	case IOPRIO_CLASS_NONE:
++		/*
++		 * No prio set, inherit CPU scheduling settings.
++		 */
++		bfqq->entity.new_ioprio = task_nice_ioprio(tsk);
++		bfqq->entity.new_ioprio_class = task_nice_ioclass(tsk);
++		break;
++	case IOPRIO_CLASS_RT:
++		bfqq->entity.new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
++		bfqq->entity.new_ioprio_class = IOPRIO_CLASS_RT;
++		break;
++	case IOPRIO_CLASS_BE:
++		bfqq->entity.new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
++		bfqq->entity.new_ioprio_class = IOPRIO_CLASS_BE;
++		break;
++	case IOPRIO_CLASS_IDLE:
++		bfqq->entity.new_ioprio_class = IOPRIO_CLASS_IDLE;
++		bfqq->entity.new_ioprio = 7;
++		bfq_clear_bfqq_idle_window(bfqq);
++		break;
++	}
++
++	if (bfqq->entity.new_ioprio < 0 ||
++	    bfqq->entity.new_ioprio >= IOPRIO_BE_NR) {
++		printk(KERN_CRIT "bfq_init_prio_data: new_ioprio %d\n",
++				 bfqq->entity.new_ioprio);
++		BUG();
++	}
++
++	bfqq->entity.ioprio_changed = 1;
++
++	bfq_clear_bfqq_prio_changed(bfqq);
++}
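++
++/*
++ * For illustration: a task started with, e.g., "ionice -c 2 -n 4"
++ * carries class IOPRIO_CLASS_BE at level 4, so the switch above sets
++ * new_ioprio = 4 and new_ioprio_class = IOPRIO_CLASS_BE, whereas a
++ * task with no I/O priority set falls in IOPRIO_CLASS_NONE and
++ * inherits its values from the CPU scheduling parameters through
++ * task_nice_ioprio() and task_nice_ioclass().
++ */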
++
++static void bfq_changed_ioprio(struct bfq_io_cq *bic)
++{
++	struct bfq_data *bfqd;
++	struct bfq_queue *bfqq, *new_bfqq;
++	struct bfq_group *bfqg;
++	unsigned long uninitialized_var(flags);
++	int ioprio = bic->icq.ioc->ioprio;
++
++	bfqd = bfq_get_bfqd_locked(&(bic->icq.q->elevator->elevator_data),
++				   &flags);
++	/*
++	 * This condition may trigger on a newly created bic, be sure to
++	 * drop the lock before returning.
++	 */
++	if (unlikely(bfqd == NULL) || likely(bic->ioprio == ioprio))
++		goto out;
++
++	bfqq = bic->bfqq[BLK_RW_ASYNC];
++	if (bfqq != NULL) {
++		bfqg = container_of(bfqq->entity.sched_data, struct bfq_group,
++				    sched_data);
++		new_bfqq = bfq_get_queue(bfqd, bfqg, BLK_RW_ASYNC, bic,
++					 GFP_ATOMIC);
++		if (new_bfqq != NULL) {
++			bic->bfqq[BLK_RW_ASYNC] = new_bfqq;
++			bfq_log_bfqq(bfqd, bfqq,
++				     "changed_ioprio: bfqq %p %d",
++				     bfqq, atomic_read(&bfqq->ref));
++			bfq_put_queue(bfqq);
++		}
++	}
++
++	bfqq = bic->bfqq[BLK_RW_SYNC];
++	if (bfqq != NULL)
++		bfq_mark_bfqq_prio_changed(bfqq);
++
++	bic->ioprio = ioprio;
++
++out:
++	bfq_put_bfqd_unlock(bfqd, &flags);
++}
++
++static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++			  pid_t pid, int is_sync)
++{
++	RB_CLEAR_NODE(&bfqq->entity.rb_node);
++	INIT_LIST_HEAD(&bfqq->fifo);
++	INIT_HLIST_NODE(&bfqq->burst_list_node);
++
++	atomic_set(&bfqq->ref, 0);
++	bfqq->bfqd = bfqd;
++
++	bfq_mark_bfqq_prio_changed(bfqq);
++
++	if (is_sync) {
++		if (!bfq_class_idle(bfqq))
++			bfq_mark_bfqq_idle_window(bfqq);
++		bfq_mark_bfqq_sync(bfqq);
++	}
++	bfq_mark_bfqq_IO_bound(bfqq);
++
++	/* Tentative initial value to trade off between thr and lat */
++	bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
++	bfqq->pid = pid;
++
++	bfqq->wr_coeff = 1;
++	bfqq->last_wr_start_finish = 0;
++	/*
++	 * Set to the value for which bfqq will not be deemed as
++	 * soft rt when it becomes backlogged.
++	 */
++	bfqq->soft_rt_next_start = bfq_infinity_from_now(jiffies);
++}
++
++static struct bfq_queue *bfq_find_alloc_queue(struct bfq_data *bfqd,
++					      struct bfq_group *bfqg,
++					      int is_sync,
++					      struct bfq_io_cq *bic,
++					      gfp_t gfp_mask)
++{
++	struct bfq_queue *bfqq, *new_bfqq = NULL;
++
++retry:
++	/* bic always exists here */
++	bfqq = bic_to_bfqq(bic, is_sync);
++
++	/*
++	 * Always try a new alloc if we fall back to the OOM bfqq
++	 * originally, since it should just be a temporary situation.
++	 */
++	if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) {
++		bfqq = NULL;
++		if (new_bfqq != NULL) {
++			bfqq = new_bfqq;
++			new_bfqq = NULL;
++		} else if (gfp_mask & __GFP_WAIT) {
++			spin_unlock_irq(bfqd->queue->queue_lock);
++			new_bfqq = kmem_cache_alloc_node(bfq_pool,
++					gfp_mask | __GFP_ZERO,
++					bfqd->queue->node);
++			spin_lock_irq(bfqd->queue->queue_lock);
++			if (new_bfqq != NULL)
++				goto retry;
++		} else {
++			bfqq = kmem_cache_alloc_node(bfq_pool,
++					gfp_mask | __GFP_ZERO,
++					bfqd->queue->node);
++		}
++
++		if (bfqq != NULL) {
++			bfq_init_bfqq(bfqd, bfqq, current->pid, is_sync);
++			bfq_init_prio_data(bfqq, bic);
++			bfq_init_entity(&bfqq->entity, bfqg);
++			bfq_log_bfqq(bfqd, bfqq, "allocated");
++		} else {
++			bfqq = &bfqd->oom_bfqq;
++			bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
++		}
++	}
++
++	if (new_bfqq != NULL)
++		kmem_cache_free(bfq_pool, new_bfqq);
++
++	return bfqq;
++}
++
++static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
++					       struct bfq_group *bfqg,
++					       int ioprio_class, int ioprio)
++{
++	switch (ioprio_class) {
++	case IOPRIO_CLASS_RT:
++		return &bfqg->async_bfqq[0][ioprio];
++	case IOPRIO_CLASS_NONE:
++		ioprio = IOPRIO_NORM;
++		/* fall through */
++	case IOPRIO_CLASS_BE:
++		return &bfqg->async_bfqq[1][ioprio];
++	case IOPRIO_CLASS_IDLE:
++		return &bfqg->async_idle_bfqq;
++	default:
++		BUG();
++	}
++}
++
++static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
++				       struct bfq_group *bfqg, int is_sync,
++				       struct bfq_io_cq *bic, gfp_t gfp_mask)
++{
++	const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
++	const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
++	struct bfq_queue **async_bfqq = NULL;
++	struct bfq_queue *bfqq = NULL;
++
++	if (!is_sync) {
++		async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
++						  ioprio);
++		bfqq = *async_bfqq;
++	}
++
++	if (bfqq == NULL)
++		bfqq = bfq_find_alloc_queue(bfqd, bfqg, is_sync, bic, gfp_mask);
++
++	/*
++	 * Pin the queue now that it's allocated, scheduler exit will
++	 * prune it.
++	 */
++	if (!is_sync && *async_bfqq == NULL) {
++		atomic_inc(&bfqq->ref);
++		bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
++			     bfqq, atomic_read(&bfqq->ref));
++		*async_bfqq = bfqq;
++	}
++
++	atomic_inc(&bfqq->ref);
++	bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq,
++		     atomic_read(&bfqq->ref));
++	return bfqq;
++}
++
++static void bfq_update_io_thinktime(struct bfq_data *bfqd,
++				    struct bfq_io_cq *bic)
++{
++	unsigned long elapsed = jiffies - bic->ttime.last_end_request;
++	unsigned long ttime = min(elapsed, 2UL * bfqd->bfq_slice_idle);
++
++	bic->ttime.ttime_samples = (7*bic->ttime.ttime_samples + 256) / 8;
++	bic->ttime.ttime_total = (7*bic->ttime.ttime_total + 256*ttime) / 8;
++	bic->ttime.ttime_mean = (bic->ttime.ttime_total + 128) /
++				bic->ttime.ttime_samples;
++}
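++
++/*
++ * The updates above implement an exponentially weighted moving
++ * average with weight 7/8 for past history, scaled by 256 for
++ * fixed-point precision (the +128 in the mean provides rounding): at
++ * steady state ttime_samples converges to 256 and ttime_total to
++ * 256 * ttime, so ttime_mean converges to the per-sample think time.
++ */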
++
++static void bfq_update_io_seektime(struct bfq_data *bfqd,
++				   struct bfq_queue *bfqq,
++				   struct request *rq)
++{
++	sector_t sdist;
++	u64 total;
++
++	if (bfqq->last_request_pos < blk_rq_pos(rq))
++		sdist = blk_rq_pos(rq) - bfqq->last_request_pos;
++	else
++		sdist = bfqq->last_request_pos - blk_rq_pos(rq);
++
++	/*
++	 * Don't allow the seek distance to get too large from the
++	 * odd fragment, pagein, etc.
++	 */
++	if (bfqq->seek_samples == 0) /* first request, not really a seek */
++		sdist = 0;
++	else if (bfqq->seek_samples <= 60) /* second & third seek */
++		sdist = min(sdist, (bfqq->seek_mean * 4) + 2*1024*1024);
++	else
++		sdist = min(sdist, (bfqq->seek_mean * 4) + 2*1024*64);
++
++	bfqq->seek_samples = (7*bfqq->seek_samples + 256) / 8;
++	bfqq->seek_total = (7*bfqq->seek_total + (u64)256*sdist) / 8;
++	total = bfqq->seek_total + (bfqq->seek_samples/2);
++	do_div(total, bfqq->seek_samples);
++	bfqq->seek_mean = (sector_t)total;
++
++	bfq_log_bfqq(bfqd, bfqq, "dist=%llu mean=%llu", (u64)sdist,
++			(u64)bfqq->seek_mean);
++}
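++
++/*
++ * As for think times above, seek_total/seek_samples form a 7/8
++ * exponentially weighted moving average of the seek distance, in
++ * 512-byte sectors as returned by blk_rq_pos(). The clamps bound the
++ * influence of a single huge jump: up to the third sample a distance
++ * is capped at 4 * seek_mean + 2*1024*1024 sectors, afterwards at the
++ * tighter 4 * seek_mean + 2*1024*64.
++ */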
++
++/*
++ * Disable idle window if the process thinks too long or seeks so much that
++ * it doesn't matter.
++ */
++static void bfq_update_idle_window(struct bfq_data *bfqd,
++				   struct bfq_queue *bfqq,
++				   struct bfq_io_cq *bic)
++{
++	int enable_idle;
++
++	/* Don't idle for async or idle io prio class. */
++	if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq))
++		return;
++
++	enable_idle = bfq_bfqq_idle_window(bfqq);
++
++	if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
++	    bfqd->bfq_slice_idle == 0 ||
++	    (bfqd->hw_tag && BFQQ_SEEKY(bfqq) &&
++	     bfqq->wr_coeff == 1))
++		enable_idle = 0;
++	else if (bfq_sample_valid(bic->ttime.ttime_samples)) {
++		if (bic->ttime.ttime_mean > bfqd->bfq_slice_idle &&
++		    bfqq->wr_coeff == 1)
++			enable_idle = 0;
++		else
++			enable_idle = 1;
++	}
++	bfq_log_bfqq(bfqd, bfqq, "update_idle_window: enable_idle %d",
++		enable_idle);
++
++	if (enable_idle)
++		bfq_mark_bfqq_idle_window(bfqq);
++	else
++		bfq_clear_bfqq_idle_window(bfqq);
++}
++
++/*
++ * Called when a new fs request (rq) is added to bfqq.  Check if there's
++ * something we should do about it.
++ */
++static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++			    struct request *rq)
++{
++	struct bfq_io_cq *bic = RQ_BIC(rq);
++
++	if (rq->cmd_flags & REQ_META)
++		bfqq->meta_pending++;
++
++	bfq_update_io_thinktime(bfqd, bic);
++	bfq_update_io_seektime(bfqd, bfqq, rq);
++	if (!BFQQ_SEEKY(bfqq) && bfq_bfqq_constantly_seeky(bfqq)) {
++		bfq_clear_bfqq_constantly_seeky(bfqq);
++		if (!blk_queue_nonrot(bfqd->queue)) {
++			BUG_ON(!bfqd->const_seeky_busy_in_flight_queues);
++			bfqd->const_seeky_busy_in_flight_queues--;
++		}
++	}
++	if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 ||
++	    !BFQQ_SEEKY(bfqq))
++		bfq_update_idle_window(bfqd, bfqq, bic);
++
++	bfq_log_bfqq(bfqd, bfqq,
++		     "rq_enqueued: idle_window=%d (seeky %d, mean %llu)",
++		     bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq),
++		     (unsigned long long)bfqq->seek_mean);
++
++	bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
++
++	if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
++		int small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
++				blk_rq_sectors(rq) < 32;
++		int budget_timeout = bfq_bfqq_budget_timeout(bfqq);
++
++		/*
++		 * There is just this request queued: if the request
++		 * is small and the queue is not to be expired, then
++		 * just exit.
++		 *
++		 * In this way, if the disk is being idled to wait for
++		 * a new request from the in-service queue, we avoid
++		 * unplugging the device and committing the disk to serve
++		 * just a small request. Instead, we wait for
++		 * the block layer to decide when to unplug the device:
++		 * hopefully, new requests will be merged to this one
++		 * quickly, then the device will be unplugged and
++		 * larger requests will be dispatched.
++		 */
++		if (small_req && !budget_timeout)
++			return;
++
++		/*
++		 * A large enough request arrived, or the queue is to
++		 * be expired: in both cases disk idling is to be
++		 * stopped, so clear wait_request flag and reset
++		 * timer.
++		 */
++		bfq_clear_bfqq_wait_request(bfqq);
++		del_timer(&bfqd->idle_slice_timer);
++
++		/*
++		 * The queue is not empty, because a new request just
++		 * arrived. Hence we can safely expire the queue, in
++		 * case of budget timeout, without risking that the
++		 * timestamps of the queue are not updated correctly.
++		 * See [1] for more details.
++		 */
++		if (budget_timeout)
++			bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_TIMEOUT);
++
++		/*
++		 * Let the request rip immediately, or let a new queue be
++		 * selected if bfqq has just been expired.
++		 */
++		__blk_run_queue(bfqd->queue);
++	}
++}
++
++static void bfq_insert_request(struct request_queue *q, struct request *rq)
++{
++	struct bfq_data *bfqd = q->elevator->elevator_data;
++	struct bfq_queue *bfqq = RQ_BFQQ(rq);
++
++	assert_spin_locked(bfqd->queue->queue_lock);
++	bfq_init_prio_data(bfqq, RQ_BIC(rq));
++
++	bfq_add_request(rq);
++
++	rq->fifo_time = jiffies + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
++	list_add_tail(&rq->queuelist, &bfqq->fifo);
++
++	bfq_rq_enqueued(bfqd, bfqq, rq);
++}
++
++static void bfq_update_hw_tag(struct bfq_data *bfqd)
++{
++	bfqd->max_rq_in_driver = max(bfqd->max_rq_in_driver,
++				     bfqd->rq_in_driver);
++
++	if (bfqd->hw_tag == 1)
++		return;
++
++	/*
++	 * This sample is valid if the number of outstanding requests
++	 * is large enough to allow a queueing behavior.  Note that the
++	 * sum is not exact, as it's not taking into account deactivated
++	 * requests.
++	 */
++	if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD)
++		return;
++
++	if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
++		return;
++
++	bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
++	bfqd->max_rq_in_driver = 0;
++	bfqd->hw_tag_samples = 0;
++}
++
++static void bfq_completed_request(struct request_queue *q, struct request *rq)
++{
++	struct bfq_queue *bfqq = RQ_BFQQ(rq);
++	struct bfq_data *bfqd = bfqq->bfqd;
++	bool sync = bfq_bfqq_sync(bfqq);
++
++	bfq_log_bfqq(bfqd, bfqq, "completed one req with %u sects left (%d)",
++		     blk_rq_sectors(rq), sync);
++
++	bfq_update_hw_tag(bfqd);
++
++	BUG_ON(!bfqd->rq_in_driver);
++	BUG_ON(!bfqq->dispatched);
++	bfqd->rq_in_driver--;
++	bfqq->dispatched--;
++
++	if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
++		bfq_weights_tree_remove(bfqd, &bfqq->entity,
++					&bfqd->queue_weights_tree);
++		if (!blk_queue_nonrot(bfqd->queue)) {
++			BUG_ON(!bfqd->busy_in_flight_queues);
++			bfqd->busy_in_flight_queues--;
++			if (bfq_bfqq_constantly_seeky(bfqq)) {
++				BUG_ON(!bfqd->
++					const_seeky_busy_in_flight_queues);
++				bfqd->const_seeky_busy_in_flight_queues--;
++			}
++		}
++	}
++
++	if (sync) {
++		bfqd->sync_flight--;
++		RQ_BIC(rq)->ttime.last_end_request = jiffies;
++	}
++
++	/*
++	 * If we are waiting to discover whether the request pattern of the
++	 * task associated with the queue is actually isochronous, and
++	 * both requisites for this condition to hold are satisfied, then
++	 * compute soft_rt_next_start (see the comments to the function
++	 * bfq_bfqq_softrt_next_start()).
++	 */
++	if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 &&
++	    RB_EMPTY_ROOT(&bfqq->sort_list))
++		bfqq->soft_rt_next_start =
++			bfq_bfqq_softrt_next_start(bfqd, bfqq);
++
++	/*
++	 * If this is the in-service queue, check if it needs to be expired,
++	 * or if we want to idle in case it has no pending requests.
++	 */
++	if (bfqd->in_service_queue == bfqq) {
++		if (bfq_bfqq_budget_new(bfqq))
++			bfq_set_budget_timeout(bfqd);
++
++		if (bfq_bfqq_must_idle(bfqq)) {
++			bfq_arm_slice_timer(bfqd);
++			goto out;
++		} else if (bfq_may_expire_for_budg_timeout(bfqq))
++			bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_TIMEOUT);
++		else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
++			 (bfqq->dispatched == 0 ||
++			  !bfq_bfqq_must_not_expire(bfqq)))
++			bfq_bfqq_expire(bfqd, bfqq, 0,
++					BFQ_BFQQ_NO_MORE_REQUESTS);
++	}
++
++	if (!bfqd->rq_in_driver)
++		bfq_schedule_dispatch(bfqd);
++
++out:
++	return;
++}
++
++static inline int __bfq_may_queue(struct bfq_queue *bfqq)
++{
++	if (bfq_bfqq_wait_request(bfqq) && bfq_bfqq_must_alloc(bfqq)) {
++		bfq_clear_bfqq_must_alloc(bfqq);
++		return ELV_MQUEUE_MUST;
++	}
++
++	return ELV_MQUEUE_MAY;
++}
++
++static int bfq_may_queue(struct request_queue *q, int rw)
++{
++	struct bfq_data *bfqd = q->elevator->elevator_data;
++	struct task_struct *tsk = current;
++	struct bfq_io_cq *bic;
++	struct bfq_queue *bfqq;
++
++	/*
++	 * Don't force setup of a queue from here, as a call to may_queue
++	 * does not necessarily imply that a request actually will be
++	 * queued. So just lookup a possibly existing queue, or return
++	 * 'may queue' if that fails.
++	 */
++	bic = bfq_bic_lookup(bfqd, tsk->io_context);
++	if (bic == NULL)
++		return ELV_MQUEUE_MAY;
++
++	bfqq = bic_to_bfqq(bic, rw_is_sync(rw));
++	if (bfqq != NULL) {
++		bfq_init_prio_data(bfqq, bic);
++
++		return __bfq_may_queue(bfqq);
++	}
++
++	return ELV_MQUEUE_MAY;
++}
++
++/*
++ * Queue lock held here.
++ */
++static void bfq_put_request(struct request *rq)
++{
++	struct bfq_queue *bfqq = RQ_BFQQ(rq);
++
++	if (bfqq != NULL) {
++		const int rw = rq_data_dir(rq);
++
++		BUG_ON(!bfqq->allocated[rw]);
++		bfqq->allocated[rw]--;
++
++		rq->elv.priv[0] = NULL;
++		rq->elv.priv[1] = NULL;
++
++		bfq_log_bfqq(bfqq->bfqd, bfqq, "put_request %p, %d",
++			     bfqq, atomic_read(&bfqq->ref));
++		bfq_put_queue(bfqq);
++	}
++}
++
++static struct bfq_queue *
++bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
++		struct bfq_queue *bfqq)
++{
++	bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
++		(unsigned long)bfqq->new_bfqq->pid);
++	bic_set_bfqq(bic, bfqq->new_bfqq, 1);
++	bfq_mark_bfqq_coop(bfqq->new_bfqq);
++	bfq_put_queue(bfqq);
++	return bic_to_bfqq(bic, 1);
++}
++
++/*
++ * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
++ * was the last process referring to said bfqq.
++ */
++static struct bfq_queue *
++bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
++{
++	bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
++	if (bfqq_process_refs(bfqq) == 1) {
++		bfqq->pid = current->pid;
++		bfq_clear_bfqq_coop(bfqq);
++		bfq_clear_bfqq_split_coop(bfqq);
++		return bfqq;
++	}
++
++	bic_set_bfqq(bic, NULL, 1);
++
++	bfq_put_cooperator(bfqq);
++
++	bfq_put_queue(bfqq);
++	return NULL;
++}
++
++/*
++ * Allocate bfq data structures associated with this request.
++ */
++static int bfq_set_request(struct request_queue *q, struct request *rq,
++			   struct bio *bio, gfp_t gfp_mask)
++{
++	struct bfq_data *bfqd = q->elevator->elevator_data;
++	struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq);
++	const int rw = rq_data_dir(rq);
++	const int is_sync = rq_is_sync(rq);
++	struct bfq_queue *bfqq;
++	struct bfq_group *bfqg;
++	unsigned long flags;
++
++	might_sleep_if(gfp_mask & __GFP_WAIT);
++
++	/*
++	 * bfq_changed_ioprio() dereferences bic and takes the queue
++	 * lock itself, so check bic and call it before locking here.
++	 */
++	if (bic == NULL) {
++		spin_lock_irqsave(q->queue_lock, flags);
++		goto queue_fail;
++	}
++
++	bfq_changed_ioprio(bic);
++
++	spin_lock_irqsave(q->queue_lock, flags);
++
++	bfqg = bfq_bic_update_cgroup(bic);
++
++new_queue:
++	bfqq = bic_to_bfqq(bic, is_sync);
++	if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) {
++		bfqq = bfq_get_queue(bfqd, bfqg, is_sync, bic, gfp_mask);
++		bic_set_bfqq(bic, bfqq, is_sync);
++	} else {
++		/*
++		 * If the queue was seeky for too long, break it apart.
++		 */
++		if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
++			bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
++			bfqq = bfq_split_bfqq(bic, bfqq);
++			if (!bfqq)
++				goto new_queue;
++		}
++
++		/*
++		 * Check to see if this queue is scheduled to merge with
++		 * another closely cooperating queue. The merging of queues
++		 * happens here as it must be done in process context.
++		 * The reference on new_bfqq was taken in merge_bfqqs.
++		 */
++		if (bfqq->new_bfqq != NULL)
++			bfqq = bfq_merge_bfqqs(bfqd, bic, bfqq);
++	}
++
++	bfqq->allocated[rw]++;
++	atomic_inc(&bfqq->ref);
++	bfq_log_bfqq(bfqd, bfqq, "set_request: bfqq %p, %d", bfqq,
++		     atomic_read(&bfqq->ref));
++
++	rq->elv.priv[0] = bic;
++	rq->elv.priv[1] = bfqq;
++
++	spin_unlock_irqrestore(q->queue_lock, flags);
++
++	return 0;
++
++queue_fail:
++	bfq_schedule_dispatch(bfqd);
++	spin_unlock_irqrestore(q->queue_lock, flags);
++
++	return 1;
++}
++
++static void bfq_kick_queue(struct work_struct *work)
++{
++	struct bfq_data *bfqd =
++		container_of(work, struct bfq_data, unplug_work);
++	struct request_queue *q = bfqd->queue;
++
++	spin_lock_irq(q->queue_lock);
++	__blk_run_queue(q);
++	spin_unlock_irq(q->queue_lock);
++}
++
++/*
++ * Handler of the expiration of the timer running if the in-service queue
++ * is idling inside its time slice.
++ */
++static void bfq_idle_slice_timer(unsigned long data)
++{
++	struct bfq_data *bfqd = (struct bfq_data *)data;
++	struct bfq_queue *bfqq;
++	unsigned long flags;
++	enum bfqq_expiration reason;
++
++	spin_lock_irqsave(bfqd->queue->queue_lock, flags);
++
++	bfqq = bfqd->in_service_queue;
++	/*
++	 * Theoretical race here: the in-service queue can be NULL or
++	 * different from the queue that was idling if the timer handler
++	 * spins on the queue_lock and a new request arrives for the
++	 * current queue and there is a full dispatch cycle that changes
++	 * the in-service queue.  This is unlikely, but in the worst
++	 * case we just expire a queue too early.
++	 */
++	if (bfqq != NULL) {
++		bfq_log_bfqq(bfqd, bfqq, "slice_timer expired");
++		if (bfq_bfqq_budget_timeout(bfqq))
++			/*
++			 * Also here the queue can be safely expired
++			 * for budget timeout without wasting
++			 * guarantees
++			 */
++			reason = BFQ_BFQQ_BUDGET_TIMEOUT;
++		else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
++			/*
++			 * The queue may not be empty upon timer expiration,
++			 * because we may not disable the timer when the
++			 * first request of the in-service queue arrives
++			 * during disk idling.
++			 */
++			reason = BFQ_BFQQ_TOO_IDLE;
++		else
++			goto schedule_dispatch;
++
++		bfq_bfqq_expire(bfqd, bfqq, 1, reason);
++	}
++
++schedule_dispatch:
++	bfq_schedule_dispatch(bfqd);
++
++	spin_unlock_irqrestore(bfqd->queue->queue_lock, flags);
++}
++
++static void bfq_shutdown_timer_wq(struct bfq_data *bfqd)
++{
++	del_timer_sync(&bfqd->idle_slice_timer);
++	cancel_work_sync(&bfqd->unplug_work);
++}
++
++static inline void __bfq_put_async_bfqq(struct bfq_data *bfqd,
++					struct bfq_queue **bfqq_ptr)
++{
++	struct bfq_group *root_group = bfqd->root_group;
++	struct bfq_queue *bfqq = *bfqq_ptr;
++
++	bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
++	if (bfqq != NULL) {
++		bfq_bfqq_move(bfqd, bfqq, &bfqq->entity, root_group);
++		bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
++			     bfqq, atomic_read(&bfqq->ref));
++		bfq_put_queue(bfqq);
++		*bfqq_ptr = NULL;
++	}
++}
++
++/*
++ * Release all the bfqg references to its async queues.  If we are
++ * deallocating the group these queues may still contain requests, so
++ * we reparent them to the root cgroup (i.e., the only one that will
++ * exist for sure until all the requests on a device are gone).
++ */
++static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
++{
++	int i, j;
++
++	for (i = 0; i < 2; i++)
++		for (j = 0; j < IOPRIO_BE_NR; j++)
++			__bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
++
++	__bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
++}
++
++static void bfq_exit_queue(struct elevator_queue *e)
++{
++	struct bfq_data *bfqd = e->elevator_data;
++	struct request_queue *q = bfqd->queue;
++	struct bfq_queue *bfqq, *n;
++
++	bfq_shutdown_timer_wq(bfqd);
++
++	spin_lock_irq(q->queue_lock);
++
++	BUG_ON(bfqd->in_service_queue != NULL);
++	list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
++		bfq_deactivate_bfqq(bfqd, bfqq, 0);
++
++	bfq_disconnect_groups(bfqd);
++	spin_unlock_irq(q->queue_lock);
++
++	bfq_shutdown_timer_wq(bfqd);
++
++	synchronize_rcu();
++
++	BUG_ON(timer_pending(&bfqd->idle_slice_timer));
++
++	bfq_free_root_group(bfqd);
++	kfree(bfqd);
++}
++
++static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
++{
++	struct bfq_group *bfqg;
++	struct bfq_data *bfqd;
++	struct elevator_queue *eq;
++
++	eq = elevator_alloc(q, e);
++	if (eq == NULL)
++		return -ENOMEM;
++
++	bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
++	if (bfqd == NULL) {
++		kobject_put(&eq->kobj);
++		return -ENOMEM;
++	}
++	eq->elevator_data = bfqd;
++
++	/*
++	 * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
++	 * Grab a permanent reference to it, so that the normal code flow
++	 * will not attempt to free it.
++	 */
++	bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, 1, 0);
++	atomic_inc(&bfqd->oom_bfqq.ref);
++	bfqd->oom_bfqq.entity.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO;
++	bfqd->oom_bfqq.entity.new_ioprio_class = IOPRIO_CLASS_BE;
++	/*
++	 * Trigger weight initialization, according to ioprio, at the
++	 * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio
++	 * class won't be changed any more.
++	 */
++	bfqd->oom_bfqq.entity.ioprio_changed = 1;
++
++	bfqd->queue = q;
++
++	spin_lock_irq(q->queue_lock);
++	q->elevator = eq;
++	spin_unlock_irq(q->queue_lock);
++
++	bfqg = bfq_alloc_root_group(bfqd, q->node);
++	if (bfqg == NULL) {
++		kfree(bfqd);
++		kobject_put(&eq->kobj);
++		return -ENOMEM;
++	}
++
++	bfqd->root_group = bfqg;
++	bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
++#ifdef CONFIG_CGROUP_BFQIO
++	bfqd->active_numerous_groups = 0;
++#endif
++
++	init_timer(&bfqd->idle_slice_timer);
++	bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
++	bfqd->idle_slice_timer.data = (unsigned long)bfqd;
++
++	bfqd->rq_pos_tree = RB_ROOT;
++	bfqd->queue_weights_tree = RB_ROOT;
++	bfqd->group_weights_tree = RB_ROOT;
++
++	INIT_WORK(&bfqd->unplug_work, bfq_kick_queue);
++
++	INIT_LIST_HEAD(&bfqd->active_list);
++	INIT_LIST_HEAD(&bfqd->idle_list);
++	INIT_HLIST_HEAD(&bfqd->burst_list);
++
++	bfqd->hw_tag = -1;
++
++	bfqd->bfq_max_budget = bfq_default_max_budget;
++
++	bfqd->bfq_quantum = bfq_quantum;
++	bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
++	bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
++	bfqd->bfq_back_max = bfq_back_max;
++	bfqd->bfq_back_penalty = bfq_back_penalty;
++	bfqd->bfq_slice_idle = bfq_slice_idle;
++	bfqd->bfq_class_idle_last_service = 0;
++	bfqd->bfq_max_budget_async_rq = bfq_max_budget_async_rq;
++	bfqd->bfq_timeout[BLK_RW_ASYNC] = bfq_timeout_async;
++	bfqd->bfq_timeout[BLK_RW_SYNC] = bfq_timeout_sync;
++
++	bfqd->bfq_coop_thresh = 2;
++	bfqd->bfq_failed_cooperations = 7000;
++	bfqd->bfq_requests_within_timer = 120;
++
++	bfqd->bfq_large_burst_thresh = 11;
++	bfqd->bfq_burst_interval = msecs_to_jiffies(500);
++
++	bfqd->low_latency = true;
++
++	bfqd->bfq_wr_coeff = 20;
++	bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300);
++	bfqd->bfq_wr_max_time = 0;
++	bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000);
++	bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500);
++	bfqd->bfq_wr_max_softrt_rate = 7000; /*
++					      * Approximate rate required
++					      * to play back or record a
++					      * high-definition compressed
++					      * video.
++					      */
++	bfqd->wr_busy_queues = 0;
++	bfqd->busy_in_flight_queues = 0;
++	bfqd->const_seeky_busy_in_flight_queues = 0;
++
++	/*
++	 * Begin by assuming, optimistically, that the device peak rate is
++	 * equal to the highest reference rate.
++	 */
++	bfqd->RT_prod = R_fast[blk_queue_nonrot(bfqd->queue)] *
++			T_fast[blk_queue_nonrot(bfqd->queue)];
++	bfqd->peak_rate = R_fast[blk_queue_nonrot(bfqd->queue)];
++	bfqd->device_speed = BFQ_BFQD_FAST;
++
++	return 0;
++}
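++
++/*
++ * Worked example for the defaults above (illustrative): the soft
++ * real-time rate of 7000 sectors/s corresponds, with 512-byte sectors,
++ * to roughly 3.5 MB/s, a plausible bitrate for compressed HD video.
++ * Note also that the reference taken on oom_bfqq with atomic_inc() is
++ * never dropped, which is what makes that fallback queue permanent.
++ */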
++
++static void bfq_slab_kill(void)
++{
++	if (bfq_pool != NULL)
++		kmem_cache_destroy(bfq_pool);
++}
++
++static int __init bfq_slab_setup(void)
++{
++	bfq_pool = KMEM_CACHE(bfq_queue, 0);
++	if (bfq_pool == NULL)
++		return -ENOMEM;
++	return 0;
++}
++
++static ssize_t bfq_var_show(unsigned int var, char *page)
++{
++	return sprintf(page, "%d\n", var);
++}
++
++static ssize_t bfq_var_store(unsigned long *var, const char *page,
++			     size_t count)
++{
++	unsigned long new_val;
++	int ret = kstrtoul(page, 10, &new_val);
++
++	if (ret == 0)
++		*var = new_val;
++
++	return count;
++}
++
++static ssize_t bfq_wr_max_time_show(struct elevator_queue *e, char *page)
++{
++	struct bfq_data *bfqd = e->elevator_data;
++	return sprintf(page, "%d\n", bfqd->bfq_wr_max_time > 0 ?
++		       jiffies_to_msecs(bfqd->bfq_wr_max_time) :
++		       jiffies_to_msecs(bfq_wr_duration(bfqd)));
++}
++
++static ssize_t bfq_weights_show(struct elevator_queue *e, char *page)
++{
++	struct bfq_queue *bfqq;
++	struct bfq_data *bfqd = e->elevator_data;
++	ssize_t num_char = 0;
++
++	num_char += sprintf(page + num_char, "Tot reqs queued %d\n\n",
++			    bfqd->queued);
++
++	spin_lock_irq(bfqd->queue->queue_lock);
++
++	num_char += sprintf(page + num_char, "Active:\n");
++	list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) {
++		num_char += sprintf(page + num_char,
++			"pid%d: weight %hu, nr_queued %d %d, dur %d/%u\n",
++			bfqq->pid,
++			bfqq->entity.weight,
++			bfqq->queued[0],
++			bfqq->queued[1],
++			jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
++			jiffies_to_msecs(bfqq->wr_cur_max_time));
++	}
++
++	num_char += sprintf(page + num_char, "Idle:\n");
++	list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) {
++			num_char += sprintf(page + num_char,
++				"pid%d: weight %hu, dur %d/%u\n",
++				bfqq->pid,
++				bfqq->entity.weight,
++				jiffies_to_msecs(jiffies -
++					bfqq->last_wr_start_finish),
++				jiffies_to_msecs(bfqq->wr_cur_max_time));
++	}
++
++	spin_unlock_irq(bfqd->queue->queue_lock);
++
++	return num_char;
++}
++
++#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
++static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
++{									\
++	struct bfq_data *bfqd = e->elevator_data;			\
++	unsigned int __data = __VAR;					\
++	if (__CONV)							\
++		__data = jiffies_to_msecs(__data);			\
++	return bfq_var_show(__data, (page));				\
++}
++SHOW_FUNCTION(bfq_quantum_show, bfqd->bfq_quantum, 0);
++SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 1);
++SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 1);
++SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
++SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
++SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 1);
++SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
++SHOW_FUNCTION(bfq_max_budget_async_rq_show,
++	      bfqd->bfq_max_budget_async_rq, 0);
++SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout[BLK_RW_SYNC], 1);
++SHOW_FUNCTION(bfq_timeout_async_show, bfqd->bfq_timeout[BLK_RW_ASYNC], 1);
++SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
++SHOW_FUNCTION(bfq_wr_coeff_show, bfqd->bfq_wr_coeff, 0);
++SHOW_FUNCTION(bfq_wr_rt_max_time_show, bfqd->bfq_wr_rt_max_time, 1);
++SHOW_FUNCTION(bfq_wr_min_idle_time_show, bfqd->bfq_wr_min_idle_time, 1);
++SHOW_FUNCTION(bfq_wr_min_inter_arr_async_show, bfqd->bfq_wr_min_inter_arr_async,
++	1);
++SHOW_FUNCTION(bfq_wr_max_softrt_rate_show, bfqd->bfq_wr_max_softrt_rate, 0);
++#undef SHOW_FUNCTION
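++
++/*
++ * For illustration, the first invocation above,
++ * SHOW_FUNCTION(bfq_quantum_show, bfqd->bfq_quantum, 0), expands to:
++ *
++ *	static ssize_t bfq_quantum_show(struct elevator_queue *e, char *page)
++ *	{
++ *		struct bfq_data *bfqd = e->elevator_data;
++ *		unsigned int __data = bfqd->bfq_quantum;
++ *		if (0)
++ *			__data = jiffies_to_msecs(__data);
++ *		return bfq_var_show(__data, (page));
++ *	}
++ *
++ * The dead if (0) branch is optimized away; attributes generated with
++ * __CONV == 1 instead report their value converted from jiffies to
++ * milliseconds.
++ */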
++
++#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
++static ssize_t								\
++__FUNC(struct elevator_queue *e, const char *page, size_t count)	\
++{									\
++	struct bfq_data *bfqd = e->elevator_data;			\
++	unsigned long uninitialized_var(__data);			\
++	int ret = bfq_var_store(&__data, (page), count);		\
++	if (__data < (MIN))						\
++		__data = (MIN);						\
++	else if (__data > (MAX))					\
++		__data = (MAX);						\
++	if (__CONV)							\
++		*(__PTR) = msecs_to_jiffies(__data);			\
++	else								\
++		*(__PTR) = __data;					\
++	return ret;							\
++}
++STORE_FUNCTION(bfq_quantum_store, &bfqd->bfq_quantum, 1, INT_MAX, 0);
++STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
++		INT_MAX, 1);
++STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
++		INT_MAX, 1);
++STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
++STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
++		INT_MAX, 0);
++STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 1);
++STORE_FUNCTION(bfq_max_budget_async_rq_store, &bfqd->bfq_max_budget_async_rq,
++		1, INT_MAX, 0);
++STORE_FUNCTION(bfq_timeout_async_store, &bfqd->bfq_timeout[BLK_RW_ASYNC], 0,
++		INT_MAX, 1);
++STORE_FUNCTION(bfq_wr_coeff_store, &bfqd->bfq_wr_coeff, 1, INT_MAX, 0);
++STORE_FUNCTION(bfq_wr_max_time_store, &bfqd->bfq_wr_max_time, 0, INT_MAX, 1);
++STORE_FUNCTION(bfq_wr_rt_max_time_store, &bfqd->bfq_wr_rt_max_time, 0, INT_MAX,
++		1);
++STORE_FUNCTION(bfq_wr_min_idle_time_store, &bfqd->bfq_wr_min_idle_time, 0,
++		INT_MAX, 1);
++STORE_FUNCTION(bfq_wr_min_inter_arr_async_store,
++		&bfqd->bfq_wr_min_inter_arr_async, 0, INT_MAX, 1);
++STORE_FUNCTION(bfq_wr_max_softrt_rate_store, &bfqd->bfq_wr_max_softrt_rate, 0,
++		INT_MAX, 0);
++#undef STORE_FUNCTION
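++
++/*
++ * Worked example (illustrative): bfq_slice_idle_store() above clamps
++ * the parsed value to [0, INT_MAX] and, since its __CONV is 1, stores
++ * msecs_to_jiffies(val), so writing "8" stores 2 jiffies on an HZ=250
++ * kernel. Note that bfq_var_store() returns count even when kstrtoul()
++ * fails, so these attributes assume well-formed numeric input.
++ */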
++
++/* do nothing for the moment */
++static ssize_t bfq_weights_store(struct elevator_queue *e,
++				    const char *page, size_t count)
++{
++	return count;
++}
++
++static inline unsigned long bfq_estimated_max_budget(struct bfq_data *bfqd)
++{
++	u64 timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]);
++
++	if (bfqd->peak_rate_samples >= BFQ_PEAK_RATE_SAMPLES)
++		return bfq_calc_max_budget(bfqd->peak_rate, timeout);
++	else
++		return bfq_default_max_budget;
++}
++
++static ssize_t bfq_max_budget_store(struct elevator_queue *e,
++				    const char *page, size_t count)
++{
++	struct bfq_data *bfqd = e->elevator_data;
++	unsigned long uninitialized_var(__data);
++	int ret = bfq_var_store(&__data, (page), count);
++
++	if (__data == 0)
++		bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd);
++	else {
++		if (__data > INT_MAX)
++			__data = INT_MAX;
++		bfqd->bfq_max_budget = __data;
++	}
++
++	bfqd->bfq_user_max_budget = __data;
++
++	return ret;
++}
++
++static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
++				      const char *page, size_t count)
++{
++	struct bfq_data *bfqd = e->elevator_data;
++	unsigned long uninitialized_var(__data);
++	int ret = bfq_var_store(&__data, (page), count);
++
++	if (__data < 1)
++		__data = 1;
++	else if (__data > INT_MAX)
++		__data = INT_MAX;
++
++	bfqd->bfq_timeout[BLK_RW_SYNC] = msecs_to_jiffies(__data);
++	if (bfqd->bfq_user_max_budget == 0)
++		bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd);
++
++	return ret;
++}
++
++static ssize_t bfq_low_latency_store(struct elevator_queue *e,
++				     const char *page, size_t count)
++{
++	struct bfq_data *bfqd = e->elevator_data;
++	unsigned long uninitialized_var(__data);
++	int ret = bfq_var_store(&__data, (page), count);
++
++	if (__data > 1)
++		__data = 1;
++	if (__data == 0 && bfqd->low_latency != 0)
++		bfq_end_wr(bfqd);
++	bfqd->low_latency = __data;
++
++	return ret;
++}
++
++#define BFQ_ATTR(name) \
++	__ATTR(name, S_IRUGO|S_IWUSR, bfq_##name##_show, bfq_##name##_store)
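++
++/*
++ * For illustration: BFQ_ATTR(quantum) expands to
++ * __ATTR(quantum, S_IRUGO|S_IWUSR, bfq_quantum_show, bfq_quantum_store),
++ * i.e., a mode 0644 attribute wired to the show/store pair generated
++ * above. With bfq selected as the elevator, the attributes below appear
++ * under /sys/block/<disk>/queue/iosched/.
++ */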
++
++static struct elv_fs_entry bfq_attrs[] = {
++	BFQ_ATTR(quantum),
++	BFQ_ATTR(fifo_expire_sync),
++	BFQ_ATTR(fifo_expire_async),
++	BFQ_ATTR(back_seek_max),
++	BFQ_ATTR(back_seek_penalty),
++	BFQ_ATTR(slice_idle),
++	BFQ_ATTR(max_budget),
++	BFQ_ATTR(max_budget_async_rq),
++	BFQ_ATTR(timeout_sync),
++	BFQ_ATTR(timeout_async),
++	BFQ_ATTR(low_latency),
++	BFQ_ATTR(wr_coeff),
++	BFQ_ATTR(wr_max_time),
++	BFQ_ATTR(wr_rt_max_time),
++	BFQ_ATTR(wr_min_idle_time),
++	BFQ_ATTR(wr_min_inter_arr_async),
++	BFQ_ATTR(wr_max_softrt_rate),
++	BFQ_ATTR(weights),
++	__ATTR_NULL
++};
++
++static struct elevator_type iosched_bfq = {
++	.ops = {
++		.elevator_merge_fn =		bfq_merge,
++		.elevator_merged_fn =		bfq_merged_request,
++		.elevator_merge_req_fn =	bfq_merged_requests,
++		.elevator_allow_merge_fn =	bfq_allow_merge,
++		.elevator_dispatch_fn =		bfq_dispatch_requests,
++		.elevator_add_req_fn =		bfq_insert_request,
++		.elevator_activate_req_fn =	bfq_activate_request,
++		.elevator_deactivate_req_fn =	bfq_deactivate_request,
++		.elevator_completed_req_fn =	bfq_completed_request,
++		.elevator_former_req_fn =	elv_rb_former_request,
++		.elevator_latter_req_fn =	elv_rb_latter_request,
++		.elevator_init_icq_fn =		bfq_init_icq,
++		.elevator_exit_icq_fn =		bfq_exit_icq,
++		.elevator_set_req_fn =		bfq_set_request,
++		.elevator_put_req_fn =		bfq_put_request,
++		.elevator_may_queue_fn =	bfq_may_queue,
++		.elevator_init_fn =		bfq_init_queue,
++		.elevator_exit_fn =		bfq_exit_queue,
++	},
++	.icq_size =		sizeof(struct bfq_io_cq),
++	.icq_align =		__alignof__(struct bfq_io_cq),
++	.elevator_attrs =	bfq_attrs,
++	.elevator_name =	"bfq",
++	.elevator_owner =	THIS_MODULE,
++};
++
++static int __init bfq_init(void)
++{
++	/*
++	 * bfq_slice_idle, expressed in jiffies, can round down to 0 on
++	 * HZ < 1000 setups; do not let idling be disabled that way.
++	 */
++	if (bfq_slice_idle == 0)
++		bfq_slice_idle = 1;
++
++	if (bfq_timeout_async == 0)
++		bfq_timeout_async = 1;
++
++	if (bfq_slab_setup())
++		return -ENOMEM;
++
++	/*
++	 * Times to load large popular applications for the typical systems
++	 * installed on the reference devices (see the comments before the
++	 * definitions of the two arrays).
++	 */
++	T_slow[0] = msecs_to_jiffies(2600);
++	T_slow[1] = msecs_to_jiffies(1000);
++	T_fast[0] = msecs_to_jiffies(5500);
++	T_fast[1] = msecs_to_jiffies(2000);
++
++	/*
++	 * Thresholds that determine the switch between speed classes (see
++	 * the comments before the definition of the array).
++	 */
++	device_speed_thresh[0] = (R_fast[0] + R_slow[0]) / 2;
++	device_speed_thresh[1] = (R_fast[1] + R_slow[1]) / 2;
++
++	elv_register(&iosched_bfq);
++	pr_info("BFQ I/O-scheduler version: v7r7");
++
++	return 0;
++}
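++
++/*
++ * Illustration of the thresholds computed above: device_speed_thresh[i]
++ * is the midpoint between the slow and fast reference rates (the
++ * R_slow[] and R_fast[] arrays defined earlier in this file), with
++ * i = 0 for rotational and i = 1 for non-rotational devices; a device
++ * whose estimated peak rate falls below its midpoint is then treated
++ * as slow.
++ */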
++
++static void __exit bfq_exit(void)
++{
++	elv_unregister(&iosched_bfq);
++	bfq_slab_kill();
++}
++
++module_init(bfq_init);
++module_exit(bfq_exit);
++
++MODULE_AUTHOR("Fabio Checconi, Paolo Valente");
++MODULE_LICENSE("GPL");
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+new file mode 100644
+index 0000000..2931563
+--- /dev/null
++++ b/block/bfq-sched.c
+@@ -0,0 +1,1214 @@
++/*
++ * BFQ: Hierarchical B-WF2Q+ scheduler.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ *		      Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
++ */
++
++#ifdef CONFIG_CGROUP_BFQIO
++#define for_each_entity(entity)	\
++	for (; entity != NULL; entity = entity->parent)
++
++#define for_each_entity_safe(entity, parent) \
++	for (; entity && ({ parent = entity->parent; 1; }); entity = parent)
++
++static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
++						 int extract,
++						 struct bfq_data *bfqd);
++
++static inline void bfq_update_budget(struct bfq_entity *next_in_service)
++{
++	struct bfq_entity *bfqg_entity;
++	struct bfq_group *bfqg;
++	struct bfq_sched_data *group_sd;
++
++	BUG_ON(next_in_service == NULL);
++
++	group_sd = next_in_service->sched_data;
++
++	bfqg = container_of(group_sd, struct bfq_group, sched_data);
++	/*
++	 * bfq_group's my_entity field is not NULL only if the group
++	 * is not the root group. We must not touch the root entity
++	 * as it must never become an in-service entity.
++	 */
++	bfqg_entity = bfqg->my_entity;
++	if (bfqg_entity != NULL)
++		bfqg_entity->budget = next_in_service->budget;
++}
++
++static int bfq_update_next_in_service(struct bfq_sched_data *sd)
++{
++	struct bfq_entity *next_in_service;
++
++	if (sd->in_service_entity != NULL)
++		/* will update/requeue at the end of service */
++		return 0;
++
++	/*
++	 * NOTE: this can be improved in many ways, such as returning
++	 * 1 (and thus propagating upwards the update) only when the
++	 * budget changes, or caching the bfqq that will be scheduled
++	 * next from this subtree.  For now we worry more about
++	 * correctness than about performance...
++	 */
++	next_in_service = bfq_lookup_next_entity(sd, 0, NULL);
++	sd->next_in_service = next_in_service;
++
++	if (next_in_service != NULL)
++		bfq_update_budget(next_in_service);
++
++	return 1;
++}
++
++static inline void bfq_check_next_in_service(struct bfq_sched_data *sd,
++					     struct bfq_entity *entity)
++{
++	BUG_ON(sd->next_in_service != entity);
++}
++#else
++#define for_each_entity(entity)	\
++	for (; entity != NULL; entity = NULL)
++
++#define for_each_entity_safe(entity, parent) \
++	for (parent = NULL; entity != NULL; entity = parent)
++
++static inline int bfq_update_next_in_service(struct bfq_sched_data *sd)
++{
++	return 0;
++}
++
++static inline void bfq_check_next_in_service(struct bfq_sched_data *sd,
++					     struct bfq_entity *entity)
++{
++}
++
++static inline void bfq_update_budget(struct bfq_entity *next_in_service)
++{
++}
++#endif
++
++/*
++ * Shift for timestamp calculations.  This actually limits the maximum
++ * service allowed in one timestamp delta (small shift values increase it),
++ * the maximum total weight that can be used for the queues in the system
++ * (big shift values increase it), and the period of virtual time
++ * wraparounds.
++ */
++#define WFQ_SERVICE_SHIFT	22
++
++/**
++ * bfq_gt - compare two timestamps.
++ * @a: first ts.
++ * @b: second ts.
++ *
++ * Return @a > @b, dealing with wrapping correctly.
++ */
++static inline int bfq_gt(u64 a, u64 b)
++{
++	return (s64)(a - b) > 0;
++}
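++
++/*
++ * Example of the wraparound handling above: with a = 2 and
++ * b = 0xFFFFFFFFFFFFFFFE, a - b is 4, so (s64)(a - b) > 0 and a is
++ * correctly considered the later timestamp, whereas a plain a > b
++ * comparison would get it wrong.
++ */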
++
++static inline struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity)
++{
++	struct bfq_queue *bfqq = NULL;
++
++	BUG_ON(entity == NULL);
++
++	if (entity->my_sched_data == NULL)
++		bfqq = container_of(entity, struct bfq_queue, entity);
++
++	return bfqq;
++}
++
++/**
++ * bfq_delta - map service into the virtual time domain.
++ * @service: amount of service.
++ * @weight: scale factor (weight of an entity or weight sum).
++ */
++static inline u64 bfq_delta(unsigned long service,
++					unsigned long weight)
++{
++	u64 d = (u64)service << WFQ_SERVICE_SHIFT;
++
++	do_div(d, weight);
++	return d;
++}
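++
++/*
++ * Worked example: with WFQ_SERVICE_SHIFT = 22, a service of 8 sectors
++ * at weight 4 maps to (8 << 22) / 4 = 8388608 units of virtual time;
++ * at weight 8 the same service maps to half that, which is how a
++ * heavier entity earns proportionally more service per unit of
++ * virtual time.
++ */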
++
++/**
++ * bfq_calc_finish - assign the finish time to an entity.
++ * @entity: the entity to act upon.
++ * @service: the service to be charged to the entity.
++ */
++static inline void bfq_calc_finish(struct bfq_entity *entity,
++				   unsigned long service)
++{
++	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++	BUG_ON(entity->weight == 0);
++
++	entity->finish = entity->start +
++		bfq_delta(service, entity->weight);
++
++	if (bfqq != NULL) {
++		bfq_log_bfqq(bfqq->bfqd, bfqq,
++			"calc_finish: serv %lu, w %d",
++			service, entity->weight);
++		bfq_log_bfqq(bfqq->bfqd, bfqq,
++			"calc_finish: start %llu, finish %llu, delta %llu",
++			entity->start, entity->finish,
++			bfq_delta(service, entity->weight));
++	}
++}
++
++/**
++ * bfq_entity_of - get an entity from a node.
++ * @node: the node field of the entity.
++ *
++ * Convert a node pointer to the relative entity.  This is used only
++ * to simplify the logic of some functions and not as the generic
++ * conversion mechanism because, e.g., in the tree walking functions,
++ * the check for a %NULL value would be redundant.
++ */
++static inline struct bfq_entity *bfq_entity_of(struct rb_node *node)
++{
++	struct bfq_entity *entity = NULL;
++
++	if (node != NULL)
++		entity = rb_entry(node, struct bfq_entity, rb_node);
++
++	return entity;
++}
++
++/**
++ * bfq_extract - remove an entity from a tree.
++ * @root: the tree root.
++ * @entity: the entity to remove.
++ */
++static inline void bfq_extract(struct rb_root *root,
++			       struct bfq_entity *entity)
++{
++	BUG_ON(entity->tree != root);
++
++	entity->tree = NULL;
++	rb_erase(&entity->rb_node, root);
++}
++
++/**
++ * bfq_idle_extract - extract an entity from the idle tree.
++ * @st: the service tree of the owning @entity.
++ * @entity: the entity being removed.
++ */
++static void bfq_idle_extract(struct bfq_service_tree *st,
++			     struct bfq_entity *entity)
++{
++	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++	struct rb_node *next;
++
++	BUG_ON(entity->tree != &st->idle);
++
++	if (entity == st->first_idle) {
++		next = rb_next(&entity->rb_node);
++		st->first_idle = bfq_entity_of(next);
++	}
++
++	if (entity == st->last_idle) {
++		next = rb_prev(&entity->rb_node);
++		st->last_idle = bfq_entity_of(next);
++	}
++
++	bfq_extract(&st->idle, entity);
++
++	if (bfqq != NULL)
++		list_del(&bfqq->bfqq_list);
++}
++
++/**
++ * bfq_insert - generic tree insertion.
++ * @root: tree root.
++ * @entity: entity to insert.
++ *
++ * This is used for the idle and the active tree, since they are both
++ * ordered by finish time.
++ */
++static void bfq_insert(struct rb_root *root, struct bfq_entity *entity)
++{
++	struct bfq_entity *entry;
++	struct rb_node **node = &root->rb_node;
++	struct rb_node *parent = NULL;
++
++	BUG_ON(entity->tree != NULL);
++
++	while (*node != NULL) {
++		parent = *node;
++		entry = rb_entry(parent, struct bfq_entity, rb_node);
++
++		if (bfq_gt(entry->finish, entity->finish))
++			node = &parent->rb_left;
++		else
++			node = &parent->rb_right;
++	}
++
++	rb_link_node(&entity->rb_node, parent, node);
++	rb_insert_color(&entity->rb_node, root);
++
++	entity->tree = root;
++}
++
++/**
++ * bfq_update_min - update the min_start field of an entity.
++ * @entity: the entity to update.
++ * @node: one of its children.
++ *
++ * This function is called when @entity may store an invalid value for
++ * min_start due to updates to the active tree.  The function assumes
++ * that the subtree rooted at @node (which may be its left or its right
++ * child) has a valid min_start value.
++ */
++static inline void bfq_update_min(struct bfq_entity *entity,
++				  struct rb_node *node)
++{
++	struct bfq_entity *child;
++
++	if (node != NULL) {
++		child = rb_entry(node, struct bfq_entity, rb_node);
++		if (bfq_gt(entity->min_start, child->min_start))
++			entity->min_start = child->min_start;
++	}
++}
++
++/**
++ * bfq_update_active_node - recalculate min_start.
++ * @node: the node to update.
++ *
++ * @node may have changed position or one of its children may have moved,
++ * this function updates its min_start value.  The left and right subtrees
++ * are assumed to hold a correct min_start value.
++ */
++static inline void bfq_update_active_node(struct rb_node *node)
++{
++	struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node);
++
++	entity->min_start = entity->start;
++	bfq_update_min(entity, node->rb_right);
++	bfq_update_min(entity, node->rb_left);
++}
++
++/**
++ * bfq_update_active_tree - update min_start for the whole active tree.
++ * @node: the starting node.
++ *
++ * @node must be the deepest modified node after an update.  This function
++ * updates its min_start using the values held by its children, assuming
++ * that they did not change, and then updates all the nodes that may have
++ * changed in the path to the root.  The only nodes that may have changed
++ * are the ones in the path or their siblings.
++ */
++static void bfq_update_active_tree(struct rb_node *node)
++{
++	struct rb_node *parent;
++
++up:
++	bfq_update_active_node(node);
++
++	parent = rb_parent(node);
++	if (parent == NULL)
++		return;
++
++	if (node == parent->rb_left && parent->rb_right != NULL)
++		bfq_update_active_node(parent->rb_right);
++	else if (parent->rb_left != NULL)
++		bfq_update_active_node(parent->rb_left);
++
++	node = parent;
++	goto up;
++}
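++
++/*
++ * Example of the augmentation maintained above: a node with start = 10
++ * whose left and right subtrees have min_start = 7 and 12 ends up with
++ * min_start = min(10, 7, 12) = 7. This is what lets
++ * bfq_first_active_entity() locate an eligible entity in O(log N).
++ */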
++
++static void bfq_weights_tree_add(struct bfq_data *bfqd,
++				 struct bfq_entity *entity,
++				 struct rb_root *root);
++
++static void bfq_weights_tree_remove(struct bfq_data *bfqd,
++				    struct bfq_entity *entity,
++				    struct rb_root *root);
++
++/**
++ * bfq_active_insert - insert an entity in the active tree of its
++ *                     group/device.
++ * @st: the service tree of the entity.
++ * @entity: the entity being inserted.
++ *
++ * The active tree is ordered by finish time, but an extra key is kept
++ * per each node, containing the minimum value for the start times of
++ * its children (and the node itself), so it's possible to search for
++ * the eligible node with the lowest finish time in logarithmic time.
++ */
++static void bfq_active_insert(struct bfq_service_tree *st,
++			      struct bfq_entity *entity)
++{
++	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++	struct rb_node *node = &entity->rb_node;
++#ifdef CONFIG_CGROUP_BFQIO
++	struct bfq_sched_data *sd = NULL;
++	struct bfq_group *bfqg = NULL;
++	struct bfq_data *bfqd = NULL;
++#endif
++
++	bfq_insert(&st->active, entity);
++
++	if (node->rb_left != NULL)
++		node = node->rb_left;
++	else if (node->rb_right != NULL)
++		node = node->rb_right;
++
++	bfq_update_active_tree(node);
++
++#ifdef CONFIG_CGROUP_BFQIO
++	sd = entity->sched_data;
++	bfqg = container_of(sd, struct bfq_group, sched_data);
++	BUG_ON(!bfqg);
++	bfqd = (struct bfq_data *)bfqg->bfqd;
++#endif
++	if (bfqq != NULL)
++		list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list);
++#ifdef CONFIG_CGROUP_BFQIO
++	else { /* bfq_group */
++		BUG_ON(!bfqd);
++		bfq_weights_tree_add(bfqd, entity, &bfqd->group_weights_tree);
++	}
++	if (bfqg != bfqd->root_group) {
++		BUG_ON(!bfqg);
++		BUG_ON(!bfqd);
++		bfqg->active_entities++;
++		if (bfqg->active_entities == 2)
++			bfqd->active_numerous_groups++;
++	}
++#endif
++}
++
++/**
++ * bfq_ioprio_to_weight - calc a weight from an ioprio.
++ * @ioprio: the ioprio value to convert.
++ */
++static inline unsigned short bfq_ioprio_to_weight(int ioprio)
++{
++	BUG_ON(ioprio < 0 || ioprio >= IOPRIO_BE_NR);
++	return IOPRIO_BE_NR - ioprio;
++}
++
++/**
++ * bfq_weight_to_ioprio - calc an ioprio from a weight.
++ * @weight: the weight value to convert.
++ *
++ * To preserve as much as possible the old only-ioprio user interface,
++ * 0 is used as an escape ioprio value for weights (numerically) equal
++ * to or larger than IOPRIO_BE_NR.
++ */
++static inline unsigned short bfq_weight_to_ioprio(int weight)
++{
++	BUG_ON(weight < BFQ_MIN_WEIGHT || weight > BFQ_MAX_WEIGHT);
++	return IOPRIO_BE_NR - weight < 0 ? 0 : IOPRIO_BE_NR - weight;
++}
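++
++/*
++ * Worked example for the two conversions above, assuming
++ * IOPRIO_BE_NR == 8 as in mainline kernels: ioprio 0 (highest
++ * priority) maps to weight 8, ioprio 7 to weight 1, and the default
++ * ioprio 4 to weight 4; going back, any weight >= 8 collapses to the
++ * escape ioprio 0.
++ */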
++
++static inline void bfq_get_entity(struct bfq_entity *entity)
++{
++	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++	if (bfqq != NULL) {
++		atomic_inc(&bfqq->ref);
++		bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
++			     bfqq, atomic_read(&bfqq->ref));
++	}
++}
++
++/**
++ * bfq_find_deepest - find the deepest node that an extraction can modify.
++ * @node: the node being removed.
++ *
++ * Do the first step of an extraction in an rb tree, looking for the
++ * node that will replace @node, and returning the deepest node that
++ * the following modifications to the tree can touch.  If @node is the
++ * last node in the tree return %NULL.
++ */
++static struct rb_node *bfq_find_deepest(struct rb_node *node)
++{
++	struct rb_node *deepest;
++
++	if (node->rb_right == NULL && node->rb_left == NULL)
++		deepest = rb_parent(node);
++	else if (node->rb_right == NULL)
++		deepest = node->rb_left;
++	else if (node->rb_left == NULL)
++		deepest = node->rb_right;
++	else {
++		deepest = rb_next(node);
++		if (deepest->rb_right != NULL)
++			deepest = deepest->rb_right;
++		else if (rb_parent(deepest) != node)
++			deepest = rb_parent(deepest);
++	}
++
++	return deepest;
++}
++
++/**
++ * bfq_active_extract - remove an entity from the active tree.
++ * @st: the service_tree containing the tree.
++ * @entity: the entity being removed.
++ */
++static void bfq_active_extract(struct bfq_service_tree *st,
++			       struct bfq_entity *entity)
++{
++	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++	struct rb_node *node;
++#ifdef CONFIG_CGROUP_BFQIO
++	struct bfq_sched_data *sd = NULL;
++	struct bfq_group *bfqg = NULL;
++	struct bfq_data *bfqd = NULL;
++#endif
++
++	node = bfq_find_deepest(&entity->rb_node);
++	bfq_extract(&st->active, entity);
++
++	if (node != NULL)
++		bfq_update_active_tree(node);
++
++#ifdef CONFIG_CGROUP_BFQIO
++	sd = entity->sched_data;
++	bfqg = container_of(sd, struct bfq_group, sched_data);
++	BUG_ON(!bfqg);
++	bfqd = (struct bfq_data *)bfqg->bfqd;
++#endif
++	if (bfqq != NULL)
++		list_del(&bfqq->bfqq_list);
++#ifdef CONFIG_CGROUP_BFQIO
++	else { /* bfq_group */
++		BUG_ON(!bfqd);
++		bfq_weights_tree_remove(bfqd, entity,
++					&bfqd->group_weights_tree);
++	}
++	if (bfqg != bfqd->root_group) {
++		BUG_ON(!bfqg);
++		BUG_ON(!bfqd);
++		BUG_ON(!bfqg->active_entities);
++		bfqg->active_entities--;
++		if (bfqg->active_entities == 1) {
++			BUG_ON(!bfqd->active_numerous_groups);
++			bfqd->active_numerous_groups--;
++		}
++	}
++#endif
++}
++
++/**
++ * bfq_idle_insert - insert an entity into the idle tree.
++ * @st: the service tree containing the tree.
++ * @entity: the entity to insert.
++ */
++static void bfq_idle_insert(struct bfq_service_tree *st,
++			    struct bfq_entity *entity)
++{
++	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++	struct bfq_entity *first_idle = st->first_idle;
++	struct bfq_entity *last_idle = st->last_idle;
++
++	if (first_idle == NULL || bfq_gt(first_idle->finish, entity->finish))
++		st->first_idle = entity;
++	if (last_idle == NULL || bfq_gt(entity->finish, last_idle->finish))
++		st->last_idle = entity;
++
++	bfq_insert(&st->idle, entity);
++
++	if (bfqq != NULL)
++		list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list);
++}
++
++/**
++ * bfq_forget_entity - remove an entity from the wfq trees.
++ * @st: the service tree.
++ * @entity: the entity being removed.
++ *
++ * Update the device status and forget everything about @entity, putting
++ * the device reference to it, if it is a queue.  Entities belonging to
++ * groups are not refcounted.
++ */
++static void bfq_forget_entity(struct bfq_service_tree *st,
++			      struct bfq_entity *entity)
++{
++	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++	struct bfq_sched_data *sd;
++
++	BUG_ON(!entity->on_st);
++
++	entity->on_st = 0;
++	st->wsum -= entity->weight;
++	if (bfqq != NULL) {
++		sd = entity->sched_data;
++		bfq_log_bfqq(bfqq->bfqd, bfqq, "forget_entity: %p %d",
++			     bfqq, atomic_read(&bfqq->ref));
++		bfq_put_queue(bfqq);
++	}
++}
++
++/**
++ * bfq_put_idle_entity - release the idle tree ref of an entity.
++ * @st: service tree for the entity.
++ * @entity: the entity being released.
++ */
++static void bfq_put_idle_entity(struct bfq_service_tree *st,
++				struct bfq_entity *entity)
++{
++	bfq_idle_extract(st, entity);
++	bfq_forget_entity(st, entity);
++}
++
++/**
++ * bfq_forget_idle - update the idle tree if necessary.
++ * @st: the service tree to act upon.
++ *
++ * To preserve the global O(log N) complexity we only remove one entry here;
++ * as the idle tree will not grow indefinitely this can be done safely.
++ */
++static void bfq_forget_idle(struct bfq_service_tree *st)
++{
++	struct bfq_entity *first_idle = st->first_idle;
++	struct bfq_entity *last_idle = st->last_idle;
++
++	if (RB_EMPTY_ROOT(&st->active) && last_idle != NULL &&
++	    !bfq_gt(last_idle->finish, st->vtime)) {
++		/*
++		 * Forget the whole idle tree, increasing the vtime past
++		 * the last finish time of idle entities.
++		 */
++		st->vtime = last_idle->finish;
++	}
++
++	if (first_idle != NULL && !bfq_gt(first_idle->finish, st->vtime))
++		bfq_put_idle_entity(st, first_idle);
++}
++
++static struct bfq_service_tree *
++__bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
++			 struct bfq_entity *entity)
++{
++	struct bfq_service_tree *new_st = old_st;
++
++	if (entity->ioprio_changed) {
++		struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++		unsigned short prev_weight, new_weight;
++		struct bfq_data *bfqd = NULL;
++		struct rb_root *root;
++#ifdef CONFIG_CGROUP_BFQIO
++		struct bfq_sched_data *sd;
++		struct bfq_group *bfqg;
++#endif
++
++		if (bfqq != NULL)
++			bfqd = bfqq->bfqd;
++#ifdef CONFIG_CGROUP_BFQIO
++		else {
++			sd = entity->my_sched_data;
++			bfqg = container_of(sd, struct bfq_group, sched_data);
++			BUG_ON(!bfqg);
++			bfqd = (struct bfq_data *)bfqg->bfqd;
++			BUG_ON(!bfqd);
++		}
++#endif
++
++		BUG_ON(old_st->wsum < entity->weight);
++		old_st->wsum -= entity->weight;
++
++		if (entity->new_weight != entity->orig_weight) {
++			if (entity->new_weight < BFQ_MIN_WEIGHT ||
++			    entity->new_weight > BFQ_MAX_WEIGHT) {
++				printk(KERN_CRIT "update_weight_prio: "
++						 "new_weight %d\n",
++					entity->new_weight);
++				BUG();
++			}
++			entity->orig_weight = entity->new_weight;
++			entity->ioprio =
++				bfq_weight_to_ioprio(entity->orig_weight);
++		} else if (entity->new_ioprio != entity->ioprio) {
++			entity->ioprio = entity->new_ioprio;
++			entity->orig_weight =
++					bfq_ioprio_to_weight(entity->ioprio);
++		} else
++			entity->new_weight = entity->orig_weight =
++				bfq_ioprio_to_weight(entity->ioprio);
++
++		entity->ioprio_class = entity->new_ioprio_class;
++		entity->ioprio_changed = 0;
++
++		/*
++		 * NOTE: here we may be changing the weight too early;
++		 * this will cause unfairness.  The correct approach
++		 * would have required additional complexity to defer
++		 * weight changes to the proper time instants (i.e.,
++		 * when entity->finish <= old_st->vtime).
++		 */
++		new_st = bfq_entity_service_tree(entity);
++
++		prev_weight = entity->weight;
++		new_weight = entity->orig_weight *
++			     (bfqq != NULL ? bfqq->wr_coeff : 1);
++		/*
++		 * If the weight of the entity changes, remove the entity
++		 * from its old weight counter (if there is a counter
++		 * associated with the entity), and add it to the counter
++		 * associated with its new weight.
++		 */
++		if (prev_weight != new_weight) {
++			root = bfqq ? &bfqd->queue_weights_tree :
++				      &bfqd->group_weights_tree;
++			bfq_weights_tree_remove(bfqd, entity, root);
++		}
++		entity->weight = new_weight;
++		/*
++		 * Add the entity to its weights tree only if it is
++		 * not associated with a weight-raised queue.
++		 */
++		if (prev_weight != new_weight &&
++		    (bfqq ? bfqq->wr_coeff == 1 : 1))
++			/* If we get here, root has been initialized. */
++			bfq_weights_tree_add(bfqd, entity, root);
++
++		new_st->wsum += entity->weight;
++
++		if (new_st != old_st)
++			entity->start = new_st->vtime;
++	}
++
++	return new_st;
++}
++
++/**
++ * bfq_bfqq_served - update the scheduler status after selection for
++ *                   service.
++ * @bfqq: the queue being served.
++ * @served: bytes to transfer.
++ *
++ * NOTE: this can be optimized, as the timestamps of upper level entities
++ * are synchronized every time a new bfqq is selected for service.  By now,
++ * we keep it to better check consistency.
++ */
++static void bfq_bfqq_served(struct bfq_queue *bfqq, unsigned long served)
++{
++	struct bfq_entity *entity = &bfqq->entity;
++	struct bfq_service_tree *st;
++
++	for_each_entity(entity) {
++		st = bfq_entity_service_tree(entity);
++
++		entity->service += served;
++		BUG_ON(entity->service > entity->budget);
++		BUG_ON(st->wsum == 0);
++
++		st->vtime += bfq_delta(served, st->wsum);
++		bfq_forget_idle(st);
++	}
++	bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %lu secs", served);
++}
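++
++/*
++ * Numeric illustration: if a queue receives 8 sectors of service while
++ * the service tree's weight sum is 16, st->vtime advances by
++ * (8 << 22) / 16 = 2097152. Virtual time thus advances more slowly the
++ * more total weight is backlogged, which keeps the schedule
++ * proportionally fair.
++ */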
++
++/**
++ * bfq_bfqq_charge_full_budget - set the service to the entity budget.
++ * @bfqq: the queue that needs a service update.
++ *
++ * When it's not possible to be fair in the service domain, because
++ * a queue is not consuming its budget fast enough (the meaning of
++ * fast depends on the timeout parameter), we charge it a full
++ * budget.  In this way we should obtain a sort of time-domain
++ * fairness among all the seeky/slow queues.
++ */
++static inline void bfq_bfqq_charge_full_budget(struct bfq_queue *bfqq)
++{
++	struct bfq_entity *entity = &bfqq->entity;
++
++	bfq_log_bfqq(bfqq->bfqd, bfqq, "charge_full_budget");
++
++	bfq_bfqq_served(bfqq, entity->budget - entity->service);
++}
++
++/**
++ * __bfq_activate_entity - activate an entity.
++ * @entity: the entity being activated.
++ *
++ * Called whenever an entity is activated, i.e., it is not active and one
++ * of its children receives a new request, or has to be reactivated due to
++ * budget exhaustion.  It uses the current budget of the entity (and the
++ * service received if @entity is active) of the queue to calculate its
++ * timestamps.
++ */
++static void __bfq_activate_entity(struct bfq_entity *entity)
++{
++	struct bfq_sched_data *sd = entity->sched_data;
++	struct bfq_service_tree *st = bfq_entity_service_tree(entity);
++
++	if (entity == sd->in_service_entity) {
++		BUG_ON(entity->tree != NULL);
++		/*
++		 * If we are requeueing the current entity we have
++		 * to take care of not charging to it service it has
++		 * not received.
++		 */
++		bfq_calc_finish(entity, entity->service);
++		entity->start = entity->finish;
++		sd->in_service_entity = NULL;
++	} else if (entity->tree == &st->active) {
++		/*
++		 * Requeueing an entity due to a change of some
++		 * next_in_service entity below it.  We reuse the
++		 * old start time.
++		 */
++		bfq_active_extract(st, entity);
++	} else if (entity->tree == &st->idle) {
++		/*
++		 * Must be on the idle tree, bfq_idle_extract() will
++		 * check for that.
++		 */
++		bfq_idle_extract(st, entity);
++		entity->start = bfq_gt(st->vtime, entity->finish) ?
++				       st->vtime : entity->finish;
++	} else {
++		/*
++		 * The finish time of the entity may be invalid, and
++		 * it is in the past for sure, otherwise the queue
++		 * would have been on the idle tree.
++		 */
++		entity->start = st->vtime;
++		st->wsum += entity->weight;
++		bfq_get_entity(entity);
++
++		BUG_ON(entity->on_st);
++		entity->on_st = 1;
++	}
++
++	st = __bfq_entity_update_weight_prio(st, entity);
++	bfq_calc_finish(entity, entity->budget);
++	bfq_active_insert(st, entity);
++}
++
++/**
++ * bfq_activate_entity - activate an entity and its ancestors if necessary.
++ * @entity: the entity to activate.
++ *
++ * Activate @entity and all the entities on the path from it to the root.
++ */
++static void bfq_activate_entity(struct bfq_entity *entity)
++{
++	struct bfq_sched_data *sd;
++
++	for_each_entity(entity) {
++		__bfq_activate_entity(entity);
++
++		sd = entity->sched_data;
++		if (!bfq_update_next_in_service(sd))
++			/*
++			 * No need to propagate the activation to the
++			 * upper entities, as they will be updated when
++			 * the in-service entity is rescheduled.
++			 */
++			break;
++	}
++}
++
++/**
++ * __bfq_deactivate_entity - deactivate an entity from its service tree.
++ * @entity: the entity to deactivate.
++ * @requeue: if false, the entity will not be put into the idle tree.
++ *
++ * Deactivate an entity, independently from its previous state.  If the
++ * entity was not on a service tree just return, otherwise if it is on
++ * any scheduler tree, extract it from that tree, and if necessary
++ * and if the caller did not specify @requeue, put it on the idle tree.
++ *
++ * Return %1 if the caller should update the entity hierarchy, i.e.,
++ * if the entity was in service or if it was the next_in_service for
++ * its sched_data; return %0 otherwise.
++ */
++static int __bfq_deactivate_entity(struct bfq_entity *entity, int requeue)
++{
++	struct bfq_sched_data *sd = entity->sched_data;
++	struct bfq_service_tree *st = bfq_entity_service_tree(entity);
++	int was_in_service = entity == sd->in_service_entity;
++	int ret = 0;
++
++	if (!entity->on_st)
++		return 0;
++
++	BUG_ON(was_in_service && entity->tree != NULL);
++
++	if (was_in_service) {
++		bfq_calc_finish(entity, entity->service);
++		sd->in_service_entity = NULL;
++	} else if (entity->tree == &st->active)
++		bfq_active_extract(st, entity);
++	else if (entity->tree == &st->idle)
++		bfq_idle_extract(st, entity);
++	else if (entity->tree != NULL)
++		BUG();
++
++	if (was_in_service || sd->next_in_service == entity)
++		ret = bfq_update_next_in_service(sd);
++
++	if (!requeue || !bfq_gt(entity->finish, st->vtime))
++		bfq_forget_entity(st, entity);
++	else
++		bfq_idle_insert(st, entity);
++
++	BUG_ON(sd->in_service_entity == entity);
++	BUG_ON(sd->next_in_service == entity);
++
++	return ret;
++}
++
++/**
++ * bfq_deactivate_entity - deactivate an entity.
++ * @entity: the entity to deactivate.
++ * @requeue: true if the entity can be put on the idle tree
++ */
++static void bfq_deactivate_entity(struct bfq_entity *entity, int requeue)
++{
++	struct bfq_sched_data *sd;
++	struct bfq_entity *parent;
++
++	for_each_entity_safe(entity, parent) {
++		sd = entity->sched_data;
++
++		if (!__bfq_deactivate_entity(entity, requeue))
++			/*
++			 * The parent entity is still backlogged, and
++			 * we don't need to update it as it is still
++			 * in service.
++			 */
++			break;
++
++		if (sd->next_in_service != NULL)
++			/*
++			 * The parent entity is still backlogged and
++			 * the budgets on the path towards the root
++			 * need to be updated.
++			 */
++			goto update;
++
++		/*
++		 * If we reach this point, the parent is no longer
++		 * backlogged and we want to propagate the dequeue upwards.
++		 */
++		requeue = 1;
++	}
++
++	return;
++
++update:
++	entity = parent;
++	for_each_entity(entity) {
++		__bfq_activate_entity(entity);
++
++		sd = entity->sched_data;
++		if (!bfq_update_next_in_service(sd))
++			break;
++	}
++}
++
++/**
++ * bfq_update_vtime - update vtime if necessary.
++ * @st: the service tree to act upon.
++ *
++ * If necessary update the service tree vtime to have at least one
++ * eligible entity, skipping to its start time.  Assumes that the
++ * active tree of the device is not empty.
++ *
++ * NOTE: this hierarchical implementation updates vtimes quite often,
++ * we may end up with reactivated processes getting timestamps after a
++ * vtime skip done because we needed a ->first_active entity on some
++ * intermediate node.
++ */
++static void bfq_update_vtime(struct bfq_service_tree *st)
++{
++	struct bfq_entity *entry;
++	struct rb_node *node = st->active.rb_node;
++
++	entry = rb_entry(node, struct bfq_entity, rb_node);
++	if (bfq_gt(entry->min_start, st->vtime)) {
++		st->vtime = entry->min_start;
++		bfq_forget_idle(st);
++	}
++}
++
++/**
++ * bfq_first_active_entity - find the eligible entity with
++ *                           the smallest finish time
++ * @st: the service tree to select from.
++ *
++ * This function searches for the first schedulable entity, starting from
++ * the root of the tree and going left whenever the left subtree contains
++ * at least one eligible (start <= vtime) entity. The path on the right is
++ * followed only if a) the left subtree contains no eligible entities and
++ * b) no eligible entity has been found yet.
++ */
++static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st)
++{
++	struct bfq_entity *entry, *first = NULL;
++	struct rb_node *node = st->active.rb_node;
++
++	while (node != NULL) {
++		entry = rb_entry(node, struct bfq_entity, rb_node);
++left:
++		if (!bfq_gt(entry->start, st->vtime))
++			first = entry;
++
++		BUG_ON(bfq_gt(entry->min_start, st->vtime));
++
++		if (node->rb_left != NULL) {
++			entry = rb_entry(node->rb_left,
++					 struct bfq_entity, rb_node);
++			if (!bfq_gt(entry->min_start, st->vtime)) {
++				node = node->rb_left;
++				goto left;
++			}
++		}
++		if (first != NULL)
++			break;
++		node = node->rb_right;
++	}
++
++	BUG_ON(first == NULL && !RB_EMPTY_ROOT(&st->active));
++	return first;
++}
++
++/**
++ * __bfq_lookup_next_entity - return the first eligible entity in @st.
++ * @st: the service tree.
++ *
++ * Update the virtual time in @st and return the first eligible entity
++ * it contains.
++ */
++static struct bfq_entity *__bfq_lookup_next_entity(struct bfq_service_tree *st,
++						   bool force)
++{
++	struct bfq_entity *entity, *new_next_in_service = NULL;
++
++	if (RB_EMPTY_ROOT(&st->active))
++		return NULL;
++
++	bfq_update_vtime(st);
++	entity = bfq_first_active_entity(st);
++	BUG_ON(bfq_gt(entity->start, st->vtime));
++
++	/*
++	 * If the chosen entity does not match with the sched_data's
++	 * next_in_service and we are forcedly serving the IDLE priority
++	 * class tree, bubble up budget update.
++	 */
++	if (unlikely(force && entity != entity->sched_data->next_in_service)) {
++		new_next_in_service = entity;
++		for_each_entity(new_next_in_service)
++			bfq_update_budget(new_next_in_service);
++	}
++
++	return entity;
++}
++
++/**
++ * bfq_lookup_next_entity - return the first eligible entity in @sd.
++ * @sd: the sched_data.
++ * @extract: if true the returned entity will also be extracted from @sd.
++ *
++ * NOTE: since we cache the next_in_service entity at each level of the
++ * hierarchy, the complexity of the lookup can be decreased with
++ * absolutely no effort by just returning the cached next_in_service
++ * value; we prefer to do full lookups to test the consistency of the
++ * data structures.
++ */
++static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
++						 int extract,
++						 struct bfq_data *bfqd)
++{
++	struct bfq_service_tree *st = sd->service_tree;
++	struct bfq_entity *entity;
++	int i = 0;
++
++	BUG_ON(sd->in_service_entity != NULL);
++
++	if (bfqd != NULL &&
++	    jiffies - bfqd->bfq_class_idle_last_service > BFQ_CL_IDLE_TIMEOUT) {
++		entity = __bfq_lookup_next_entity(st + BFQ_IOPRIO_CLASSES - 1,
++						  true);
++		if (entity != NULL) {
++			i = BFQ_IOPRIO_CLASSES - 1;
++			bfqd->bfq_class_idle_last_service = jiffies;
++			sd->next_in_service = entity;
++		}
++	}
++	for (; i < BFQ_IOPRIO_CLASSES; i++) {
++		entity = __bfq_lookup_next_entity(st + i, false);
++		if (entity != NULL) {
++			if (extract) {
++				bfq_check_next_in_service(sd, entity);
++				bfq_active_extract(st + i, entity);
++				sd->in_service_entity = entity;
++				sd->next_in_service = NULL;
++			}
++			break;
++		}
++	}
++
++	return entity;
++}
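++
++/*
++ * Note on the guard above: BFQ_CL_IDLE_TIMEOUT is HZ/5, so if the IDLE
++ * class has received no service for more than 200 ms the lookup starts
++ * from the IDLE-class tree and serves it once out of order. This
++ * bounds the starvation that strict class priority would otherwise
++ * allow.
++ */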
++
++/*
++ * Get next queue for service.
++ */
++static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
++{
++	struct bfq_entity *entity = NULL;
++	struct bfq_sched_data *sd;
++	struct bfq_queue *bfqq;
++
++	BUG_ON(bfqd->in_service_queue != NULL);
++
++	if (bfqd->busy_queues == 0)
++		return NULL;
++
++	sd = &bfqd->root_group->sched_data;
++	for (; sd != NULL; sd = entity->my_sched_data) {
++		entity = bfq_lookup_next_entity(sd, 1, bfqd);
++		BUG_ON(entity == NULL);
++		entity->service = 0;
++	}
++
++	bfqq = bfq_entity_to_bfqq(entity);
++	BUG_ON(bfqq == NULL);
++
++	return bfqq;
++}
++
++/*
++ * Forced extraction of the given queue.
++ */
++static void bfq_get_next_queue_forced(struct bfq_data *bfqd,
++				      struct bfq_queue *bfqq)
++{
++	struct bfq_entity *entity;
++	struct bfq_sched_data *sd;
++
++	BUG_ON(bfqd->in_service_queue != NULL);
++
++	entity = &bfqq->entity;
++	/*
++	 * Bubble up extraction/update from the leaf to the root.
++	 */
++	for_each_entity(entity) {
++		sd = entity->sched_data;
++		bfq_update_budget(entity);
++		bfq_update_vtime(bfq_entity_service_tree(entity));
++		bfq_active_extract(bfq_entity_service_tree(entity), entity);
++		sd->in_service_entity = entity;
++		sd->next_in_service = NULL;
++		entity->service = 0;
++	}
++
++	return;
++}
++
++static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
++{
++	if (bfqd->in_service_bic != NULL) {
++		put_io_context(bfqd->in_service_bic->icq.ioc);
++		bfqd->in_service_bic = NULL;
++	}
++
++	bfqd->in_service_queue = NULL;
++	del_timer(&bfqd->idle_slice_timer);
++}
++
++static void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++				int requeue)
++{
++	struct bfq_entity *entity = &bfqq->entity;
++
++	if (bfqq == bfqd->in_service_queue)
++		__bfq_bfqd_reset_in_service(bfqd);
++
++	bfq_deactivate_entity(entity, requeue);
++}
++
++static void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++	struct bfq_entity *entity = &bfqq->entity;
++
++	bfq_activate_entity(entity);
++}
++
++/*
++ * Called when the bfqq no longer has requests pending, remove it from
++ * the service tree.
++ */
++static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++			      int requeue)
++{
++	BUG_ON(!bfq_bfqq_busy(bfqq));
++	BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
++
++	bfq_log_bfqq(bfqd, bfqq, "del from busy");
++
++	bfq_clear_bfqq_busy(bfqq);
++
++	BUG_ON(bfqd->busy_queues == 0);
++	bfqd->busy_queues--;
++
++	if (!bfqq->dispatched) {
++		bfq_weights_tree_remove(bfqd, &bfqq->entity,
++					&bfqd->queue_weights_tree);
++		if (!blk_queue_nonrot(bfqd->queue)) {
++			BUG_ON(!bfqd->busy_in_flight_queues);
++			bfqd->busy_in_flight_queues--;
++			if (bfq_bfqq_constantly_seeky(bfqq)) {
++				BUG_ON(!bfqd->
++					const_seeky_busy_in_flight_queues);
++				bfqd->const_seeky_busy_in_flight_queues--;
++			}
++		}
++	}
++	if (bfqq->wr_coeff > 1)
++		bfqd->wr_busy_queues--;
++
++	bfq_deactivate_bfqq(bfqd, bfqq, requeue);
++}
++
++/*
++ * Called when an inactive queue receives a new request.
++ */
++static void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++	BUG_ON(bfq_bfqq_busy(bfqq));
++	BUG_ON(bfqq == bfqd->in_service_queue);
++
++	bfq_log_bfqq(bfqd, bfqq, "add to busy");
++
++	bfq_activate_bfqq(bfqd, bfqq);
++
++	bfq_mark_bfqq_busy(bfqq);
++	bfqd->busy_queues++;
++
++	if (!bfqq->dispatched) {
++		if (bfqq->wr_coeff == 1)
++			bfq_weights_tree_add(bfqd, &bfqq->entity,
++					     &bfqd->queue_weights_tree);
++		if (!blk_queue_nonrot(bfqd->queue)) {
++			bfqd->busy_in_flight_queues++;
++			if (bfq_bfqq_constantly_seeky(bfqq))
++				bfqd->const_seeky_busy_in_flight_queues++;
++		}
++	}
++	if (bfqq->wr_coeff > 1)
++		bfqd->wr_busy_queues++;
++}
+diff --git a/block/bfq.h b/block/bfq.h
+new file mode 100644
+index 0000000..3c5d85e
+--- /dev/null
++++ b/block/bfq.h
+@@ -0,0 +1,775 @@
++/*
++ * BFQ-v7r7 for 3.19.0: data structures and common functions prototypes.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ *		      Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
++ */
++
++#ifndef _BFQ_H
++#define _BFQ_H
++
++#include <linux/blktrace_api.h>
++#include <linux/hrtimer.h>
++#include <linux/ioprio.h>
++#include <linux/rbtree.h>
++
++#define BFQ_IOPRIO_CLASSES	3
++#define BFQ_CL_IDLE_TIMEOUT	(HZ/5)
++
++#define BFQ_MIN_WEIGHT	1
++#define BFQ_MAX_WEIGHT	1000
++
++#define BFQ_DEFAULT_QUEUE_IOPRIO	4
++
++#define BFQ_DEFAULT_GRP_WEIGHT	10
++#define BFQ_DEFAULT_GRP_IOPRIO	0
++#define BFQ_DEFAULT_GRP_CLASS	IOPRIO_CLASS_BE
++
++struct bfq_entity;
++
++/**
++ * struct bfq_service_tree - per ioprio_class service tree.
++ * @active: tree for active entities (i.e., those backlogged).
++ * @idle: tree for idle entities (i.e., those not backlogged, with V <= F_i).
++ * @first_idle: idle entity with minimum F_i.
++ * @last_idle: idle entity with maximum F_i.
++ * @vtime: scheduler virtual time.
++ * @wsum: scheduler weight sum; active and idle entities contribute to it.
++ *
++ * Each service tree represents a B-WF2Q+ scheduler on its own.  Each
++ * ioprio_class has its own independent scheduler, and so its own
++ * bfq_service_tree.  All the fields are protected by the queue lock
++ * of the containing bfqd.
++ */
++struct bfq_service_tree {
++	struct rb_root active;
++	struct rb_root idle;
++
++	struct bfq_entity *first_idle;
++	struct bfq_entity *last_idle;
++
++	u64 vtime;
++	unsigned long wsum;
++};
++
++/**
++ * struct bfq_sched_data - multi-class scheduler.
++ * @in_service_entity: entity in service.
++ * @next_in_service: head-of-the-line entity in the scheduler.
++ * @service_tree: array of service trees, one per ioprio_class.
++ *
++ * bfq_sched_data is the basic scheduler queue.  It supports three
++ * ioprio_classes, and can be used either as a toplevel queue or as
++ * an intermediate queue on a hierarchical setup.
++ * @next_in_service points to the active entity of the sched_data
++ * service trees that will be scheduled next.
++ *
++ * The supported ioprio_classes are the same as in CFQ, in descending
++ * priority order: IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
++ * Requests from higher priority queues are served before all the
++ * requests from lower priority queues; among requests of the same
++ * queue, requests are served according to B-WF2Q+.
++ * All the fields are protected by the queue lock of the containing bfqd.
++ */
++struct bfq_sched_data {
++	struct bfq_entity *in_service_entity;
++	struct bfq_entity *next_in_service;
++	struct bfq_service_tree service_tree[BFQ_IOPRIO_CLASSES];
++};
++
++/**
++ * struct bfq_weight_counter - counter of the number of all active entities
++ *                             with a given weight.
++ * @weight: weight of the entities that this counter refers to.
++ * @num_active: number of active entities with this weight.
++ * @weights_node: weights tree member (see bfq_data's @queue_weights_tree
++ *                and @group_weights_tree).
++ */
++struct bfq_weight_counter {
++	short int weight;
++	unsigned int num_active;
++	struct rb_node weights_node;
++};
++
++/**
++ * struct bfq_entity - schedulable entity.
++ * @rb_node: service_tree member.
++ * @weight_counter: pointer to the weight counter associated with this entity.
++ * @on_st: flag, true if the entity is on a tree (either the active or
++ *         the idle one of its service_tree).
++ * @finish: B-WF2Q+ finish timestamp (aka F_i).
++ * @start: B-WF2Q+ start timestamp (aka S_i).
++ * @tree: tree the entity is enqueued into; %NULL if not on a tree.
++ * @min_start: minimum start time of the (active) subtree rooted at
++ *             this entity; used for O(log N) lookups into active trees.
++ * @service: service received during the last round of service.
++ * @budget: budget used to calculate F_i; F_i = S_i + @budget / @weight.
++ * @weight: weight of the queue
++ * @parent: parent entity, for hierarchical scheduling.
++ * @my_sched_data: for non-leaf nodes in the cgroup hierarchy, the
++ *                 associated scheduler queue, %NULL on leaf nodes.
++ * @sched_data: the scheduler queue this entity belongs to.
++ * @ioprio: the ioprio in use.
++ * @new_weight: when a weight change is requested, the new weight value.
++ * @orig_weight: original weight, used to implement weight boosting
++ * @new_ioprio: when an ioprio change is requested, the new ioprio value.
++ * @ioprio_class: the ioprio_class in use.
++ * @new_ioprio_class: when an ioprio_class change is requested, the new
++ *                    ioprio_class value.
++ * @ioprio_changed: flag, true when the user requested a weight, ioprio or
++ *                  ioprio_class change.
++ *
++ * A bfq_entity is used to represent either a bfq_queue (leaf node in the
++ * cgroup hierarchy) or a bfq_group into the upper level scheduler.  Each
++ * entity belongs to the sched_data of the parent group in the cgroup
++ * hierarchy.  Non-leaf entities have also their own sched_data, stored
++ * in @my_sched_data.
++ *
++ * Each entity stores independently its priority values; this would
++ * allow different weights on different devices, but this
++ * functionality is not yet exported to userspace.  Priorities and
++ * weights are updated lazily, first storing the new values into the
++ * new_* fields, then setting the @ioprio_changed flag.  As soon as
++ * there is a transition in the entity state that allows the priority
++ * update to take place the effective and the requested priority
++ * values are synchronized.
++ *
++ * Unless cgroups are used, the weight value is calculated from the
++ * ioprio to export the same interface as CFQ.  When dealing with
++ * ``well-behaved'' queues (i.e., queues that do not spend too much
++ * time consuming their budget and have true sequential behavior, and
++ * when there are no external factors breaking anticipation) the
++ * relative weights at each level of the cgroups hierarchy should be
++ * guaranteed.  All the fields are protected by the queue lock of the
++ * containing bfqd.
++ */
++struct bfq_entity {
++	struct rb_node rb_node;
++	struct bfq_weight_counter *weight_counter;
++
++	int on_st;
++
++	u64 finish;
++	u64 start;
++
++	struct rb_root *tree;
++
++	u64 min_start;
++
++	unsigned long service, budget;
++	unsigned short weight, new_weight;
++	unsigned short orig_weight;
++
++	struct bfq_entity *parent;
++
++	struct bfq_sched_data *my_sched_data;
++	struct bfq_sched_data *sched_data;
++
++	unsigned short ioprio, new_ioprio;
++	unsigned short ioprio_class, new_ioprio_class;
++
++	int ioprio_changed;
++};
++
++struct bfq_group;
++
++/**
++ * struct bfq_queue - leaf schedulable entity.
++ * @ref: reference counter.
++ * @bfqd: parent bfq_data.
++ * @new_bfqq: shared bfq_queue if queue is cooperating with
++ *           one or more other queues.
++ * @pos_node: request-position tree member (see bfq_data's @rq_pos_tree).
++ * @pos_root: request-position tree root (see bfq_data's @rq_pos_tree).
++ * @sort_list: sorted list of pending requests.
++ * @next_rq: if fifo isn't expired, next request to serve.
++ * @queued: nr of requests queued in @sort_list.
++ * @allocated: currently allocated requests.
++ * @meta_pending: pending metadata requests.
++ * @fifo: fifo list of requests in sort_list.
++ * @entity: entity representing this queue in the scheduler.
++ * @max_budget: maximum budget allowed from the feedback mechanism.
++ * @budget_timeout: budget expiration (in jiffies).
++ * @dispatched: number of requests on the dispatch list or inside driver.
++ * @flags: status flags.
++ * @bfqq_list: node for active/idle bfqq list inside our bfqd.
++ * @burst_list_node: node for the device's burst list.
++ * @seek_samples: number of seeks sampled
++ * @seek_total: sum of the distances of the seeks sampled
++ * @seek_mean: mean seek distance
++ * @last_request_pos: position of the last request enqueued
++ * @requests_within_timer: number of consecutive pairs of request completion
++ *                         and arrival, such that the queue becomes idle
++ *                         after the completion, but the next request arrives
++ *                         within an idle time slice; used only if the queue's
++ *                         IO_bound flag has been cleared.
++ * @pid: pid of the process owning the queue, used for logging purposes.
++ * @last_wr_start_finish: start time of the current weight-raising period if
++ *                        the @bfq-queue is being weight-raised, otherwise
++ *                        finish time of the last weight-raising period
++ * @wr_cur_max_time: current max raising time for this queue
++ * @soft_rt_next_start: minimum time instant such that, only if a new
++ *                      request is enqueued after this time instant in an
++ *                      idle @bfq_queue with no outstanding requests, then
++ *                      the task associated with the queue is deemed as
++ *                      soft real-time (see the comments to the function
++ *                      bfq_bfqq_softrt_next_start()).
++ * @last_idle_bklogged: time of the last transition of the @bfq_queue from
++ *                      idle to backlogged
++ * @service_from_backlogged: cumulative service received from the @bfq_queue
++ *                           since the last transition from idle to
++ *                           backlogged
++ *
++ * A bfq_queue is a leaf request queue; it can be associated with one or
++ * more io_contexts, if it is async or shared between cooperating
++ * processes. @cgroup holds a reference to the cgroup, to be sure that it
++ * does not disappear while a bfqq still references it (mostly to avoid
++ * races between request issuing and task migration followed by cgroup
++ * destruction).
++ * All the fields are protected by the queue lock of the containing bfqd.
++ */
++struct bfq_queue {
++	atomic_t ref;
++	struct bfq_data *bfqd;
++
++	/* fields for cooperating queues handling */
++	struct bfq_queue *new_bfqq;
++	struct rb_node pos_node;
++	struct rb_root *pos_root;
++
++	struct rb_root sort_list;
++	struct request *next_rq;
++	int queued[2];
++	int allocated[2];
++	int meta_pending;
++	struct list_head fifo;
++
++	struct bfq_entity entity;
++
++	unsigned long max_budget;
++	unsigned long budget_timeout;
++
++	int dispatched;
++
++	unsigned int flags;
++
++	struct list_head bfqq_list;
++
++	struct hlist_node burst_list_node;
++
++	unsigned int seek_samples;
++	u64 seek_total;
++	sector_t seek_mean;
++	sector_t last_request_pos;
++
++	unsigned int requests_within_timer;
++
++	pid_t pid;
++
++	/* weight-raising fields */
++	unsigned long wr_cur_max_time;
++	unsigned long soft_rt_next_start;
++	unsigned long last_wr_start_finish;
++	unsigned int wr_coeff;
++	unsigned long last_idle_bklogged;
++	unsigned long service_from_backlogged;
++};
++
++/**
++ * struct bfq_ttime - per process thinktime stats.
++ * @last_end_request: completion time of the last request.
++ * @ttime_total: total process thinktime.
++ * @ttime_samples: number of thinktime samples.
++ * @ttime_mean: average process thinktime.
++ */
++struct bfq_ttime {
++	unsigned long last_end_request;
++
++	unsigned long ttime_total;
++	unsigned long ttime_samples;
++	unsigned long ttime_mean;
++};
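
As a rough sketch of how these decayed statistics are typically maintained
(assuming the CFQ-style fixed-point averaging, which BFQ is believed to
inherit; the in-kernel update lives in bfq_update_io_thinktime() in
bfq-iosched.c):

#include <stdio.h>

struct bfq_ttime_example {
	unsigned long last_end_request;
	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;
};

/* Decayed running average: each new sample replaces 1/8 of the history. */
static void ttime_sample(struct bfq_ttime_example *tt, unsigned long now,
			 unsigned long slice_idle)
{
	unsigned long elapsed = now - tt->last_end_request;
	/* Clamp outliers so one long pause cannot dominate the mean. */
	unsigned long t = elapsed < 2 * slice_idle ? elapsed : 2 * slice_idle;

	tt->ttime_samples = (7 * tt->ttime_samples + 256) / 8;
	tt->ttime_total = (7 * tt->ttime_total + 256 * t) / 8;
	tt->ttime_mean = (tt->ttime_total + 128) / tt->ttime_samples;
	tt->last_end_request = now;
}

int main(void)
{
	struct bfq_ttime_example tt = { .last_end_request = 100 };

	ttime_sample(&tt, 104, 8);	/* one 4-jiffy thinktime sample */
	printf("mean = %lu\n", tt.ttime_mean);
	return 0;
}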
++
++/**
++ * struct bfq_io_cq - per (request_queue, io_context) structure.
++ * @icq: associated io_cq structure.
++ * @bfqq: array of two process queues, the sync and the async.
++ * @ttime: associated @bfq_ttime struct.
++ * @ioprio: last process ioprio seen, used to detect ioprio changes.
++ */
++struct bfq_io_cq {
++	struct io_cq icq; /* must be the first member */
++	struct bfq_queue *bfqq[2];
++	struct bfq_ttime ttime;
++	int ioprio;
++};
++
++enum bfq_device_speed {
++	BFQ_BFQD_FAST,
++	BFQ_BFQD_SLOW,
++};
++
++/**
++ * struct bfq_data - per device data structure.
++ * @queue: request queue for the managed device.
++ * @root_group: root bfq_group for the device.
++ * @rq_pos_tree: rbtree sorted by next_request position, used when
++ *               determining if two or more queues have interleaving
++ *               requests (see bfq_close_cooperator()).
++ * @active_numerous_groups: number of bfq_groups containing more than one
++ *                          active @bfq_entity.
++ * @queue_weights_tree: rbtree of weight counters of @bfq_queues, sorted by
++ *                      weight. Used to keep track of whether all @bfq_queues
++ *                     have the same weight. The tree contains one counter
++ *                     for each distinct weight associated to some active
++ *                     and not weight-raised @bfq_queue (see the comments to
++ *                      the functions bfq_weights_tree_[add|remove] for
++ *                     further details).
++ * @group_weights_tree: rbtree of non-queue @bfq_entity weight counters, sorted
++ *                      by weight. Used to keep track of whether all
++ *                     @bfq_groups have the same weight. The tree contains
++ *                     one counter for each distinct weight associated to
++ *                     some active @bfq_group (see the comments to the
++ *                     functions bfq_weights_tree_[add|remove] for further
++ *                     details).
++ * @busy_queues: number of bfq_queues containing requests (including the
++ *		 queue in service, even if it is idling).
++ * @busy_in_flight_queues: number of @bfq_queues containing pending or
++ *                         in-flight requests, plus the @bfq_queue in
++ *                         service, even if idle but waiting for the
++ *                         possible arrival of its next sync request. This
++ *                         field is updated only if the device is rotational,
++ *                         but used only if the device is also NCQ-capable.
++ *                         The field is updated even for non-NCQ-capable
++ *                         rotational devices because the value of @hw_tag
++ *                         may become known only after busy_in_flight_queues
++ *                         has already needed to be incremented. Taking this
++ *                         possibility into account, to avoid unbalanced
++ *                         increments/decrements, would imply more overhead
++ *                         than just updating busy_in_flight_queues
++ *                         regardless of the value of @hw_tag.
++ * @const_seeky_busy_in_flight_queues: number of constantly-seeky @bfq_queues
++ *                                     (that is, seeky queues that expired
++ *                                     for budget timeout at least once)
++ *                                     containing pending or in-flight
++ *                                     requests, including the in-service
++ *                                     @bfq_queue if constantly seeky. This
++ *                                     field is updated only if the device
++ *                                     is rotational, but used only if the
++ *                                     device is also NCQ-capable (see the
++ *                                     comments to @busy_in_flight_queues).
++ * @wr_busy_queues: number of weight-raised busy @bfq_queues.
++ * @queued: number of queued requests.
++ * @rq_in_driver: number of requests dispatched and waiting for completion.
++ * @sync_flight: number of sync requests in the driver.
++ * @max_rq_in_driver: max number of reqs in driver in the last
++ *                    @hw_tag_samples completed requests.
++ * @hw_tag_samples: nr of samples used to calculate hw_tag.
++ * @hw_tag: flag set to one if the driver is showing a queueing behavior.
++ * @budgets_assigned: number of budgets assigned.
++ * @idle_slice_timer: timer set when idling for the next sequential request
++ *                    from the queue in service.
++ * @unplug_work: delayed work to restart dispatching on the request queue.
++ * @in_service_queue: bfq_queue in service.
++ * @in_service_bic: bfq_io_cq (bic) associated with the @in_service_queue.
++ * @last_position: on-disk position of the last served request.
++ * @last_budget_start: beginning of the last budget.
++ * @last_idling_start: beginning of the last idle slice.
++ * @peak_rate: peak transfer rate observed for a budget.
++ * @peak_rate_samples: number of samples used to calculate @peak_rate.
++ * @bfq_max_budget: maximum budget allotted to a bfq_queue before
++ *                  rescheduling.
++ * @group_list: list of all the bfq_groups active on the device.
++ * @active_list: list of all the bfq_queues active on the device.
++ * @idle_list: list of all the bfq_queues idle on the device.
++ * @bfq_quantum: max number of requests dispatched per dispatch round.
++ * @bfq_fifo_expire: timeout for async/sync requests; when it expires
++ *                   requests are served in fifo order.
++ * @bfq_back_penalty: weight of backward seeks wrt forward ones.
++ * @bfq_back_max: maximum allowed backward seek.
++ * @bfq_slice_idle: maximum idling time.
++ * @bfq_user_max_budget: user-configured max budget value
++ *                       (0 for auto-tuning).
++ * @bfq_max_budget_async_rq: maximum budget (in nr of requests) allotted to
++ *                           async queues.
++ * @bfq_timeout: timeout for bfq_queues to consume their budget; used to
++ *               prevent seeky queues from imposing long latencies on
++ *               well-behaved ones (this also implies that seeky queues
++ *               cannot receive guarantees in the service domain; after a
++ *               timeout they are charged for the whole allocated budget,
++ *               to try to preserve a behavior reasonably fair among them,
++ *               but without service-domain guarantees).
++ * @bfq_coop_thresh: number of queue merges after which a @bfq_queue is
++ *                   no longer granted any weight-raising.
++ * @bfq_failed_cooperations: number of consecutive failed cooperation
++ *                           chances after which weight-raising is restored
++ *                           to a queue subject to more than bfq_coop_thresh
++ *                           queue merges.
++ * @bfq_requests_within_timer: number of consecutive requests that must be
++ *                             issued within the idle time slice to re-enable
++ *                             idling for a queue that was marked as
++ *                             non-I/O-bound (see the definition of the
++ *                             IO_bound flag for further details).
++ * @last_ins_in_burst: last time at which a queue entered the current
++ *                     burst of queues being activated shortly after
++ *                     each other; for more details about this and the
++ *                     following parameters related to a burst of
++ *                     activations, see the comments to the function
++ *                     @bfq_handle_burst.
++ * @bfq_burst_interval: reference time interval used to decide whether a
++ *                      queue has been activated shortly after
++ *                      @last_ins_in_burst.
++ * @burst_size: number of queues in the current burst of queue activations.
++ * @bfq_large_burst_thresh: maximum burst size above which the current
++ * 			    queue-activation burst is deemed as 'large'.
++ * @large_burst: true if a large queue-activation burst is in progress.
++ * @burst_list: head of the burst list (as for the above fields, more details
++ * 		in the comments to the function bfq_handle_burst).
++ * @low_latency: if set to true, low-latency heuristics are enabled.
++ * @bfq_wr_coeff: maximum factor by which the weight of a weight-raised
++ *                queue is multiplied.
++ * @bfq_wr_max_time: maximum duration of a weight-raising period (jiffies).
++ * @bfq_wr_rt_max_time: maximum duration for soft real-time processes.
++ * @bfq_wr_min_idle_time: minimum idle period after which weight-raising
++ *			  may be reactivated for a queue (in jiffies).
++ * @bfq_wr_min_inter_arr_async: minimum period between request arrivals
++ *				after which weight-raising may be
++ *				reactivated for an already busy queue
++ *				(in jiffies).
++ * @bfq_wr_max_softrt_rate: max service-rate for a soft real-time queue,
++ *			    sectors per seconds.
++ * @RT_prod: cached value of the product R*T, used for automatically
++ *	     computing the maximum duration of weight raising.
++ * @device_speed: device-speed class for the low-latency heuristic.
++ * @oom_bfqq: fallback dummy bfqq for extreme OOM conditions.
++ *
++ * All the fields are protected by the @queue lock.
++ */
++struct bfq_data {
++	struct request_queue *queue;
++
++	struct bfq_group *root_group;
++	struct rb_root rq_pos_tree;
++
++#ifdef CONFIG_CGROUP_BFQIO
++	int active_numerous_groups;
++#endif
++
++	struct rb_root queue_weights_tree;
++	struct rb_root group_weights_tree;
++
++	int busy_queues;
++	int busy_in_flight_queues;
++	int const_seeky_busy_in_flight_queues;
++	int wr_busy_queues;
++	int queued;
++	int rq_in_driver;
++	int sync_flight;
++
++	int max_rq_in_driver;
++	int hw_tag_samples;
++	int hw_tag;
++
++	int budgets_assigned;
++
++	struct timer_list idle_slice_timer;
++	struct work_struct unplug_work;
++
++	struct bfq_queue *in_service_queue;
++	struct bfq_io_cq *in_service_bic;
++
++	sector_t last_position;
++
++	ktime_t last_budget_start;
++	ktime_t last_idling_start;
++	int peak_rate_samples;
++	u64 peak_rate;
++	unsigned long bfq_max_budget;
++
++	struct hlist_head group_list;
++	struct list_head active_list;
++	struct list_head idle_list;
++
++	unsigned int bfq_quantum;
++	unsigned int bfq_fifo_expire[2];
++	unsigned int bfq_back_penalty;
++	unsigned int bfq_back_max;
++	unsigned int bfq_slice_idle;
++	u64 bfq_class_idle_last_service;
++
++	unsigned int bfq_user_max_budget;
++	unsigned int bfq_max_budget_async_rq;
++	unsigned int bfq_timeout[2];
++
++	unsigned int bfq_coop_thresh;
++	unsigned int bfq_failed_cooperations;
++	unsigned int bfq_requests_within_timer;
++
++	unsigned long last_ins_in_burst;
++	unsigned long bfq_burst_interval;
++	int burst_size;
++	unsigned long bfq_large_burst_thresh;
++	bool large_burst;
++	struct hlist_head burst_list;
++
++	bool low_latency;
++
++	/* parameters of the low_latency heuristics */
++	unsigned int bfq_wr_coeff;
++	unsigned int bfq_wr_max_time;
++	unsigned int bfq_wr_rt_max_time;
++	unsigned int bfq_wr_min_idle_time;
++	unsigned long bfq_wr_min_inter_arr_async;
++	unsigned int bfq_wr_max_softrt_rate;
++	u64 RT_prod;
++	enum bfq_device_speed device_speed;
++
++	struct bfq_queue oom_bfqq;
++};
++
++enum bfqq_state_flags {
++	BFQ_BFQQ_FLAG_busy = 0,		/* has requests or is in service */
++	BFQ_BFQQ_FLAG_wait_request,	/* waiting for a request */
++	BFQ_BFQQ_FLAG_must_alloc,	/* must be allowed rq alloc */
++	BFQ_BFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
++	BFQ_BFQQ_FLAG_idle_window,	/* slice idling enabled */
++	BFQ_BFQQ_FLAG_prio_changed,	/* task priority has changed */
++	BFQ_BFQQ_FLAG_sync,		/* synchronous queue */
++	BFQ_BFQQ_FLAG_budget_new,	/* no completion with this budget */
++	BFQ_BFQQ_FLAG_IO_bound,         /*
++					 * bfqq has timed-out at least once
++					 * having consumed at most 2/10 of
++					 * its budget
++					 */
++	BFQ_BFQQ_FLAG_in_large_burst,	/*
++					 * bfqq activated in a large burst,
++					 * see comments to bfq_handle_burst.
++					 */
++	BFQ_BFQQ_FLAG_constantly_seeky,	/*
++					 * bfqq has proved to be slow and
++					 * seeky until budget timeout
++					 */
++	BFQ_BFQQ_FLAG_softrt_update,    /*
++					 * may need softrt-next-start
++					 * update
++					 */
++	BFQ_BFQQ_FLAG_coop,		/* bfqq is shared */
++	BFQ_BFQQ_FLAG_split_coop,	/* shared bfqq will be split */
++};
++
++#define BFQ_BFQQ_FNS(name)						\
++static inline void bfq_mark_bfqq_##name(struct bfq_queue *bfqq)		\
++{									\
++	(bfqq)->flags |= (1 << BFQ_BFQQ_FLAG_##name);			\
++}									\
++static inline void bfq_clear_bfqq_##name(struct bfq_queue *bfqq)	\
++{									\
++	(bfqq)->flags &= ~(1 << BFQ_BFQQ_FLAG_##name);			\
++}									\
++static inline int bfq_bfqq_##name(const struct bfq_queue *bfqq)		\
++{									\
++	return ((bfqq)->flags & (1 << BFQ_BFQQ_FLAG_##name)) != 0;	\
++}
++
++BFQ_BFQQ_FNS(busy);
++BFQ_BFQQ_FNS(wait_request);
++BFQ_BFQQ_FNS(must_alloc);
++BFQ_BFQQ_FNS(fifo_expire);
++BFQ_BFQQ_FNS(idle_window);
++BFQ_BFQQ_FNS(prio_changed);
++BFQ_BFQQ_FNS(sync);
++BFQ_BFQQ_FNS(budget_new);
++BFQ_BFQQ_FNS(IO_bound);
++BFQ_BFQQ_FNS(in_large_burst);
++BFQ_BFQQ_FNS(constantly_seeky);
++BFQ_BFQQ_FNS(coop);
++BFQ_BFQQ_FNS(split_coop);
++BFQ_BFQQ_FNS(softrt_update);
++#undef BFQ_BFQQ_FNS
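
For reference, expanding BFQ_BFQQ_FNS(busy) by hand yields the three
accessors below; this is just the mechanical preprocessor output, shown to
make the generated flag API explicit:

static inline void bfq_mark_bfqq_busy(struct bfq_queue *bfqq)
{
	(bfqq)->flags |= (1 << BFQ_BFQQ_FLAG_busy);
}
static inline void bfq_clear_bfqq_busy(struct bfq_queue *bfqq)
{
	(bfqq)->flags &= ~(1 << BFQ_BFQQ_FLAG_busy);
}
static inline int bfq_bfqq_busy(const struct bfq_queue *bfqq)
{
	return ((bfqq)->flags & (1 << BFQ_BFQQ_FLAG_busy)) != 0;
}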
++
++/* Logging facilities. */
++#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
++	blk_add_trace_msg((bfqd)->queue, "bfq%d " fmt, (bfqq)->pid, ##args)
++
++#define bfq_log(bfqd, fmt, args...) \
++	blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
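
Typical call sites, mirroring ones that appear in the EQM patch further
down, emit per-queue and per-device messages into the blktrace stream:

bfq_log_bfqq(bfqd, bfqq, "set_in_service_queue, cur-budget = %lu",
	     bfqq->entity.budget);
bfq_log(bfqd, "select_queue: new queue %d returned",
	bfqq != NULL ? bfqq->pid : 0);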
++
++/* Expiration reasons. */
++enum bfqq_expiration {
++	BFQ_BFQQ_TOO_IDLE = 0,		/*
++					 * queue has been idling for
++					 * too long
++					 */
++	BFQ_BFQQ_BUDGET_TIMEOUT,	/* budget took too long to be used */
++	BFQ_BFQQ_BUDGET_EXHAUSTED,	/* budget consumed */
++	BFQ_BFQQ_NO_MORE_REQUESTS,	/* the queue has no more requests */
++};
++
++#ifdef CONFIG_CGROUP_BFQIO
++/**
++ * struct bfq_group - per (device, cgroup) data structure.
++ * @entity: schedulable entity to insert into the parent group sched_data.
++ * @sched_data: own sched_data, to contain child entities (they may be
++ *              both bfq_queues and bfq_groups).
++ * @group_node: node to be inserted into the bfqio_cgroup->group_data
++ *              list of the containing cgroup's bfqio_cgroup.
++ * @bfqd_node: node to be inserted into the @bfqd->group_list list
++ *             of the groups active on the same device; used for cleanup.
++ * @bfqd: the bfq_data for the device this group acts upon.
++ * @async_bfqq: array of async queues for all the tasks belonging to
++ *              the group, one queue per ioprio value per ioprio_class,
++ *              except for the idle class that has only one queue.
++ * @async_idle_bfqq: async queue for the idle class (ioprio is ignored).
++ * @my_entity: pointer to @entity, %NULL for the toplevel group; used
++ *             to avoid too many special cases during group creation/
++ *             migration.
++ * @active_entities: number of active entities belonging to the group;
++ *                   unused for the root group. Used to know whether there
++ *                   are groups with more than one active @bfq_entity
++ *                   (see the comments to the function
++ *                   bfq_bfqq_must_not_expire()).
++ *
++ * Each (device, cgroup) pair has its own bfq_group, i.e., for each cgroup
++ * there is a set of bfq_groups, each one collecting the lower-level
++ * entities belonging to the group that are acting on the same device.
++ *
++ * Locking works as follows:
++ *    o @group_node is protected by the bfqio_cgroup lock, and is accessed
++ *      via RCU from its readers.
++ *    o @bfqd is protected by the queue lock, RCU is used to access it
++ *      from the readers.
++ *    o All the other fields are protected by the @bfqd queue lock.
++ */
++struct bfq_group {
++	struct bfq_entity entity;
++	struct bfq_sched_data sched_data;
++
++	struct hlist_node group_node;
++	struct hlist_node bfqd_node;
++
++	void *bfqd;
++
++	struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
++	struct bfq_queue *async_idle_bfqq;
++
++	struct bfq_entity *my_entity;
++
++	int active_entities;
++};
++
++/**
++ * struct bfqio_cgroup - bfq cgroup data structure.
++ * @css: subsystem state for bfq in the containing cgroup.
++ * @online: flag marked when the subsystem is inserted.
++ * @weight: cgroup weight.
++ * @ioprio: cgroup ioprio.
++ * @ioprio_class: cgroup ioprio_class.
++ * @lock: spinlock that protects @ioprio, @ioprio_class and @group_data.
++ * @group_data: list containing the bfq_group belonging to this cgroup.
++ *
++ * @group_data is accessed using RCU, with @lock protecting the updates,
++ * @ioprio and @ioprio_class are protected by @lock.
++ */
++struct bfqio_cgroup {
++	struct cgroup_subsys_state css;
++	bool online;
++
++	unsigned short weight, ioprio, ioprio_class;
++
++	spinlock_t lock;
++	struct hlist_head group_data;
++};
++#else
++struct bfq_group {
++	struct bfq_sched_data sched_data;
++
++	struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
++	struct bfq_queue *async_idle_bfqq;
++};
++#endif
++
++static inline struct bfq_service_tree *
++bfq_entity_service_tree(struct bfq_entity *entity)
++{
++	struct bfq_sched_data *sched_data = entity->sched_data;
++	unsigned int idx = entity->ioprio_class - 1;
++
++	BUG_ON(idx >= BFQ_IOPRIO_CLASSES);
++	BUG_ON(sched_data == NULL);
++
++	return sched_data->service_tree + idx;
++}
++
++static inline struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic,
++					    bool is_sync)
++{
++	return bic->bfqq[is_sync];
++}
++
++static inline void bic_set_bfqq(struct bfq_io_cq *bic,
++				struct bfq_queue *bfqq, bool is_sync)
++{
++	bic->bfqq[is_sync] = bfqq;
++}
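
The @bfqq array of struct bfq_io_cq is indexed by synchronicity (0 for
async, 1 for sync); the lookup used in bfq_allow_merge() in the EQM patch
below is a typical caller:

bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio));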
++
++static inline struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
++{
++	return bic->icq.q->elevator->elevator_data;
++}
++
++/**
++ * bfq_get_bfqd_locked - get a lock to a bfqd using an RCU-protected pointer.
++ * @ptr: a pointer to a bfqd.
++ * @flags: storage for the flags to be saved.
++ *
++ * This function allows bfqg->bfqd to be protected by the
++ * queue lock of the bfqd it references; the pointer is dereferenced
++ * under RCU, so the storage for bfqd is assured to be safe as long
++ * as the RCU read side critical section does not end.  After the
++ * bfqd->queue->queue_lock is taken the pointer is rechecked, to be
++ * sure that no other writer accessed it.  If we raced with a writer,
++ * the function returns NULL, with the queue unlocked, otherwise it
++ * returns the dereferenced pointer, with the queue locked.
++ */
++static inline struct bfq_data *bfq_get_bfqd_locked(void **ptr,
++						   unsigned long *flags)
++{
++	struct bfq_data *bfqd;
++
++	rcu_read_lock();
++	bfqd = rcu_dereference(*(struct bfq_data **)ptr);
++
++	if (bfqd != NULL) {
++		spin_lock_irqsave(bfqd->queue->queue_lock, *flags);
++		if (*ptr == bfqd)
++			goto out;
++		spin_unlock_irqrestore(bfqd->queue->queue_lock, *flags);
++	}
++
++	bfqd = NULL;
++out:
++	rcu_read_unlock();
++	return bfqd;
++}
++
++static inline void bfq_put_bfqd_unlock(struct bfq_data *bfqd,
++				       unsigned long *flags)
++{
++	spin_unlock_irqrestore(bfqd->queue->queue_lock, *flags);
++}
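
A sketch of the caller pattern implied by the kernel-doc above; bfqg is
assumed to be in scope, and update_foo() is a hypothetical stand-in for
whatever work needs the queue lock:

unsigned long flags;
struct bfq_data *bfqd;

bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags);
if (bfqd != NULL) {
	/* Queue lock held and bfqg->bfqd rechecked: safe to use. */
	update_foo(bfqd);		/* hypothetical */
	bfq_put_bfqd_unlock(bfqd, &flags);
}
/* bfqd == NULL: we raced with a writer; no lock is held. */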
++
++static void bfq_changed_ioprio(struct bfq_io_cq *bic);
++static void bfq_put_queue(struct bfq_queue *bfqq);
++static void bfq_dispatch_insert(struct request_queue *q, struct request *rq);
++static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
++				       struct bfq_group *bfqg, int is_sync,
++				       struct bfq_io_cq *bic, gfp_t gfp_mask);
++static void bfq_end_wr_async_queues(struct bfq_data *bfqd,
++				    struct bfq_group *bfqg);
++static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
++static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
++
++#endif /* _BFQ_H */
+-- 
+2.3.0
+

diff --git a/5003_block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r7-for-3.19.0.patch b/5003_block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r7-for-3.19.0.patch
new file mode 100644
index 0000000..b2e83e4
--- /dev/null
+++ b/5003_block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r7-for-3.19.0.patch
@@ -0,0 +1,1222 @@
+From 8308a96c0ba080015eb1c58a1bfbfbf1fee80fd6 Mon Sep 17 00:00:00 2001
+From: Mauro Andreolini <mauro.andreolini@unimore.it>
+Date: Thu, 18 Dec 2014 21:32:08 +0100
+Subject: [PATCH 3/3] block, bfq: add Early Queue Merge (EQM) to BFQ-v7r7 for
+ 3.19.0
+
+A set of processes may happen to perform interleaved reads, i.e., requests
+whose union would give rise to a sequential read pattern. There are two
+typical cases: in the first case, processes read fixed-size chunks of data
+at a fixed distance from each other, while in the second case processes
+may read variable-size chunks at variable distances. The latter case occurs
+for example with QEMU, which splits the I/O generated by the guest into
+multiple chunks, and lets these chunks be served by a pool of cooperating
+processes, iteratively assigning the next chunk of I/O to the first
+available process. CFQ uses actual queue merging for the first type of
+processes, whereas it uses preemption to get a sequential read pattern out
+of the read requests performed by the second type of processes. In the end
+it uses two different mechanisms to achieve the same goal: boosting the
+throughput with interleaved I/O.
+
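To make the first pattern concrete, here is a minimal userspace sketch
(illustrative only, not part of the patch; "datafile" is a placeholder):
two readers each fetch fixed-size chunks at a fixed stride, so the union of
their requests is a sequential scan.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define CHUNK 4096

/* Reader 'id' of 'nr' cooperating readers: fetches chunks id,
 * id + nr, id + 2*nr, ...  Seeky in isolation, sequential in union. */
static void reader(int fd, int id, int nr, int chunks)
{
	char buf[CHUNK];
	int i;

	for (i = id; i < chunks; i += nr)
		if (pread(fd, buf, CHUNK, (off_t)i * CHUNK) < 0)
			break;
}

int main(void)
{
	int fd = open("datafile", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (fork() == 0) {	/* child reads the odd chunks */
		reader(fd, 1, 2, 64);
		_exit(0);
	}
	reader(fd, 0, 2, 64);	/* parent reads the even chunks */
	close(fd);
	return 0;
}
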
+This patch introduces Early Queue Merge (EQM), a unified mechanism to get a
+sequential read pattern with both types of processes. The main idea is to
+check newly arrived requests against the next request of the active queue,
+both in case of actual request insert and in case of request merge. By
+doing so, both types of processes can be handled by just merging their
+queues. EQM is then simpler and more compact than the pair of mechanisms
+used in CFQ.
+
+Finally, EQM also preserves the typical low-latency properties of BFQ, by
+properly restoring the weight-raising state of a queue when it gets back to
+a non-merged state.
+
+Signed-off-by: Mauro Andreolini <mauro.andreolini@unimore.it>
+Signed-off-by: Arianna Avanzini <avanzini.arianna@gmail.com>
+Signed-off-by: Paolo Valente <paolo.valente@unimore.it>
+---
+ block/bfq-iosched.c | 751 +++++++++++++++++++++++++++++++++++++---------------
+ block/bfq-sched.c   |  28 --
+ block/bfq.h         |  54 +++-
+ 3 files changed, 581 insertions(+), 252 deletions(-)
+
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 97ee934..328f33c 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -571,6 +571,57 @@ static inline unsigned int bfq_wr_duration(struct bfq_data *bfqd)
+ 	return dur;
+ }
+ 
++static inline unsigned
++bfq_bfqq_cooperations(struct bfq_queue *bfqq)
++{
++	return bfqq->bic ? bfqq->bic->cooperations : 0;
++}
++
++static inline void
++bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
++{
++	if (bic->saved_idle_window)
++		bfq_mark_bfqq_idle_window(bfqq);
++	else
++		bfq_clear_bfqq_idle_window(bfqq);
++	if (bic->saved_IO_bound)
++		bfq_mark_bfqq_IO_bound(bfqq);
++	else
++		bfq_clear_bfqq_IO_bound(bfqq);
++	/* Assuming that the flag in_large_burst is already correctly set */
++	if (bic->wr_time_left && bfqq->bfqd->low_latency &&
++	    !bfq_bfqq_in_large_burst(bfqq) &&
++	    bic->cooperations < bfqq->bfqd->bfq_coop_thresh) {
++		/*
++		 * Start a weight raising period with the duration given by
++		 * the raising_time_left snapshot.
++		 */
++		if (bfq_bfqq_busy(bfqq))
++			bfqq->bfqd->wr_busy_queues++;
++		bfqq->wr_coeff = bfqq->bfqd->bfq_wr_coeff;
++		bfqq->wr_cur_max_time = bic->wr_time_left;
++		bfqq->last_wr_start_finish = jiffies;
++		bfqq->entity.ioprio_changed = 1;
++	}
++	/*
++	 * Clear wr_time_left to prevent bfq_bfqq_save_state() from
++	 * getting confused about the queue's need of a weight-raising
++	 * period.
++	 */
++	bic->wr_time_left = 0;
++}
++
++/* Must be called with the queue_lock held. */
++static int bfqq_process_refs(struct bfq_queue *bfqq)
++{
++	int process_refs, io_refs;
++
++	io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE];
++	process_refs = atomic_read(&bfqq->ref) - io_refs - bfqq->entity.on_st;
++	BUG_ON(process_refs < 0);
++	return process_refs;
++}
++
+ /* Empty burst list and add just bfqq (see comments to bfq_handle_burst) */
+ static inline void bfq_reset_burst_list(struct bfq_data *bfqd,
+ 					struct bfq_queue *bfqq)
+@@ -815,7 +866,7 @@ static void bfq_add_request(struct request *rq)
+ 		bfq_rq_pos_tree_add(bfqd, bfqq);
+ 
+ 	if (!bfq_bfqq_busy(bfqq)) {
+-		bool soft_rt,
++		bool soft_rt, coop_or_in_burst,
+ 		     idle_for_long_time = time_is_before_jiffies(
+ 						bfqq->budget_timeout +
+ 						bfqd->bfq_wr_min_idle_time);
+@@ -839,11 +890,12 @@ static void bfq_add_request(struct request *rq)
+ 				bfqd->last_ins_in_burst = jiffies;
+ 		}
+ 
++		coop_or_in_burst = bfq_bfqq_in_large_burst(bfqq) ||
++			bfq_bfqq_cooperations(bfqq) >= bfqd->bfq_coop_thresh;
+ 		soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
+-			!bfq_bfqq_in_large_burst(bfqq) &&
++			!coop_or_in_burst &&
+ 			time_is_before_jiffies(bfqq->soft_rt_next_start);
+-		interactive = !bfq_bfqq_in_large_burst(bfqq) &&
+-			      idle_for_long_time;
++		interactive = !coop_or_in_burst && idle_for_long_time;
+ 		entity->budget = max_t(unsigned long, bfqq->max_budget,
+ 				       bfq_serv_to_charge(next_rq, bfqq));
+ 
+@@ -862,11 +914,20 @@ static void bfq_add_request(struct request *rq)
+ 		if (!bfqd->low_latency)
+ 			goto add_bfqq_busy;
+ 
++		if (bfq_bfqq_just_split(bfqq))
++			goto set_ioprio_changed;
++
+ 		/*
+-		 * If the queue is not being boosted and has been idle
+-		 * for enough time, start a weight-raising period
++		 * If the queue:
++		 * - is not being boosted,
++		 * - has been idle for enough time,
++		 * - is not a sync queue or is linked to a bfq_io_cq (it is
++		 *   shared "by its nature" or it is not shared and its
++		 *   requests have not been redirected to a shared queue)
++		 * start a weight-raising period.
+ 		 */
+-		if (old_wr_coeff == 1 && (interactive || soft_rt)) {
++		if (old_wr_coeff == 1 && (interactive || soft_rt) &&
++		    (!bfq_bfqq_sync(bfqq) || bfqq->bic != NULL)) {
+ 			bfqq->wr_coeff = bfqd->bfq_wr_coeff;
+ 			if (interactive)
+ 				bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
+@@ -880,7 +941,7 @@ static void bfq_add_request(struct request *rq)
+ 		} else if (old_wr_coeff > 1) {
+ 			if (interactive)
+ 				bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
+-			else if (bfq_bfqq_in_large_burst(bfqq) ||
++			else if (coop_or_in_burst ||
+ 				 (bfqq->wr_cur_max_time ==
+ 				  bfqd->bfq_wr_rt_max_time &&
+ 				  !soft_rt)) {
+@@ -899,18 +960,18 @@ static void bfq_add_request(struct request *rq)
+ 				/*
+ 				 *
+ 				 * The remaining weight-raising time is lower
+-				 * than bfqd->bfq_wr_rt_max_time, which
+-				 * means that the application is enjoying
+-				 * weight raising either because deemed soft-
+-				 * rt in the near past, or because deemed
+-				 * interactive a long ago. In both cases,
+-				 * resetting now the current remaining weight-
+-				 * raising time for the application to the
+-				 * weight-raising duration for soft rt
+-				 * applications would not cause any latency
+-				 * increase for the application (as the new
+-				 * duration would be higher than the remaining
+-				 * time).
++				 * than bfqd->bfq_wr_rt_max_time, which means
++				 * that the application is enjoying weight
++				 * raising either because deemed soft-rt in
++				 * the near past, or because deemed interactive
++				 * long ago.
++				 * In both cases, resetting now the current
++				 * remaining weight-raising time for the
++				 * application to the weight-raising duration
++				 * for soft rt applications would not cause any
++				 * latency increase for the application (as the
++				 * new duration would be higher than the
++				 * remaining time).
+ 				 *
+ 				 * In addition, the application is now meeting
+ 				 * the requirements for being deemed soft rt.
+@@ -945,6 +1006,7 @@ static void bfq_add_request(struct request *rq)
+ 					bfqd->bfq_wr_rt_max_time;
+ 			}
+ 		}
++set_ioprio_changed:
+ 		if (old_wr_coeff != bfqq->wr_coeff)
+ 			entity->ioprio_changed = 1;
+ add_bfqq_busy:
+@@ -1156,90 +1218,35 @@ static void bfq_end_wr(struct bfq_data *bfqd)
+ 	spin_unlock_irq(bfqd->queue->queue_lock);
+ }
+ 
+-static int bfq_allow_merge(struct request_queue *q, struct request *rq,
+-			   struct bio *bio)
++static inline sector_t bfq_io_struct_pos(void *io_struct, bool request)
+ {
+-	struct bfq_data *bfqd = q->elevator->elevator_data;
+-	struct bfq_io_cq *bic;
+-	struct bfq_queue *bfqq;
+-
+-	/*
+-	 * Disallow merge of a sync bio into an async request.
+-	 */
+-	if (bfq_bio_sync(bio) && !rq_is_sync(rq))
+-		return 0;
+-
+-	/*
+-	 * Lookup the bfqq that this bio will be queued with. Allow
+-	 * merge only if rq is queued there.
+-	 * Queue lock is held here.
+-	 */
+-	bic = bfq_bic_lookup(bfqd, current->io_context);
+-	if (bic == NULL)
+-		return 0;
+-
+-	bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio));
+-	return bfqq == RQ_BFQQ(rq);
+-}
+-
+-static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
+-				       struct bfq_queue *bfqq)
+-{
+-	if (bfqq != NULL) {
+-		bfq_mark_bfqq_must_alloc(bfqq);
+-		bfq_mark_bfqq_budget_new(bfqq);
+-		bfq_clear_bfqq_fifo_expire(bfqq);
+-
+-		bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8;
+-
+-		bfq_log_bfqq(bfqd, bfqq,
+-			     "set_in_service_queue, cur-budget = %lu",
+-			     bfqq->entity.budget);
+-	}
+-
+-	bfqd->in_service_queue = bfqq;
+-}
+-
+-/*
+- * Get and set a new queue for service.
+- */
+-static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd,
+-						  struct bfq_queue *bfqq)
+-{
+-	if (!bfqq)
+-		bfqq = bfq_get_next_queue(bfqd);
++	if (request)
++		return blk_rq_pos(io_struct);
+ 	else
+-		bfq_get_next_queue_forced(bfqd, bfqq);
+-
+-	__bfq_set_in_service_queue(bfqd, bfqq);
+-	return bfqq;
++		return ((struct bio *)io_struct)->bi_iter.bi_sector;
+ }
+ 
+-static inline sector_t bfq_dist_from_last(struct bfq_data *bfqd,
+-					  struct request *rq)
++static inline sector_t bfq_dist_from(sector_t pos1,
++				     sector_t pos2)
+ {
+-	if (blk_rq_pos(rq) >= bfqd->last_position)
+-		return blk_rq_pos(rq) - bfqd->last_position;
++	if (pos1 >= pos2)
++		return pos1 - pos2;
+ 	else
+-		return bfqd->last_position - blk_rq_pos(rq);
++		return pos2 - pos1;
+ }
+ 
+-/*
+- * Return true if bfqq has no request pending and rq is close enough to
+- * bfqd->last_position, or if rq is closer to bfqd->last_position than
+- * bfqq->next_rq
+- */
+-static inline int bfq_rq_close(struct bfq_data *bfqd, struct request *rq)
++static inline int bfq_rq_close_to_sector(void *io_struct, bool request,
++					 sector_t sector)
+ {
+-	return bfq_dist_from_last(bfqd, rq) <= BFQQ_SEEK_THR;
++	return bfq_dist_from(bfq_io_struct_pos(io_struct, request), sector) <=
++	       BFQQ_SEEK_THR;
+ }
+ 
+-static struct bfq_queue *bfqq_close(struct bfq_data *bfqd)
++static struct bfq_queue *bfqq_close(struct bfq_data *bfqd, sector_t sector)
+ {
+ 	struct rb_root *root = &bfqd->rq_pos_tree;
+ 	struct rb_node *parent, *node;
+ 	struct bfq_queue *__bfqq;
+-	sector_t sector = bfqd->last_position;
+ 
+ 	if (RB_EMPTY_ROOT(root))
+ 		return NULL;
+@@ -1258,7 +1265,7 @@ static struct bfq_queue *bfqq_close(struct bfq_data *bfqd)
+ 	 * next_request position).
+ 	 */
+ 	__bfqq = rb_entry(parent, struct bfq_queue, pos_node);
+-	if (bfq_rq_close(bfqd, __bfqq->next_rq))
++	if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
+ 		return __bfqq;
+ 
+ 	if (blk_rq_pos(__bfqq->next_rq) < sector)
+@@ -1269,7 +1276,7 @@ static struct bfq_queue *bfqq_close(struct bfq_data *bfqd)
+ 		return NULL;
+ 
+ 	__bfqq = rb_entry(node, struct bfq_queue, pos_node);
+-	if (bfq_rq_close(bfqd, __bfqq->next_rq))
++	if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
+ 		return __bfqq;
+ 
+ 	return NULL;
+@@ -1278,14 +1285,12 @@ static struct bfq_queue *bfqq_close(struct bfq_data *bfqd)
+ /*
+  * bfqd - obvious
+  * cur_bfqq - passed in so that we don't decide that the current queue
+- *            is closely cooperating with itself.
+- *
+- * We are assuming that cur_bfqq has dispatched at least one request,
+- * and that bfqd->last_position reflects a position on the disk associated
+- * with the I/O issued by cur_bfqq.
++ *            is closely cooperating with itself
++ * sector - used as a reference point to search for a close queue
+  */
+ static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd,
+-					      struct bfq_queue *cur_bfqq)
++					      struct bfq_queue *cur_bfqq,
++					      sector_t sector)
+ {
+ 	struct bfq_queue *bfqq;
+ 
+@@ -1305,7 +1310,7 @@ static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd,
+ 	 * working closely on the same area of the disk. In that case,
+ 	 * we can group them together and don't waste time idling.
+ 	 */
+-	bfqq = bfqq_close(bfqd);
++	bfqq = bfqq_close(bfqd, sector);
+ 	if (bfqq == NULL || bfqq == cur_bfqq)
+ 		return NULL;
+ 
+@@ -1332,6 +1337,315 @@ static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd,
+ 	return bfqq;
+ }
+ 
++static struct bfq_queue *
++bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
++{
++	int process_refs, new_process_refs;
++	struct bfq_queue *__bfqq;
++
++	/*
++	 * If there are no process references on the new_bfqq, then it is
++	 * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
++	 * may have dropped their last reference (not just their last process
++	 * reference).
++	 */
++	if (!bfqq_process_refs(new_bfqq))
++		return NULL;
++
++	/* Avoid a circular list and skip interim queue merges. */
++	while ((__bfqq = new_bfqq->new_bfqq)) {
++		if (__bfqq == bfqq)
++			return NULL;
++		new_bfqq = __bfqq;
++	}
++
++	process_refs = bfqq_process_refs(bfqq);
++	new_process_refs = bfqq_process_refs(new_bfqq);
++	/*
++	 * If the process for the bfqq has gone away, there is no
++	 * sense in merging the queues.
++	 */
++	if (process_refs == 0 || new_process_refs == 0)
++		return NULL;
++
++	bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
++		new_bfqq->pid);
++
++	/*
++	 * Merging is just a redirection: the requests of the process
++	 * owning one of the two queues are redirected to the other queue.
++	 * The latter queue, in its turn, is set as shared if this is the
++	 * first time that the requests of some process are redirected to
++	 * it.
++	 *
++	 * We redirect bfqq to new_bfqq and not the opposite, because we
++	 * are in the context of the process owning bfqq, hence we have
++	 * the io_cq of this process. So we can immediately configure this
++	 * io_cq to redirect the requests of the process to new_bfqq.
++	 *
++	 * NOTE, even if new_bfqq coincides with the in-service queue, the
++	 * io_cq of new_bfqq is not available, because, if the in-service
++	 * queue is shared, bfqd->in_service_bic may not point to the
++	 * io_cq of the in-service queue.
++	 * Redirecting the requests of the process owning bfqq to the
++	 * currently in-service queue is in any case the best option, as
++	 * we feed the in-service queue with new requests close to the
++	 * last request served and, by doing so, hopefully increase the
++	 * throughput.
++	 */
++	bfqq->new_bfqq = new_bfqq;
++	atomic_add(process_refs, &new_bfqq->ref);
++	return new_bfqq;
++}
++
++/*
++ * Attempt to schedule a merge of bfqq with the currently in-service queue
++ * or with a close queue among the scheduled queues.
++ * Return NULL if no merge was scheduled, a pointer to the shared bfq_queue
++ * structure otherwise.
++ *
++ * The OOM queue is not allowed to participate in cooperation: in fact, since
++ * the requests temporarily redirected to the OOM queue could be redirected
++ * again to dedicated queues at any time, the state needed to correctly
++ * handle merging with the OOM queue would be quite complex and expensive
++ * to maintain. Besides, in such a critical condition as an out of memory,
++ * the benefits of queue merging may be little relevant, or even negligible.
++ */
++static struct bfq_queue *
++bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++		     void *io_struct, bool request)
++{
++	struct bfq_queue *in_service_bfqq, *new_bfqq;
++
++	if (bfqq->new_bfqq)
++		return bfqq->new_bfqq;
++
++	if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
++		return NULL;
++
++	in_service_bfqq = bfqd->in_service_queue;
++
++	if (in_service_bfqq == NULL || in_service_bfqq == bfqq ||
++	    !bfqd->in_service_bic ||
++	    unlikely(in_service_bfqq == &bfqd->oom_bfqq))
++		goto check_scheduled;
++
++	if (bfq_class_idle(in_service_bfqq) || bfq_class_idle(bfqq))
++		goto check_scheduled;
++
++	if (bfq_class_rt(in_service_bfqq) != bfq_class_rt(bfqq))
++		goto check_scheduled;
++
++	if (in_service_bfqq->entity.parent != bfqq->entity.parent)
++		goto check_scheduled;
++
++	if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
++	    bfq_bfqq_sync(in_service_bfqq) && bfq_bfqq_sync(bfqq)) {
++		new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
++		if (new_bfqq != NULL)
++			return new_bfqq; /* Merge with in-service queue */
++	}
++
++	/*
++	 * Check whether there is a cooperator among currently scheduled
++	 * queues. The only thing we need is that the bio/request is not
++	 * NULL, as we need it to establish whether a cooperator exists.
++	 */
++check_scheduled:
++	new_bfqq = bfq_close_cooperator(bfqd, bfqq,
++					bfq_io_struct_pos(io_struct, request));
++	if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq))
++		return bfq_setup_merge(bfqq, new_bfqq);
++
++	return NULL;
++}
++
++static inline void
++bfq_bfqq_save_state(struct bfq_queue *bfqq)
++{
++	/*
++	 * If bfqq->bic == NULL, the queue is already shared or its requests
++	 * have already been redirected to a shared queue; both idle window
++	 * and weight raising state have already been saved. Do nothing.
++	 */
++	if (bfqq->bic == NULL)
++		return;
++	if (bfqq->bic->wr_time_left)
++		/*
++		 * This is the queue of a just-started process, and would
++		 * deserve weight raising: we set wr_time_left to the full
++		 * weight-raising duration to trigger weight-raising when
++		 * and if the queue is split and the first request of the
++		 * queue is enqueued.
++		 */
++		bfqq->bic->wr_time_left = bfq_wr_duration(bfqq->bfqd);
++	else if (bfqq->wr_coeff > 1) {
++		unsigned long wr_duration =
++			jiffies - bfqq->last_wr_start_finish;
++		/*
++		 * It may happen that a queue's weight raising period lasts
++		 * longer than its wr_cur_max_time, as weight raising is
++		 * handled only when a request is enqueued or dispatched (it
++		 * does not use any timer). If the weight raising period is
++		 * about to end, don't save it.
++		 */
++		if (bfqq->wr_cur_max_time <= wr_duration)
++			bfqq->bic->wr_time_left = 0;
++		else
++			bfqq->bic->wr_time_left =
++				bfqq->wr_cur_max_time - wr_duration;
++		/*
++		 * The bfq_queue is becoming shared or the requests of the
++		 * process owning the queue are being redirected to a shared
++		 * queue. Stop the weight raising period of the queue, as in
++		 * both cases it should not be owned by an interactive or
++		 * soft real-time application.
++		 */
++		bfq_bfqq_end_wr(bfqq);
++	} else
++		bfqq->bic->wr_time_left = 0;
++	bfqq->bic->saved_idle_window = bfq_bfqq_idle_window(bfqq);
++	bfqq->bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
++	bfqq->bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
++	bfqq->bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
++	bfqq->bic->cooperations++;
++	bfqq->bic->failed_cooperations = 0;
++}
++
++static inline void
++bfq_get_bic_reference(struct bfq_queue *bfqq)
++{
++	/*
++	 * If bfqq->bic has a non-NULL value, the bic to which it belongs
++	 * is about to begin using a shared bfq_queue.
++	 */
++	if (bfqq->bic)
++		atomic_long_inc(&bfqq->bic->icq.ioc->refcount);
++}
++
++static void
++bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
++		struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
++{
++	bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
++		(long unsigned)new_bfqq->pid);
++	/* Save weight raising and idle window of the merged queues */
++	bfq_bfqq_save_state(bfqq);
++	bfq_bfqq_save_state(new_bfqq);
++	if (bfq_bfqq_IO_bound(bfqq))
++		bfq_mark_bfqq_IO_bound(new_bfqq);
++	bfq_clear_bfqq_IO_bound(bfqq);
++	/*
++	 * Grab a reference to the bic, to prevent it from being destroyed
++	 * before being possibly touched by a bfq_split_bfqq().
++	 */
++	bfq_get_bic_reference(bfqq);
++	bfq_get_bic_reference(new_bfqq);
++	/*
++	 * Merge queues (that is, let bic redirect its requests to new_bfqq)
++	 */
++	bic_set_bfqq(bic, new_bfqq, 1);
++	bfq_mark_bfqq_coop(new_bfqq);
++	/*
++	 * new_bfqq now belongs to at least two bics (it is a shared queue):
++	 * set new_bfqq->bic to NULL. bfqq either:
++	 * - does not belong to any bic any more, and hence bfqq->bic must
++	 *   be set to NULL, or
++	 * - is a queue whose owning bics have already been redirected to a
++	 *   different queue, hence the queue is destined to not belong to
++	 *   any bic soon and bfqq->bic is already NULL (therefore the next
++	 *   assignment causes no harm).
++	 */
++	new_bfqq->bic = NULL;
++	bfqq->bic = NULL;
++	bfq_put_queue(bfqq);
++}
++
++static inline void bfq_bfqq_increase_failed_cooperations(struct bfq_queue *bfqq)
++{
++	struct bfq_io_cq *bic = bfqq->bic;
++	struct bfq_data *bfqd = bfqq->bfqd;
++
++	if (bic && bfq_bfqq_cooperations(bfqq) >= bfqd->bfq_coop_thresh) {
++		bic->failed_cooperations++;
++		if (bic->failed_cooperations >= bfqd->bfq_failed_cooperations)
++			bic->cooperations = 0;
++	}
++}
++
++static int bfq_allow_merge(struct request_queue *q, struct request *rq,
++			   struct bio *bio)
++{
++	struct bfq_data *bfqd = q->elevator->elevator_data;
++	struct bfq_io_cq *bic;
++	struct bfq_queue *bfqq, *new_bfqq;
++
++	/*
++	 * Disallow merge of a sync bio into an async request.
++	 */
++	if (bfq_bio_sync(bio) && !rq_is_sync(rq))
++		return 0;
++
++	/*
++	 * Lookup the bfqq that this bio will be queued with. Allow
++	 * merge only if rq is queued there.
++	 * Queue lock is held here.
++	 */
++	bic = bfq_bic_lookup(bfqd, current->io_context);
++	if (bic == NULL)
++		return 0;
++
++	bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio));
++	/*
++	 * We take advantage of this function to perform an early merge
++	 * of the queues of possible cooperating processes.
++	 */
++	if (bfqq != NULL) {
++		new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
++		if (new_bfqq != NULL) {
++			bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq);
++			/*
++			 * If we get here, the bio will be queued in the
++			 * shared queue, i.e., new_bfqq, so use new_bfqq
++			 * to decide whether bio and rq can be merged.
++			 */
++			bfqq = new_bfqq;
++		} else
++			bfq_bfqq_increase_failed_cooperations(bfqq);
++	}
++
++	return bfqq == RQ_BFQQ(rq);
++}
++
++static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
++				       struct bfq_queue *bfqq)
++{
++	if (bfqq != NULL) {
++		bfq_mark_bfqq_must_alloc(bfqq);
++		bfq_mark_bfqq_budget_new(bfqq);
++		bfq_clear_bfqq_fifo_expire(bfqq);
++
++		bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8;
++
++		bfq_log_bfqq(bfqd, bfqq,
++			     "set_in_service_queue, cur-budget = %lu",
++			     bfqq->entity.budget);
++	}
++
++	bfqd->in_service_queue = bfqq;
++}
++
++/*
++ * Get and set a new queue for service.
++ */
++static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
++{
++	struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
++
++	__bfq_set_in_service_queue(bfqd, bfqq);
++	return bfqq;
++}
++
+ /*
+  * If enough samples have been computed, return the current max budget
+  * stored in bfqd, which is dynamically updated according to the
+@@ -1475,61 +1789,6 @@ static struct request *bfq_check_fifo(struct bfq_queue *bfqq)
+ 	return rq;
+ }
+ 
+-/* Must be called with the queue_lock held. */
+-static int bfqq_process_refs(struct bfq_queue *bfqq)
+-{
+-	int process_refs, io_refs;
+-
+-	io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE];
+-	process_refs = atomic_read(&bfqq->ref) - io_refs - bfqq->entity.on_st;
+-	BUG_ON(process_refs < 0);
+-	return process_refs;
+-}
+-
+-static void bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
+-{
+-	int process_refs, new_process_refs;
+-	struct bfq_queue *__bfqq;
+-
+-	/*
+-	 * If there are no process references on the new_bfqq, then it is
+-	 * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
+-	 * may have dropped their last reference (not just their last process
+-	 * reference).
+-	 */
+-	if (!bfqq_process_refs(new_bfqq))
+-		return;
+-
+-	/* Avoid a circular list and skip interim queue merges. */
+-	while ((__bfqq = new_bfqq->new_bfqq)) {
+-		if (__bfqq == bfqq)
+-			return;
+-		new_bfqq = __bfqq;
+-	}
+-
+-	process_refs = bfqq_process_refs(bfqq);
+-	new_process_refs = bfqq_process_refs(new_bfqq);
+-	/*
+-	 * If the process for the bfqq has gone away, there is no
+-	 * sense in merging the queues.
+-	 */
+-	if (process_refs == 0 || new_process_refs == 0)
+-		return;
+-
+-	/*
+-	 * Merge in the direction of the lesser amount of work.
+-	 */
+-	if (new_process_refs >= process_refs) {
+-		bfqq->new_bfqq = new_bfqq;
+-		atomic_add(process_refs, &new_bfqq->ref);
+-	} else {
+-		new_bfqq->new_bfqq = bfqq;
+-		atomic_add(new_process_refs, &bfqq->ref);
+-	}
+-	bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
+-		new_bfqq->pid);
+-}
+-
+ static inline unsigned long bfq_bfqq_budget_left(struct bfq_queue *bfqq)
+ {
+ 	struct bfq_entity *entity = &bfqq->entity;
+@@ -2263,7 +2522,7 @@ static inline bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
+  */
+ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
+ {
+-	struct bfq_queue *bfqq, *new_bfqq = NULL;
++	struct bfq_queue *bfqq;
+ 	struct request *next_rq;
+ 	enum bfqq_expiration reason = BFQ_BFQQ_BUDGET_TIMEOUT;
+ 
+@@ -2273,17 +2532,6 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
+ 
+ 	bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
+ 
+-	/*
+-         * If another queue has a request waiting within our mean seek
+-         * distance, let it run. The expire code will check for close
+-         * cooperators and put the close queue at the front of the
+-         * service tree. If possible, merge the expiring queue with the
+-         * new bfqq.
+-         */
+-        new_bfqq = bfq_close_cooperator(bfqd, bfqq);
+-        if (new_bfqq != NULL && bfqq->new_bfqq == NULL)
+-                bfq_setup_merge(bfqq, new_bfqq);
+-
+ 	if (bfq_may_expire_for_budg_timeout(bfqq) &&
+ 	    !timer_pending(&bfqd->idle_slice_timer) &&
+ 	    !bfq_bfqq_must_idle(bfqq))
+@@ -2322,10 +2570,7 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
+ 				bfq_clear_bfqq_wait_request(bfqq);
+ 				del_timer(&bfqd->idle_slice_timer);
+ 			}
+-			if (new_bfqq == NULL)
+-				goto keep_queue;
+-			else
+-				goto expire;
++			goto keep_queue;
+ 		}
+ 	}
+ 
+@@ -2334,40 +2579,30 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
+ 	 * in flight (possibly waiting for a completion) or is idling for a
+ 	 * new request, then keep it.
+ 	 */
+-	if (new_bfqq == NULL && (timer_pending(&bfqd->idle_slice_timer) ||
+-	    (bfqq->dispatched != 0 && bfq_bfqq_must_not_expire(bfqq)))) {
++	if (timer_pending(&bfqd->idle_slice_timer) ||
++	    (bfqq->dispatched != 0 && bfq_bfqq_must_not_expire(bfqq))) {
+ 		bfqq = NULL;
+ 		goto keep_queue;
+-	} else if (new_bfqq != NULL && timer_pending(&bfqd->idle_slice_timer)) {
+-		/*
+-		 * Expiring the queue because there is a close cooperator,
+-		 * cancel timer.
+-		 */
+-		bfq_clear_bfqq_wait_request(bfqq);
+-		del_timer(&bfqd->idle_slice_timer);
+ 	}
+ 
+ 	reason = BFQ_BFQQ_NO_MORE_REQUESTS;
+ expire:
+ 	bfq_bfqq_expire(bfqd, bfqq, 0, reason);
+ new_queue:
+-	bfqq = bfq_set_in_service_queue(bfqd, new_bfqq);
++	bfqq = bfq_set_in_service_queue(bfqd);
+ 	bfq_log(bfqd, "select_queue: new queue %d returned",
+ 		bfqq != NULL ? bfqq->pid : 0);
+ keep_queue:
+ 	return bfqq;
+ }
+ 
+-static void bfq_update_wr_data(struct bfq_data *bfqd,
+-			       struct bfq_queue *bfqq)
++static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ {
+-	if (bfqq->wr_coeff > 1) { /* queue is being boosted */
+-		struct bfq_entity *entity = &bfqq->entity;
+-
++	struct bfq_entity *entity = &bfqq->entity;
++	if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */
+ 		bfq_log_bfqq(bfqd, bfqq,
+ 			"raising period dur %u/%u msec, old coeff %u, w %d(%d)",
+-			jiffies_to_msecs(jiffies -
+-				bfqq->last_wr_start_finish),
++			jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
+ 			jiffies_to_msecs(bfqq->wr_cur_max_time),
+ 			bfqq->wr_coeff,
+ 			bfqq->entity.weight, bfqq->entity.orig_weight);
+@@ -2376,12 +2611,16 @@ static void bfq_update_wr_data(struct bfq_data *bfqd,
+ 		       entity->orig_weight * bfqq->wr_coeff);
+ 		if (entity->ioprio_changed)
+ 			bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
++
+ 		/*
+ 		 * If the queue was activated in a burst, or
+ 		 * too much time has elapsed from the beginning
+-		 * of this weight-raising, then end weight raising.
++		 * of this weight-raising period, or the queue has
++		 * exceeded the acceptable number of cooperations,
++		 * then end weight raising.
+ 		 */
+ 		if (bfq_bfqq_in_large_burst(bfqq) ||
++		    bfq_bfqq_cooperations(bfqq) >= bfqd->bfq_coop_thresh ||
+ 		    time_is_before_jiffies(bfqq->last_wr_start_finish +
+ 					   bfqq->wr_cur_max_time)) {
+ 			bfqq->last_wr_start_finish = jiffies;
+@@ -2390,11 +2629,13 @@ static void bfq_update_wr_data(struct bfq_data *bfqd,
+ 				     bfqq->last_wr_start_finish,
+ 				     jiffies_to_msecs(bfqq->wr_cur_max_time));
+ 			bfq_bfqq_end_wr(bfqq);
+-			__bfq_entity_update_weight_prio(
+-				bfq_entity_service_tree(entity),
+-				entity);
+ 		}
+ 	}
++	/* Update weight both if it must be raised and if it must be lowered */
++	if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1))
++		__bfq_entity_update_weight_prio(
++			bfq_entity_service_tree(entity),
++			entity);
+ }
+ 
+ /*
+@@ -2642,6 +2883,25 @@ static inline void bfq_init_icq(struct io_cq *icq)
+ 	struct bfq_io_cq *bic = icq_to_bic(icq);
+ 
+ 	bic->ttime.last_end_request = jiffies;
++	/*
++	 * A newly created bic indicates that the process has just
++	 * started doing I/O, and is probably mapping into memory its
++	 * executable and libraries: it definitely needs weight raising.
++	 * There is however the possibility that the process performs,
++	 * for a while, I/O close to some other process. EQM intercepts
++	 * this behavior and may merge the queue corresponding to the
++	 * process with some other queue, BEFORE the weight of the queue
++	 * is raised. Merged queues are not weight-raised (they are assumed
++	 * to belong to processes that benefit only from high throughput).
++	 * If the merge is basically the consequence of an accident, then
++	 * the queue will be split soon and will get back its old weight.
++	 * It is then important to write down somewhere that this queue
++	 * does need weight raising, even if it did not make it to get its
++	 * weight raised before being merged. To this purpose, we overload
++	 * the field raising_time_left and assign 1 to it, to mark the queue
++	 * the field wr_time_left and assign 1 to it, to mark the queue
++	 */
++	bic->wr_time_left = 1;
+ }
+ 
+ static void bfq_exit_icq(struct io_cq *icq)
+@@ -2655,6 +2915,13 @@ static void bfq_exit_icq(struct io_cq *icq)
+ 	}
+ 
+ 	if (bic->bfqq[BLK_RW_SYNC]) {
++		/*
++		 * If the bic is using a shared queue, put the reference
++		 * taken on the io_context when the bic started using a
++		 * shared bfq_queue.
++		 */
++		if (bfq_bfqq_coop(bic->bfqq[BLK_RW_SYNC]))
++			put_io_context(icq->ioc);
+ 		bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_SYNC]);
+ 		bic->bfqq[BLK_RW_SYNC] = NULL;
+ 	}
+@@ -2950,6 +3217,10 @@ static void bfq_update_idle_window(struct bfq_data *bfqd,
+ 	if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq))
+ 		return;
+ 
++	/* Idle window just restored, statistics are meaningless. */
++	if (bfq_bfqq_just_split(bfqq))
++		return;
++
+ 	enable_idle = bfq_bfqq_idle_window(bfqq);
+ 
+ 	if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
+@@ -2997,6 +3268,7 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ 	if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 ||
+ 	    !BFQQ_SEEKY(bfqq))
+ 		bfq_update_idle_window(bfqd, bfqq, bic);
++	bfq_clear_bfqq_just_split(bfqq);
+ 
+ 	bfq_log_bfqq(bfqd, bfqq,
+ 		     "rq_enqueued: idle_window=%d (seeky %d, mean %llu)",
+@@ -3057,13 +3329,49 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ static void bfq_insert_request(struct request_queue *q, struct request *rq)
+ {
+ 	struct bfq_data *bfqd = q->elevator->elevator_data;
+-	struct bfq_queue *bfqq = RQ_BFQQ(rq);
++	struct bfq_queue *bfqq = RQ_BFQQ(rq), *new_bfqq;
+ 
+ 	assert_spin_locked(bfqd->queue->queue_lock);
++
++	/*
++	 * An unplug may trigger a requeue of a request from the device
++	 * driver: make sure we are in process context while trying to
++	 * merge two bfq_queues.
++	 */
++	if (!in_interrupt()) {
++		new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true);
++		if (new_bfqq != NULL) {
++			if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq)
++				new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1);
++			/*
++			 * Release the request's reference to the old bfqq
++			 * and make sure one is taken to the shared queue.
++			 */
++			new_bfqq->allocated[rq_data_dir(rq)]++;
++			bfqq->allocated[rq_data_dir(rq)]--;
++			atomic_inc(&new_bfqq->ref);
++			bfq_put_queue(bfqq);
++			if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
++				bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
++						bfqq, new_bfqq);
++			rq->elv.priv[1] = new_bfqq;
++			bfqq = new_bfqq;
++		} else
++			bfq_bfqq_increase_failed_cooperations(bfqq);
++	}
++
+ 	bfq_init_prio_data(bfqq, RQ_BIC(rq));
+ 
+ 	bfq_add_request(rq);
+ 
++	/*
++	 * Here a newly-created bfq_queue has already started a weight-raising
++	 * period: clear wr_time_left to prevent bfq_bfqq_save_state()
++	 * from assigning it a full weight-raising period. See the detailed
++	 * comments about this field in bfq_init_icq().
++	 */
++	if (bfqq->bic != NULL)
++		bfqq->bic->wr_time_left = 0;
+ 	rq->fifo_time = jiffies + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
+ 	list_add_tail(&rq->queuelist, &bfqq->fifo);
+ 
+@@ -3228,18 +3536,6 @@ static void bfq_put_request(struct request *rq)
+ 	}
+ }
+ 
+-static struct bfq_queue *
+-bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
+-		struct bfq_queue *bfqq)
+-{
+-	bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
+-		(long unsigned)bfqq->new_bfqq->pid);
+-	bic_set_bfqq(bic, bfqq->new_bfqq, 1);
+-	bfq_mark_bfqq_coop(bfqq->new_bfqq);
+-	bfq_put_queue(bfqq);
+-	return bic_to_bfqq(bic, 1);
+-}
+-
+ /*
+  * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
+  * was the last process referring to said bfqq.
+@@ -3248,6 +3544,9 @@ static struct bfq_queue *
+ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
+ {
+ 	bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
++
++	put_io_context(bic->icq.ioc);
++
+ 	if (bfqq_process_refs(bfqq) == 1) {
+ 		bfqq->pid = current->pid;
+ 		bfq_clear_bfqq_coop(bfqq);
+@@ -3276,6 +3575,7 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
+ 	struct bfq_queue *bfqq;
+ 	struct bfq_group *bfqg;
+ 	unsigned long flags;
++	bool split = false;
+ 
+ 	might_sleep_if(gfp_mask & __GFP_WAIT);
+ 
+@@ -3293,25 +3593,26 @@ new_queue:
+ 	if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) {
+ 		bfqq = bfq_get_queue(bfqd, bfqg, is_sync, bic, gfp_mask);
+ 		bic_set_bfqq(bic, bfqq, is_sync);
++		if (split && is_sync) {
++			if ((bic->was_in_burst_list && bfqd->large_burst) ||
++			    bic->saved_in_large_burst)
++				bfq_mark_bfqq_in_large_burst(bfqq);
++			else {
++			    bfq_clear_bfqq_in_large_burst(bfqq);
++			    if (bic->was_in_burst_list)
++			       hlist_add_head(&bfqq->burst_list_node,
++				              &bfqd->burst_list);
++			}
++		}
+ 	} else {
+-		/*
+-		 * If the queue was seeky for too long, break it apart.
+-		 */
++		/* If the queue was seeky for too long, break it apart. */
+ 		if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
+ 			bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
+ 			bfqq = bfq_split_bfqq(bic, bfqq);
++			split = true;
+ 			if (!bfqq)
+ 				goto new_queue;
+ 		}
+-
+-		/*
+-		 * Check to see if this queue is scheduled to merge with
+-		 * another closely cooperating queue. The merging of queues
+-		 * happens here as it must be done in process context.
+-		 * The reference on new_bfqq was taken in merge_bfqqs.
+-		 */
+-		if (bfqq->new_bfqq != NULL)
+-			bfqq = bfq_merge_bfqqs(bfqd, bic, bfqq);
+ 	}
+ 
+ 	bfqq->allocated[rw]++;
+@@ -3322,6 +3623,26 @@ new_queue:
+ 	rq->elv.priv[0] = bic;
+ 	rq->elv.priv[1] = bfqq;
+ 
++	/*
++	 * If a bfq_queue has only one process reference, it is owned
++	 * by only one bfq_io_cq: we can set the bic field of the
++	 * bfq_queue to the address of that structure. Also, if the
++	 * queue has just been split, mark a flag so that the
++	 * information is available to the other scheduler hooks.
++	 */
++	if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
++		bfqq->bic = bic;
++		if (split) {
++			bfq_mark_bfqq_just_split(bfqq);
++			/*
++			 * If the queue has just been split from a shared
++			 * queue, restore the idle window and the possible
++			 * weight raising period.
++			 */
++			bfq_bfqq_resume_state(bfqq, bic);
++		}
++	}
++
+ 	spin_unlock_irqrestore(q->queue_lock, flags);
+ 
+ 	return 0;
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index 2931563..6764a7e 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -1091,34 +1091,6 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
+ 	return bfqq;
+ }
+ 
+-/*
+- * Forced extraction of the given queue.
+- */
+-static void bfq_get_next_queue_forced(struct bfq_data *bfqd,
+-				      struct bfq_queue *bfqq)
+-{
+-	struct bfq_entity *entity;
+-	struct bfq_sched_data *sd;
+-
+-	BUG_ON(bfqd->in_service_queue != NULL);
+-
+-	entity = &bfqq->entity;
+-	/*
+-	 * Bubble up extraction/update from the leaf to the root.
+-	*/
+-	for_each_entity(entity) {
+-		sd = entity->sched_data;
+-		bfq_update_budget(entity);
+-		bfq_update_vtime(bfq_entity_service_tree(entity));
+-		bfq_active_extract(bfq_entity_service_tree(entity), entity);
+-		sd->in_service_entity = entity;
+-		sd->next_in_service = NULL;
+-		entity->service = 0;
+-	}
+-
+-	return;
+-}
+-
+ static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
+ {
+ 	if (bfqd->in_service_bic != NULL) {
+diff --git a/block/bfq.h b/block/bfq.h
+index 3c5d85e..5ced336 100644
+--- a/block/bfq.h
++++ b/block/bfq.h
+@@ -218,18 +218,21 @@ struct bfq_group;
+  *                      idle @bfq_queue with no outstanding requests, then
+  *                      the task associated with the queue it is deemed as
+  *                      soft real-time (see the comments to the function
+- *                      bfq_bfqq_softrt_next_start()).
++ *                      bfq_bfqq_softrt_next_start())
+  * @last_idle_bklogged: time of the last transition of the @bfq_queue from
+  *                      idle to backlogged
+  * @service_from_backlogged: cumulative service received from the @bfq_queue
+  *                           since the last transition from idle to
+  *                           backlogged
++ * @bic: pointer to the bfq_io_cq owning the bfq_queue, set to %NULL if the
++ *	 queue is shared
+  *
+- * A bfq_queue is a leaf request queue; it can be associated with an io_context
+- * or more, if it is async or shared between cooperating processes. @cgroup
+- * holds a reference to the cgroup, to be sure that it does not disappear while
+- * a bfqq still references it (mostly to avoid races between request issuing and
+- * task migration followed by cgroup destruction).
++ * A bfq_queue is a leaf request queue; it can be associated with an
++ * io_context or more, if it  is  async or shared  between  cooperating
++ * processes. @cgroup holds a reference to the cgroup, to be sure that it
++ * does not disappear while a bfqq still references it (mostly to avoid
++ * races between request issuing and task migration followed by cgroup
++ * destruction).
+  * All the fields are protected by the queue lock of the containing bfqd.
+  */
+ struct bfq_queue {
+@@ -269,6 +272,7 @@ struct bfq_queue {
+ 	unsigned int requests_within_timer;
+ 
+ 	pid_t pid;
++	struct bfq_io_cq *bic;
+ 
+ 	/* weight-raising fields */
+ 	unsigned long wr_cur_max_time;
+@@ -298,12 +302,42 @@ struct bfq_ttime {
+  * @icq: associated io_cq structure
+  * @bfqq: array of two process queues, the sync and the async
+  * @ttime: associated @bfq_ttime struct
++ * @wr_time_left: snapshot of the time left before weight raising ends
++ *                for the sync queue associated to this process; this
++ *		  snapshot is taken to remember this value while the weight
++ *		  raising is suspended because the queue is merged with a
++ *		  shared queue, and is used to set @raising_cur_max_time
++ *		  when the queue is split from the shared queue and its
++ *		  weight is raised again
++ * @saved_idle_window: same purpose as the previous field for the idle
++ *                     window
++ * @saved_IO_bound: same purpose as the previous two fields for the I/O
++ *                  bound classification of a queue
++ * @saved_in_large_burst: same purpose as the previous fields for the
++ *                        value of the field keeping the queue's belonging
++ *                        to a large burst
++ * @was_in_burst_list: true if the queue belonged to a burst list
++ *                     before its merge with another cooperating queue
++ * @cooperations: counter of consecutive successful queue merges underwent
++ *                by any of the process' @bfq_queues
++ * @failed_cooperations: counter of consecutive failed queue merges of any
++ *                       of the process' @bfq_queues
+  */
+ struct bfq_io_cq {
+ 	struct io_cq icq; /* must be the first member */
+ 	struct bfq_queue *bfqq[2];
+ 	struct bfq_ttime ttime;
+ 	int ioprio;
++
++	unsigned int wr_time_left;
++	bool saved_idle_window;
++	bool saved_IO_bound;
++
++	bool saved_in_large_burst;
++	bool was_in_burst_list;
++
++	unsigned int cooperations;
++	unsigned int failed_cooperations;
+ };
+ 
+ enum bfq_device_speed {
+@@ -539,7 +573,7 @@ enum bfqq_state_flags {
+ 	BFQ_BFQQ_FLAG_prio_changed,	/* task priority has changed */
+ 	BFQ_BFQQ_FLAG_sync,		/* synchronous queue */
+ 	BFQ_BFQQ_FLAG_budget_new,	/* no completion with this budget */
+-	BFQ_BFQQ_FLAG_IO_bound,         /*
++	BFQ_BFQQ_FLAG_IO_bound,		/*
+ 					 * bfqq has timed-out at least once
+ 					 * having consumed at most 2/10 of
+ 					 * its budget
+@@ -552,12 +586,13 @@ enum bfqq_state_flags {
+ 					 * bfqq has proved to be slow and
+ 					 * seeky until budget timeout
+ 					 */
+-	BFQ_BFQQ_FLAG_softrt_update,    /*
++	BFQ_BFQQ_FLAG_softrt_update,	/*
+ 					 * may need softrt-next-start
+ 					 * update
+ 					 */
+ 	BFQ_BFQQ_FLAG_coop,		/* bfqq is shared */
+-	BFQ_BFQQ_FLAG_split_coop,	/* shared bfqq will be splitted */
++	BFQ_BFQQ_FLAG_split_coop,	/* shared bfqq will be split */
++	BFQ_BFQQ_FLAG_just_split,	/* queue has just been split */
+ };
+ 
+ #define BFQ_BFQQ_FNS(name)						\
+@@ -587,6 +622,7 @@ BFQ_BFQQ_FNS(in_large_burst);
+ BFQ_BFQQ_FNS(constantly_seeky);
+ BFQ_BFQQ_FNS(coop);
+ BFQ_BFQQ_FNS(split_coop);
++BFQ_BFQQ_FNS(just_split);
+ BFQ_BFQQ_FNS(softrt_update);
+ #undef BFQ_BFQQ_FNS
+ 
+-- 
+2.3.0
+
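
The BFQ changes above implement one recurring bookkeeping idea: when a sync
queue is merged into a shared (cooperating) queue, the per-process state that
would otherwise be lost (remaining weight-raising time, idle window, burst
membership) is snapshotted into the bfq_io_cq, and handed back if the queue is
later split out again. The following is a minimal user-space C sketch of that
save/restore discipline; the structs and fields are simplified stand-ins, not
the kernel's actual bfq_queue and bfq_io_cq definitions.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the per-queue state BFQ tracks. */
struct queue_state {
	unsigned int wr_time_left;	/* remaining weight-raising time */
	bool idle_window;
	bool shared;			/* merged into a cooperating queue */
};

/* Simplified stand-in for the snapshot kept in the bfq_io_cq. */
struct io_ctx_snapshot {
	unsigned int saved_wr_time_left;
	bool saved_idle_window;
	bool valid;
};

/* On merge: stash the per-process state before the queue becomes shared. */
static void save_state(struct io_ctx_snapshot *snap, const struct queue_state *q)
{
	snap->saved_wr_time_left = q->wr_time_left;
	snap->saved_idle_window = q->idle_window;
	snap->valid = true;
}

/* On split: hand the stashed state back to the now-private queue. */
static void resume_state(struct queue_state *q, struct io_ctx_snapshot *snap)
{
	if (!snap->valid)
		return;
	q->wr_time_left = snap->saved_wr_time_left;
	q->idle_window = snap->saved_idle_window;
	q->shared = false;
	snap->valid = false;
}

int main(void)
{
	struct queue_state q = { .wr_time_left = 3000, .idle_window = true };
	struct io_ctx_snapshot snap = { 0 };

	save_state(&snap, &q);		/* queue merged into a shared queue */
	q.shared = true;
	q.wr_time_left = 0;		/* weight raising suspended while shared */

	resume_state(&q, &snap);	/* queue split back out */
	printf("wr_time_left=%u idle_window=%d shared=%d\n",
	       q.wr_time_left, q.idle_window, q.shared);
	return 0;
}

The just_split flag in the real patch serves a related purpose: it marks the
restored queue so that idle-window statistics, which are meaningless right
after a split, are not updated until fresh samples accumulate.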



* [gentoo-commits] proj/linux-patches:3.19 commit in: /
@ 2015-02-14 20:13 Mike Pagano
  0 siblings, 0 replies; 19+ messages in thread
From: Mike Pagano @ 2015-02-14 20:13 UTC (permalink / raw
  To: gentoo-commits

commit:     7f3a0b1564e52306388a4ea9876fa159c2a6ffaa
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Feb 14 20:13:14 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Feb 14 20:13:14 2015 +0000
URL:        http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=7f3a0b15

Enable link security restrictions by default

---
 0000_README                                        |  4 ++++
 ...ble-link-security-restrictions-by-default.patch | 22 ++++++++++++++++++++++
 2 files changed, 26 insertions(+)

diff --git a/0000_README b/0000_README
index 35025ab..aadbe00 100644
--- a/0000_README
+++ b/0000_README
@@ -47,6 +47,10 @@ Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.
 
+Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
+From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
+Desc:   Enable link security restrictions by default
+
 Patch:  2700_ThinkPad-30-brightness-control-fix.patch
 From:   Seth Forshee <seth.forshee@canonical.com>
 Desc:   ACPI: Disable Windows 8 compatibility for some Lenovo ThinkPads.

diff --git a/1510_fs-enable-link-security-restrictions-by-default.patch b/1510_fs-enable-link-security-restrictions-by-default.patch
new file mode 100644
index 0000000..639fb3c
--- /dev/null
+++ b/1510_fs-enable-link-security-restrictions-by-default.patch
@@ -0,0 +1,22 @@
+From: Ben Hutchings <ben@decadent.org.uk>
+Subject: fs: Enable link security restrictions by default
+Date: Fri, 02 Nov 2012 05:32:06 +0000
+Bug-Debian: https://bugs.debian.org/609455
+Forwarded: not-needed
+
+This reverts commit 561ec64ae67ef25cac8d72bb9c4bfc955edfd415
+('VFS: don't do protected {sym,hard}links by default').
+
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -651,8 +651,8 @@ static inline void put_link(struct namei
+ 	path_put(link);
+ }
+ 
+-int sysctl_protected_symlinks __read_mostly = 0;
+-int sysctl_protected_hardlinks __read_mostly = 0;
++int sysctl_protected_symlinks __read_mostly = 1;
++int sysctl_protected_hardlinks __read_mostly = 1;
+ 
+ /**
+  * may_follow_link - Check symlink following for unsafe situations
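
Both sysctls flipped by this patch are exported under /proc/sys/fs, so the
effective values on a running kernel can be read back directly. Below is a
small illustrative C sketch; the procfs paths are the standard sysctl
locations, while the helper itself is just an assumption for demonstration.

#include <stdio.h>

/* Read an integer fs sysctl from /proc/sys/fs; returns -1 if unreadable. */
static int read_fs_sysctl(const char *name)
{
	char path[128];
	int val = -1;
	FILE *f;

	snprintf(path, sizeof(path), "/proc/sys/fs/%s", name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	/* Both should report 1 on a kernel carrying this patch. */
	printf("protected_symlinks  = %d\n", read_fs_sysctl("protected_symlinks"));
	printf("protected_hardlinks = %d\n", read_fs_sysctl("protected_hardlinks"));
	return 0;
}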



* [gentoo-commits] proj/linux-patches:3.19 commit in: /
@ 2015-03-07 14:10 Mike Pagano
  0 siblings, 0 replies; 19+ messages in thread
From: Mike Pagano @ 2015-03-07 14:10 UTC (permalink / raw
  To: gentoo-commits

commit:     8ac628efa623a94e812fbfdda2485a5b214d3cf9
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Mar  7 14:10:20 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Mar  7 14:10:20 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8ac628ef

Linux patch 3.19.1

---
 0000_README             |    4 +
 1000_linux-3.19.1.patch | 7185 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 7189 insertions(+)

diff --git a/0000_README b/0000_README
index aadbe00..87dcbc3 100644
--- a/0000_README
+++ b/0000_README
@@ -43,6 +43,10 @@ EXPERIMENTAL
 Individual Patch Descriptions:
 --------------------------------------------------------------------------
 
+Patch:  1000_linux-3.19.1.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.19.1
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1000_linux-3.19.1.patch b/1000_linux-3.19.1.patch
new file mode 100644
index 0000000..a8bb242
--- /dev/null
+++ b/1000_linux-3.19.1.patch
@@ -0,0 +1,7185 @@
+diff --git a/Makefile b/Makefile
+index b15036b1890c..688777b17869 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 19
+-SUBLEVEL = 0
++SUBLEVEL = 1
+ EXTRAVERSION =
+ NAME = Diseased Newt
+ 
+diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
+index 6b0b7f7ef783..7670f33b9ce2 100644
+--- a/arch/arc/include/asm/pgtable.h
++++ b/arch/arc/include/asm/pgtable.h
+@@ -259,7 +259,8 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
+ #define pmd_clear(xp)			do { pmd_val(*(xp)) = 0; } while (0)
+ 
+ #define pte_page(x) (mem_map + \
+-		(unsigned long)(((pte_val(x) - PAGE_OFFSET) >> PAGE_SHIFT)))
++		(unsigned long)(((pte_val(x) - CONFIG_LINUX_LINK_BASE) >> \
++				PAGE_SHIFT)))
+ 
+ #define mk_pte(page, pgprot)						\
+ ({									\
+diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
+index 6cc25ed912ee..2c6248d9a9ef 100644
+--- a/arch/arm/boot/dts/am335x-bone-common.dtsi
++++ b/arch/arm/boot/dts/am335x-bone-common.dtsi
+@@ -195,6 +195,7 @@
+ 
+ &usb0 {
+ 	status = "okay";
++	dr_mode = "peripheral";
+ };
+ 
+ &usb1 {
+diff --git a/arch/arm/boot/dts/bcm63138.dtsi b/arch/arm/boot/dts/bcm63138.dtsi
+index d2d8e94e0aa2..f46329c8ad75 100644
+--- a/arch/arm/boot/dts/bcm63138.dtsi
++++ b/arch/arm/boot/dts/bcm63138.dtsi
+@@ -66,8 +66,9 @@
+ 			reg = <0x1d000 0x1000>;
+ 			cache-unified;
+ 			cache-level = <2>;
+-			cache-sets = <16>;
+-			cache-size = <0x80000>;
++			cache-size = <524288>;
++			cache-sets = <1024>;
++			cache-line-size = <32>;
+ 			interrupts = <GIC_PPI 0 IRQ_TYPE_LEVEL_HIGH>;
+ 		};
+ 
+diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
+index 8acf5d85c99d..f76fe94267d6 100644
+--- a/arch/arm/boot/dts/tegra20.dtsi
++++ b/arch/arm/boot/dts/tegra20.dtsi
+@@ -68,9 +68,9 @@
+ 			reset-names = "2d";
+ 		};
+ 
+-		gr3d@54140000 {
++		gr3d@54180000 {
+ 			compatible = "nvidia,tegra20-gr3d";
+-			reg = <0x54140000 0x00040000>;
++			reg = <0x54180000 0x00040000>;
+ 			clocks = <&tegra_car TEGRA20_CLK_GR3D>;
+ 			resets = <&tegra_car 24>;
+ 			reset-names = "3d";
+@@ -130,9 +130,9 @@
+ 			status = "disabled";
+ 		};
+ 
+-		dsi@542c0000 {
++		dsi@54300000 {
+ 			compatible = "nvidia,tegra20-dsi";
+-			reg = <0x542c0000 0x00040000>;
++			reg = <0x54300000 0x00040000>;
+ 			clocks = <&tegra_car TEGRA20_CLK_DSI>;
+ 			resets = <&tegra_car 48>;
+ 			reset-names = "dsi";
+diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig
+index aaeec78c3ec4..8b11f44bb36e 100644
+--- a/arch/arm/mach-bcm/Kconfig
++++ b/arch/arm/mach-bcm/Kconfig
+@@ -68,7 +68,7 @@ config ARCH_BCM_MOBILE
+ 	  This enables support for systems based on Broadcom mobile SoCs.
+ 
+ config ARCH_BCM_281XX
+-	bool "Broadcom BCM281XX SoC family"
++	bool "Broadcom BCM281XX SoC family" if ARCH_MULTI_V7
+ 	select ARCH_BCM_MOBILE
+ 	select HAVE_SMP
+ 	help
+@@ -77,7 +77,7 @@ config ARCH_BCM_281XX
+ 	  variants.
+ 
+ config ARCH_BCM_21664
+-	bool "Broadcom BCM21664 SoC family"
++	bool "Broadcom BCM21664 SoC family" if ARCH_MULTI_V7
+ 	select ARCH_BCM_MOBILE
+ 	select HAVE_SMP
+ 	help
+diff --git a/arch/arm/mach-bcm/platsmp-brcmstb.c b/arch/arm/mach-bcm/platsmp-brcmstb.c
+index 31c87a284a34..e209e6fc7caf 100644
+--- a/arch/arm/mach-bcm/platsmp-brcmstb.c
++++ b/arch/arm/mach-bcm/platsmp-brcmstb.c
+@@ -17,6 +17,7 @@
+ #include <linux/errno.h>
+ #include <linux/init.h>
+ #include <linux/io.h>
++#include <linux/jiffies.h>
+ #include <linux/of_address.h>
+ #include <linux/of_platform.h>
+ #include <linux/printk.h>
+@@ -94,10 +95,35 @@ static u32 pwr_ctrl_rd(u32 cpu)
+ 	return readl_relaxed(base);
+ }
+ 
+-static void pwr_ctrl_wr(u32 cpu, u32 val)
++static void pwr_ctrl_set(unsigned int cpu, u32 val, u32 mask)
+ {
+ 	void __iomem *base = pwr_ctrl_get_base(cpu);
+-	writel(val, base);
++	writel((readl(base) & mask) | val, base);
++}
++
++static void pwr_ctrl_clr(unsigned int cpu, u32 val, u32 mask)
++{
++	void __iomem *base = pwr_ctrl_get_base(cpu);
++	writel((readl(base) & mask) & ~val, base);
++}
++
++#define POLL_TMOUT_MS 500
++static int pwr_ctrl_wait_tmout(unsigned int cpu, u32 set, u32 mask)
++{
++	const unsigned long timeo = jiffies + msecs_to_jiffies(POLL_TMOUT_MS);
++	u32 tmp;
++
++	do {
++		tmp = pwr_ctrl_rd(cpu) & mask;
++		if (!set == !tmp)
++			return 0;
++	} while (time_before(jiffies, timeo));
++
++	tmp = pwr_ctrl_rd(cpu) & mask;
++	if (!set == !tmp)
++		return 0;
++
++	return -ETIMEDOUT;
+ }
+ 
+ static void cpu_rst_cfg_set(u32 cpu, int set)
+@@ -139,15 +165,22 @@ static void brcmstb_cpu_power_on(u32 cpu)
+ 	 * The secondary cores power was cut, so we must go through
+ 	 * power-on initialization.
+ 	 */
+-	u32 tmp;
++	pwr_ctrl_set(cpu, ZONE_MAN_ISO_CNTL_MASK, 0xffffff00);
++	pwr_ctrl_set(cpu, ZONE_MANUAL_CONTROL_MASK, -1);
++	pwr_ctrl_set(cpu, ZONE_RESERVED_1_MASK, -1);
+ 
+-	/* Request zone power up */
+-	pwr_ctrl_wr(cpu, ZONE_PWR_UP_REQ_MASK);
++	pwr_ctrl_set(cpu, ZONE_MAN_MEM_PWR_MASK, -1);
+ 
+-	/* Wait for the power up FSM to complete */
+-	do {
+-		tmp = pwr_ctrl_rd(cpu);
+-	} while (!(tmp & ZONE_PWR_ON_STATE_MASK));
++	if (pwr_ctrl_wait_tmout(cpu, 1, ZONE_MEM_PWR_STATE_MASK))
++		panic("ZONE_MEM_PWR_STATE_MASK set timeout");
++
++	pwr_ctrl_set(cpu, ZONE_MAN_CLKEN_MASK, -1);
++
++	if (pwr_ctrl_wait_tmout(cpu, 1, ZONE_DPG_PWR_STATE_MASK))
++		panic("ZONE_DPG_PWR_STATE_MASK set timeout");
++
++	pwr_ctrl_clr(cpu, ZONE_MAN_ISO_CNTL_MASK, -1);
++	pwr_ctrl_set(cpu, ZONE_MAN_RESET_CNTL_MASK, -1);
+ }
+ 
+ static int brcmstb_cpu_get_power_state(u32 cpu)
+@@ -174,25 +207,33 @@ static void brcmstb_cpu_die(u32 cpu)
+ 
+ static int brcmstb_cpu_kill(u32 cpu)
+ {
+-	u32 tmp;
++	/*
++	 * Ordinarily, the hardware forbids power-down of CPU0 (which is good
++	 * because it is the boot CPU), but this is not true when using BPCM
++	 * manual mode.  Consequently, we must avoid turning off CPU0 here to
++	 * ensure that TI2C master reset will work.
++	 */
++	if (cpu == 0) {
++		pr_warn("SMP: refusing to power off CPU0\n");
++		return 1;
++	}
+ 
+ 	while (per_cpu_sw_state_rd(cpu))
+ 		;
+ 
+-	/* Program zone reset */
+-	pwr_ctrl_wr(cpu, ZONE_RESET_STATE_MASK | ZONE_BLK_RST_ASSERT_MASK |
+-			      ZONE_PWR_DN_REQ_MASK);
++	pwr_ctrl_set(cpu, ZONE_MANUAL_CONTROL_MASK, -1);
++	pwr_ctrl_clr(cpu, ZONE_MAN_RESET_CNTL_MASK, -1);
++	pwr_ctrl_clr(cpu, ZONE_MAN_CLKEN_MASK, -1);
++	pwr_ctrl_set(cpu, ZONE_MAN_ISO_CNTL_MASK, -1);
++	pwr_ctrl_clr(cpu, ZONE_MAN_MEM_PWR_MASK, -1);
+ 
+-	/* Verify zone reset */
+-	tmp = pwr_ctrl_rd(cpu);
+-	if (!(tmp & ZONE_RESET_STATE_MASK))
+-		pr_err("%s: Zone reset bit for CPU %d not asserted!\n",
+-			__func__, cpu);
++	if (pwr_ctrl_wait_tmout(cpu, 0, ZONE_MEM_PWR_STATE_MASK))
++		panic("ZONE_MEM_PWR_STATE_MASK clear timeout");
+ 
+-	/* Wait for power down */
+-	do {
+-		tmp = pwr_ctrl_rd(cpu);
+-	} while (!(tmp & ZONE_PWR_OFF_STATE_MASK));
++	pwr_ctrl_clr(cpu, ZONE_RESERVED_1_MASK, -1);
++
++	if (pwr_ctrl_wait_tmout(cpu, 0, ZONE_DPG_PWR_STATE_MASK))
++		panic("ZONE_DPG_PWR_STATE_MASK clear timeout");
+ 
+ 	/* Flush pipeline before resetting CPU */
+ 	mb();
+diff --git a/arch/arm/mach-mvebu/system-controller.c b/arch/arm/mach-mvebu/system-controller.c
+index a068cb5c2ce8..c6c132acd7a6 100644
+--- a/arch/arm/mach-mvebu/system-controller.c
++++ b/arch/arm/mach-mvebu/system-controller.c
+@@ -126,7 +126,7 @@ int mvebu_system_controller_get_soc_id(u32 *dev, u32 *rev)
+ 		return -ENODEV;
+ }
+ 
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) && defined(CONFIG_MACH_MVEBU_V7)
+ void mvebu_armada375_smp_wa_init(void)
+ {
+ 	u32 dev, rev;
+diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+index ffd6604cd546..b6ea88f5399e 100644
+--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
++++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+@@ -2017,7 +2017,7 @@ static struct omap_hwmod dra7xx_uart3_hwmod = {
+ 	.class		= &dra7xx_uart_hwmod_class,
+ 	.clkdm_name	= "l4per_clkdm",
+ 	.main_clk	= "uart3_gfclk_mux",
+-	.flags		= HWMOD_SWSUP_SIDLE_ACT,
++	.flags		= HWMOD_SWSUP_SIDLE_ACT | DEBUG_OMAP4UART3_FLAGS,
+ 	.prcm = {
+ 		.omap4 = {
+ 			.clkctrl_offs = DRA7XX_CM_L4PER_UART3_CLKCTRL_OFFSET,
+diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
+index 06022b235730..89f790dda93e 100644
+--- a/arch/arm/mach-pxa/corgi.c
++++ b/arch/arm/mach-pxa/corgi.c
+@@ -26,6 +26,7 @@
+ #include <linux/i2c.h>
+ #include <linux/i2c/pxa-i2c.h>
+ #include <linux/io.h>
++#include <linux/regulator/machine.h>
+ #include <linux/spi/spi.h>
+ #include <linux/spi/ads7846.h>
+ #include <linux/spi/corgi_lcd.h>
+@@ -752,6 +753,8 @@ static void __init corgi_init(void)
+ 		sharpsl_nand_partitions[1].size = 53 * 1024 * 1024;
+ 
+ 	platform_add_devices(devices, ARRAY_SIZE(devices));
++
++	regulator_has_full_constraints();
+ }
+ 
+ static void __init fixup_corgi(struct tag *tags, char **cmdline)
+diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c
+index c66ad4edc5e3..5fb41ad6e3bc 100644
+--- a/arch/arm/mach-pxa/hx4700.c
++++ b/arch/arm/mach-pxa/hx4700.c
+@@ -893,6 +893,8 @@ static void __init hx4700_init(void)
+ 	mdelay(10);
+ 	gpio_set_value(GPIO71_HX4700_ASIC3_nRESET, 1);
+ 	mdelay(10);
++
++	regulator_has_full_constraints();
+ }
+ 
+ MACHINE_START(H4700, "HP iPAQ HX4700")
+diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
+index 29019beae591..195b1121c8f1 100644
+--- a/arch/arm/mach-pxa/poodle.c
++++ b/arch/arm/mach-pxa/poodle.c
+@@ -25,6 +25,7 @@
+ #include <linux/gpio.h>
+ #include <linux/i2c.h>
+ #include <linux/i2c/pxa-i2c.h>
++#include <linux/regulator/machine.h>
+ #include <linux/spi/spi.h>
+ #include <linux/spi/ads7846.h>
+ #include <linux/spi/pxa2xx_spi.h>
+@@ -455,6 +456,7 @@ static void __init poodle_init(void)
+ 	pxa_set_i2c_info(NULL);
+ 	i2c_register_board_info(0, ARRAY_AND_SIZE(poodle_i2c_devices));
+ 	poodle_init_spi();
++	regulator_has_full_constraints();
+ }
+ 
+ static void __init fixup_poodle(struct tag *tags, char **cmdline)
+diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
+index 962a7f31f596..f4e2e2719580 100644
+--- a/arch/arm/mach-pxa/spitz.c
++++ b/arch/arm/mach-pxa/spitz.c
+@@ -979,6 +979,8 @@ static void __init spitz_init(void)
+ 	spitz_nand_init();
+ 	spitz_i2c_init();
+ 	spitz_audio_init();
++
++	regulator_has_full_constraints();
+ }
+ 
+ static void __init spitz_fixup(struct tag *tags, char **cmdline)
+diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
+index 6645d1e31f14..34853d5dfda2 100644
+--- a/arch/arm/mach-sa1100/pm.c
++++ b/arch/arm/mach-sa1100/pm.c
+@@ -81,6 +81,7 @@ static int sa11x0_pm_enter(suspend_state_t state)
+ 	/*
+ 	 * Ensure not to come back here if it wasn't intended
+ 	 */
++	RCSR = RCSR_SMR;
+ 	PSPR = 0;
+ 
+ 	/*
+diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
+index d6b16d9a7838..3c2509b4b694 100644
+--- a/arch/arm/mach-vexpress/Kconfig
++++ b/arch/arm/mach-vexpress/Kconfig
+@@ -73,6 +73,7 @@ config ARCH_VEXPRESS_TC2_PM
+ 	depends on MCPM
+ 	select ARM_CCI
+ 	select ARCH_VEXPRESS_SPC
++	select ARM_CPU_SUSPEND
+ 	help
+ 	  Support for CPU and cluster power management on Versatile Express
+ 	  with a TC2 (A15x2 A7x3) big.LITTLE core tile.
+diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
+index 5a1ba6e80d4e..6ae93403aae0 100644
+--- a/arch/arm64/kernel/signal32.c
++++ b/arch/arm64/kernel/signal32.c
+@@ -154,8 +154,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
+ 	case __SI_TIMER:
+ 		 err |= __put_user(from->si_tid, &to->si_tid);
+ 		 err |= __put_user(from->si_overrun, &to->si_overrun);
+-		 err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr,
+-				   &to->si_ptr);
++		 err |= __put_user(from->si_int, &to->si_int);
+ 		break;
+ 	case __SI_POLL:
+ 		err |= __put_user(from->si_band, &to->si_band);
+@@ -184,7 +183,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
+ 	case __SI_MESGQ: /* But this is */
+ 		err |= __put_user(from->si_pid, &to->si_pid);
+ 		err |= __put_user(from->si_uid, &to->si_uid);
+-		err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr);
++		err |= __put_user(from->si_int, &to->si_int);
+ 		break;
+ 	case __SI_SYS:
+ 		err |= __put_user((compat_uptr_t)(unsigned long)
+diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h
+index 881071c07942..13272fd5a5ba 100644
+--- a/arch/metag/include/asm/processor.h
++++ b/arch/metag/include/asm/processor.h
+@@ -149,8 +149,8 @@ extern void exit_thread(void);
+ 
+ unsigned long get_wchan(struct task_struct *p);
+ 
+-#define	KSTK_EIP(tsk)	((tsk)->thread.kernel_context->CurrPC)
+-#define	KSTK_ESP(tsk)	((tsk)->thread.kernel_context->AX[0].U0)
++#define	KSTK_EIP(tsk)	(task_pt_regs(tsk)->ctx.CurrPC)
++#define	KSTK_ESP(tsk)	(task_pt_regs(tsk)->ctx.AX[0].U0)
+ 
+ #define user_stack_pointer(regs)        ((regs)->ctx.AX[0].U0)
+ 
+diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c
+index 48a9dfc55b51..c4d21ceae516 100644
+--- a/arch/mips/alchemy/common/clock.c
++++ b/arch/mips/alchemy/common/clock.c
+@@ -127,6 +127,8 @@ static unsigned long alchemy_clk_cpu_recalc(struct clk_hw *hw,
+ 		t = 396000000;
+ 	else {
+ 		t = alchemy_rdsys(AU1000_SYS_CPUPLL) & 0x7f;
++		if (alchemy_get_cputype() < ALCHEMY_CPU_AU1300)
++			t &= 0x3f;
+ 		t *= parent_rate;
+ 	}
+ 
+diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
+index 6caf8766b80f..71fef0af9c9a 100644
+--- a/arch/mips/include/asm/asmmacro.h
++++ b/arch/mips/include/asm/asmmacro.h
+@@ -304,7 +304,7 @@
+ 	.set	push
+ 	.set	noat
+ 	SET_HARDFLOAT
+-	add	$1, \base, \off
++	addu	$1, \base, \off
+ 	.word	LDD_MSA_INSN | (\wd << 6)
+ 	.set	pop
+ 	.endm
+@@ -313,7 +313,7 @@
+ 	.set	push
+ 	.set	noat
+ 	SET_HARDFLOAT
+-	add	$1, \base, \off
++	addu	$1, \base, \off
+ 	.word	STD_MSA_INSN | (\wd << 6)
+ 	.set	pop
+ 	.endm
+diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
+index a6c9ccb33c5c..c3f4f2d2e108 100644
+--- a/arch/mips/include/asm/cpu-info.h
++++ b/arch/mips/include/asm/cpu-info.h
+@@ -84,6 +84,11 @@ struct cpuinfo_mips {
+ 	 * (shifted by _CACHE_SHIFT)
+ 	 */
+ 	unsigned int		writecombine;
++	/*
++	 * Simple counter to prevent enabling HTW in nested
++	 * htw_start/htw_stop calls
++	 */
++	unsigned int		htw_seq;
+ } __attribute__((aligned(SMP_CACHE_BYTES)));
+ 
+ extern struct cpuinfo_mips cpu_data[];
+diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
+index 2f82568a3ee4..bc01579a907a 100644
+--- a/arch/mips/include/asm/mmu_context.h
++++ b/arch/mips/include/asm/mmu_context.h
+@@ -25,7 +25,6 @@ do {									\
+ 	if (cpu_has_htw) {						\
+ 		write_c0_pwbase(pgd);					\
+ 		back_to_back_c0_hazard();				\
+-		htw_reset();						\
+ 	}								\
+ } while (0)
+ 
+@@ -142,6 +141,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ 	unsigned long flags;
+ 	local_irq_save(flags);
+ 
++	htw_stop();
+ 	/* Check if our ASID is of an older version and thus invalid */
+ 	if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
+ 		get_new_mmu_context(next, cpu);
+@@ -154,6 +154,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ 	 */
+ 	cpumask_clear_cpu(cpu, mm_cpumask(prev));
+ 	cpumask_set_cpu(cpu, mm_cpumask(next));
++	htw_start();
+ 
+ 	local_irq_restore(flags);
+ }
+@@ -180,6 +181,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
+ 
+ 	local_irq_save(flags);
+ 
++	htw_stop();
+ 	/* Unconditionally get a new ASID.  */
+ 	get_new_mmu_context(next, cpu);
+ 
+@@ -189,6 +191,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
+ 	/* mark mmu ownership change */
+ 	cpumask_clear_cpu(cpu, mm_cpumask(prev));
+ 	cpumask_set_cpu(cpu, mm_cpumask(next));
++	htw_start();
+ 
+ 	local_irq_restore(flags);
+ }
+@@ -203,6 +206,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
+ 	unsigned long flags;
+ 
+ 	local_irq_save(flags);
++	htw_stop();
+ 
+ 	if (cpumask_test_cpu(cpu, mm_cpumask(mm)))  {
+ 		get_new_mmu_context(mm, cpu);
+@@ -211,6 +215,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
+ 		/* will get a new context next time */
+ 		cpu_context(cpu, mm) = 0;
+ 	}
++	htw_start();
+ 	local_irq_restore(flags);
+ }
+ 
+diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
+index 62a6ba383d4f..845016d1cdbd 100644
+--- a/arch/mips/include/asm/pgtable.h
++++ b/arch/mips/include/asm/pgtable.h
+@@ -99,29 +99,35 @@ extern void paging_init(void);
+ 
+ #define htw_stop()							\
+ do {									\
+-	if (cpu_has_htw)						\
+-		write_c0_pwctl(read_c0_pwctl() &			\
+-			       ~(1 << MIPS_PWCTL_PWEN_SHIFT));		\
++	unsigned long flags;						\
++									\
++	if (cpu_has_htw) {						\
++		local_irq_save(flags);					\
++		if(!raw_current_cpu_data.htw_seq++) {			\
++			write_c0_pwctl(read_c0_pwctl() &		\
++				       ~(1 << MIPS_PWCTL_PWEN_SHIFT));	\
++			back_to_back_c0_hazard();			\
++		}							\
++		local_irq_restore(flags);				\
++	}								\
+ } while(0)
+ 
+ #define htw_start()							\
+ do {									\
+-	if (cpu_has_htw)						\
+-		write_c0_pwctl(read_c0_pwctl() |			\
+-			       (1 << MIPS_PWCTL_PWEN_SHIFT));		\
+-} while(0)
+-
+-
+-#define htw_reset()							\
+-do {									\
++	unsigned long flags;						\
++									\
+ 	if (cpu_has_htw) {						\
+-		htw_stop();						\
+-		back_to_back_c0_hazard();				\
+-		htw_start();						\
+-		back_to_back_c0_hazard();				\
++		local_irq_save(flags);					\
++		if (!--raw_current_cpu_data.htw_seq) {			\
++			write_c0_pwctl(read_c0_pwctl() |		\
++				       (1 << MIPS_PWCTL_PWEN_SHIFT));	\
++			back_to_back_c0_hazard();			\
++		}							\
++		local_irq_restore(flags);				\
+ 	}								\
+ } while(0)
+ 
++
+ extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+ 	pte_t pteval);
+ 
+@@ -153,12 +159,13 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
+ {
+ 	pte_t null = __pte(0);
+ 
++	htw_stop();
+ 	/* Preserve global status for the pair */
+ 	if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
+ 		null.pte_low = null.pte_high = _PAGE_GLOBAL;
+ 
+ 	set_pte_at(mm, addr, ptep, null);
+-	htw_reset();
++	htw_start();
+ }
+ #else
+ 
+@@ -188,6 +195,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
+ 
+ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+ {
++	htw_stop();
+ #if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
+ 	/* Preserve global status for the pair */
+ 	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
+@@ -195,7 +203,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
+ 	else
+ #endif
+ 		set_pte_at(mm, addr, ptep, __pte(0));
+-	htw_reset();
++	htw_start();
+ }
+ #endif
+ 
+diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
+index 0384b05ab5a0..55b759a0019e 100644
+--- a/arch/mips/kernel/cps-vec.S
++++ b/arch/mips/kernel/cps-vec.S
+@@ -99,11 +99,11 @@ not_nmi:
+ 	xori	t2, t1, 0x7
+ 	beqz	t2, 1f
+ 	 li	t3, 32
+-	addi	t1, t1, 1
++	addiu	t1, t1, 1
+ 	sllv	t1, t3, t1
+ 1:	/* At this point t1 == I-cache sets per way */
+ 	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
+-	addi	t2, t2, 1
++	addiu	t2, t2, 1
+ 	mul	t1, t1, t0
+ 	mul	t1, t1, t2
+ 
+@@ -126,11 +126,11 @@ icache_done:
+ 	xori	t2, t1, 0x7
+ 	beqz	t2, 1f
+ 	 li	t3, 32
+-	addi	t1, t1, 1
++	addiu	t1, t1, 1
+ 	sllv	t1, t3, t1
+ 1:	/* At this point t1 == D-cache sets per way */
+ 	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
+-	addi	t2, t2, 1
++	addiu	t2, t2, 1
+ 	mul	t1, t1, t0
+ 	mul	t1, t1, t2
+ 
+@@ -250,7 +250,7 @@ LEAF(mips_cps_core_init)
+ 	mfc0	t0, CP0_MVPCONF0
+ 	srl	t0, t0, MVPCONF0_PVPE_SHIFT
+ 	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
+-	addi	t7, t0, 1
++	addiu	t7, t0, 1
+ 
+ 	/* If there's only 1, we're done */
+ 	beqz	t0, 2f
+@@ -280,7 +280,7 @@ LEAF(mips_cps_core_init)
+ 	mttc0	t0, CP0_TCHALT
+ 
+ 	/* Next VPE */
+-	addi	t5, t5, 1
++	addiu	t5, t5, 1
+ 	slt	t0, t5, t7
+ 	bnez	t0, 1b
+ 	 nop
+@@ -317,7 +317,7 @@ LEAF(mips_cps_boot_vpes)
+ 	mfc0	t1, CP0_MVPCONF0
+ 	srl	t1, t1, MVPCONF0_PVPE_SHIFT
+ 	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
+-	addi	t1, t1, 1
++	addiu	t1, t1, 1
+ 
+ 	/* Calculate a mask for the VPE ID from EBase.CPUNum */
+ 	clz	t1, t1
+@@ -424,7 +424,7 @@ LEAF(mips_cps_boot_vpes)
+ 
+ 	/* Next VPE */
+ 2:	srl	t6, t6, 1
+-	addi	t5, t5, 1
++	addiu	t5, t5, 1
+ 	bnez	t6, 1b
+ 	 nop
+ 
+diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
+index 5342674842f5..228ae864c92e 100644
+--- a/arch/mips/kernel/cpu-probe.c
++++ b/arch/mips/kernel/cpu-probe.c
+@@ -424,8 +424,10 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
+ 	if (config3 & MIPS_CONF3_MSA)
+ 		c->ases |= MIPS_ASE_MSA;
+ 	/* Only tested on 32-bit cores */
+-	if ((config3 & MIPS_CONF3_PW) && config_enabled(CONFIG_32BIT))
++	if ((config3 & MIPS_CONF3_PW) && config_enabled(CONFIG_32BIT)) {
++		c->htw_seq = 0;
+ 		c->options |= MIPS_CPU_HTW;
++	}
+ 
+ 	return config3 & MIPS_CONF_M;
+ }
+diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
+index 17eaf0cf760c..1a73c6c9e4b7 100644
+--- a/arch/mips/kernel/mips_ksyms.c
++++ b/arch/mips/kernel/mips_ksyms.c
+@@ -14,6 +14,8 @@
+ #include <linux/mm.h>
+ #include <asm/uaccess.h>
+ #include <asm/ftrace.h>
++#include <asm/fpu.h>
++#include <asm/msa.h>
+ 
+ extern void *__bzero(void *__s, size_t __count);
+ extern long __strncpy_from_kernel_nocheck_asm(char *__to,
+@@ -32,6 +34,14 @@ extern long __strnlen_user_nocheck_asm(const char *s);
+ extern long __strnlen_user_asm(const char *s);
+ 
+ /*
++ * Core architecture code
++ */
++EXPORT_SYMBOL_GPL(_save_fp);
++#ifdef CONFIG_CPU_HAS_MSA
++EXPORT_SYMBOL_GPL(_save_msa);
++#endif
++
++/*
+  * String functions
+  */
+ EXPORT_SYMBOL(memset);
+diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
+index d7279c03c517..4a68b176d6e4 100644
+--- a/arch/mips/kvm/locore.S
++++ b/arch/mips/kvm/locore.S
+@@ -434,7 +434,7 @@ __kvm_mips_return_to_guest:
+ 	/* Setup status register for running guest in UM */
+ 	.set	at
+ 	or	v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
+-	and	v1, v1, ~ST0_CU0
++	and	v1, v1, ~(ST0_CU0 | ST0_MX)
+ 	.set	noat
+ 	mtc0	v1, CP0_STATUS
+ 	ehb
+diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
+index e3b21e51ff7e..270bbd41769e 100644
+--- a/arch/mips/kvm/mips.c
++++ b/arch/mips/kvm/mips.c
+@@ -15,9 +15,11 @@
+ #include <linux/vmalloc.h>
+ #include <linux/fs.h>
+ #include <linux/bootmem.h>
++#include <asm/fpu.h>
+ #include <asm/page.h>
+ #include <asm/cacheflush.h>
+ #include <asm/mmu_context.h>
++#include <asm/pgtable.h>
+ 
+ #include <linux/kvm_host.h>
+ 
+@@ -378,6 +380,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ 		vcpu->mmio_needed = 0;
+ 	}
+ 
++	lose_fpu(1);
++
+ 	local_irq_disable();
+ 	/* Check if we have any exceptions/interrupts pending */
+ 	kvm_mips_deliver_interrupts(vcpu,
+@@ -385,8 +389,14 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ 
+ 	kvm_guest_enter();
+ 
++	/* Disable hardware page table walking while in guest */
++	htw_stop();
++
+ 	r = __kvm_mips_vcpu_run(run, vcpu);
+ 
++	/* Re-enable HTW before enabling interrupts */
++	htw_start();
++
+ 	kvm_guest_exit();
+ 	local_irq_enable();
+ 
+@@ -980,9 +990,6 @@ static void kvm_mips_set_c0_status(void)
+ {
+ 	uint32_t status = read_c0_status();
+ 
+-	if (cpu_has_fpu)
+-		status |= (ST0_CU1);
+-
+ 	if (cpu_has_dsp)
+ 		status |= (ST0_MX);
+ 
+@@ -1002,6 +1009,9 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+ 	enum emulation_result er = EMULATE_DONE;
+ 	int ret = RESUME_GUEST;
+ 
++	/* re-enable HTW before enabling interrupts */
++	htw_start();
++
+ 	/* Set a default exit reason */
+ 	run->exit_reason = KVM_EXIT_UNKNOWN;
+ 	run->ready_for_interrupt_injection = 1;
+@@ -1136,6 +1146,9 @@ skip_emul:
+ 		}
+ 	}
+ 
++	/* Disable HTW before returning to guest or host */
++	htw_stop();
++
+ 	return ret;
+ }
+ 
+diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
+index 7b066f6b02ad..7c22997de906 100644
+--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
++++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
+@@ -152,7 +152,7 @@ static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
+ 	 * in virtual mode.
+ 	 */
+ 	do {
+-		old_state = new_state = ACCESS_ONCE(icp->state);
++		old_state = new_state = READ_ONCE(icp->state);
+ 
+ 		/* Down_CPPR */
+ 		new_state.cppr = new_cppr;
+@@ -211,7 +211,7 @@ unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
+ 	 * pending priority
+ 	 */
+ 	do {
+-		old_state = new_state = ACCESS_ONCE(icp->state);
++		old_state = new_state = READ_ONCE(icp->state);
+ 
+ 		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
+ 		if (!old_state.xisr)
+@@ -277,7 +277,7 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
+ 	 * whenever the MFRR is made less favored.
+ 	 */
+ 	do {
+-		old_state = new_state = ACCESS_ONCE(icp->state);
++		old_state = new_state = READ_ONCE(icp->state);
+ 
+ 		/* Set_MFRR */
+ 		new_state.mfrr = mfrr;
+@@ -352,7 +352,7 @@ int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
+ 	icp_rm_clr_vcpu_irq(icp->vcpu);
+ 
+ 	do {
+-		old_state = new_state = ACCESS_ONCE(icp->state);
++		old_state = new_state = READ_ONCE(icp->state);
+ 
+ 		reject = 0;
+ 		new_state.cppr = cppr;
+diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
+index 807351f76f84..a4a8d9f0dcb7 100644
+--- a/arch/powerpc/kvm/book3s_xics.c
++++ b/arch/powerpc/kvm/book3s_xics.c
+@@ -327,7 +327,7 @@ static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
+ 		 icp->server_num);
+ 
+ 	do {
+-		old_state = new_state = ACCESS_ONCE(icp->state);
++		old_state = new_state = READ_ONCE(icp->state);
+ 
+ 		*reject = 0;
+ 
+@@ -512,7 +512,7 @@ static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
+ 	 * in virtual mode.
+ 	 */
+ 	do {
+-		old_state = new_state = ACCESS_ONCE(icp->state);
++		old_state = new_state = READ_ONCE(icp->state);
+ 
+ 		/* Down_CPPR */
+ 		new_state.cppr = new_cppr;
+@@ -567,7 +567,7 @@ static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
+ 	 * pending priority
+ 	 */
+ 	do {
+-		old_state = new_state = ACCESS_ONCE(icp->state);
++		old_state = new_state = READ_ONCE(icp->state);
+ 
+ 		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
+ 		if (!old_state.xisr)
+@@ -634,7 +634,7 @@ static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
+ 	 * whenever the MFRR is made less favored.
+ 	 */
+ 	do {
+-		old_state = new_state = ACCESS_ONCE(icp->state);
++		old_state = new_state = READ_ONCE(icp->state);
+ 
+ 		/* Set_MFRR */
+ 		new_state.mfrr = mfrr;
+@@ -679,7 +679,7 @@ static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
+ 		if (!icp)
+ 			return H_PARAMETER;
+ 	}
+-	state = ACCESS_ONCE(icp->state);
++	state = READ_ONCE(icp->state);
+ 	kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
+ 	kvmppc_set_gpr(vcpu, 5, state.mfrr);
+ 	return H_SUCCESS;
+@@ -721,7 +721,7 @@ static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
+ 				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
+ 
+ 	do {
+-		old_state = new_state = ACCESS_ONCE(icp->state);
++		old_state = new_state = READ_ONCE(icp->state);
+ 
+ 		reject = 0;
+ 		new_state.cppr = cppr;
+@@ -885,7 +885,7 @@ static int xics_debug_show(struct seq_file *m, void *private)
+ 		if (!icp)
+ 			continue;
+ 
+-		state.raw = ACCESS_ONCE(icp->state.raw);
++		state.raw = READ_ONCE(icp->state.raw);
+ 		seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
+ 			   icp->server_num, state.xisr,
+ 			   state.pending_pri, state.cppr, state.mfrr,
+@@ -1082,7 +1082,7 @@ int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
+ 	 * the ICS states before the ICP states.
+ 	 */
+ 	do {
+-		old_state = ACCESS_ONCE(icp->state);
++		old_state = READ_ONCE(icp->state);
+ 
+ 		if (new_state.mfrr <= old_state.mfrr) {
+ 			resend = false;
+diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
+index 5ff4e07d920a..620d0ec93e6f 100644
+--- a/arch/powerpc/mm/hugetlbpage.c
++++ b/arch/powerpc/mm/hugetlbpage.c
+@@ -978,7 +978,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
+ 		 */
+ 		pdshift = PUD_SHIFT;
+ 		pudp = pud_offset(&pgd, ea);
+-		pud  = ACCESS_ONCE(*pudp);
++		pud  = READ_ONCE(*pudp);
+ 
+ 		if (pud_none(pud))
+ 			return NULL;
+@@ -990,7 +990,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
+ 		else {
+ 			pdshift = PMD_SHIFT;
+ 			pmdp = pmd_offset(&pud, ea);
+-			pmd  = ACCESS_ONCE(*pmdp);
++			pmd  = READ_ONCE(*pmdp);
+ 			/*
+ 			 * A hugepage collapse is captured by pmd_none, because
+ 			 * it mark the pmd none and do a hpte invalidate.
+diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
+index f532c92bf99d..367533bb3d48 100644
+--- a/arch/powerpc/sysdev/axonram.c
++++ b/arch/powerpc/sysdev/axonram.c
+@@ -156,7 +156,7 @@ axon_ram_direct_access(struct block_device *device, sector_t sector,
+ 	}
+ 
+ 	*kaddr = (void *)(bank->ph_addr + offset);
+-	*pfn = virt_to_phys(kaddr) >> PAGE_SHIFT;
++	*pfn = virt_to_phys(*kaddr) >> PAGE_SHIFT;
+ 
+ 	return 0;
+ }
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index f00f31e66cd8..f512cffbf84e 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -820,7 +820,7 @@ no_timer:
+ 	__unset_cpu_idle(vcpu);
+ 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ 
+-	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
++	hrtimer_cancel(&vcpu->arch.ckc_timer);
+ 	return 0;
+ }
+ 
+@@ -840,10 +840,20 @@ void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
+ enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
+ {
+ 	struct kvm_vcpu *vcpu;
++	u64 now, sltime;
+ 
+ 	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
+-	kvm_s390_vcpu_wakeup(vcpu);
++	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
++	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
+ 
++	/*
++	 * If the monotonic clock runs faster than the tod clock we might be
++	 * woken up too early and have to go back to sleep to avoid deadlocks.
++	 */
++	if (vcpu->arch.sie_block->ckc > now &&
++	    hrtimer_forward_now(timer, ns_to_ktime(sltime)))
++		return HRTIMER_RESTART;
++	kvm_s390_vcpu_wakeup(vcpu);
+ 	return HRTIMER_NORESTART;
+ }
+ 
+@@ -1187,6 +1197,8 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
+ 		list_add_tail(&inti->list, &iter->list);
+ 	}
+ 	atomic_set(&fi->active, 1);
++	if (atomic_read(&kvm->online_vcpus) == 0)
++		goto unlock_fi;
+ 	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
+ 	if (sigcpu == KVM_MAX_VCPUS) {
+ 		do {
+@@ -1221,6 +1233,7 @@ int kvm_s390_inject_vm(struct kvm *kvm,
+ 		       struct kvm_s390_interrupt *s390int)
+ {
+ 	struct kvm_s390_interrupt_info *inti;
++	int rc;
+ 
+ 	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
+ 	if (!inti)
+@@ -1268,7 +1281,10 @@ int kvm_s390_inject_vm(struct kvm *kvm,
+ 	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
+ 				 2);
+ 
+-	return __inject_vm(kvm, inti);
++	rc = __inject_vm(kvm, inti);
++	if (rc)
++		kfree(inti);
++	return rc;
+ }
+ 
+ void kvm_s390_reinject_io_int(struct kvm *kvm,
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 3e09801e3104..9af01dc966d0 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -670,7 +670,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+ 		if (rc)
+ 			return rc;
+ 	}
+-	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
++	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
+ 	get_cpu_id(&vcpu->arch.cpu_id);
+ 	vcpu->arch.cpu_id.version = 0xff;
+diff --git a/arch/sh/mm/gup.c b/arch/sh/mm/gup.c
+index 37458f38b220..e113bb429e8e 100644
+--- a/arch/sh/mm/gup.c
++++ b/arch/sh/mm/gup.c
+@@ -17,7 +17,7 @@
+ static inline pte_t gup_get_pte(pte_t *ptep)
+ {
+ #ifndef CONFIG_X2TLB
+-	return ACCESS_ONCE(*ptep);
++	return READ_ONCE(*ptep);
+ #else
+ 	/*
+ 	 * With get_user_pages_fast, we walk down the pagetables without
+diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
+index ad754b4411f7..8bd44e8ee6e2 100644
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -49,6 +49,7 @@ $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
+ 
+ vmlinux-objs-$(CONFIG_EFI_STUB) += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o \
+ 	$(objtree)/drivers/firmware/efi/libstub/lib.a
++vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o
+ 
+ $(obj)/vmlinux: $(vmlinux-objs-y) FORCE
+ 	$(call if_changed,ld)
+diff --git a/arch/x86/boot/compressed/efi_stub_64.S b/arch/x86/boot/compressed/efi_stub_64.S
+index 7ff3632806b1..99494dff2113 100644
+--- a/arch/x86/boot/compressed/efi_stub_64.S
++++ b/arch/x86/boot/compressed/efi_stub_64.S
+@@ -3,28 +3,3 @@
+ #include <asm/processor-flags.h>
+ 
+ #include "../../platform/efi/efi_stub_64.S"
+-
+-#ifdef CONFIG_EFI_MIXED
+-	.code64
+-	.text
+-ENTRY(efi64_thunk)
+-	push	%rbp
+-	push	%rbx
+-
+-	subq	$16, %rsp
+-	leaq	efi_exit32(%rip), %rax
+-	movl	%eax, 8(%rsp)
+-	leaq	efi_gdt64(%rip), %rax
+-	movl	%eax, 4(%rsp)
+-	movl	%eax, 2(%rax)		/* Fixup the gdt base address */
+-	leaq	efi32_boot_gdt(%rip), %rax
+-	movl	%eax, (%rsp)
+-
+-	call	__efi64_thunk
+-
+-	addq	$16, %rsp
+-	pop	%rbx
+-	pop	%rbp
+-	ret
+-ENDPROC(efi64_thunk)
+-#endif /* CONFIG_EFI_MIXED */
+diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
+new file mode 100644
+index 000000000000..630384a4c14a
+--- /dev/null
++++ b/arch/x86/boot/compressed/efi_thunk_64.S
+@@ -0,0 +1,196 @@
++/*
++ * Copyright (C) 2014, 2015 Intel Corporation; author Matt Fleming
++ *
++ * Early support for invoking 32-bit EFI services from a 64-bit kernel.
++ *
++ * Because this thunking occurs before ExitBootServices() we have to
++ * restore the firmware's 32-bit GDT before we make EFI service calls,
++ * since the firmware's 32-bit IDT is still currently installed and it
++ * needs to be able to service interrupts.
++ *
++ * On the plus side, we don't have to worry about mangling 64-bit
++ * addresses into 32-bits because we're executing with an identity
++ * mapped pagetable and haven't transitioned to 64-bit virtual addresses
++ * yet.
++ */
++
++#include <linux/linkage.h>
++#include <asm/msr.h>
++#include <asm/page_types.h>
++#include <asm/processor-flags.h>
++#include <asm/segment.h>
++
++	.code64
++	.text
++ENTRY(efi64_thunk)
++	push	%rbp
++	push	%rbx
++
++	subq	$8, %rsp
++	leaq	efi_exit32(%rip), %rax
++	movl	%eax, 4(%rsp)
++	leaq	efi_gdt64(%rip), %rax
++	movl	%eax, (%rsp)
++	movl	%eax, 2(%rax)		/* Fixup the gdt base address */
++
++	movl	%ds, %eax
++	push	%rax
++	movl	%es, %eax
++	push	%rax
++	movl	%ss, %eax
++	push	%rax
++
++	/*
++	 * Convert x86-64 ABI params to i386 ABI
++	 */
++	subq	$32, %rsp
++	movl	%esi, 0x0(%rsp)
++	movl	%edx, 0x4(%rsp)
++	movl	%ecx, 0x8(%rsp)
++	movq	%r8, %rsi
++	movl	%esi, 0xc(%rsp)
++	movq	%r9, %rsi
++	movl	%esi,  0x10(%rsp)
++
++	sgdt	save_gdt(%rip)
++
++	leaq	1f(%rip), %rbx
++	movq	%rbx, func_rt_ptr(%rip)
++
++	/*
++	 * Switch to gdt with 32-bit segments. This is the firmware GDT
++	 * that was installed when the kernel started executing. This
++	 * pointer was saved at the EFI stub entry point in head_64.S.
++	 */
++	leaq	efi32_boot_gdt(%rip), %rax
++	lgdt	(%rax)
++
++	pushq	$__KERNEL_CS
++	leaq	efi_enter32(%rip), %rax
++	pushq	%rax
++	lretq
++
++1:	addq	$32, %rsp
++
++	lgdt	save_gdt(%rip)
++
++	pop	%rbx
++	movl	%ebx, %ss
++	pop	%rbx
++	movl	%ebx, %es
++	pop	%rbx
++	movl	%ebx, %ds
++
++	/*
++	 * Convert 32-bit status code into 64-bit.
++	 */
++	test	%rax, %rax
++	jz	1f
++	movl	%eax, %ecx
++	andl	$0x0fffffff, %ecx
++	andl	$0xf0000000, %eax
++	shl	$32, %rax
++	or	%rcx, %rax
++1:
++	addq	$8, %rsp
++	pop	%rbx
++	pop	%rbp
++	ret
++ENDPROC(efi64_thunk)
++
++ENTRY(efi_exit32)
++	movq	func_rt_ptr(%rip), %rax
++	push	%rax
++	mov	%rdi, %rax
++	ret
++ENDPROC(efi_exit32)
++
++	.code32
++/*
++ * EFI service pointer must be in %edi.
++ *
++ * The stack should represent the 32-bit calling convention.
++ */
++ENTRY(efi_enter32)
++	movl	$__KERNEL_DS, %eax
++	movl	%eax, %ds
++	movl	%eax, %es
++	movl	%eax, %ss
++
++	/* Reload pgtables */
++	movl	%cr3, %eax
++	movl	%eax, %cr3
++
++	/* Disable paging */
++	movl	%cr0, %eax
++	btrl	$X86_CR0_PG_BIT, %eax
++	movl	%eax, %cr0
++
++	/* Disable long mode via EFER */
++	movl	$MSR_EFER, %ecx
++	rdmsr
++	btrl	$_EFER_LME, %eax
++	wrmsr
++
++	call	*%edi
++
++	/* We must preserve return value */
++	movl	%eax, %edi
++
++	/*
++	 * Some firmware will return with interrupts enabled. Be sure to
++	 * disable them before we switch GDTs.
++	 */
++	cli
++
++	movl	56(%esp), %eax
++	movl	%eax, 2(%eax)
++	lgdtl	(%eax)
++
++	movl	%cr4, %eax
++	btsl	$(X86_CR4_PAE_BIT), %eax
++	movl	%eax, %cr4
++
++	movl	%cr3, %eax
++	movl	%eax, %cr3
++
++	movl	$MSR_EFER, %ecx
++	rdmsr
++	btsl	$_EFER_LME, %eax
++	wrmsr
++
++	xorl	%eax, %eax
++	lldt	%ax
++
++	movl	60(%esp), %eax
++	pushl	$__KERNEL_CS
++	pushl	%eax
++
++	/* Enable paging */
++	movl	%cr0, %eax
++	btsl	$X86_CR0_PG_BIT, %eax
++	movl	%eax, %cr0
++	lret
++ENDPROC(efi_enter32)
++
++	.data
++	.balign	8
++	.global	efi32_boot_gdt
++efi32_boot_gdt:	.word	0
++		.quad	0
++
++save_gdt:	.word	0
++		.quad	0
++func_rt_ptr:	.quad	0
++
++	.global efi_gdt64
++efi_gdt64:
++	.word	efi_gdt64_end - efi_gdt64
++	.long	0			/* Filled out by user */
++	.word	0
++	.quad	0x0000000000000000	/* NULL descriptor */
++	.quad	0x00af9a000000ffff	/* __KERNEL_CS */
++	.quad	0x00cf92000000ffff	/* __KERNEL_DS */
++	.quad	0x0080890000000000	/* TS descriptor */
++	.quad   0x0000000000000000	/* TS continued */
++efi_gdt64_end:
+diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
+index 625660f8a2fc..cf87de3fc390 100644
+--- a/arch/x86/include/asm/spinlock.h
++++ b/arch/x86/include/asm/spinlock.h
+@@ -46,7 +46,7 @@ static __always_inline bool static_key_false(struct static_key *key);
+ 
+ static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
+ {
+-	set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
++	set_bit(0, (volatile unsigned long *)&lock->tickets.head);
+ }
+ 
+ #else  /* !CONFIG_PARAVIRT_SPINLOCKS */
+@@ -60,10 +60,30 @@ static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
+ }
+ 
+ #endif /* CONFIG_PARAVIRT_SPINLOCKS */
++static inline int  __tickets_equal(__ticket_t one, __ticket_t two)
++{
++	return !((one ^ two) & ~TICKET_SLOWPATH_FLAG);
++}
++
++static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock,
++							__ticket_t head)
++{
++	if (head & TICKET_SLOWPATH_FLAG) {
++		arch_spinlock_t old, new;
++
++		old.tickets.head = head;
++		new.tickets.head = head & ~TICKET_SLOWPATH_FLAG;
++		old.tickets.tail = new.tickets.head + TICKET_LOCK_INC;
++		new.tickets.tail = old.tickets.tail;
++
++		/* try to clear slowpath flag when there are no contenders */
++		cmpxchg(&lock->head_tail, old.head_tail, new.head_tail);
++	}
++}
+ 
+ static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+ {
+-	return lock.tickets.head == lock.tickets.tail;
++	return __tickets_equal(lock.tickets.head, lock.tickets.tail);
+ }
+ 
+ /*
+@@ -87,18 +107,21 @@ static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
+ 	if (likely(inc.head == inc.tail))
+ 		goto out;
+ 
+-	inc.tail &= ~TICKET_SLOWPATH_FLAG;
+ 	for (;;) {
+ 		unsigned count = SPIN_THRESHOLD;
+ 
+ 		do {
+-			if (READ_ONCE(lock->tickets.head) == inc.tail)
+-				goto out;
++			inc.head = READ_ONCE(lock->tickets.head);
++			if (__tickets_equal(inc.head, inc.tail))
++				goto clear_slowpath;
+ 			cpu_relax();
+ 		} while (--count);
+ 		__ticket_lock_spinning(lock, inc.tail);
+ 	}
+-out:	barrier();	/* make sure nothing creeps before the lock is taken */
++clear_slowpath:
++	__ticket_check_and_clear_slowpath(lock, inc.head);
++out:
++	barrier();	/* make sure nothing creeps before the lock is taken */
+ }
+ 
+ static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
+@@ -106,56 +129,30 @@ static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
+ 	arch_spinlock_t old, new;
+ 
+ 	old.tickets = READ_ONCE(lock->tickets);
+-	if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG))
++	if (!__tickets_equal(old.tickets.head, old.tickets.tail))
+ 		return 0;
+ 
+ 	new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);
++	new.head_tail &= ~TICKET_SLOWPATH_FLAG;
+ 
+ 	/* cmpxchg is a full barrier, so nothing can move before it */
+ 	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
+ }
+ 
+-static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock,
+-					    arch_spinlock_t old)
+-{
+-	arch_spinlock_t new;
+-
+-	BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);
+-
+-	/* Perform the unlock on the "before" copy */
+-	old.tickets.head += TICKET_LOCK_INC;
+-
+-	/* Clear the slowpath flag */
+-	new.head_tail = old.head_tail & ~(TICKET_SLOWPATH_FLAG << TICKET_SHIFT);
+-
+-	/*
+-	 * If the lock is uncontended, clear the flag - use cmpxchg in
+-	 * case it changes behind our back though.
+-	 */
+-	if (new.tickets.head != new.tickets.tail ||
+-	    cmpxchg(&lock->head_tail, old.head_tail,
+-					new.head_tail) != old.head_tail) {
+-		/*
+-		 * Lock still has someone queued for it, so wake up an
+-		 * appropriate waiter.
+-		 */
+-		__ticket_unlock_kick(lock, old.tickets.head);
+-	}
+-}
+-
+ static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
+ {
+ 	if (TICKET_SLOWPATH_FLAG &&
+-	    static_key_false(&paravirt_ticketlocks_enabled)) {
+-		arch_spinlock_t prev;
++		static_key_false(&paravirt_ticketlocks_enabled)) {
++		__ticket_t head;
+ 
+-		prev = *lock;
+-		add_smp(&lock->tickets.head, TICKET_LOCK_INC);
++		BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);
+ 
+-		/* add_smp() is a full mb() */
++		head = xadd(&lock->tickets.head, TICKET_LOCK_INC);
+ 
+-		if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
+-			__ticket_unlock_slowpath(lock, prev);
++		if (unlikely(head & TICKET_SLOWPATH_FLAG)) {
++			head &= ~TICKET_SLOWPATH_FLAG;
++			__ticket_unlock_kick(lock, (head + TICKET_LOCK_INC));
++		}
+ 	} else
+ 		__add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
+ }
+@@ -164,14 +161,15 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+ {
+ 	struct __raw_tickets tmp = READ_ONCE(lock->tickets);
+ 
+-	return tmp.tail != tmp.head;
++	return !__tickets_equal(tmp.tail, tmp.head);
+ }
+ 
+ static inline int arch_spin_is_contended(arch_spinlock_t *lock)
+ {
+ 	struct __raw_tickets tmp = READ_ONCE(lock->tickets);
+ 
+-	return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
++	tmp.head &= ~TICKET_SLOWPATH_FLAG;
++	return (tmp.tail - tmp.head) > TICKET_LOCK_INC;
+ }
+ #define arch_spin_is_contended	arch_spin_is_contended
+ 
+@@ -183,16 +181,16 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
+ 
+ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+ {
+-	__ticket_t head = ACCESS_ONCE(lock->tickets.head);
++	__ticket_t head = READ_ONCE(lock->tickets.head);
+ 
+ 	for (;;) {
+-		struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
++		struct __raw_tickets tmp = READ_ONCE(lock->tickets);
+ 		/*
+ 		 * We need to check "unlocked" in a loop, tmp.head == head
+ 		 * can be false positive because of overflow.
+ 		 */
+-		if (tmp.head == (tmp.tail & ~TICKET_SLOWPATH_FLAG) ||
+-		    tmp.head != head)
++		if (__tickets_equal(tmp.head, tmp.tail) ||
++				!__tickets_equal(tmp.head, head))
+ 			break;
+ 
+ 		cpu_relax();
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
+index b9e30daa0881..b5ddc9649227 100644
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -613,6 +613,11 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
+ {
+ 	int rc, irq, trigger, polarity;
+ 
++	if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
++		*irqp = gsi;
++		return 0;
++	}
++
+ 	rc = acpi_get_override_irq(gsi, &trigger, &polarity);
+ 	if (rc == 0) {
+ 		trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index 94f643484300..e354cc6446ab 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -609,7 +609,7 @@ static inline void check_zero(void)
+ 	u8 ret;
+ 	u8 old;
+ 
+-	old = ACCESS_ONCE(zero_stats);
++	old = READ_ONCE(zero_stats);
+ 	if (unlikely(old)) {
+ 		ret = cmpxchg(&zero_stats, old, 0);
+ 		/* This ensures only one fellow resets the stat */
+@@ -727,6 +727,7 @@ __visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
+ 	int cpu;
+ 	u64 start;
+ 	unsigned long flags;
++	__ticket_t head;
+ 
+ 	if (in_nmi())
+ 		return;
+@@ -768,11 +769,15 @@ __visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
+ 	 */
+ 	__ticket_enter_slowpath(lock);
+ 
++	/* make sure enter_slowpath, which is atomic, does not cross the read */
++	smp_mb__after_atomic();
++
+ 	/*
+ 	 * check again to make sure it didn't become free while
+ 	 * we weren't looking.
+ 	 */
+-	if (ACCESS_ONCE(lock->tickets.head) == want) {
++	head = READ_ONCE(lock->tickets.head);
++	if (__tickets_equal(head, want)) {
+ 		add_stats(TAKEN_SLOW_PICKUP, 1);
+ 		goto out;
+ 	}
+@@ -803,8 +808,8 @@ static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
+ 	add_stats(RELEASED_SLOW, 1);
+ 	for_each_cpu(cpu, &waiting_cpus) {
+ 		const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu);
+-		if (ACCESS_ONCE(w->lock) == lock &&
+-		    ACCESS_ONCE(w->want) == ticket) {
++		if (READ_ONCE(w->lock) == lock &&
++		    READ_ONCE(w->want) == ticket) {
+ 			add_stats(RELEASED_SLOW_KICKED, 1);
+ 			kvm_kick_cpu(cpu);
+ 			break;
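
The ACCESS_ONCE() -> READ_ONCE() conversions in this and the following hunks are not cosmetic: ACCESS_ONCE() is a volatile scalar cast and is unreliable for aggregates, while READ_ONCE() dispatches on size and can also copy a small struct (such as the head/tail ticket pair) safely. A rough sketch of the scalar case only, assuming the 3.19-era <linux/compiler.h>:

	#define READ_ONCE_SCALAR(x) \
		({ typeof(x) __val = *(volatile typeof(x) *)&(x); __val; })

The real macro additionally falls back to a barrier-protected memcpy for sizes it cannot load in one volatile access.
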
+diff --git a/arch/x86/kernel/pmc_atom.c b/arch/x86/kernel/pmc_atom.c
+index 0ee5025e0fa4..8bb9a611ca23 100644
+--- a/arch/x86/kernel/pmc_atom.c
++++ b/arch/x86/kernel/pmc_atom.c
+@@ -217,6 +217,8 @@ static int pmc_dbgfs_register(struct pmc_dev *pmc, struct pci_dev *pdev)
+ 	if (!dir)
+ 		return -ENOMEM;
+ 
++	pmc->dbgfs_dir = dir;
++
+ 	f = debugfs_create_file("dev_state", S_IFREG | S_IRUGO,
+ 				dir, pmc, &pmc_dev_state_ops);
+ 	if (!f) {
+@@ -229,7 +231,7 @@ static int pmc_dbgfs_register(struct pmc_dev *pmc, struct pci_dev *pdev)
+ 		dev_err(&pdev->dev, "sleep_state register failed\n");
+ 		goto err;
+ 	}
+-	pmc->dbgfs_dir = dir;
++
+ 	return 0;
+ err:
+ 	pmc_dbgfs_unregister(pmc);
+diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
+index d7547824e763..224b14235e96 100644
+--- a/arch/x86/mm/gup.c
++++ b/arch/x86/mm/gup.c
+@@ -172,7 +172,7 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
+ 		 */
+ 		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
+ 			return 0;
+-		if (unlikely(pmd_large(pmd))) {
++		if (unlikely(pmd_large(pmd) || !pmd_present(pmd))) {
+ 			/*
+ 			 * NUMA hinting faults need to be handled in the GUP
+ 			 * slowpath for accounting purposes and so that they
+diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
+index 8b977ebf9388..006cc914994b 100644
+--- a/arch/x86/mm/hugetlbpage.c
++++ b/arch/x86/mm/hugetlbpage.c
+@@ -66,9 +66,15 @@ follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
+ 	return ERR_PTR(-EINVAL);
+ }
+ 
++/*
++ * pmd_huge() returns 1 if @pmd is a hugetlb-related entry, that is, a
++ * normal hugetlb entry or a non-present (migration or hwpoisoned)
++ * hugetlb entry. Otherwise, it returns 0.
++ */
+ int pmd_huge(pmd_t pmd)
+ {
+-	return !!(pmd_val(pmd) & _PAGE_PSE);
++	return !pmd_none(pmd) &&
++		(pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
+ }
+ 
+ int pud_huge(pud_t pud)
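
The rewritten pmd_huge() has to treat non-present hugetlb entries (migration and hwpoison) as huge while still rejecting a normal pmd that merely points at a pte page. A worked truth table, assuming the usual x86 encodings (_PAGE_PRESENT is bit 0, _PAGE_PSE is bit 7):

	/*
	 *  pmd_none(pmd)                       -> 0 (nothing mapped)
	 *  _PAGE_PRESENT set, _PAGE_PSE clear  -> 0 (points to a pte page)
	 *  _PAGE_PRESENT set, _PAGE_PSE set    -> 1 (normal huge page)
	 *  _PAGE_PRESENT clear, pmd not none   -> 1 (migration/hwpoison)
	 */

Only the present-and-not-PSE combination equals _PAGE_PRESENT exactly, which is what the new expression tests for.
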
+diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
+index 919b91205cd4..df4552bd239e 100644
+--- a/arch/x86/mm/mmap.c
++++ b/arch/x86/mm/mmap.c
+@@ -35,12 +35,12 @@ struct va_alignment __read_mostly va_align = {
+ 	.flags = -1,
+ };
+ 
+-static unsigned int stack_maxrandom_size(void)
++static unsigned long stack_maxrandom_size(void)
+ {
+-	unsigned int max = 0;
++	unsigned long max = 0;
+ 	if ((current->flags & PF_RANDOMIZE) &&
+ 		!(current->personality & ADDR_NO_RANDOMIZE)) {
+-		max = ((-1U) & STACK_RND_MASK) << PAGE_SHIFT;
++		max = ((-1UL) & STACK_RND_MASK) << PAGE_SHIFT;
+ 	}
+ 
+ 	return max;
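
The unsigned int -> unsigned long change matters because of the shift width. STACK_RND_MASK for 64-bit tasks is 0x3fffff (an assumption taken from the 3.19-era <asm/elf.h>), so:

	/*
	 * (-1UL & 0x3fffff) << 12 == 0x3fffff000  (~16 GiB of range)
	 *
	 * In a 32-bit unsigned int the same shift overflows, and the top
	 * bits of the stack randomization range are silently dropped.
	 */
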
+diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
+index 5fcda7272550..86d0f9e08dd9 100644
+--- a/arch/x86/platform/efi/efi_stub_64.S
++++ b/arch/x86/platform/efi/efi_stub_64.S
+@@ -91,167 +91,6 @@ ENTRY(efi_call)
+ 	ret
+ ENDPROC(efi_call)
+ 
+-#ifdef CONFIG_EFI_MIXED
+-
+-/*
+- * We run this function from the 1:1 mapping.
+- *
+- * This function must be invoked with a 1:1 mapped stack.
+- */
+-ENTRY(__efi64_thunk)
+-	movl	%ds, %eax
+-	push	%rax
+-	movl	%es, %eax
+-	push	%rax
+-	movl	%ss, %eax
+-	push	%rax
+-
+-	subq	$32, %rsp
+-	movl	%esi, 0x0(%rsp)
+-	movl	%edx, 0x4(%rsp)
+-	movl	%ecx, 0x8(%rsp)
+-	movq	%r8, %rsi
+-	movl	%esi, 0xc(%rsp)
+-	movq	%r9, %rsi
+-	movl	%esi,  0x10(%rsp)
+-
+-	sgdt	save_gdt(%rip)
+-
+-	leaq	1f(%rip), %rbx
+-	movq	%rbx, func_rt_ptr(%rip)
+-
+-	/* Switch to gdt with 32-bit segments */
+-	movl	64(%rsp), %eax
+-	lgdt	(%rax)
+-
+-	leaq	efi_enter32(%rip), %rax
+-	pushq	$__KERNEL_CS
+-	pushq	%rax
+-	lretq
+-
+-1:	addq	$32, %rsp
+-
+-	lgdt	save_gdt(%rip)
+-
+-	pop	%rbx
+-	movl	%ebx, %ss
+-	pop	%rbx
+-	movl	%ebx, %es
+-	pop	%rbx
+-	movl	%ebx, %ds
+-
+-	/*
+-	 * Convert 32-bit status code into 64-bit.
+-	 */
+-	test	%rax, %rax
+-	jz	1f
+-	movl	%eax, %ecx
+-	andl	$0x0fffffff, %ecx
+-	andl	$0xf0000000, %eax
+-	shl	$32, %rax
+-	or	%rcx, %rax
+-1:
+-	ret
+-ENDPROC(__efi64_thunk)
+-
+-ENTRY(efi_exit32)
+-	movq	func_rt_ptr(%rip), %rax
+-	push	%rax
+-	mov	%rdi, %rax
+-	ret
+-ENDPROC(efi_exit32)
+-
+-	.code32
+-/*
+- * EFI service pointer must be in %edi.
+- *
+- * The stack should represent the 32-bit calling convention.
+- */
+-ENTRY(efi_enter32)
+-	movl	$__KERNEL_DS, %eax
+-	movl	%eax, %ds
+-	movl	%eax, %es
+-	movl	%eax, %ss
+-
+-	/* Reload pgtables */
+-	movl	%cr3, %eax
+-	movl	%eax, %cr3
+-
+-	/* Disable paging */
+-	movl	%cr0, %eax
+-	btrl	$X86_CR0_PG_BIT, %eax
+-	movl	%eax, %cr0
+-
+-	/* Disable long mode via EFER */
+-	movl	$MSR_EFER, %ecx
+-	rdmsr
+-	btrl	$_EFER_LME, %eax
+-	wrmsr
+-
+-	call	*%edi
+-
+-	/* We must preserve return value */
+-	movl	%eax, %edi
+-
+-	/*
+-	 * Some firmware will return with interrupts enabled. Be sure to
+-	 * disable them before we switch GDTs.
+-	 */
+-	cli
+-
+-	movl	68(%esp), %eax
+-	movl	%eax, 2(%eax)
+-	lgdtl	(%eax)
+-
+-	movl	%cr4, %eax
+-	btsl	$(X86_CR4_PAE_BIT), %eax
+-	movl	%eax, %cr4
+-
+-	movl	%cr3, %eax
+-	movl	%eax, %cr3
+-
+-	movl	$MSR_EFER, %ecx
+-	rdmsr
+-	btsl	$_EFER_LME, %eax
+-	wrmsr
+-
+-	xorl	%eax, %eax
+-	lldt	%ax
+-
+-	movl	72(%esp), %eax
+-	pushl	$__KERNEL_CS
+-	pushl	%eax
+-
+-	/* Enable paging */
+-	movl	%cr0, %eax
+-	btsl	$X86_CR0_PG_BIT, %eax
+-	movl	%eax, %cr0
+-	lret
+-ENDPROC(efi_enter32)
+-
+-	.data
+-	.balign	8
+-	.global	efi32_boot_gdt
+-efi32_boot_gdt:	.word	0
+-		.quad	0
+-
+-save_gdt:	.word	0
+-		.quad	0
+-func_rt_ptr:	.quad	0
+-
+-	.global efi_gdt64
+-efi_gdt64:
+-	.word	efi_gdt64_end - efi_gdt64
+-	.long	0			/* Filled out by user */
+-	.word	0
+-	.quad	0x0000000000000000	/* NULL descriptor */
+-	.quad	0x00af9a000000ffff	/* __KERNEL_CS */
+-	.quad	0x00cf92000000ffff	/* __KERNEL_DS */
+-	.quad	0x0080890000000000	/* TS descriptor */
+-	.quad   0x0000000000000000	/* TS continued */
+-efi_gdt64_end:
+-#endif /* CONFIG_EFI_MIXED */
+-
+ 	.data
+ ENTRY(efi_scratch)
+ 	.fill 3,8,0
+diff --git a/arch/x86/platform/efi/efi_thunk_64.S b/arch/x86/platform/efi/efi_thunk_64.S
+index 8806fa73e6e6..ff85d28c50f2 100644
+--- a/arch/x86/platform/efi/efi_thunk_64.S
++++ b/arch/x86/platform/efi/efi_thunk_64.S
+@@ -1,9 +1,26 @@
+ /*
+  * Copyright (C) 2014 Intel Corporation; author Matt Fleming
++ *
++ * Support for invoking 32-bit EFI runtime services from a 64-bit
++ * kernel.
++ *
++ * The below thunking functions are only used after ExitBootServices()
++ * has been called. This simplifies things considerably as compared with
++ * the early EFI thunking because we can leave all the kernel state
++ * intact (GDT, IDT, etc.) and simply invoke the 32-bit EFI runtime
++ * services from __KERNEL32_CS. This means we can continue to service
++ * interrupts across an EFI mixed mode call.
++ *
++ * We do, however, need to handle the fact that we're running in a full
++ * 64-bit virtual address space. Things like the stack and instruction
++ * addresses need to be accessible by the 32-bit firmware, so we rely on
++ * using the identity mappings in the EFI page table to access the stack
++ * and kernel text (see efi_setup_page_tables()).
+  */
+ 
+ #include <linux/linkage.h>
+ #include <asm/page_types.h>
++#include <asm/segment.h>
+ 
+ 	.text
+ 	.code64
+@@ -33,14 +50,6 @@ ENTRY(efi64_thunk)
+ 	leaq	efi_exit32(%rip), %rbx
+ 	subq	%rax, %rbx
+ 	movl	%ebx, 8(%rsp)
+-	leaq	efi_gdt64(%rip), %rbx
+-	subq	%rax, %rbx
+-	movl	%ebx, 2(%ebx)
+-	movl	%ebx, 4(%rsp)
+-	leaq	efi_gdt32(%rip), %rbx
+-	subq	%rax, %rbx
+-	movl	%ebx, 2(%ebx)
+-	movl	%ebx, (%rsp)
+ 
+ 	leaq	__efi64_thunk(%rip), %rbx
+ 	subq	%rax, %rbx
+@@ -52,14 +61,92 @@ ENTRY(efi64_thunk)
+ 	retq
+ ENDPROC(efi64_thunk)
+ 
+-	.data
+-efi_gdt32:
+-	.word 	efi_gdt32_end - efi_gdt32
+-	.long	0			/* Filled out above */
+-	.word	0
+-	.quad	0x0000000000000000	/* NULL descriptor */
+-	.quad	0x00cf9a000000ffff	/* __KERNEL_CS */
+-	.quad	0x00cf93000000ffff	/* __KERNEL_DS */
+-efi_gdt32_end:
++/*
++ * We run this function from the 1:1 mapping.
++ *
++ * This function must be invoked with a 1:1 mapped stack.
++ */
++ENTRY(__efi64_thunk)
++	movl	%ds, %eax
++	push	%rax
++	movl	%es, %eax
++	push	%rax
++	movl	%ss, %eax
++	push	%rax
++
++	subq	$32, %rsp
++	movl	%esi, 0x0(%rsp)
++	movl	%edx, 0x4(%rsp)
++	movl	%ecx, 0x8(%rsp)
++	movq	%r8, %rsi
++	movl	%esi, 0xc(%rsp)
++	movq	%r9, %rsi
++	movl	%esi,  0x10(%rsp)
++
++	leaq	1f(%rip), %rbx
++	movq	%rbx, func_rt_ptr(%rip)
++
++	/* Switch to 32-bit descriptor */
++	pushq	$__KERNEL32_CS
++	leaq	efi_enter32(%rip), %rax
++	pushq	%rax
++	lretq
++
++1:	addq	$32, %rsp
++
++	pop	%rbx
++	movl	%ebx, %ss
++	pop	%rbx
++	movl	%ebx, %es
++	pop	%rbx
++	movl	%ebx, %ds
+ 
++	/*
++	 * Convert 32-bit status code into 64-bit.
++	 */
++	test	%rax, %rax
++	jz	1f
++	movl	%eax, %ecx
++	andl	$0x0fffffff, %ecx
++	andl	$0xf0000000, %eax
++	shl	$32, %rax
++	or	%rcx, %rax
++1:
++	ret
++ENDPROC(__efi64_thunk)
++
++ENTRY(efi_exit32)
++	movq	func_rt_ptr(%rip), %rax
++	push	%rax
++	mov	%rdi, %rax
++	ret
++ENDPROC(efi_exit32)
++
++	.code32
++/*
++ * EFI service pointer must be in %edi.
++ *
++ * The stack should represent the 32-bit calling convention.
++ */
++ENTRY(efi_enter32)
++	movl	$__KERNEL_DS, %eax
++	movl	%eax, %ds
++	movl	%eax, %es
++	movl	%eax, %ss
++
++	call	*%edi
++
++	/* We must preserve return value */
++	movl	%eax, %edi
++
++	movl	72(%esp), %eax
++	pushl	$__KERNEL_CS
++	pushl	%eax
++
++	lret
++ENDPROC(efi_enter32)
++
++	.data
++	.balign	8
++func_rt_ptr:		.quad 0
+ efi_saved_sp:		.quad 0
+diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
+index 70fb5075c901..376a0a9dc670 100644
+--- a/arch/x86/xen/p2m.c
++++ b/arch/x86/xen/p2m.c
+@@ -554,7 +554,7 @@ static bool alloc_p2m(unsigned long pfn)
+ 		mid_mfn = NULL;
+ 	}
+ 
+-	p2m_pfn = pte_pfn(ACCESS_ONCE(*ptep));
++	p2m_pfn = pte_pfn(READ_ONCE(*ptep));
+ 	if (p2m_pfn == PFN_DOWN(__pa(p2m_identity)) ||
+ 	    p2m_pfn == PFN_DOWN(__pa(p2m_missing))) {
+ 		/* p2m leaf page is missing */
+diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
+index 23b45eb9a89c..956374c1edbc 100644
+--- a/arch/x86/xen/spinlock.c
++++ b/arch/x86/xen/spinlock.c
+@@ -41,7 +41,7 @@ static u8 zero_stats;
+ static inline void check_zero(void)
+ {
+ 	u8 ret;
+-	u8 old = ACCESS_ONCE(zero_stats);
++	u8 old = READ_ONCE(zero_stats);
+ 	if (unlikely(old)) {
+ 		ret = cmpxchg(&zero_stats, old, 0);
+ 		/* This ensures only one fellow resets the stat */
+@@ -112,6 +112,7 @@ __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
+ 	struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting);
+ 	int cpu = smp_processor_id();
+ 	u64 start;
++	__ticket_t head;
+ 	unsigned long flags;
+ 
+ 	/* If kicker interrupts not initialized yet, just spin */
+@@ -159,11 +160,15 @@ __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
+ 	 */
+ 	__ticket_enter_slowpath(lock);
+ 
++	/* make sure enter_slowpath, which is atomic, does not cross the read */
++	smp_mb__after_atomic();
++
+ 	/*
+ 	 * check again to make sure it didn't become free while
+ 	 * we weren't looking.
+ 	 */
+-	if (ACCESS_ONCE(lock->tickets.head) == want) {
++	head = READ_ONCE(lock->tickets.head);
++	if (__tickets_equal(head, want)) {
+ 		add_stats(TAKEN_SLOW_PICKUP, 1);
+ 		goto out;
+ 	}
+@@ -204,8 +209,8 @@ static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
+ 		const struct xen_lock_waiting *w = &per_cpu(lock_waiting, cpu);
+ 
+ 		/* Make sure we read lock before want */
+-		if (ACCESS_ONCE(w->lock) == lock &&
+-		    ACCESS_ONCE(w->want) == next) {
++		if (READ_ONCE(w->lock) == lock &&
++		    READ_ONCE(w->want) == next) {
+ 			add_stats(RELEASED_SLOW_KICKED, 1);
+ 			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
+ 			break;
+diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
+index 60c9d4a93fe4..3a415ecfe3d4 100644
+--- a/block/blk-mq-tag.c
++++ b/block/blk-mq-tag.c
+@@ -509,6 +509,7 @@ static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
+ 	bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
+ 	if (!bt->bs) {
+ 		kfree(bt->map);
++		bt->map = NULL;
+ 		return -ENOMEM;
+ 	}
+ 
+diff --git a/block/blk-throttle.c b/block/blk-throttle.c
+index 9273d0969ebd..5b9c6d5c3636 100644
+--- a/block/blk-throttle.c
++++ b/block/blk-throttle.c
+@@ -1292,6 +1292,9 @@ static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
+ 	struct blkg_rwstat rwstat = { }, tmp;
+ 	int i, cpu;
+ 
++	if (tg->stats_cpu == NULL)
++		return 0;
++
+ 	for_each_possible_cpu(cpu) {
+ 		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
+ 
+diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
+index 6f2751d305de..5da8e6e9ab4b 100644
+--- a/block/cfq-iosched.c
++++ b/block/cfq-iosched.c
+@@ -3590,6 +3590,11 @@ retry:
+ 
+ 	blkcg = bio_blkcg(bio);
+ 	cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
++	if (!cfqg) {
++		cfqq = &cfqd->oom_cfqq;
++		goto out;
++	}
++
+ 	cfqq = cic_to_cfqq(cic, is_sync);
+ 
+ 	/*
+@@ -3626,7 +3631,7 @@ retry:
+ 		} else
+ 			cfqq = &cfqd->oom_cfqq;
+ 	}
+-
++out:
+ 	if (new_cfqq)
+ 		kmem_cache_free(cfq_pool, new_cfqq);
+ 
+@@ -3656,12 +3661,17 @@ static struct cfq_queue *
+ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
+ 	      struct bio *bio, gfp_t gfp_mask)
+ {
+-	const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
+-	const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
++	int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
++	int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
+ 	struct cfq_queue **async_cfqq = NULL;
+ 	struct cfq_queue *cfqq = NULL;
+ 
+ 	if (!is_sync) {
++		if (!ioprio_valid(cic->ioprio)) {
++			struct task_struct *tsk = current;
++			ioprio = task_nice_ioprio(tsk);
++			ioprio_class = task_nice_ioclass(tsk);
++		}
+ 		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
+ 		cfqq = *async_cfqq;
+ 	}
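
When the io context has never been assigned a valid ioprio, the new fallback derives one from the task's scheduling parameters. For reference, the helper used above reduces to roughly the following (taken from <linux/ioprio.h> of this era; treat the exact constants as assumptions):

	/* nice -20..19 maps onto ioprio levels 0..7 */
	static inline int task_nice_ioprio(struct task_struct *task)
	{
		return (task_nice(task) + 20) / 5;
	}

task_nice_ioclass() similarly picks IOPRIO_CLASS_RT for realtime policies, IOPRIO_CLASS_IDLE for SCHED_IDLE, and IOPRIO_CLASS_BE otherwise.
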
+diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
+index e75737fd7eef..7d5880ded78a 100644
+--- a/drivers/acpi/acpi_lpss.c
++++ b/drivers/acpi/acpi_lpss.c
+@@ -105,7 +105,7 @@ static void lpss_uart_setup(struct lpss_private_data *pdata)
+ 	}
+ }
+ 
+-static void byt_i2c_setup(struct lpss_private_data *pdata)
++static void lpss_deassert_reset(struct lpss_private_data *pdata)
+ {
+ 	unsigned int offset;
+ 	u32 val;
+@@ -114,9 +114,18 @@ static void byt_i2c_setup(struct lpss_private_data *pdata)
+ 	val = readl(pdata->mmio_base + offset);
+ 	val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC;
+ 	writel(val, pdata->mmio_base + offset);
++}
++
++#define LPSS_I2C_ENABLE			0x6c
++
++static void byt_i2c_setup(struct lpss_private_data *pdata)
++{
++	lpss_deassert_reset(pdata);
+ 
+ 	if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset))
+ 		pdata->fixed_clk_rate = 133000000;
++
++	writel(0, pdata->mmio_base + LPSS_I2C_ENABLE);
+ }
+ 
+ static struct lpss_device_desc lpt_dev_desc = {
+@@ -166,6 +175,12 @@ static struct lpss_device_desc byt_i2c_dev_desc = {
+ 	.setup = byt_i2c_setup,
+ };
+ 
++static struct lpss_device_desc bsw_spi_dev_desc = {
++	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
++	.prv_offset = 0x400,
++	.setup = lpss_deassert_reset,
++};
++
+ #else
+ 
+ #define LPSS_ADDR(desc) (0UL)
+@@ -198,7 +213,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
+ 	/* Braswell LPSS devices */
+ 	{ "80862288", LPSS_ADDR(byt_pwm_dev_desc) },
+ 	{ "8086228A", LPSS_ADDR(byt_uart_dev_desc) },
+-	{ "8086228E", LPSS_ADDR(byt_spi_dev_desc) },
++	{ "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
+ 	{ "808622C1", LPSS_ADDR(byt_i2c_dev_desc) },
+ 
+ 	{ "INT3430", LPSS_ADDR(lpt_dev_desc) },
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index 1ee27ac18de0..de4c8499cbac 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -108,6 +108,7 @@ static const struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3393) },
+ 	{ USB_DEVICE(0x13d3, 0x3402) },
+ 	{ USB_DEVICE(0x13d3, 0x3408) },
++	{ USB_DEVICE(0x13d3, 0x3423) },
+ 	{ USB_DEVICE(0x13d3, 0x3432) },
+ 
+ 	/* Atheros AR5BBU12 with sflash firmware */
+@@ -162,6 +163,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
+ 
+ 	/* Atheros AR5BBU22 with sflash firmware */
+@@ -174,6 +176,8 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
+ #define USB_REQ_DFU_DNLOAD	1
+ #define BULK_SIZE		4096
+ #define FW_HDR_SIZE		20
++#define TIMEGAP_USEC_MIN	50
++#define TIMEGAP_USEC_MAX	100
+ 
+ static int ath3k_load_firmware(struct usb_device *udev,
+ 				const struct firmware *firmware)
+@@ -205,6 +209,9 @@ static int ath3k_load_firmware(struct usb_device *udev,
+ 	pipe = usb_sndbulkpipe(udev, 0x02);
+ 
+ 	while (count) {
++		/* work around the compatibility issue with the xHCI controller */
++		usleep_range(TIMEGAP_USEC_MIN, TIMEGAP_USEC_MAX);
++
+ 		size = min_t(uint, count, BULK_SIZE);
+ 		memcpy(send_buf, firmware->data + sent, size);
+ 
+@@ -302,6 +309,9 @@ static int ath3k_load_fwfile(struct usb_device *udev,
+ 	pipe = usb_sndbulkpipe(udev, 0x02);
+ 
+ 	while (count) {
++		/* work around the compatibility issue with the xHCI controller */
++		usleep_range(TIMEGAP_USEC_MIN, TIMEGAP_USEC_MAX);
++
+ 		size = min_t(uint, count, BULK_SIZE);
+ 		memcpy(send_buf, firmware->data + sent, size);
+ 
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 19cf2cf22e87..c91ec52a8948 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -109,16 +109,24 @@ static const struct usb_device_id btusb_table[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3404),
+ 	  .driver_info = BTUSB_BCM_PATCHRAM },
+ 
++	/* Broadcom BCM20702B0 (Dynex/Insignia) */
++	{ USB_DEVICE(0x19ff, 0x0239), .driver_info = BTUSB_BCM_PATCHRAM },
++
+ 	/* Foxconn - Hon Hai */
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01),
+ 	  .driver_info = BTUSB_BCM_PATCHRAM },
+ 
++	/* Lite-On Technology - Broadcom based */
++	{ USB_VENDOR_AND_INTERFACE_INFO(0x04ca, 0xff, 0x01, 0x01),
++	  .driver_info = BTUSB_BCM_PATCHRAM },
++
+ 	/* Broadcom devices with vendor specific id */
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01),
+ 	  .driver_info = BTUSB_BCM_PATCHRAM },
+ 
+ 	/* ASUSTek Computer - Broadcom based */
+-	{ USB_VENDOR_AND_INTERFACE_INFO(0x0b05, 0xff, 0x01, 0x01) },
++	{ USB_VENDOR_AND_INTERFACE_INFO(0x0b05, 0xff, 0x01, 0x01),
++	  .driver_info = BTUSB_BCM_PATCHRAM },
+ 
+ 	/* Belkin F8065bf - Broadcom based */
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) },
+@@ -188,6 +196,7 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
+ 
+ 	/* Atheros AR5BBU12 with sflash firmware */
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 04645c09fe5e..9cd6968e2f92 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -569,19 +569,19 @@ static void fast_mix(struct fast_pool *f)
+ 	__u32 c = f->pool[2],	d = f->pool[3];
+ 
+ 	a += b;			c += d;
+-	b = rol32(a, 6);	d = rol32(c, 27);
++	b = rol32(b, 6);	d = rol32(d, 27);
+ 	d ^= a;			b ^= c;
+ 
+ 	a += b;			c += d;
+-	b = rol32(a, 16);	d = rol32(c, 14);
++	b = rol32(b, 16);	d = rol32(d, 14);
+ 	d ^= a;			b ^= c;
+ 
+ 	a += b;			c += d;
+-	b = rol32(a, 6);	d = rol32(c, 27);
++	b = rol32(b, 6);	d = rol32(d, 27);
+ 	d ^= a;			b ^= c;
+ 
+ 	a += b;			c += d;
+-	b = rol32(a, 16);	d = rol32(c, 14);
++	b = rol32(b, 16);	d = rol32(d, 14);
+ 	d ^= a;			b ^= c;
+ 
+ 	f->pool[0] = a;  f->pool[1] = b;
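
The fast_mix() fix changes which values are rotated: the pre-fix code assigned rotated copies of a and c into b and d, discarding the previous contents of b and d and so losing half the pool's state each round. One round of the corrected schedule, factored out as a sketch (rol32() is the generic helper from <linux/bitops.h>):

	static inline void fast_mix_round(__u32 *a, __u32 *b, __u32 *c,
					  __u32 *d, int r1, int r2)
	{
		*a += *b;		*c += *d;
		/* rotate the values about to be folded back in,
		 * not the accumulators they were just added into */
		*b = rol32(*b, r1);	*d = rol32(*d, r2);
		*d ^= *a;		*b ^= *c;
	}
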
+diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
+index 6af17002a115..cfb9089887bd 100644
+--- a/drivers/char/tpm/tpm-interface.c
++++ b/drivers/char/tpm/tpm-interface.c
+@@ -1122,7 +1122,7 @@ struct tpm_chip *tpm_register_hardware(struct device *dev,
+ 
+ 	/* Make chip available */
+ 	spin_lock(&driver_lock);
+-	list_add_rcu(&chip->list, &tpm_chip_list);
++	list_add_tail_rcu(&chip->list, &tpm_chip_list);
+ 	spin_unlock(&driver_lock);
+ 
+ 	return chip;
+diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
+index 77272925dee6..503a85ae176c 100644
+--- a/drivers/char/tpm/tpm_i2c_atmel.c
++++ b/drivers/char/tpm/tpm_i2c_atmel.c
+@@ -168,6 +168,10 @@ static int i2c_atmel_probe(struct i2c_client *client,
+ 
+ 	chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data),
+ 					 GFP_KERNEL);
++	if (!chip->vendor.priv) {
++		rc = -ENOMEM;
++		goto out_err;
++	}
+ 
+ 	/* Default timeouts */
+ 	chip->vendor.timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
+index 7b158efd49f7..23c7b137a7fd 100644
+--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
++++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
+@@ -538,6 +538,11 @@ static int i2c_nuvoton_probe(struct i2c_client *client,
+ 
+ 	chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data),
+ 					 GFP_KERNEL);
++	if (!chip->vendor.priv) {
++		rc = -ENOMEM;
++		goto out_err;
++	}
++
+ 	init_waitqueue_head(&chip->vendor.read_queue);
+ 	init_waitqueue_head(&chip->vendor.int_queue);
+ 
+diff --git a/drivers/char/tpm/tpm_i2c_stm_st33.c b/drivers/char/tpm/tpm_i2c_stm_st33.c
+index 4669e3713428..7d1c540fa26a 100644
+--- a/drivers/char/tpm/tpm_i2c_stm_st33.c
++++ b/drivers/char/tpm/tpm_i2c_stm_st33.c
+@@ -487,7 +487,7 @@ static int tpm_stm_i2c_send(struct tpm_chip *chip, unsigned char *buf,
+ 		if (burstcnt < 0)
+ 			return burstcnt;
+ 		size = min_t(int, len - i - 1, burstcnt);
+-		ret = I2C_WRITE_DATA(client, TPM_DATA_FIFO, buf, size);
++		ret = I2C_WRITE_DATA(client, TPM_DATA_FIFO, buf + i, size);
+ 		if (ret < 0)
+ 			goto out_err;
+ 
+diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
+index af74c57e5090..eff9d5870034 100644
+--- a/drivers/char/tpm/tpm_ibmvtpm.c
++++ b/drivers/char/tpm/tpm_ibmvtpm.c
+@@ -148,7 +148,8 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
+ 	crq.len = (u16)count;
+ 	crq.data = ibmvtpm->rtce_dma_handle;
+ 
+-	rc = ibmvtpm_send_crq(ibmvtpm->vdev, word[0], word[1]);
++	rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(word[0]),
++			      cpu_to_be64(word[1]));
+ 	if (rc != H_SUCCESS) {
+ 		dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
+ 		rc = 0;
+@@ -186,7 +187,8 @@ static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
+ 	crq.valid = (u8)IBMVTPM_VALID_CMD;
+ 	crq.msg = (u8)VTPM_GET_RTCE_BUFFER_SIZE;
+ 
+-	rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
++	rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
++			      cpu_to_be64(buf[1]));
+ 	if (rc != H_SUCCESS)
+ 		dev_err(ibmvtpm->dev,
+ 			"ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);
+@@ -212,7 +214,8 @@ static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
+ 	crq.valid = (u8)IBMVTPM_VALID_CMD;
+ 	crq.msg = (u8)VTPM_GET_VERSION;
+ 
+-	rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
++	rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
++			      cpu_to_be64(buf[1]));
+ 	if (rc != H_SUCCESS)
+ 		dev_err(ibmvtpm->dev,
+ 			"ibmvtpm_crq_get_version failed rc=%d\n", rc);
+@@ -307,6 +310,14 @@ static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
+ static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
+ {
+ 	struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev);
++
++	/* ibmvtpm initializes at probe time, so the data we are
++	 * asking for may not be set yet. Estimate that 4K is required
++	 * for the TCE-mapped buffer in addition to the CRQ.
++	 */
++	if (!ibmvtpm)
++		return CRQ_RES_BUF_SIZE + PAGE_SIZE;
++
+ 	return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
+ }
+ 
+@@ -327,7 +338,8 @@ static int tpm_ibmvtpm_suspend(struct device *dev)
+ 	crq.valid = (u8)IBMVTPM_VALID_CMD;
+ 	crq.msg = (u8)VTPM_PREPARE_TO_SUSPEND;
+ 
+-	rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
++	rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
++			      cpu_to_be64(buf[1]));
+ 	if (rc != H_SUCCESS)
+ 		dev_err(ibmvtpm->dev,
+ 			"tpm_ibmvtpm_suspend failed rc=%d\n", rc);
+@@ -472,11 +484,11 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
+ 	case IBMVTPM_VALID_CMD:
+ 		switch (crq->msg) {
+ 		case VTPM_GET_RTCE_BUFFER_SIZE_RES:
+-			if (crq->len <= 0) {
++			if (be16_to_cpu(crq->len) <= 0) {
+ 				dev_err(ibmvtpm->dev, "Invalid rtce size\n");
+ 				return;
+ 			}
+-			ibmvtpm->rtce_size = crq->len;
++			ibmvtpm->rtce_size = be16_to_cpu(crq->len);
+ 			ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
+ 						    GFP_KERNEL);
+ 			if (!ibmvtpm->rtce_buf) {
+@@ -497,11 +509,11 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
+ 
+ 			return;
+ 		case VTPM_GET_VERSION_RES:
+-			ibmvtpm->vtpm_version = crq->data;
++			ibmvtpm->vtpm_version = be32_to_cpu(crq->data);
+ 			return;
+ 		case VTPM_TPM_COMMAND_RES:
+ 			/* len of the data in rtce buffer */
+-			ibmvtpm->res_len = crq->len;
++			ibmvtpm->res_len = be16_to_cpu(crq->len);
+ 			wake_up_interruptible(&ibmvtpm->wq);
+ 			return;
+ 		default:
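
Every ibmvtpm hunk above applies one rule: CRQ words cross the hypervisor interface in big-endian byte order, so a little-endian LPAR has to swap in both directions. The shape of the fix, condensed:

	u64 *word = (u64 *)&crq;

	/* swap on the way out ... */
	rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(word[0]),
			      cpu_to_be64(word[1]));

	/* ... and on the way back in */
	ibmvtpm->rtce_size = be16_to_cpu(crq->len);

On big-endian hosts both conversions compile away, which is why the bug only showed up on little-endian configurations.
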
+diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
+index 6f1985496112..ccb140d60532 100644
+--- a/drivers/char/tpm/tpm_tis.c
++++ b/drivers/char/tpm/tpm_tis.c
+@@ -75,6 +75,10 @@ enum tis_defaults {
+ #define	TPM_DID_VID(l)			(0x0F00 | ((l) << 12))
+ #define	TPM_RID(l)			(0x0F04 | ((l) << 12))
+ 
++struct priv_data {
++	bool irq_tested;
++};
++
+ static LIST_HEAD(tis_chips);
+ static DEFINE_MUTEX(tis_lock);
+ 
+@@ -338,12 +342,27 @@ out_err:
+ 	return rc;
+ }
+ 
++static void disable_interrupts(struct tpm_chip *chip)
++{
++	u32 intmask;
++
++	intmask =
++	    ioread32(chip->vendor.iobase +
++		     TPM_INT_ENABLE(chip->vendor.locality));
++	intmask &= ~TPM_GLOBAL_INT_ENABLE;
++	iowrite32(intmask,
++		  chip->vendor.iobase +
++		  TPM_INT_ENABLE(chip->vendor.locality));
++	free_irq(chip->vendor.irq, chip);
++	chip->vendor.irq = 0;
++}
++
+ /*
+  * If interrupts are used (signaled by an irq set in the vendor structure)
+  * tpm.c can skip polling for the data to be available as the interrupt is
+  * waited for here
+  */
+-static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
++static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len)
+ {
+ 	int rc;
+ 	u32 ordinal;
+@@ -373,6 +392,30 @@ out_err:
+ 	return rc;
+ }
+ 
++static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
++{
++	int rc, irq;
++	struct priv_data *priv = chip->vendor.priv;
++
++	if (!chip->vendor.irq || priv->irq_tested)
++		return tpm_tis_send_main(chip, buf, len);
++
++	/* Verify receipt of the expected IRQ */
++	irq = chip->vendor.irq;
++	chip->vendor.irq = 0;
++	rc = tpm_tis_send_main(chip, buf, len);
++	chip->vendor.irq = irq;
++	if (!priv->irq_tested)
++		msleep(1);
++	if (!priv->irq_tested) {
++		disable_interrupts(chip);
++		dev_err(chip->dev,
++			FW_BUG "TPM interrupt not working, polling instead\n");
++	}
++	priv->irq_tested = true;
++	return rc;
++}
++
+ struct tis_vendor_timeout_override {
+ 	u32 did_vid;
+ 	unsigned long timeout_us[4];
+@@ -505,6 +548,7 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id)
+ 	if (interrupt == 0)
+ 		return IRQ_NONE;
+ 
++	((struct priv_data *)chip->vendor.priv)->irq_tested = true;
+ 	if (interrupt & TPM_INTF_DATA_AVAIL_INT)
+ 		wake_up_interruptible(&chip->vendor.read_queue);
+ 	if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
+@@ -534,9 +578,14 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
+ 	u32 vendor, intfcaps, intmask;
+ 	int rc, i, irq_s, irq_e, probe;
+ 	struct tpm_chip *chip;
++	struct priv_data *priv;
+ 
++	priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL);
++	if (priv == NULL)
++		return -ENOMEM;
+ 	if (!(chip = tpm_register_hardware(dev, &tpm_tis)))
+ 		return -ENODEV;
++	chip->vendor.priv = priv;
+ 
+ 	chip->vendor.iobase = ioremap(start, len);
+ 	if (!chip->vendor.iobase) {
+@@ -605,19 +654,6 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
+ 	if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
+ 		dev_dbg(dev, "\tData Avail Int Support\n");
+ 
+-	/* get the timeouts before testing for irqs */
+-	if (tpm_get_timeouts(chip)) {
+-		dev_err(dev, "Could not get TPM timeouts and durations\n");
+-		rc = -ENODEV;
+-		goto out_err;
+-	}
+-
+-	if (tpm_do_selftest(chip)) {
+-		dev_err(dev, "TPM self test failed\n");
+-		rc = -ENODEV;
+-		goto out_err;
+-	}
+-
+ 	/* INTERRUPT Setup */
+ 	init_waitqueue_head(&chip->vendor.read_queue);
+ 	init_waitqueue_head(&chip->vendor.int_queue);
+@@ -719,6 +755,18 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
+ 		}
+ 	}
+ 
++	if (tpm_get_timeouts(chip)) {
++		dev_err(dev, "Could not get TPM timeouts and durations\n");
++		rc = -ENODEV;
++		goto out_err;
++	}
++
++	if (tpm_do_selftest(chip)) {
++		dev_err(dev, "TPM self test failed\n");
++		rc = -ENODEV;
++		goto out_err;
++	}
++
+ 	INIT_LIST_HEAD(&chip->vendor.list);
+ 	mutex_lock(&tis_lock);
+ 	list_add(&chip->vendor.list, &tis_chips);
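
The tpm_tis change turns the first interrupt-driven command into a self-test. A condensed sketch of the wrapper logic, using the fields introduced above:

	int irq = chip->vendor.irq;

	chip->vendor.irq = 0;			/* force polling timeouts */
	rc = tpm_tis_send_main(chip, buf, len);
	chip->vendor.irq = irq;

	/* tis_int_handler() sets irq_tested if the interrupt really fired */
	if (!priv->irq_tested)
		disable_interrupts(chip);	/* fall back to polling */

Moving tpm_get_timeouts() and tpm_do_selftest() after the interrupt setup guarantees that the very first real commands exercise this probe.
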
+diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c
+index 32a3d25795d3..68ab42356d0e 100644
+--- a/drivers/clocksource/mtk_timer.c
++++ b/drivers/clocksource/mtk_timer.c
+@@ -224,6 +224,8 @@ static void __init mtk_timer_init(struct device_node *node)
+ 	}
+ 	rate = clk_get_rate(clk);
+ 
++	mtk_timer_global_reset(evt);
++
+ 	if (request_irq(evt->dev.irq, mtk_timer_interrupt,
+ 			IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) {
+ 		pr_warn("failed to setup irq %d\n", evt->dev.irq);
+@@ -232,8 +234,6 @@ static void __init mtk_timer_init(struct device_node *node)
+ 
+ 	evt->ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
+ 
+-	mtk_timer_global_reset(evt);
+-
+ 	/* Configure clock source */
+ 	mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN);
+ 	clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC),
+@@ -241,10 +241,11 @@ static void __init mtk_timer_init(struct device_node *node)
+ 
+ 	/* Configure clock event */
+ 	mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT);
+-	mtk_timer_enable_irq(evt, GPT_CLK_EVT);
+-
+ 	clockevents_config_and_register(&evt->dev, rate, 0x3,
+ 					0xffffffff);
++
++	mtk_timer_enable_irq(evt, GPT_CLK_EVT);
++
+ 	return;
+ 
+ err_clk_disable:
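
The mtk_timer reordering is about bring-up safety. The sequence after the patch, sketched with the same calls in their new order:

	mtk_timer_global_reset(evt);		/* 1: quiesce stale state */
	request_irq(evt->dev.irq, mtk_timer_interrupt,
		    IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt); /* 2 */
	clockevents_config_and_register(&evt->dev, rate, 0x3,
					0xffffffff);	/* 3: device ready */
	mtk_timer_enable_irq(evt, GPT_CLK_EVT);	/* 4: unmask last */

The interrupt is only unmasked once the clockevent device is fully registered, so a pending timer tick can no longer reach a half-initialized handler.
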
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 46bed4f81cde..7030c409be24 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -1416,9 +1416,10 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
+ 	unsigned long flags;
+ 	struct cpufreq_policy *policy;
+ 
+-	read_lock_irqsave(&cpufreq_driver_lock, flags);
++	write_lock_irqsave(&cpufreq_driver_lock, flags);
+ 	policy = per_cpu(cpufreq_cpu_data, cpu);
+-	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
++	per_cpu(cpufreq_cpu_data, cpu) = NULL;
++	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ 
+ 	if (!policy) {
+ 		pr_debug("%s: No cpu_data found\n", __func__);
+@@ -1473,7 +1474,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
+ 		}
+ 	}
+ 
+-	per_cpu(cpufreq_cpu_data, cpu) = NULL;
+ 	return 0;
+ }
+ 
+diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
+index 2fd53eaaec20..d6d425773fa4 100644
+--- a/drivers/cpufreq/s3c2416-cpufreq.c
++++ b/drivers/cpufreq/s3c2416-cpufreq.c
+@@ -263,7 +263,7 @@ out:
+ }
+ 
+ #ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
+-static void __init s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
++static void s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
+ {
+ 	int count, v, i, found;
+ 	struct cpufreq_frequency_table *pos;
+@@ -333,7 +333,7 @@ static struct notifier_block s3c2416_cpufreq_reboot_notifier = {
+ 	.notifier_call = s3c2416_cpufreq_reboot_notifier_evt,
+ };
+ 
+-static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
++static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
+ {
+ 	struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
+ 	struct cpufreq_frequency_table *pos;
+diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
+index d00f1cee4509..733aa5153e74 100644
+--- a/drivers/cpufreq/s3c24xx-cpufreq.c
++++ b/drivers/cpufreq/s3c24xx-cpufreq.c
+@@ -144,11 +144,6 @@ static void s3c_cpufreq_setfvco(struct s3c_cpufreq_config *cfg)
+ 	(cfg->info->set_fvco)(cfg);
+ }
+ 
+-static inline void s3c_cpufreq_resume_clocks(void)
+-{
+-	cpu_cur.info->resume_clocks();
+-}
+-
+ static inline void s3c_cpufreq_updateclk(struct clk *clk,
+ 					 unsigned int freq)
+ {
+@@ -417,9 +412,6 @@ static int s3c_cpufreq_resume(struct cpufreq_policy *policy)
+ 
+ 	last_target = ~0;	/* invalidate last_target setting */
+ 
+-	/* first, find out what speed we resumed at. */
+-	s3c_cpufreq_resume_clocks();
+-
+ 	/* whilst we will be called later on, we try and re-set the
+ 	 * cpu frequencies as soon as possible so that we do not end
+ 	 * up resuming devices and then immediately having to re-set
+@@ -454,7 +446,7 @@ static struct cpufreq_driver s3c24xx_driver = {
+ };
+ 
+ 
+-int __init s3c_cpufreq_register(struct s3c_cpufreq_info *info)
++int s3c_cpufreq_register(struct s3c_cpufreq_info *info)
+ {
+ 	if (!info || !info->name) {
+ 		printk(KERN_ERR "%s: failed to pass valid information\n",
+diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
+index 7047821a7f8a..4ab7a2156672 100644
+--- a/drivers/cpufreq/speedstep-lib.c
++++ b/drivers/cpufreq/speedstep-lib.c
+@@ -400,6 +400,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
+ 
+ 	pr_debug("previous speed is %u\n", prev_speed);
+ 
++	preempt_disable();
+ 	local_irq_save(flags);
+ 
+ 	/* switch to low state */
+@@ -464,6 +465,8 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
+ 
+ out:
+ 	local_irq_restore(flags);
++	preempt_enable();
++
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(speedstep_get_freqs);
+diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
+index 5fc96d5d656b..819229e824fb 100644
+--- a/drivers/cpufreq/speedstep-smi.c
++++ b/drivers/cpufreq/speedstep-smi.c
+@@ -156,6 +156,7 @@ static void speedstep_set_state(unsigned int state)
+ 		return;
+ 
+ 	/* Disable IRQs */
++	preempt_disable();
+ 	local_irq_save(flags);
+ 
+ 	command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
+@@ -166,9 +167,19 @@ static void speedstep_set_state(unsigned int state)
+ 
+ 	do {
+ 		if (retry) {
++			/*
++			 * We need to enable interrupts; otherwise the blockage
++			 * won't resolve.
++			 *
++			 * We disable preemption so that other processes don't
++			 * run. If other processes were running, they could
++			 * submit more DMA requests, making the blockage worse.
++			 */
+ 			pr_debug("retry %u, previous result %u, waiting...\n",
+ 					retry, result);
++			local_irq_enable();
+ 			mdelay(retry * 50);
++			local_irq_disable();
+ 		}
+ 		retry++;
+ 		__asm__ __volatile__(
+@@ -185,6 +196,7 @@ static void speedstep_set_state(unsigned int state)
+ 
+ 	/* enable IRQs */
+ 	local_irq_restore(flags);
++	preempt_enable();
+ 
+ 	if (new_state == state)
+ 		pr_debug("change to %u MHz succeeded after %u tries "
+diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
+index 17638d7cf5c2..5907c1718f8c 100644
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -2174,14 +2174,20 @@ static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
+ 
+ static inline void decode_bus_error(int node_id, struct mce *m)
+ {
+-	struct mem_ctl_info *mci = mcis[node_id];
+-	struct amd64_pvt *pvt = mci->pvt_info;
++	struct mem_ctl_info *mci;
++	struct amd64_pvt *pvt;
+ 	u8 ecc_type = (m->status >> 45) & 0x3;
+ 	u8 xec = XEC(m->status, 0x1f);
+ 	u16 ec = EC(m->status);
+ 	u64 sys_addr;
+ 	struct err_info err;
+ 
++	mci = edac_mc_find(node_id);
++	if (!mci)
++		return;
++
++	pvt = mci->pvt_info;
++
+ 	/* Bail out early if this was an 'observed' error */
+ 	if (PP(ec) == NBSL_PP_OBS)
+ 		return;
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index 63aa6730e89e..1acf57ba4c86 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -2447,7 +2447,7 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 		rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_ibridge_table);
+ 		type = IVY_BRIDGE;
+ 		break;
+-	case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA:
++	case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0:
+ 		rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_sbridge_table);
+ 		type = SANDY_BRIDGE;
+ 		break;
+@@ -2460,8 +2460,11 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 		type = BROADWELL;
+ 		break;
+ 	}
+-	if (unlikely(rc < 0))
++	if (unlikely(rc < 0)) {
++		edac_dbg(0, "couldn't get all devices for 0x%x\n", pdev->device);
+ 		goto fail0;
++	}
++
+ 	mc = 0;
+ 
+ 	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
+@@ -2474,7 +2477,7 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 			goto fail1;
+ 	}
+ 
+-	sbridge_printk(KERN_INFO, "Driver loaded.\n");
++	sbridge_printk(KERN_INFO, "%s\n", SBRIDGE_REVISION);
+ 
+ 	mutex_unlock(&sbridge_edac_lock);
+ 	return 0;
+diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c
+index 472fb5b8779f..9cdbc0c9cb2d 100644
+--- a/drivers/gpio/gpio-tps65912.c
++++ b/drivers/gpio/gpio-tps65912.c
+@@ -26,9 +26,12 @@ struct tps65912_gpio_data {
+ 	struct gpio_chip gpio_chip;
+ };
+ 
++#define to_tgd(gc) container_of(gc, struct tps65912_gpio_data, gpio_chip)
++
+ static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
+ {
+-	struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
++	struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
++	struct tps65912 *tps65912 = tps65912_gpio->tps65912;
+ 	int val;
+ 
+ 	val = tps65912_reg_read(tps65912, TPS65912_GPIO1 + offset);
+@@ -42,7 +45,8 @@ static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
+ static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
+ 			      int value)
+ {
+-	struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
++	struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
++	struct tps65912 *tps65912 = tps65912_gpio->tps65912;
+ 
+ 	if (value)
+ 		tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset,
+@@ -55,7 +59,8 @@ static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
+ static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset,
+ 				int value)
+ {
+-	struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
++	struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
++	struct tps65912 *tps65912 = tps65912_gpio->tps65912;
+ 
+ 	/* Set the initial value */
+ 	tps65912_gpio_set(gc, offset, value);
+@@ -66,7 +71,8 @@ static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset,
+ 
+ static int tps65912_gpio_input(struct gpio_chip *gc, unsigned offset)
+ {
+-	struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
++	struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
++	struct tps65912 *tps65912 = tps65912_gpio->tps65912;
+ 
+ 	return tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset,
+ 								GPIO_CFG_MASK);
+diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
+index 08261f2b3a82..26645a847bb8 100644
+--- a/drivers/gpio/gpiolib-of.c
++++ b/drivers/gpio/gpiolib-of.c
+@@ -46,12 +46,13 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
+ 
+ 	ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags);
+ 	if (ret < 0) {
+-		/* We've found the gpio chip, but the translation failed.
+-		 * Return true to stop looking and return the translation
+-		 * error via out_gpio
++		/* We've found a gpio chip, but the translation failed.
++		 * Store the translation error in out_gpio.
++		 * Return false to keep looking, as more than one gpio chip
++		 * could be registered per of-node.
+ 		 */
+ 		gg_data->out_gpio = ERR_PTR(ret);
+-		return true;
++		return false;
+ 	 }
+ 
+ 	gg_data->out_gpio = gpiochip_get_desc(gc, ret);
+diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
+index d43e967e7533..5e72fc2428f0 100644
+--- a/drivers/hid/i2c-hid/i2c-hid.c
++++ b/drivers/hid/i2c-hid/i2c-hid.c
+@@ -370,7 +370,10 @@ static int i2c_hid_hwreset(struct i2c_client *client)
+ static void i2c_hid_get_input(struct i2c_hid *ihid)
+ {
+ 	int ret, ret_size;
+-	int size = ihid->bufsize;
++	int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
++
++	if (size > ihid->bufsize)
++		size = ihid->bufsize;
+ 
+ 	ret = i2c_master_recv(ihid->client, ihid->inbuf, size);
+ 	if (ret != size) {
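
The i2c-hid fix reads at most what the device advertised in wMaxInputLength, clamped to the driver's own allocation. Standalone, the sizing rule is just:

	int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);

	if (size > ihid->bufsize)	/* never trust the descriptor */
		size = ihid->bufsize;	/* past our own buffer */

Previously the driver always requested the full bufsize, which reportedly confuses devices whose input reports are shorter.
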
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 40b35be34f8d..2f2f38f4d83c 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -560,7 +560,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
+ 		if (test_bit(WriteMostly, &rdev->flags)) {
+ 			/* Don't balance among write-mostly, just
+ 			 * use the first as a last resort */
+-			if (best_disk < 0) {
++			if (best_dist_disk < 0) {
+ 				if (is_badblock(rdev, this_sector, sectors,
+ 						&first_bad, &bad_sectors)) {
+ 					if (first_bad < this_sector)
+@@ -569,7 +569,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
+ 					best_good_sectors = first_bad - this_sector;
+ 				} else
+ 					best_good_sectors = sectors;
+-				best_disk = disk;
++				best_dist_disk = disk;
++				best_pending_disk = disk;
+ 			}
+ 			continue;
+ 		}
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index b98765f6f77f..8577cc7db47e 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -3102,7 +3102,8 @@ static void handle_stripe_dirtying(struct r5conf *conf,
+ 	 * generate correct data from the parity.
+ 	 */
+ 	if (conf->max_degraded == 2 ||
+-	    (recovery_cp < MaxSector && sh->sector >= recovery_cp)) {
++	    (recovery_cp < MaxSector && sh->sector >= recovery_cp &&
++	     s->failed == 0)) {
+ 		/* Calculate the real rcw later - for now make it
+ 		 * look like rcw is cheaper
+ 		 */
+diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
+index ce9ab442b4b6..acf0fc31f783 100644
+--- a/drivers/media/dvb-frontends/si2168.c
++++ b/drivers/media/dvb-frontends/si2168.c
+@@ -635,6 +635,8 @@ static const struct dvb_frontend_ops si2168_ops = {
+ 	.delsys = {SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_A},
+ 	.info = {
+ 		.name = "Silicon Labs Si2168",
++		.symbol_rate_min = 1000000,
++		.symbol_rate_max = 7200000,
+ 		.caps =	FE_CAN_FEC_1_2 |
+ 			FE_CAN_FEC_2_3 |
+ 			FE_CAN_FEC_3_4 |
+diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
+index 765bffb49a72..6a1334be7544 100644
+--- a/drivers/media/platform/Kconfig
++++ b/drivers/media/platform/Kconfig
+@@ -56,10 +56,8 @@ config VIDEO_VIU
+ 
+ config VIDEO_TIMBERDALE
+ 	tristate "Support for timberdale Video In/LogiWIN"
+-	depends on VIDEO_V4L2 && I2C && DMADEVICES
+-	depends on MFD_TIMBERDALE || COMPILE_TEST
+-	select DMA_ENGINE
+-	select TIMB_DMA
++	depends on VIDEO_V4L2 && I2C
++	depends on (MFD_TIMBERDALE && TIMB_DMA) || COMPILE_TEST
+ 	select VIDEO_ADV7180
+ 	select VIDEOBUF_DMA_CONTIG
+ 	---help---
+diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
+index 86ffcd54339e..f8c5e47a30aa 100644
+--- a/drivers/media/rc/rc-main.c
++++ b/drivers/media/rc/rc-main.c
+@@ -1021,16 +1021,16 @@ static ssize_t store_protocols(struct device *device,
+ 		goto out;
+ 	}
+ 
+-	if (new_protocols == old_protocols) {
+-		rc = len;
+-		goto out;
++	if (new_protocols != old_protocols) {
++		*current_protocols = new_protocols;
++		IR_dprintk(1, "Protocols changed to 0x%llx\n",
++			   (long long)new_protocols);
+ 	}
+ 
+-	*current_protocols = new_protocols;
+-	IR_dprintk(1, "Protocols changed to 0x%llx\n", (long long)new_protocols);
+-
+ 	/*
+-	 * If the protocol is changed the filter needs updating.
++	 * If a protocol change was attempted, the filter may need updating, even
++	 * if the actual protocol mask hasn't changed (since the driver may have
++	 * cleared the filter).
+ 	 * Try setting the same filter with the new protocol (if any).
+ 	 * Fall back to clearing the filter.
+ 	 */
+diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
+index 994de53a574b..15db9f67f4d1 100644
+--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
++++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
+@@ -344,15 +344,17 @@ static void lme2510_int_response(struct urb *lme_urb)
+ 
+ 	usb_submit_urb(lme_urb, GFP_ATOMIC);
+ 
+-	/* interrupt urb is due every 48 msecs while streaming
+-	 *	add 12msecs for system lag */
+-	st->int_urb_due = jiffies + msecs_to_jiffies(60);
++	/* Interrupt urb is due every 48 msecs while streaming; the buffer
++	 * stores up to 4 periods if missed. Allow 200 msec for the next interrupt.
++	 */
++	st->int_urb_due = jiffies + msecs_to_jiffies(200);
+ }
+ 
+ static int lme2510_int_read(struct dvb_usb_adapter *adap)
+ {
+ 	struct dvb_usb_device *d = adap_to_d(adap);
+ 	struct lme2510_state *lme_int = adap_to_priv(adap);
++	struct usb_host_endpoint *ep;
+ 
+ 	lme_int->lme_urb = usb_alloc_urb(0, GFP_ATOMIC);
+ 
+@@ -374,6 +376,12 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap)
+ 				adap,
+ 				8);
+ 
++	/* Quirk: the pipe reports PIPE_BULK but behaves as an interrupt endpoint */
++	ep = usb_pipe_endpoint(d->udev, lme_int->lme_urb->pipe);
++
++	if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK)
++		lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa);
++
+ 	lme_int->lme_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ 
+ 	usb_submit_urb(lme_int->lme_urb, GFP_ATOMIC);
+diff --git a/drivers/media/usb/em28xx/em28xx-audio.c b/drivers/media/usb/em28xx/em28xx-audio.c
+index 44ae1e0661e6..49a5f9532bd8 100644
+--- a/drivers/media/usb/em28xx/em28xx-audio.c
++++ b/drivers/media/usb/em28xx/em28xx-audio.c
+@@ -820,7 +820,7 @@ static int em28xx_audio_urb_init(struct em28xx *dev)
+ 	if (urb_size > ep_size * npackets)
+ 		npackets = DIV_ROUND_UP(urb_size, ep_size);
+ 
+-	em28xx_info("Number of URBs: %d, with %d packets and %d size",
++	em28xx_info("Number of URBs: %d, with %d packets and %d size\n",
+ 		    num_urb, npackets, urb_size);
+ 
+ 	/* Estimate the bytes per period */
+@@ -981,7 +981,7 @@ static int em28xx_audio_fini(struct em28xx *dev)
+ 		return 0;
+ 	}
+ 
+-	em28xx_info("Closing audio extension");
++	em28xx_info("Closing audio extension\n");
+ 
+ 	if (dev->adev.sndcard) {
+ 		snd_card_disconnect(dev->adev.sndcard);
+@@ -1005,7 +1005,7 @@ static int em28xx_audio_suspend(struct em28xx *dev)
+ 	if (dev->usb_audio_type != EM28XX_USB_AUDIO_VENDOR)
+ 		return 0;
+ 
+-	em28xx_info("Suspending audio extension");
++	em28xx_info("Suspending audio extension\n");
+ 	em28xx_deinit_isoc_audio(dev);
+ 	atomic_set(&dev->adev.stream_started, 0);
+ 	return 0;
+@@ -1019,7 +1019,7 @@ static int em28xx_audio_resume(struct em28xx *dev)
+ 	if (dev->usb_audio_type != EM28XX_USB_AUDIO_VENDOR)
+ 		return 0;
+ 
+-	em28xx_info("Resuming audio extension");
++	em28xx_info("Resuming audio extension\n");
+ 	/* Nothing to do other than schedule_work() ?? */
+ 	schedule_work(&dev->adev.wq_trigger);
+ 	return 0;
+diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
+index 86461a708abe..37456079f490 100644
+--- a/drivers/media/usb/em28xx/em28xx-core.c
++++ b/drivers/media/usb/em28xx/em28xx-core.c
+@@ -1125,7 +1125,7 @@ int em28xx_suspend_extension(struct em28xx *dev)
+ {
+ 	const struct em28xx_ops *ops = NULL;
+ 
+-	em28xx_info("Suspending extensions");
++	em28xx_info("Suspending extensions\n");
+ 	mutex_lock(&em28xx_devlist_mutex);
+ 	list_for_each_entry(ops, &em28xx_extension_devlist, next) {
+ 		if (ops->suspend)
+@@ -1139,7 +1139,7 @@ int em28xx_resume_extension(struct em28xx *dev)
+ {
+ 	const struct em28xx_ops *ops = NULL;
+ 
+-	em28xx_info("Resuming extensions");
++	em28xx_info("Resuming extensions\n");
+ 	mutex_lock(&em28xx_devlist_mutex);
+ 	list_for_each_entry(ops, &em28xx_extension_devlist, next) {
+ 		if (ops->resume)
+diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
+index 9877b699c6bc..aee70d483264 100644
+--- a/drivers/media/usb/em28xx/em28xx-dvb.c
++++ b/drivers/media/usb/em28xx/em28xx-dvb.c
+@@ -1724,7 +1724,7 @@ static int em28xx_dvb_fini(struct em28xx *dev)
+ 	if (!dev->dvb)
+ 		return 0;
+ 
+-	em28xx_info("Closing DVB extension");
++	em28xx_info("Closing DVB extension\n");
+ 
+ 	dvb = dev->dvb;
+ 	client = dvb->i2c_client_tuner;
+@@ -1775,17 +1775,17 @@ static int em28xx_dvb_suspend(struct em28xx *dev)
+ 	if (!dev->board.has_dvb)
+ 		return 0;
+ 
+-	em28xx_info("Suspending DVB extension");
++	em28xx_info("Suspending DVB extension\n");
+ 	if (dev->dvb) {
+ 		struct em28xx_dvb *dvb = dev->dvb;
+ 
+ 		if (dvb->fe[0]) {
+ 			ret = dvb_frontend_suspend(dvb->fe[0]);
+-			em28xx_info("fe0 suspend %d", ret);
++			em28xx_info("fe0 suspend %d\n", ret);
+ 		}
+ 		if (dvb->fe[1]) {
+ 			dvb_frontend_suspend(dvb->fe[1]);
+-			em28xx_info("fe1 suspend %d", ret);
++			em28xx_info("fe1 suspend %d\n", ret);
+ 		}
+ 	}
+ 
+@@ -1802,18 +1802,18 @@ static int em28xx_dvb_resume(struct em28xx *dev)
+ 	if (!dev->board.has_dvb)
+ 		return 0;
+ 
+-	em28xx_info("Resuming DVB extension");
++	em28xx_info("Resuming DVB extension\n");
+ 	if (dev->dvb) {
+ 		struct em28xx_dvb *dvb = dev->dvb;
+ 
+ 		if (dvb->fe[0]) {
+ 			ret = dvb_frontend_resume(dvb->fe[0]);
+-			em28xx_info("fe0 resume %d", ret);
++			em28xx_info("fe0 resume %d\n", ret);
+ 		}
+ 
+ 		if (dvb->fe[1]) {
+ 			ret = dvb_frontend_resume(dvb->fe[1]);
+-			em28xx_info("fe1 resume %d", ret);
++			em28xx_info("fe1 resume %d\n", ret);
+ 		}
+ 	}
+ 
+diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
+index d8dc03aadfbd..4007356d991d 100644
+--- a/drivers/media/usb/em28xx/em28xx-input.c
++++ b/drivers/media/usb/em28xx/em28xx-input.c
+@@ -654,8 +654,6 @@ next_button:
+ 	if (dev->num_button_polling_addresses) {
+ 		memset(dev->button_polling_last_values, 0,
+ 		       EM28XX_NUM_BUTTON_ADDRESSES_MAX);
+-		INIT_DELAYED_WORK(&dev->buttons_query_work,
+-				  em28xx_query_buttons);
+ 		schedule_delayed_work(&dev->buttons_query_work,
+ 				      msecs_to_jiffies(dev->button_polling_interval));
+ 	}
+@@ -689,6 +687,7 @@ static int em28xx_ir_init(struct em28xx *dev)
+ 	}
+ 
+ 	kref_get(&dev->ref);
++	INIT_DELAYED_WORK(&dev->buttons_query_work, em28xx_query_buttons);
+ 
+ 	if (dev->board.buttons)
+ 		em28xx_init_buttons(dev);
+@@ -833,7 +832,7 @@ static int em28xx_ir_fini(struct em28xx *dev)
+ 		return 0;
+ 	}
+ 
+-	em28xx_info("Closing input extension");
++	em28xx_info("Closing input extension\n");
+ 
+ 	em28xx_shutdown_buttons(dev);
+ 
+@@ -862,7 +861,7 @@ static int em28xx_ir_suspend(struct em28xx *dev)
+ 	if (dev->is_audio_only)
+ 		return 0;
+ 
+-	em28xx_info("Suspending input extension");
++	em28xx_info("Suspending input extension\n");
+ 	if (ir)
+ 		cancel_delayed_work_sync(&ir->work);
+ 	cancel_delayed_work_sync(&dev->buttons_query_work);
+@@ -879,7 +878,7 @@ static int em28xx_ir_resume(struct em28xx *dev)
+ 	if (dev->is_audio_only)
+ 		return 0;
+ 
+-	em28xx_info("Resuming input extension");
++	em28xx_info("Resuming input extension\n");
+ 	/* if suspend calls ir_raw_event_unregister(), then resume should
+ 	   call ir_raw_event_register() */
+ 	if (ir)
+diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
+index cf7f58b76292..f220c1f376e3 100644
+--- a/drivers/media/usb/em28xx/em28xx-video.c
++++ b/drivers/media/usb/em28xx/em28xx-video.c
+@@ -1958,7 +1958,7 @@ static int em28xx_v4l2_fini(struct em28xx *dev)
+ 	if (v4l2 == NULL)
+ 		return 0;
+ 
+-	em28xx_info("Closing video extension");
++	em28xx_info("Closing video extension\n");
+ 
+ 	mutex_lock(&dev->lock);
+ 
+@@ -2007,7 +2007,7 @@ static int em28xx_v4l2_suspend(struct em28xx *dev)
+ 	if (!dev->has_video)
+ 		return 0;
+ 
+-	em28xx_info("Suspending video extension");
++	em28xx_info("Suspending video extension\n");
+ 	em28xx_stop_urbs(dev);
+ 	return 0;
+ }
+@@ -2020,7 +2020,7 @@ static int em28xx_v4l2_resume(struct em28xx *dev)
+ 	if (!dev->has_video)
+ 		return 0;
+ 
+-	em28xx_info("Resuming video extension");
++	em28xx_info("Resuming video extension\n");
+ 	/* what do we do here */
+ 	return 0;
+ }
+diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
+index 06ff0a2ec960..f8fd503dfbd6 100644
+--- a/drivers/misc/mei/hw-me.c
++++ b/drivers/misc/mei/hw-me.c
+@@ -242,7 +242,7 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
+ 	if ((hcsr & H_RST) == H_RST) {
+ 		dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
+ 		hcsr &= ~H_RST;
+-		mei_me_reg_write(hw, H_CSR, hcsr);
++		mei_hcsr_set(hw, hcsr);
+ 		hcsr = mei_hcsr_read(hw);
+ 	}
+ 
+@@ -335,6 +335,7 @@ static int mei_me_hw_ready_wait(struct mei_device *dev)
+ 		return -ETIME;
+ 	}
+ 
++	mei_me_hw_reset_release(dev);
+ 	dev->recvd_hw_ready = false;
+ 	return 0;
+ }
+@@ -731,9 +732,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
+ 	/*  check if we need to start the dev */
+ 	if (!mei_host_is_ready(dev)) {
+ 		if (mei_hw_is_ready(dev)) {
+-			mei_me_hw_reset_release(dev);
+ 			dev_dbg(dev->dev, "we need to start the dev.\n");
+-
+ 			dev->recvd_hw_ready = true;
+ 			wake_up(&dev->wait_hw_ready);
+ 		} else {
+diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
+index ca3424e7ef71..e187f70b7c70 100644
+--- a/drivers/mmc/host/sdhci-pxav3.c
++++ b/drivers/mmc/host/sdhci-pxav3.c
+@@ -118,6 +118,38 @@ static int mv_conf_mbus_windows(struct platform_device *pdev,
+ 	return 0;
+ }
+ 
++static int armada_38x_quirks(struct platform_device *pdev,
++			     struct sdhci_host *host)
++{
++	struct device_node *np = pdev->dev.of_node;
++
++	host->quirks |= SDHCI_QUIRK_MISSING_CAPS;
++	/*
++	 * According to erratum 'FE-2946959', both SDR50 and DDR50
++	 * modes require specific clock adjustments in the SDIO3
++	 * Configuration register; if the adjustment is not done,
++	 * remove them from the capabilities.
++	 */
++	host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
++	host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50);
++
++	/*
++	 * According to erratum 'ERR-7878951', the Armada 38x SDHCI
++	 * controller has different capabilities than the ones shown
++	 * in its registers.
++	 */
++	host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
++	if (of_property_read_bool(np, "no-1-8-v")) {
++		host->caps &= ~SDHCI_CAN_VDD_180;
++		host->mmc->caps &= ~MMC_CAP_1_8V_DDR;
++	} else {
++		host->caps &= ~SDHCI_CAN_VDD_330;
++	}
++	host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_USE_SDR50_TUNING);
++
++	return 0;
++}
++
+ static void pxav3_reset(struct sdhci_host *host, u8 mask)
+ {
+ 	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+@@ -268,8 +300,8 @@ static struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev)
+ 	if (!pdata)
+ 		return NULL;
+ 
+-	of_property_read_u32(np, "mrvl,clk-delay-cycles", &clk_delay_cycles);
+-	if (clk_delay_cycles > 0)
++	if (!of_property_read_u32(np, "mrvl,clk-delay-cycles",
++				  &clk_delay_cycles))
+ 		pdata->clk_delay_cycles = clk_delay_cycles;
+ 
+ 	return pdata;
+@@ -318,15 +350,18 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
+ 	if (!IS_ERR(pxa->clk_core))
+ 		clk_prepare_enable(pxa->clk_core);
+ 
++	/* enable 1.8V DDR capability */
++	host->mmc->caps |= MMC_CAP_1_8V_DDR;
++
+ 	if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
++		ret = armada_38x_quirks(pdev, host);
++		if (ret < 0)
++			goto err_clk_get;
+ 		ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info());
+ 		if (ret < 0)
+ 			goto err_mbus_win;
+ 	}
+ 
+-	/* enable 1/8V DDR capable */
+-	host->mmc->caps |= MMC_CAP_1_8V_DDR;
+-
+ 	match = of_match_device(of_match_ptr(sdhci_pxav3_of_match), &pdev->dev);
+ 	if (match) {
+ 		ret = mmc_of_parse(host->mmc);
+@@ -365,10 +400,11 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
+ 		}
+ 	}
+ 
+-	pm_runtime_enable(&pdev->dev);
+-	pm_runtime_get_sync(&pdev->dev);
++	pm_runtime_get_noresume(&pdev->dev);
++	pm_runtime_set_active(&pdev->dev);
+ 	pm_runtime_set_autosuspend_delay(&pdev->dev, PXAV3_RPM_DELAY_MS);
+ 	pm_runtime_use_autosuspend(&pdev->dev);
++	pm_runtime_enable(&pdev->dev);
+ 	pm_suspend_ignore_children(&pdev->dev, 1);
+ 
+ 	ret = sdhci_add_host(host);
+@@ -391,8 +427,8 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
+ 	return 0;
+ 
+ err_add_host:
+-	pm_runtime_put_sync(&pdev->dev);
+ 	pm_runtime_disable(&pdev->dev);
++	pm_runtime_put_noidle(&pdev->dev);
+ err_of_parse:
+ err_cd_req:
+ err_mbus_win:
+@@ -457,11 +493,11 @@ static int sdhci_pxav3_runtime_suspend(struct device *dev)
+ 	struct sdhci_host *host = dev_get_drvdata(dev);
+ 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ 	struct sdhci_pxa *pxa = pltfm_host->priv;
+-	unsigned long flags;
++	int ret;
+ 
+-	spin_lock_irqsave(&host->lock, flags);
+-	host->runtime_suspended = true;
+-	spin_unlock_irqrestore(&host->lock, flags);
++	ret = sdhci_runtime_suspend_host(host);
++	if (ret)
++		return ret;
+ 
+ 	clk_disable_unprepare(pxa->clk_io);
+ 	if (!IS_ERR(pxa->clk_core))
+@@ -475,17 +511,12 @@ static int sdhci_pxav3_runtime_resume(struct device *dev)
+ 	struct sdhci_host *host = dev_get_drvdata(dev);
+ 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ 	struct sdhci_pxa *pxa = pltfm_host->priv;
+-	unsigned long flags;
+ 
+ 	clk_prepare_enable(pxa->clk_io);
+ 	if (!IS_ERR(pxa->clk_core))
+ 		clk_prepare_enable(pxa->clk_core);
+ 
+-	spin_lock_irqsave(&host->lock, flags);
+-	host->runtime_suspended = false;
+-	spin_unlock_irqrestore(&host->lock, flags);
+-
+-	return 0;
++	return sdhci_runtime_resume_host(host);
+ }
+ #endif
+ 
+diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+index 20915587c820..a704be0bd288 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+@@ -707,9 +707,6 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
+ 	mvmvif->uploaded = false;
+ 	mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+ 
+-	/* does this make sense at all? */
+-	mvmvif->color++;
+-
+ 	spin_lock_bh(&mvm->time_event_lock);
+ 	iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
+ 	spin_unlock_bh(&mvm->time_event_lock);
+@@ -1146,7 +1143,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
+ 
+ 	ret = iwl_mvm_power_update_mac(mvm);
+ 	if (ret)
+-		goto out_release;
++		goto out_remove_mac;
+ 
+ 	/* beacon filtering */
+ 	ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
+index c59d07567d90..650a5f0b94c7 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
+@@ -930,6 +930,11 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ 	sta_id = ba_notif->sta_id;
+ 	tid = ba_notif->tid;
+ 
++	if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
++		      tid >= IWL_MAX_TID_COUNT,
++		      "sta_id %d tid %d", sta_id, tid))
++		return 0;
++
+ 	rcu_read_lock();
+ 
+ 	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
+index 8a6c7a084aa1..76709f99a5c7 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
++++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
+@@ -722,7 +722,12 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
+ 	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
+ 			   trans_pcie->kw.dma >> 4);
+ 
+-	iwl_pcie_tx_start(trans, trans_pcie->scd_base_addr);
++	/*
++	 * Send 0 as the scd_base_addr since the device may have been
++	 * reset while we were in WoWLAN, in which case
++	 * SCD_SRAM_BASE_ADDR will contain garbage.
++	 */
++	iwl_pcie_tx_start(trans, 0);
+ }
+ 
+ /*
+diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
+index c70efb9a6e78..e25faacf58b7 100644
+--- a/drivers/net/wireless/rtlwifi/pci.c
++++ b/drivers/net/wireless/rtlwifi/pci.c
+@@ -816,11 +816,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
+ 
+ 		/* get a new skb - if fail, old one will be reused */
+ 		new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
+-		if (unlikely(!new_skb)) {
+-			pr_err("Allocation of new skb failed in %s\n",
+-			       __func__);
++		if (unlikely(!new_skb))
+ 			goto no_new;
+-		}
+ 		if (rtlpriv->use_new_trx_flow) {
+ 			buffer_desc =
+ 			  &rtlpci->rx_ring[rxring_idx].buffer_desc
+diff --git a/drivers/net/wireless/rtlwifi/pci.h b/drivers/net/wireless/rtlwifi/pci.h
+index 5e832306dba9..d4567d12e07e 100644
+--- a/drivers/net/wireless/rtlwifi/pci.h
++++ b/drivers/net/wireless/rtlwifi/pci.h
+@@ -325,4 +325,11 @@ static inline void pci_write32_async(struct rtl_priv *rtlpriv,
+ 	writel(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
+ }
+ 
++static inline u16 calc_fifo_space(u16 rp, u16 wp)
++{
++	if (rp <= wp)
++		return RTL_PCI_MAX_RX_COUNT - 1 + rp - wp;
++	return rp - wp - 1;
++}
++
+ #endif
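
A standalone sketch of the ring arithmetic behind the calc_fifo_space()
helper added above: it computes the free slots in a circular FIFO where one
slot is always kept empty so a full ring can be told apart from an empty
one. The RTL_PCI_MAX_RX_COUNT value below is an assumption for illustration
only; the driver defines the real constant elsewhere.

#include <assert.h>
#include <stdint.h>

#define RTL_PCI_MAX_RX_COUNT 512	/* illustrative; real value is driver-defined */

static inline uint16_t calc_fifo_space(uint16_t rp, uint16_t wp)
{
	if (rp <= wp)
		return RTL_PCI_MAX_RX_COUNT - 1 + rp - wp;
	return rp - wp - 1;
}

int main(void)
{
	/* Empty ring (rp == wp): every slot but the reserved one is free. */
	assert(calc_fifo_space(0, 0) == RTL_PCI_MAX_RX_COUNT - 1);
	/* Writer one slot ahead of the reader: one slot in use. */
	assert(calc_fifo_space(0, 1) == RTL_PCI_MAX_RX_COUNT - 2);
	/* Writer immediately behind the reader: ring is full. */
	assert(calc_fifo_space(1, 0) == 0);
	return 0;
}
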
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
+index 45c128b91f7f..c5d4b8013cde 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
+@@ -666,7 +666,6 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
+ 	struct sk_buff *skb = NULL;
+ 
+ 	u32 totalpacketlen;
+-	bool rtstatus;
+ 	u8 u1rsvdpageloc[5] = { 0 };
+ 	bool b_dlok = false;
+ 
+@@ -728,10 +727,7 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
+ 	memcpy((u8 *)skb_put(skb, totalpacketlen),
+ 	       &reserved_page_packet, totalpacketlen);
+ 
+-	rtstatus = rtl_cmd_send_packet(hw, skb);
+-
+-	if (rtstatus)
+-		b_dlok = true;
++	b_dlok = true;
+ 
+ 	if (b_dlok) {
+ 		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD ,
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
+index 1a87edca2c3f..b461b3128da5 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
+@@ -85,29 +85,6 @@ static void _rtl92ee_enable_bcn_sub_func(struct ieee80211_hw *hw)
+ 	_rtl92ee_set_bcn_ctrl_reg(hw, 0, BIT(1));
+ }
+ 
+-static void _rtl92ee_return_beacon_queue_skb(struct ieee80211_hw *hw)
+-{
+-	struct rtl_priv *rtlpriv = rtl_priv(hw);
+-	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+-	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+-	while (skb_queue_len(&ring->queue)) {
+-		struct rtl_tx_buffer_desc *entry =
+-						&ring->buffer_desc[ring->idx];
+-		struct sk_buff *skb = __skb_dequeue(&ring->queue);
+-
+-		pci_unmap_single(rtlpci->pdev,
+-				 rtlpriv->cfg->ops->get_desc(
+-				 (u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
+-				 skb->len, PCI_DMA_TODEVICE);
+-		kfree_skb(skb);
+-		ring->idx = (ring->idx + 1) % ring->entries;
+-	}
+-	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+-}
+-
+ static void _rtl92ee_disable_bcn_sub_func(struct ieee80211_hw *hw)
+ {
+ 	_rtl92ee_set_bcn_ctrl_reg(hw, BIT(1), 0);
+@@ -403,9 +380,6 @@ static void _rtl92ee_download_rsvd_page(struct ieee80211_hw *hw)
+ 		rtl_write_byte(rtlpriv, REG_DWBCN0_CTRL + 2,
+ 			       bcnvalid_reg | BIT(0));
+ 
+-		/* Return Beacon TCB */
+-		_rtl92ee_return_beacon_queue_skb(hw);
+-
+ 		/* download rsvd page */
+ 		rtl92ee_set_fw_rsvdpagepkt(hw, false);
+ 
+@@ -1163,6 +1137,139 @@ void rtl92ee_enable_hw_security_config(struct ieee80211_hw *hw)
+ 	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
+ }
+ 
++static bool _rtl8192ee_check_pcie_dma_hang(struct rtl_priv *rtlpriv)
++{
++	u8 tmp;
++
++	/* write reg 0x350 Bit[26]=1. Enable debug port. */
++	tmp = rtl_read_byte(rtlpriv, REG_BACKDOOR_DBI_DATA + 3);
++	if (!(tmp & BIT(2))) {
++		rtl_write_byte(rtlpriv, REG_BACKDOOR_DBI_DATA + 3,
++			       tmp | BIT(2));
++		mdelay(100); /* Suggested by DD Justin_tsai. */
++	}
++
++	/* read reg 0x350 Bit[25] if 1 : RX hang
++	 * read reg 0x350 Bit[24] if 1 : TX hang
++	 */
++	tmp = rtl_read_byte(rtlpriv, REG_BACKDOOR_DBI_DATA + 3);
++	if ((tmp & BIT(0)) || (tmp & BIT(1))) {
++		RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
++			 "CheckPcieDMAHang8192EE(): true!!\n");
++		return true;
++	}
++	return false;
++}
++
++static void _rtl8192ee_reset_pcie_interface_dma(struct rtl_priv *rtlpriv,
++						bool mac_power_on)
++{
++	u8 tmp;
++	bool release_mac_rx_pause;
++	u8 backup_pcie_dma_pause;
++
++	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
++		 "ResetPcieInterfaceDMA8192EE()\n");
++
++	/* Revise Note: Follow the document "PCIe RX DMA Hang Reset Flow_v03"
++	 * released by SD1 Alan.
++	 */
++
++	/* 1. disable register write lock
++	 *	write 0x1C bit[1:0] = 2'h0
++	 *	write 0xCC bit[2] = 1'b1
++	 */
++	tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL);
++	tmp &= ~(BIT(1) | BIT(0));
++	rtl_write_byte(rtlpriv, REG_RSV_CTRL, tmp);
++	tmp = rtl_read_byte(rtlpriv, REG_PMC_DBG_CTRL2);
++	tmp |= BIT(2);
++	rtl_write_byte(rtlpriv, REG_PMC_DBG_CTRL2, tmp);
++
++	/* 2. Check and pause TRX DMA
++	 *	write 0x284 bit[18] = 1'b1
++	 *	write 0x301 = 0xFF
++	 */
++	tmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL);
++	if (tmp & BIT(2)) {
++		/* Already paused before entering this function for another reason. */
++		release_mac_rx_pause = false;
++	} else {
++		rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL, (tmp | BIT(2)));
++		release_mac_rx_pause = true;
++	}
++
++	backup_pcie_dma_pause = rtl_read_byte(rtlpriv, REG_PCIE_CTRL_REG + 1);
++	if (backup_pcie_dma_pause != 0xFF)
++		rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1, 0xFF);
++
++	if (mac_power_on) {
++		/* 3. reset TRX function
++		 *	write 0x100 = 0x00
++		 */
++		rtl_write_byte(rtlpriv, REG_CR, 0);
++	}
++
++	/* 4. Reset PCIe DMA
++	 *	write 0x003 bit[0] = 0
++	 */
++	tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
++	tmp &= ~(BIT(0));
++	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp);
++
++	/* 5. Enable PCIe DMA
++	 *	write 0x003 bit[0] = 1
++	 */
++	tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
++	tmp |= BIT(0);
++	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp);
++
++	if (mac_power_on) {
++		/* 6. enable TRX function
++		 *	write 0x100 = 0xFF
++		 */
++		rtl_write_byte(rtlpriv, REG_CR, 0xFF);
++
++		/* We should init LLT & RQPN and
++		 * prepare Tx/Rx descriptor addresses later
++		 * because the MAC function is reset.
++		 */
++	}
++
++	/* 7. Restore PCIe autoload down bit
++	 *	write 0xF8 bit[17] = 1'b1
++	 */
++	tmp = rtl_read_byte(rtlpriv, REG_MAC_PHY_CTRL_NORMAL + 2);
++	tmp |= BIT(1);
++	rtl_write_byte(rtlpriv, REG_MAC_PHY_CTRL_NORMAL + 2, tmp);
++
++	/* In the MAC power-on state, BB and RF may also be ON;
++	 * if we released TRX DMA here, packets could immediately
++	 * start being transmitted/received, so we release
++	 * Tx/Rx DMA later.
++	 */
++	if (!mac_power_on) {
++		/* 8. release TRX DMA
++		 *	write 0x284 bit[18] = 1'b0
++		 *	write 0x301 = 0x00
++		 */
++		if (release_mac_rx_pause) {
++			tmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL);
++			rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL,
++				       (tmp & (~BIT(2))));
++		}
++		rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1,
++			       backup_pcie_dma_pause);
++	}
++
++	/* 9. lock system register
++	 *	write 0xCC bit[2] = 1'b0
++	 */
++	tmp = rtl_read_byte(rtlpriv, REG_PMC_DBG_CTRL2);
++	tmp &= ~(BIT(2));
++	rtl_write_byte(rtlpriv, REG_PMC_DBG_CTRL2, tmp);
++}
++
+ int rtl92ee_hw_init(struct ieee80211_hw *hw)
+ {
+ 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+@@ -1188,6 +1295,13 @@ int rtl92ee_hw_init(struct ieee80211_hw *hw)
+ 		rtlhal->fw_ps_state = FW_PS_STATE_ALL_ON_92E;
+ 	}
+ 
++	if (_rtl8192ee_check_pcie_dma_hang(rtlpriv)) {
++		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "92ee dma hang!\n");
++		_rtl8192ee_reset_pcie_interface_dma(rtlpriv,
++						    rtlhal->mac_func_enable);
++		rtlhal->mac_func_enable = false;
++	}
++
+ 	rtstatus = _rtl92ee_init_mac(hw);
+ 
+ 	rtl_write_byte(rtlpriv, 0x577, 0x03);
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/reg.h b/drivers/net/wireless/rtlwifi/rtl8192ee/reg.h
+index 3f2a9596e7cd..1eaa1fab550d 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192ee/reg.h
++++ b/drivers/net/wireless/rtlwifi/rtl8192ee/reg.h
+@@ -77,9 +77,11 @@
+ #define REG_HIMRE				0x00B8
+ #define REG_HISRE				0x00BC
+ 
++#define REG_PMC_DBG_CTRL2			0x00CC
+ #define REG_EFUSE_ACCESS			0x00CF
+ #define REG_HPON_FSM				0x00EC
+ #define REG_SYS_CFG1				0x00F0
++#define REG_MAC_PHY_CTRL_NORMAL			0x00F8
+ #define REG_SYS_CFG2				0x00FC
+ 
+ #define REG_CR					0x0100
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c
+index 2fcbef1d029f..00690040be37 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c
+@@ -512,6 +512,10 @@ bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw,
+ 	struct ieee80211_hdr *hdr;
+ 	u32 phystatus = GET_RX_DESC_PHYST(pdesc);
+ 
++	if (GET_RX_STATUS_DESC_RPT_SEL(pdesc) == 0)
++		status->packet_report_type = NORMAL_RX;
++	else
++		status->packet_report_type = C2H_PACKET;
+ 	status->length = (u16)GET_RX_DESC_PKT_LEN(pdesc);
+ 	status->rx_drvinfo_size = (u8)GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
+ 				  RX_DRV_INFO_SIZE_UNIT;
+@@ -654,14 +658,7 @@ u16 rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw, u8 queue_index)
+ 	if (!start_rx)
+ 		return 0;
+ 
+-	if ((last_read_point > (RX_DESC_NUM_92E / 2)) &&
+-	    (read_point <= (RX_DESC_NUM_92E / 2))) {
+-		remind_cnt = RX_DESC_NUM_92E - write_point;
+-	} else {
+-		remind_cnt = (read_point >= write_point) ?
+-			     (read_point - write_point) :
+-			     (RX_DESC_NUM_92E - write_point + read_point);
+-	}
++	remind_cnt = calc_fifo_space(read_point, write_point);
+ 
+ 	if (remind_cnt == 0)
+ 		return 0;
+@@ -1207,8 +1204,7 @@ bool rtl92ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index)
+ 	static u8 stop_report_cnt;
+ 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
+ 
+-	/*checking Read/Write Point each interrupt wastes CPU */
+-	if (stop_report_cnt > 15 || !rtlpriv->link_info.busytraffic) {
++	{
+ 		u16 point_diff = 0;
+ 		u16 cur_tx_rp, cur_tx_wp;
+ 		u32 tmpu32 = 0;
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h
+index 6f9be1c7515c..8effef9b13dd 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h
++++ b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h
+@@ -542,6 +542,8 @@
+ 	LE_BITS_TO_4BYTE(__pdesc+8, 12, 4)
+ #define GET_RX_DESC_RX_IS_QOS(__pdesc)			\
+ 	LE_BITS_TO_4BYTE(__pdesc+8, 16, 1)
++#define GET_RX_STATUS_DESC_RPT_SEL(__pdesc)		\
++	LE_BITS_TO_4BYTE(__pdesc+8, 28, 1)
+ 
+ #define GET_RX_DESC_RXMCS(__pdesc)			\
+ 	LE_BITS_TO_4BYTE(__pdesc+12, 0, 7)
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index 887e6bd95af7..09a66bad8018 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -1383,7 +1383,7 @@ static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
+ 	if (add_uevent_var(env, "PCI_SLOT_NAME=%s", pci_name(pdev)))
+ 		return -ENOMEM;
+ 
+-	if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x",
++	if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X",
+ 			   pdev->vendor, pdev->device,
+ 			   pdev->subsystem_vendor, pdev->subsystem_device,
+ 			   (u8)(pdev->class >> 16), (u8)(pdev->class >> 8),
+diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
+index f955edb9bea7..eb0ad530dc43 100644
+--- a/drivers/pci/rom.c
++++ b/drivers/pci/rom.c
+@@ -71,6 +71,7 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
+ {
+ 	void __iomem *image;
+ 	int last_image;
++	unsigned length;
+ 
+ 	image = rom;
+ 	do {
+@@ -93,9 +94,9 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
+ 		if (readb(pds + 3) != 'R')
+ 			break;
+ 		last_image = readb(pds + 21) & 0x80;
+-		/* this length is reliable */
+-		image += readw(pds + 16) * 512;
+-	} while (!last_image);
++		length = readw(pds + 16);
++		image += length * 512;
++	} while (length && !last_image);
+ 
+ 	/* never return a size larger than the PCI resource window */
+ 	/* there are known ROMs that get the size wrong */
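
A minimal userspace model of the termination bug fixed above, assuming a
simplified image-chain layout (the real code walks PCI option-ROM headers):
each image reports its own length, and a corrupted header reporting length
zero never advances the cursor, so the length must be part of the loop
condition.

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for a PCI option-ROM image header. */
struct image_hdr {
	uint16_t len_units;	/* image length in 512-byte units; 0 if corrupted */
	uint8_t  last;		/* nonzero on the final image of the chain */
};

static size_t chain_length(const struct image_hdr *hdrs, size_t max)
{
	size_t off = 0;
	unsigned int length;
	int last;

	do {
		length = hdrs[off].len_units;
		last = hdrs[off].last;
		off += length;	/* never advances when length == 0 ... */
	} while (off < max && length && !last);	/* ... so test it here */

	return off;
}

int main(void)
{
	struct image_hdr chain[8] = { {2, 0}, {0}, {3, 0}, {0}, {0}, {1, 1} };
	printf("%zu units\n", chain_length(chain, 8));	/* prints 6: 2 + 3 + 1 */
	return 0;
}
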
+diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
+index ff765d8e1a09..ce364a41842a 100644
+--- a/drivers/platform/x86/samsung-laptop.c
++++ b/drivers/platform/x86/samsung-laptop.c
+@@ -353,6 +353,7 @@ struct samsung_quirks {
+ 	bool broken_acpi_video;
+ 	bool four_kbd_backlight_levels;
+ 	bool enable_kbd_backlight;
++	bool use_native_backlight;
+ };
+ 
+ static struct samsung_quirks samsung_unknown = {};
+@@ -361,6 +362,10 @@ static struct samsung_quirks samsung_broken_acpi_video = {
+ 	.broken_acpi_video = true,
+ };
+ 
++static struct samsung_quirks samsung_use_native_backlight = {
++	.use_native_backlight = true,
++};
++
+ static struct samsung_quirks samsung_np740u3e = {
+ 	.four_kbd_backlight_levels = true,
+ 	.enable_kbd_backlight = true,
+@@ -1507,7 +1512,7 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "N150P"),
+ 		DMI_MATCH(DMI_BOARD_NAME, "N150P"),
+ 		},
+-	 .driver_data = &samsung_broken_acpi_video,
++	 .driver_data = &samsung_use_native_backlight,
+ 	},
+ 	{
+ 	 .callback = samsung_dmi_matched,
+@@ -1517,7 +1522,7 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"),
+ 		DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"),
+ 		},
+-	 .driver_data = &samsung_broken_acpi_video,
++	 .driver_data = &samsung_use_native_backlight,
+ 	},
+ 	{
+ 	 .callback = samsung_dmi_matched,
+@@ -1557,7 +1562,7 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "N250P"),
+ 		DMI_MATCH(DMI_BOARD_NAME, "N250P"),
+ 		},
+-	 .driver_data = &samsung_broken_acpi_video,
++	 .driver_data = &samsung_use_native_backlight,
+ 	},
+ 	{
+ 	 .callback = samsung_dmi_matched,
+@@ -1616,6 +1621,15 @@ static int __init samsung_init(void)
+ 		pr_info("Disabling ACPI video driver\n");
+ 		acpi_video_unregister();
+ 	}
++
++	if (samsung->quirks->use_native_backlight) {
++		pr_info("Using native backlight driver\n");
++		/* Tell acpi-video to not handle the backlight */
++		acpi_video_dmi_promote_vendor();
++		acpi_video_unregister();
++		/* And also do not handle it ourselves */
++		samsung->handle_backlight = false;
++	}
+ #endif
+ 
+ 	ret = samsung_platform_init(samsung);
+diff --git a/drivers/power/88pm860x_charger.c b/drivers/power/88pm860x_charger.c
+index 650930e4fa79..734ec4afa14d 100644
+--- a/drivers/power/88pm860x_charger.c
++++ b/drivers/power/88pm860x_charger.c
+@@ -711,6 +711,7 @@ static int pm860x_charger_probe(struct platform_device *pdev)
+ 	return 0;
+ 
+ out_irq:
++	power_supply_unregister(&info->usb);
+ 	while (--i >= 0)
+ 		free_irq(info->irq[i], info);
+ out:
+diff --git a/drivers/power/bq24190_charger.c b/drivers/power/bq24190_charger.c
+index ad3ff8fbfbbb..e4c95e1a6733 100644
+--- a/drivers/power/bq24190_charger.c
++++ b/drivers/power/bq24190_charger.c
+@@ -929,7 +929,7 @@ static void bq24190_charger_init(struct power_supply *charger)
+ 	charger->properties = bq24190_charger_properties;
+ 	charger->num_properties = ARRAY_SIZE(bq24190_charger_properties);
+ 	charger->supplied_to = bq24190_charger_supplied_to;
+-	charger->num_supplies = ARRAY_SIZE(bq24190_charger_supplied_to);
++	charger->num_supplicants = ARRAY_SIZE(bq24190_charger_supplied_to);
+ 	charger->get_property = bq24190_charger_get_property;
+ 	charger->set_property = bq24190_charger_set_property;
+ 	charger->property_is_writeable = bq24190_charger_property_is_writeable;
+diff --git a/drivers/power/gpio-charger.c b/drivers/power/gpio-charger.c
+index aef74bdf7ab3..b7424c8501f1 100644
+--- a/drivers/power/gpio-charger.c
++++ b/drivers/power/gpio-charger.c
+@@ -229,7 +229,7 @@ static int gpio_charger_suspend(struct device *dev)
+ 
+ 	if (device_may_wakeup(dev))
+ 		gpio_charger->wakeup_enabled =
+-			enable_irq_wake(gpio_charger->irq);
++			!enable_irq_wake(gpio_charger->irq);
+ 
+ 	return 0;
+ }
+@@ -239,7 +239,7 @@ static int gpio_charger_resume(struct device *dev)
+ 	struct platform_device *pdev = to_platform_device(dev);
+ 	struct gpio_charger *gpio_charger = platform_get_drvdata(pdev);
+ 
+-	if (gpio_charger->wakeup_enabled)
++	if (device_may_wakeup(dev) && gpio_charger->wakeup_enabled)
+ 		disable_irq_wake(gpio_charger->irq);
+ 	power_supply_changed(&gpio_charger->charger);
+ 
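
The gpio-charger hunk above fixes a classic inverted-flag bug:
enable_irq_wake() returns 0 on success, so storing its raw return value in a
boolean called wakeup_enabled records the opposite of what happened. A small
sketch with a stubbed API (the stub is an assumption; the real function
lives in the kernel):

#include <stdbool.h>
#include <stdio.h>

/* Stub standing in for the kernel's enable_irq_wake(): 0 means success. */
static int enable_irq_wake(int irq)
{
	(void)irq;
	return 0;
}

static void disable_irq_wake(int irq)
{
	printf("irq %d wake disabled\n", irq);
}

int main(void)
{
	/* Buggy pattern: flag ends up false even though wake was enabled. */
	bool wakeup_enabled = enable_irq_wake(7);
	printf("buggy flag: %d\n", (int)wakeup_enabled);

	/* Fixed pattern: negate the 0-on-success return code. */
	wakeup_enabled = !enable_irq_wake(7);
	if (wakeup_enabled)
		disable_irq_wake(7);	/* balanced on the resume path */
	return 0;
}
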
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index ff283d23788a..d63f04147a59 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -1689,22 +1689,66 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
+ 	return 0;
+ }
+ 
++/*
++ * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after
++ *                                       the adapter has been killed
++ * @instance:				Adapter soft state
++ */
++void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
++{
++	int i;
++	struct megasas_cmd *cmd_mfi;
++	struct megasas_cmd_fusion *cmd_fusion;
++	struct fusion_context *fusion = instance->ctrl_context;
++
++	/* Find all outstanding ioctls */
++	if (fusion) {
++		for (i = 0; i < instance->max_fw_cmds; i++) {
++			cmd_fusion = fusion->cmd_list[i];
++			if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
++				cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
++				if (cmd_mfi->sync_cmd &&
++					cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)
++					megasas_complete_cmd(instance,
++							     cmd_mfi, DID_OK);
++			}
++		}
++	} else {
++		for (i = 0; i < instance->max_fw_cmds; i++) {
++			cmd_mfi = instance->cmd_list[i];
++			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
++				MFI_CMD_ABORT)
++				megasas_complete_cmd(instance, cmd_mfi, DID_OK);
++		}
++	}
++}
++
++
+ void megaraid_sas_kill_hba(struct megasas_instance *instance)
+ {
++	/* Set critical error to block I/O & ioctls in case caller didn't */
++	instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
++	/* Wait 1 second to ensure that any I/Os or ioctls being built have posted */
++	msleep(1000);
+ 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+-	    (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
+-	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+-	    (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
+-	    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+-	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
+-		writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
++		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
++		(instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
++		(instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
++		(instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
++		(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
++		writel(MFI_STOP_ADP,
++			&instance->reg_set->doorbell);
+ 		/* Flush */
+ 		readl(&instance->reg_set->doorbell);
+ 		if (instance->mpio && instance->requestorId)
+ 			memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
+ 	} else {
+-		writel(MFI_STOP_ADP, &instance->reg_set->inbound_doorbell);
++		writel(MFI_STOP_ADP,
++			&instance->reg_set->inbound_doorbell);
+ 	}
++	/* Complete outstanding ioctls when adapter is killed */
++	megasas_complete_outstanding_ioctls(instance);
+ }
+ 
+  /**
+@@ -3028,10 +3072,9 @@ megasas_issue_pending_cmds_again(struct megasas_instance *instance)
+ 					"was tried multiple times during reset."
+ 					"Shutting down the HBA\n",
+ 					cmd, cmd->scmd, cmd->sync_cmd);
++				instance->instancet->disable_intr(instance);
++				atomic_set(&instance->fw_reset_no_pci_access, 1);
+ 				megaraid_sas_kill_hba(instance);
+-
+-				instance->adprecovery =
+-						MEGASAS_HW_CRITICAL_ERROR;
+ 				return;
+ 			}
+ 		}
+@@ -3165,8 +3208,8 @@ process_fw_state_change_wq(struct work_struct *work)
+ 		if (megasas_transition_to_ready(instance, 1)) {
+ 			printk(KERN_NOTICE "megaraid_sas:adapter not ready\n");
+ 
++			atomic_set(&instance->fw_reset_no_pci_access, 1);
+ 			megaraid_sas_kill_hba(instance);
+-			instance->adprecovery	= MEGASAS_HW_CRITICAL_ERROR;
+ 			return ;
+ 		}
+ 
+@@ -3547,7 +3590,6 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
+ 	int i;
+ 	u32 max_cmd;
+ 	u32 sge_sz;
+-	u32 sgl_sz;
+ 	u32 total_sz;
+ 	u32 frame_count;
+ 	struct megasas_cmd *cmd;
+@@ -3566,24 +3608,23 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
+ 	}
+ 
+ 	/*
+-	 * Calculated the number of 64byte frames required for SGL
+-	 */
+-	sgl_sz = sge_sz * instance->max_num_sge;
+-	frame_count = (sgl_sz + MEGAMFI_FRAME_SIZE - 1) / MEGAMFI_FRAME_SIZE;
+-	frame_count = 15;
+-
+-	/*
+-	 * We need one extra frame for the MFI command
++	 * For MFI controllers:
++	 * max_num_sge = 60
++	 * max_sge_sz  = 16 bytes (sizeof megasas_sge_skinny)
++	 * Total 960 bytes (15 MFI frames of 64 bytes)
++	 *
++	 * Fusion adapters require only 3 extra frames:
++	 * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
++	 * max_sge_sz  = 12 bytes (sizeof megasas_sge64)
++	 * Total 192 bytes (3 MFI frames of 64 bytes)
+ 	 */
+-	frame_count++;
+-
++	frame_count = instance->ctrl_context ? (3 + 1) : (15 + 1);
+ 	total_sz = MEGAMFI_FRAME_SIZE * frame_count;
+ 	/*
+ 	 * Use DMA pool facility provided by PCI layer
+ 	 */
+ 	instance->frame_dma_pool = pci_pool_create("megasas frame pool",
+-						   instance->pdev, total_sz, 64,
+-						   0);
++					instance->pdev, total_sz, 256, 0);
+ 
+ 	if (!instance->frame_dma_pool) {
+ 		printk(KERN_DEBUG "megasas: failed to setup frame pool\n");
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
+index 460c6a3d4ade..4f72287860ee 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
++++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
+@@ -172,6 +172,7 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
+ 	struct MR_FW_RAID_MAP_ALL     *fw_map_old    = NULL;
+ 	struct MR_FW_RAID_MAP         *pFwRaidMap    = NULL;
+ 	int i;
++	u16 ld_count;
+ 
+ 
+ 	struct MR_DRV_RAID_MAP_ALL *drv_map =
+@@ -191,9 +192,10 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
+ 		fw_map_old = (struct MR_FW_RAID_MAP_ALL *)
+ 			fusion->ld_map[(instance->map_id & 1)];
+ 		pFwRaidMap = &fw_map_old->raidMap;
++		ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount);
+ 
+ #if VD_EXT_DEBUG
+-		for (i = 0; i < le16_to_cpu(pFwRaidMap->ldCount); i++) {
++		for (i = 0; i < ld_count; i++) {
+ 			dev_dbg(&instance->pdev->dev, "(%d) :Index 0x%x "
+ 				"Target Id 0x%x Seq Num 0x%x Size 0/%llx\n",
+ 				instance->unique_id, i,
+@@ -205,12 +207,15 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
+ 
+ 		memset(drv_map, 0, fusion->drv_map_sz);
+ 		pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
+-		pDrvRaidMap->ldCount = (__le16)pFwRaidMap->ldCount;
++		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
+ 		pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
+ 		for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++)
+ 			pDrvRaidMap->ldTgtIdToLd[i] =
+ 				(u8)pFwRaidMap->ldTgtIdToLd[i];
+-		for (i = 0; i < le16_to_cpu(pDrvRaidMap->ldCount); i++) {
++		for (i = (MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS);
++			i < MAX_LOGICAL_DRIVES_EXT; i++)
++			pDrvRaidMap->ldTgtIdToLd[i] = 0xff;
++		for (i = 0; i < ld_count; i++) {
+ 			pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];
+ #if VD_EXT_DEBUG
+ 			dev_dbg(&instance->pdev->dev,
+@@ -252,7 +257,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
+ 	struct LD_LOAD_BALANCE_INFO *lbInfo;
+ 	PLD_SPAN_INFO ldSpanInfo;
+ 	struct MR_LD_RAID         *raid;
+-	int ldCount, num_lds;
++	u16 ldCount, num_lds;
+ 	u16 ld;
+ 	u32 expected_size;
+ 
+@@ -356,7 +361,7 @@ static int getSpanInfo(struct MR_DRV_RAID_MAP_ALL *map,
+ 
+ 	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
+ 		ld = MR_TargetIdToLdGet(ldCount, map);
+-			if (ld >= MAX_LOGICAL_DRIVES_EXT)
++			if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
+ 				continue;
+ 		raid = MR_LdRaidGet(ld, map);
+ 		dev_dbg(&instance->pdev->dev, "LD %x: span_depth=%x\n",
+@@ -1157,7 +1162,7 @@ void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
+ 
+ 	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
+ 		ld = MR_TargetIdToLdGet(ldCount, map);
+-		if (ld >= MAX_LOGICAL_DRIVES_EXT)
++		if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
+ 			continue;
+ 		raid = MR_LdRaidGet(ld, map);
+ 		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+index 71557f64bb5e..0764d20efb26 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+@@ -103,6 +103,8 @@ megasas_enable_intr_fusion(struct megasas_instance *instance)
+ {
+ 	struct megasas_register_set __iomem *regs;
+ 	regs = instance->reg_set;
++
++	instance->mask_interrupts = 0;
+ 	/* For Thunderbolt/Invader also clear intr on enable */
+ 	writel(~0, &regs->outbound_intr_status);
+ 	readl(&regs->outbound_intr_status);
+@@ -111,7 +113,6 @@ megasas_enable_intr_fusion(struct megasas_instance *instance)
+ 
+ 	/* Dummy readl to force pci flush */
+ 	readl(&regs->outbound_intr_mask);
+-	instance->mask_interrupts = 0;
+ }
+ 
+ /**
+@@ -698,12 +699,11 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
+ 		cpu_to_le32(lower_32_bits(ioc_init_handle));
+ 	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));
+ 
+-	req_desc.Words = 0;
++	req_desc.u.low = cpu_to_le32(lower_32_bits(cmd->frame_phys_addr));
++	req_desc.u.high = cpu_to_le32(upper_32_bits(cmd->frame_phys_addr));
+ 	req_desc.MFAIo.RequestFlags =
+ 		(MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
+-		 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+-	cpu_to_le32s((u32 *)&req_desc.MFAIo);
+-	req_desc.Words |= cpu_to_le64(cmd->frame_phys_addr);
++		MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ 
+ 	/*
+ 	 * disable the intr before firing the init frame
+@@ -1717,9 +1717,19 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
+ 		if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS)
+ 			goto NonFastPath;
+ 
++		/*
++		 * For older firmware, the driver should not access
++		 * ldTgtIdToLd beyond index 127; for Extended VD firmware,
++		 * it should not go beyond index 255.
++		 */
++
++		if ((!fusion->fast_path_io) ||
++			(device_id >= instance->fw_supported_vd_count))
++			goto NonFastPath;
++
+ 		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
+-		if ((ld >= instance->fw_supported_vd_count) ||
+-			(!fusion->fast_path_io))
++
++		if (ld >= instance->fw_supported_vd_count)
+ 			goto NonFastPath;
+ 
+ 		raid = MR_LdRaidGet(ld, local_map_ptr);
+@@ -2612,7 +2622,6 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
+ 				instance->host->host_no);
+ 			megaraid_sas_kill_hba(instance);
+ 			instance->skip_heartbeat_timer_del = 1;
+-			instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
+ 			retval = FAILED;
+ 			goto out;
+ 		}
+@@ -2808,8 +2817,6 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
+ 				dev_info(&instance->pdev->dev,
+ 					"Failed from %s %d\n",
+ 					__func__, __LINE__);
+-				instance->adprecovery =
+-					MEGASAS_HW_CRITICAL_ERROR;
+ 				megaraid_sas_kill_hba(instance);
+ 				retval = FAILED;
+ 			}
+@@ -2858,7 +2865,6 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
+ 		       "adapter scsi%d.\n", instance->host->host_no);
+ 		megaraid_sas_kill_hba(instance);
+ 		instance->skip_heartbeat_timer_del = 1;
+-		instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
+ 		retval = FAILED;
+ 	} else {
+ 		/* For VF: Restart HB timer if we didn't OCR */
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
+index 5ab7daee11be..56e6db2d5874 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
+@@ -306,14 +306,9 @@ struct MPI2_RAID_SCSI_IO_REQUEST {
+  * MPT RAID MFA IO Descriptor.
+  */
+ struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR {
+-#if   defined(__BIG_ENDIAN_BITFIELD)
+-	u32     MessageAddress1:24; /* bits 31:8*/
+-	u32     RequestFlags:8;
+-#else
+ 	u32     RequestFlags:8;
+-	u32     MessageAddress1:24; /* bits 31:8*/
+-#endif
+-	u32     MessageAddress2;      /* bits 61:32 */
++	u32     MessageAddress1:24;
++	u32     MessageAddress2;
+ };
+ 
+ /* Default Request Descriptor */
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index b14f64cb9724..763bffe23517 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -1350,6 +1350,17 @@ sg_rq_end_io(struct request *rq, int uptodate)
+ 	}
+ 	/* Rely on write phase to clean out srp status values, so no "else" */
+ 
++	/*
++	 * Free the request as soon as it is complete so that its resources
++	 * can be reused without waiting for userspace to read() the
++	 * result.  But keep the associated bio (if any) around until
++	 * blk_rq_unmap_user() can be called from user context.
++	 */
++	srp->rq = NULL;
++	if (rq->cmd != rq->__cmd)
++		kfree(rq->cmd);
++	__blk_put_request(rq->q, rq);
++
+ 	write_lock_irqsave(&sfp->rq_list_lock, iflags);
+ 	if (unlikely(srp->orphan)) {
+ 		if (sfp->keep_orphan)
+@@ -1684,7 +1695,22 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
+ 			return -ENOMEM;
+ 	}
+ 
+-	rq = blk_get_request(q, rw, GFP_ATOMIC);
++	/*
++	 * NOTE
++	 *
++	 * With scsi-mq enabled, there are a fixed number of preallocated
++	 * requests equal in number to shost->can_queue.  If all of the
++	 * preallocated requests are already in use, then using GFP_ATOMIC with
++	 * blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL
++	 * will cause blk_get_request() to sleep until an active command
++	 * completes, freeing up a request.  Neither option is ideal, but
++	 * GFP_KERNEL is the better choice to prevent userspace from getting an
++	 * unexpected EWOULDBLOCK.
++	 *
++	 * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
++	 * does not sleep except under memory pressure.
++	 */
++	rq = blk_get_request(q, rw, GFP_KERNEL);
+ 	if (IS_ERR(rq)) {
+ 		kfree(long_cmdp);
+ 		return PTR_ERR(rq);
+@@ -1777,10 +1803,10 @@ sg_finish_rem_req(Sg_request *srp)
+ 	SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
+ 				      "sg_finish_rem_req: res_used=%d\n",
+ 				      (int) srp->res_used));
+-	if (srp->rq) {
+-		if (srp->bio)
+-			ret = blk_rq_unmap_user(srp->bio);
++	if (srp->bio)
++		ret = blk_rq_unmap_user(srp->bio);
+ 
++	if (srp->rq) {
+ 		if (srp->rq->cmd != srp->rq->__cmd)
+ 			kfree(srp->rq->cmd);
+ 		blk_put_request(srp->rq);
+diff --git a/drivers/target/iscsi/iscsi_target_tq.c b/drivers/target/iscsi/iscsi_target_tq.c
+index 601e9cc61e98..bb2890e79ca0 100644
+--- a/drivers/target/iscsi/iscsi_target_tq.c
++++ b/drivers/target/iscsi/iscsi_target_tq.c
+@@ -24,36 +24,22 @@
+ #include "iscsi_target_tq.h"
+ #include "iscsi_target.h"
+ 
+-static LIST_HEAD(active_ts_list);
+ static LIST_HEAD(inactive_ts_list);
+-static DEFINE_SPINLOCK(active_ts_lock);
+ static DEFINE_SPINLOCK(inactive_ts_lock);
+ static DEFINE_SPINLOCK(ts_bitmap_lock);
+ 
+-static void iscsi_add_ts_to_active_list(struct iscsi_thread_set *ts)
+-{
+-	spin_lock(&active_ts_lock);
+-	list_add_tail(&ts->ts_list, &active_ts_list);
+-	iscsit_global->active_ts++;
+-	spin_unlock(&active_ts_lock);
+-}
+-
+ static void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts)
+ {
++	if (!list_empty(&ts->ts_list)) {
++		WARN_ON(1);
++		return;
++	}
+ 	spin_lock(&inactive_ts_lock);
+ 	list_add_tail(&ts->ts_list, &inactive_ts_list);
+ 	iscsit_global->inactive_ts++;
+ 	spin_unlock(&inactive_ts_lock);
+ }
+ 
+-static void iscsi_del_ts_from_active_list(struct iscsi_thread_set *ts)
+-{
+-	spin_lock(&active_ts_lock);
+-	list_del(&ts->ts_list);
+-	iscsit_global->active_ts--;
+-	spin_unlock(&active_ts_lock);
+-}
+-
+ static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
+ {
+ 	struct iscsi_thread_set *ts;
+@@ -66,7 +52,7 @@ static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
+ 
+ 	ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set, ts_list);
+ 
+-	list_del(&ts->ts_list);
++	list_del_init(&ts->ts_list);
+ 	iscsit_global->inactive_ts--;
+ 	spin_unlock(&inactive_ts_lock);
+ 
+@@ -204,8 +190,6 @@ static void iscsi_deallocate_extra_thread_sets(void)
+ 
+ void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts)
+ {
+-	iscsi_add_ts_to_active_list(ts);
+-
+ 	spin_lock_bh(&ts->ts_state_lock);
+ 	conn->thread_set = ts;
+ 	ts->conn = conn;
+@@ -397,7 +381,6 @@ struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts)
+ 
+ 	if (ts->delay_inactive && (--ts->thread_count == 0)) {
+ 		spin_unlock_bh(&ts->ts_state_lock);
+-		iscsi_del_ts_from_active_list(ts);
+ 
+ 		if (!iscsit_global->in_shutdown)
+ 			iscsi_deallocate_extra_thread_sets();
+@@ -452,7 +435,6 @@ struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts)
+ 
+ 	if (ts->delay_inactive && (--ts->thread_count == 0)) {
+ 		spin_unlock_bh(&ts->ts_state_lock);
+-		iscsi_del_ts_from_active_list(ts);
+ 
+ 		if (!iscsit_global->in_shutdown)
+ 			iscsi_deallocate_extra_thread_sets();
+diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
+index a9d256d6e909..6e1f1505f04e 100644
+--- a/drivers/tty/pty.c
++++ b/drivers/tty/pty.c
+@@ -210,6 +210,9 @@ static int pty_signal(struct tty_struct *tty, int sig)
+ {
+ 	struct pid *pgrp;
+ 
++	if (sig != SIGINT && sig != SIGQUIT && sig != SIGTSTP)
++		return -EINVAL;
++
+ 	if (tty->link) {
+ 		pgrp = tty_get_pgrp(tty->link);
+ 		if (pgrp)
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index 4d848a29e223..92a8b263735a 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -794,7 +794,7 @@ static void atmel_tx_dma(struct uart_port *port)
+ 			return;
+ 		}
+ 
+-		dma_sync_sg_for_device(port->dev, sg, 1, DMA_MEM_TO_DEV);
++		dma_sync_sg_for_device(port->dev, sg, 1, DMA_TO_DEVICE);
+ 
+ 		atmel_port->desc_tx = desc;
+ 		desc->callback = atmel_complete_tx_dma;
+@@ -927,7 +927,7 @@ static void atmel_rx_from_dma(struct uart_port *port)
+ 	dma_sync_sg_for_cpu(port->dev,
+ 			    &atmel_port->sg_rx,
+ 			    1,
+-			    DMA_DEV_TO_MEM);
++			    DMA_FROM_DEVICE);
+ 
+ 	/*
+ 	 * ring->head points to the end of data already written by the DMA.
+@@ -974,7 +974,7 @@ static void atmel_rx_from_dma(struct uart_port *port)
+ 	dma_sync_sg_for_device(port->dev,
+ 			       &atmel_port->sg_rx,
+ 			       1,
+-			       DMA_DEV_TO_MEM);
++			       DMA_FROM_DEVICE);
+ 
+ 	/*
+ 	 * Drop the lock here since it might end up calling
+@@ -2565,7 +2565,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
+ 
+ 	ret = atmel_init_port(port, pdev);
+ 	if (ret)
+-		goto err;
++		goto err_clear_bit;
+ 
+ 	if (!atmel_use_pdc_rx(&port->uart)) {
+ 		ret = -ENOMEM;
+@@ -2596,6 +2596,12 @@ static int atmel_serial_probe(struct platform_device *pdev)
+ 	device_init_wakeup(&pdev->dev, 1);
+ 	platform_set_drvdata(pdev, port);
+ 
++	/*
++	 * The peripheral clock has been disabled by atmel_init_port():
++	 * enable it before accessing I/O registers
++	 */
++	clk_prepare_enable(port->clk);
++
+ 	if (rs485_enabled) {
+ 		UART_PUT_MR(&port->uart, ATMEL_US_USMODE_NORMAL);
+ 		UART_PUT_CR(&port->uart, ATMEL_US_RTSEN);
+@@ -2606,6 +2612,12 @@ static int atmel_serial_probe(struct platform_device *pdev)
+ 	 */
+ 	atmel_get_ip_name(&port->uart);
+ 
++	/*
++	 * The peripheral clock can now safely be disabled until the
++	 * port is used.
++	 */
++	clk_disable_unprepare(port->clk);
++
+ 	return 0;
+ 
+ err_add_port:
+@@ -2616,6 +2628,8 @@ err_alloc_ring:
+ 		clk_put(port->clk);
+ 		port->clk = NULL;
+ 	}
++err_clear_bit:
++	clear_bit(port->uart.line, atmel_ports_in_use);
+ err:
+ 	return ret;
+ }
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index e7cde3a9566d..e95c4971327b 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -506,9 +506,6 @@ static inline void lpuart_prepare_rx(struct lpuart_port *sport)
+ 
+ 	spin_lock_irqsave(&sport->port.lock, flags);
+ 
+-	init_timer(&sport->lpuart_timer);
+-	sport->lpuart_timer.function = lpuart_timer_func;
+-	sport->lpuart_timer.data = (unsigned long)sport;
+ 	sport->lpuart_timer.expires = jiffies + sport->dma_rx_timeout;
+ 	add_timer(&sport->lpuart_timer);
+ 
+@@ -758,18 +755,18 @@ out:
+ static irqreturn_t lpuart_int(int irq, void *dev_id)
+ {
+ 	struct lpuart_port *sport = dev_id;
+-	unsigned char sts;
++	unsigned char sts, crdma;
+ 
+ 	sts = readb(sport->port.membase + UARTSR1);
++	crdma = readb(sport->port.membase + UARTCR5);
+ 
+-	if (sts & UARTSR1_RDRF) {
++	if (sts & UARTSR1_RDRF && !(crdma & UARTCR5_RDMAS)) {
+ 		if (sport->lpuart_dma_use)
+ 			lpuart_prepare_rx(sport);
+ 		else
+ 			lpuart_rxint(irq, dev_id);
+ 	}
+-	if (sts & UARTSR1_TDRE &&
+-		!(readb(sport->port.membase + UARTCR5) & UARTCR5_TDMAS)) {
++	if (sts & UARTSR1_TDRE && !(crdma & UARTCR5_TDMAS)) {
+ 		if (sport->lpuart_dma_use)
+ 			lpuart_pio_tx(sport);
+ 		else
+@@ -1106,7 +1103,10 @@ static int lpuart_startup(struct uart_port *port)
+ 		sport->lpuart_dma_use = false;
+ 	} else {
+ 		sport->lpuart_dma_use = true;
++		setup_timer(&sport->lpuart_timer, lpuart_timer_func,
++			    (unsigned long)sport);
+ 		temp = readb(port->membase + UARTCR5);
++		temp &= ~UARTCR5_RDMAS;
+ 		writeb(temp | UARTCR5_TDMAS, port->membase + UARTCR5);
+ 	}
+ 
+@@ -1180,6 +1180,8 @@ static void lpuart_shutdown(struct uart_port *port)
+ 	devm_free_irq(port->dev, port->irq, sport);
+ 
+ 	if (sport->lpuart_dma_use) {
++		del_timer_sync(&sport->lpuart_timer);
++
+ 		lpuart_dma_tx_free(port);
+ 		lpuart_dma_rx_free(port);
+ 	}
+diff --git a/drivers/tty/tty_mutex.c b/drivers/tty/tty_mutex.c
+index 4486741190c4..a872389dc0bc 100644
+--- a/drivers/tty/tty_mutex.c
++++ b/drivers/tty/tty_mutex.c
+@@ -46,12 +46,8 @@ EXPORT_SYMBOL(tty_unlock);
+ 
+ void __lockfunc tty_lock_slave(struct tty_struct *tty)
+ {
+-	if (tty && tty != tty->link) {
+-		WARN_ON(!mutex_is_locked(&tty->link->legacy_mutex) ||
+-			!tty->driver->type == TTY_DRIVER_TYPE_PTY ||
+-			!tty->driver->type == PTY_TYPE_SLAVE);
++	if (tty && tty != tty->link)
+ 		tty_lock(tty);
+-	}
+ }
+ 
+ void __lockfunc tty_unlock_slave(struct tty_struct *tty)
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index f3fbbbca9bde..42d8f178fe61 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -500,6 +500,7 @@ void invert_screen(struct vc_data *vc, int offset, int count, int viewed)
+ #endif
+ 	if (DO_UPDATE(vc))
+ 		do_update_region(vc, (unsigned long) p, count);
++	notify_update(vc);
+ }
+ 
+ /* used by selection: complement pointer position */
+@@ -516,6 +517,7 @@ void complement_pos(struct vc_data *vc, int offset)
+ 		scr_writew(old, screenpos(vc, old_offset, 1));
+ 		if (DO_UPDATE(vc))
+ 			vc->vc_sw->con_putc(vc, old, oldy, oldx);
++		notify_update(vc);
+ 	}
+ 
+ 	old_offset = offset;
+@@ -533,8 +535,8 @@ void complement_pos(struct vc_data *vc, int offset)
+ 			oldy = (offset >> 1) / vc->vc_cols;
+ 			vc->vc_sw->con_putc(vc, new, oldy, oldx);
+ 		}
++		notify_update(vc);
+ 	}
+-
+ }
+ 
+ static void insert_char(struct vc_data *vc, unsigned int nr)
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 546a17e8ad5b..a417b738824f 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1091,6 +1091,7 @@ static int acm_probe(struct usb_interface *intf,
+ 	unsigned long quirks;
+ 	int num_rx_buf;
+ 	int i;
++	unsigned int elength = 0;
+ 	int combined_interfaces = 0;
+ 	struct device *tty_dev;
+ 	int rv = -ENOMEM;
+@@ -1136,9 +1137,12 @@ static int acm_probe(struct usb_interface *intf,
+ 			dev_err(&intf->dev, "skipping garbage\n");
+ 			goto next_desc;
+ 		}
++		elength = buffer[0];
+ 
+ 		switch (buffer[2]) {
+ 		case USB_CDC_UNION_TYPE: /* we've found it */
++			if (elength < sizeof(struct usb_cdc_union_desc))
++				goto next_desc;
+ 			if (union_header) {
+ 				dev_err(&intf->dev, "More than one "
+ 					"union descriptor, skipping ...\n");
+@@ -1147,29 +1151,36 @@ static int acm_probe(struct usb_interface *intf,
+ 			union_header = (struct usb_cdc_union_desc *)buffer;
+ 			break;
+ 		case USB_CDC_COUNTRY_TYPE: /* export through sysfs*/
++			if (elength < sizeof(struct usb_cdc_country_functional_desc))
++				goto next_desc;
+ 			cfd = (struct usb_cdc_country_functional_desc *)buffer;
+ 			break;
+ 		case USB_CDC_HEADER_TYPE: /* maybe check version */
+ 			break; /* for now we ignore it */
+ 		case USB_CDC_ACM_TYPE:
++			if (elength < 4)
++				goto next_desc;
+ 			ac_management_function = buffer[3];
+ 			break;
+ 		case USB_CDC_CALL_MANAGEMENT_TYPE:
++			if (elength < 5)
++				goto next_desc;
+ 			call_management_function = buffer[3];
+ 			call_interface_num = buffer[4];
+ 			break;
+ 		default:
+-			/* there are LOTS more CDC descriptors that
++			/*
++			 * there are LOTS more CDC descriptors that
+ 			 * could legitimately be found here.
+ 			 */
+ 			dev_dbg(&intf->dev, "Ignoring descriptor: "
+-					"type %02x, length %d\n",
+-					buffer[2], buffer[0]);
++					"type %02x, length %ud\n",
++					buffer[2], elength);
+ 			break;
+ 		}
+ next_desc:
+-		buflen -= buffer[0];
+-		buffer += buffer[0];
++		buflen -= elength;
++		buffer += elength;
+ 	}
+ 
+ 	if (!union_header) {
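
The length checks added above follow a general rule for parsing
length-prefixed records from an untrusted device: verify that the advertised
length covers the struct you are about to read, and always advance by that
advertised length so parsing cannot desynchronize. A hedged userspace sketch
(descriptor layout follows the 5-byte CDC union descriptor, but the parsing
loop is simplified from the real probe code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct union_desc {
	uint8_t len, dtype, dsubtype, master, slave;
};

static void parse(const uint8_t *buf, size_t buflen)
{
	while (buflen >= 3) {		/* len, type, subtype at minimum */
		uint8_t elength = buf[0];

		if (elength < 3 || elength > buflen)
			break;		/* garbage: stop, never overread */

		if (buf[2] == 0x06 /* USB_CDC_UNION_TYPE */ &&
		    elength >= sizeof(struct union_desc)) {
			struct union_desc u;
			memcpy(&u, buf, sizeof(u));
			printf("union: master %u slave %u\n", u.master, u.slave);
		}
		buf += elength;		/* advance by the advertised length */
		buflen -= elength;
	}
}

int main(void)
{
	const uint8_t descs[] = { 5, 0x24, 0x06, 0, 1, 3, 0x24, 0x02 };
	parse(descs, sizeof(descs));
	return 0;
}
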
+diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
+index 684ef70dc09d..506b969ea7fd 100644
+--- a/drivers/usb/core/buffer.c
++++ b/drivers/usb/core/buffer.c
+@@ -22,17 +22,25 @@
+  */
+ 
+ /* FIXME tune these based on pool statistics ... */
+-static const size_t	pool_max[HCD_BUFFER_POOLS] = {
+-	/* platforms without dma-friendly caches might need to
+-	 * prevent cacheline sharing...
+-	 */
+-	32,
+-	128,
+-	512,
+-	PAGE_SIZE / 2
+-	/* bigger --> allocate pages */
++static size_t pool_max[HCD_BUFFER_POOLS] = {
++	32, 128, 512, 2048,
+ };
+ 
++void __init usb_init_pool_max(void)
++{
++	/*
++	 * The pool_max values must never be smaller than
++	 * ARCH_KMALLOC_MINALIGN.
++	 */
++	if (ARCH_KMALLOC_MINALIGN <= 32)
++		;			/* Original value is okay */
++	else if (ARCH_KMALLOC_MINALIGN <= 64)
++		pool_max[0] = 64;
++	else if (ARCH_KMALLOC_MINALIGN <= 128)
++		pool_max[0] = 0;	/* Don't use this pool */
++	else
++		BUILD_BUG();		/* We don't allow this */
++}
+ 
+ /* SETUP primitives */
+ 
+diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
+index 874dec31a111..818369afff63 100644
+--- a/drivers/usb/core/driver.c
++++ b/drivers/usb/core/driver.c
+@@ -275,21 +275,6 @@ static int usb_unbind_device(struct device *dev)
+ 	return 0;
+ }
+ 
+-/*
+- * Cancel any pending scheduled resets
+- *
+- * [see usb_queue_reset_device()]
+- *
+- * Called after unconfiguring / when releasing interfaces. See
+- * comments in __usb_queue_reset_device() regarding
+- * udev->reset_running.
+- */
+-static void usb_cancel_queued_reset(struct usb_interface *iface)
+-{
+-	if (iface->reset_running == 0)
+-		cancel_work_sync(&iface->reset_ws);
+-}
+-
+ /* called from driver core with dev locked */
+ static int usb_probe_interface(struct device *dev)
+ {
+@@ -380,7 +365,6 @@ static int usb_probe_interface(struct device *dev)
+ 	usb_set_intfdata(intf, NULL);
+ 	intf->needs_remote_wakeup = 0;
+ 	intf->condition = USB_INTERFACE_UNBOUND;
+-	usb_cancel_queued_reset(intf);
+ 
+ 	/* If the LPM disable succeeded, balance the ref counts. */
+ 	if (!lpm_disable_error)
+@@ -425,7 +409,6 @@ static int usb_unbind_interface(struct device *dev)
+ 		usb_disable_interface(udev, intf, false);
+ 
+ 	driver->disconnect(intf);
+-	usb_cancel_queued_reset(intf);
+ 
+ 	/* Free streams */
+ 	for (i = 0, j = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
+@@ -1797,6 +1780,18 @@ static int autosuspend_check(struct usb_device *udev)
+ 		dev_dbg(&udev->dev, "remote wakeup needed for autosuspend\n");
+ 		return -EOPNOTSUPP;
+ 	}
++
++	/*
++	 * If the device is a direct child of the root hub and the HCD
++	 * doesn't handle wakeup requests, don't allow autosuspend when
++	 * wakeup is needed.
++	 */
++	if (w && udev->parent == udev->bus->root_hub &&
++			bus_to_hcd(udev->bus)->cant_recv_wakeups) {
++		dev_dbg(&udev->dev, "HCD doesn't handle wakeup requests\n");
++		return -EOPNOTSUPP;
++	}
++
+ 	udev->do_remote_wakeup = w;
+ 	return 0;
+ }
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index 11cee55ae397..45a915ccd71c 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -1618,6 +1618,7 @@ static int unlink1(struct usb_hcd *hcd, struct urb *urb, int status)
+ int usb_hcd_unlink_urb (struct urb *urb, int status)
+ {
+ 	struct usb_hcd		*hcd;
++	struct usb_device	*udev = urb->dev;
+ 	int			retval = -EIDRM;
+ 	unsigned long		flags;
+ 
+@@ -1629,20 +1630,19 @@ int usb_hcd_unlink_urb (struct urb *urb, int status)
+ 	spin_lock_irqsave(&hcd_urb_unlink_lock, flags);
+ 	if (atomic_read(&urb->use_count) > 0) {
+ 		retval = 0;
+-		usb_get_dev(urb->dev);
++		usb_get_dev(udev);
+ 	}
+ 	spin_unlock_irqrestore(&hcd_urb_unlink_lock, flags);
+ 	if (retval == 0) {
+ 		hcd = bus_to_hcd(urb->dev->bus);
+ 		retval = unlink1(hcd, urb, status);
+-		usb_put_dev(urb->dev);
++		if (retval == 0)
++			retval = -EINPROGRESS;
++		else if (retval != -EIDRM && retval != -EBUSY)
++			dev_dbg(&udev->dev, "hcd_unlink_urb %p fail %d\n",
++					urb, retval);
++		usb_put_dev(udev);
+ 	}
+-
+-	if (retval == 0)
+-		retval = -EINPROGRESS;
+-	else if (retval != -EIDRM && retval != -EBUSY)
+-		dev_dbg(&urb->dev->dev, "hcd_unlink_urb %p fail %d\n",
+-				urb, retval);
+ 	return retval;
+ }
+ 
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index aeb50bb6ba9c..b4bfa3ac4b12 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -5589,26 +5589,19 @@ EXPORT_SYMBOL_GPL(usb_reset_device);
+  *   possible; depending on how the driver attached to each interface
+  *   handles ->pre_reset(), the second reset might happen or not.
+  *
+- * - If a driver is unbound and it had a pending reset, the reset will
+- *   be cancelled.
++ * - If the reset is delayed so long that the interface is unbound from
++ *   its driver, the reset will be skipped.
+  *
+- * - This function can be called during .probe() or .disconnect()
+- *   times. On return from .disconnect(), any pending resets will be
+- *   cancelled.
+- *
+- * There is no no need to lock/unlock the @reset_ws as schedule_work()
+- * does its own.
+- *
+- * NOTE: We don't do any reference count tracking because it is not
+- *     needed. The lifecycle of the work_struct is tied to the
+- *     usb_interface. Before destroying the interface we cancel the
+- *     work_struct, so the fact that work_struct is queued and or
+- *     running means the interface (and thus, the device) exist and
+- *     are referenced.
++ * - This function can be called during .probe().  It can also be called
++ *   during .disconnect(), but doing so is pointless because the reset
++ *   will not occur.  If you really want to reset the device during
++ *   .disconnect(), call usb_reset_device() directly -- but watch out
++ *   for nested unbinding issues!
+  */
+ void usb_queue_reset_device(struct usb_interface *iface)
+ {
+-	schedule_work(&iface->reset_ws);
++	if (schedule_work(&iface->reset_ws))
++		usb_get_intf(iface);
+ }
+ EXPORT_SYMBOL_GPL(usb_queue_reset_device);
+ 
+diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
+index f7b7713cfb2a..f368d2053da5 100644
+--- a/drivers/usb/core/message.c
++++ b/drivers/usb/core/message.c
+@@ -1551,6 +1551,7 @@ static void usb_release_interface(struct device *dev)
+ 			altsetting_to_usb_interface_cache(intf->altsetting);
+ 
+ 	kref_put(&intfc->ref, usb_release_interface_cache);
++	usb_put_dev(interface_to_usbdev(intf));
+ 	kfree(intf);
+ }
+ 
+@@ -1626,24 +1627,6 @@ static struct usb_interface_assoc_descriptor *find_iad(struct usb_device *dev,
+ 
+ /*
+  * Internal function to queue a device reset
+- *
+- * This is initialized into the workstruct in 'struct
+- * usb_device->reset_ws' that is launched by
+- * message.c:usb_set_configuration() when initializing each 'struct
+- * usb_interface'.
+- *
+- * It is safe to get the USB device without reference counts because
+- * the life cycle of @iface is bound to the life cycle of @udev. Then,
+- * this function will be ran only if @iface is alive (and before
+- * freeing it any scheduled instances of it will have been cancelled).
+- *
+- * We need to set a flag (usb_dev->reset_running) because when we call
+- * the reset, the interfaces might be unbound. The current interface
+- * cannot try to remove the queued work as it would cause a deadlock
+- * (you cannot remove your work from within your executing
+- * workqueue). This flag lets it know, so that
+- * usb_cancel_queued_reset() doesn't try to do it.
+- *
+  * See usb_queue_reset_device() for more details
+  */
+ static void __usb_queue_reset_device(struct work_struct *ws)
+@@ -1655,11 +1638,10 @@ static void __usb_queue_reset_device(struct work_struct *ws)
+ 
+ 	rc = usb_lock_device_for_reset(udev, iface);
+ 	if (rc >= 0) {
+-		iface->reset_running = 1;
+ 		usb_reset_device(udev);
+-		iface->reset_running = 0;
+ 		usb_unlock_device(udev);
+ 	}
++	usb_put_intf(iface);	/* Undo _get_ in usb_queue_reset_device() */
+ }
+ 
+ 
+@@ -1854,6 +1836,7 @@ free_interfaces:
+ 		dev_set_name(&intf->dev, "%d-%s:%d.%d",
+ 			dev->bus->busnum, dev->devpath,
+ 			configuration, alt->desc.bInterfaceNumber);
++		usb_get_dev(dev);
+ 	}
+ 	kfree(new_interfaces);
+ 
+diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
+index 2a92b97f0144..b1fb9aef0f5b 100644
+--- a/drivers/usb/core/usb.c
++++ b/drivers/usb/core/usb.c
+@@ -1049,6 +1049,7 @@ static int __init usb_init(void)
+ 		pr_info("%s: USB support disabled\n", usbcore_name);
+ 		return 0;
+ 	}
++	usb_init_pool_max();
+ 
+ 	retval = usb_debugfs_init();
+ 	if (retval)
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 8f65ab3a3b92..4efd3bd35586 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -2043,6 +2043,7 @@ static void dwc3_resume_gadget(struct dwc3 *dwc)
+ 	if (dwc->gadget_driver && dwc->gadget_driver->resume) {
+ 		spin_unlock(&dwc->lock);
+ 		dwc->gadget_driver->resume(&dwc->gadget);
++		spin_lock(&dwc->lock);
+ 	}
+ }
+ 
+diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
+index 395649f357aa..c83ac8921cb3 100644
+--- a/drivers/usb/host/isp1760-hcd.c
++++ b/drivers/usb/host/isp1760-hcd.c
+@@ -2247,6 +2247,9 @@ struct usb_hcd *isp1760_register(phys_addr_t res_start, resource_size_t res_len,
+ 	hcd->rsrc_start = res_start;
+ 	hcd->rsrc_len = res_len;
+ 
++	/* This driver doesn't support wakeup requests */
++	hcd->cant_recv_wakeups = 1;
++
+ 	ret = usb_add_hcd(hcd, irq, irqflags);
+ 	if (ret)
+ 		goto err_unmap;
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index f4c56fc1a9f6..f40c856ff758 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -56,6 +56,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */
+ 	{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
+ 	{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
++	{ USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
+ 	{ USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
+ 	{ USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
+ 	{ USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
+diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
+index f8bb36f9d9ce..bf1940706422 100644
+--- a/drivers/xen/manage.c
++++ b/drivers/xen/manage.c
+@@ -105,10 +105,16 @@ static void do_suspend(void)
+ 
+ 	err = freeze_processes();
+ 	if (err) {
+-		pr_err("%s: freeze failed %d\n", __func__, err);
++		pr_err("%s: freeze processes failed %d\n", __func__, err);
+ 		goto out;
+ 	}
+ 
++	err = freeze_kernel_threads();
++	if (err) {
++		pr_err("%s: freeze kernel threads failed %d\n", __func__, err);
++		goto out_thaw;
++	}
++
+ 	err = dpm_suspend_start(PMSG_FREEZE);
+ 	if (err) {
+ 		pr_err("%s: dpm_suspend_start %d\n", __func__, err);
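+do_suspend() now freezes kernel threads as a second stage after user processes, unwinding through the new out_thaw label when that second stage fails (the matching label is outside the quoted context). A sketch of the staged-setup, goto-unwind shape with stand-in functions; none of these names are real kernel symbols:
+
+#include <stdio.h>
+
+static int freeze_user_tasks(void) { puts("user tasks frozen"); return 0; }
+static int freeze_kthreads(void)   { return -1; /* simulate failure */ }
+static void thaw_everything(void)  { puts("thawed"); }
+
+static int do_suspend_sketch(void)
+{
+	int err;
+
+	err = freeze_user_tasks();
+	if (err) {
+		fprintf(stderr, "freeze processes failed %d\n", err);
+		goto out;
+	}
+
+	err = freeze_kthreads();
+	if (err) {
+		fprintf(stderr, "freeze kernel threads failed %d\n", err);
+		goto out_thaw;	/* undo only the stage that succeeded */
+	}
+
+	puts("suspended");
+	/* the real code also falls through to thaw on the resume path */
+out_thaw:
+	thaw_everything();
+out:
+	return err;
+}
+
+int main(void) { return do_suspend_sketch() ? 1 : 0; }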
+diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
+index e999496eda3e..8e38499df77e 100644
+--- a/drivers/xen/xen-scsiback.c
++++ b/drivers/xen/xen-scsiback.c
+@@ -708,12 +708,11 @@ static int prepare_pending_reqs(struct vscsibk_info *info,
+ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
+ {
+ 	struct vscsiif_back_ring *ring = &info->ring;
+-	struct vscsiif_request *ring_req;
++	struct vscsiif_request ring_req;
+ 	struct vscsibk_pend *pending_req;
+ 	RING_IDX rc, rp;
+ 	int err, more_to_do;
+ 	uint32_t result;
+-	uint8_t act;
+ 
+ 	rc = ring->req_cons;
+ 	rp = ring->sring->req_prod;
+@@ -734,11 +733,10 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
+ 		if (!pending_req)
+ 			return 1;
+ 
+-		ring_req = RING_GET_REQUEST(ring, rc);
++		ring_req = *RING_GET_REQUEST(ring, rc);
+ 		ring->req_cons = ++rc;
+ 
+-		act = ring_req->act;
+-		err = prepare_pending_reqs(info, ring_req, pending_req);
++		err = prepare_pending_reqs(info, &ring_req, pending_req);
+ 		if (err) {
+ 			switch (err) {
+ 			case -ENODEV:
+@@ -754,9 +752,9 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
+ 			return 1;
+ 		}
+ 
+-		switch (act) {
++		switch (ring_req.act) {
+ 		case VSCSIIF_ACT_SCSI_CDB:
+-			if (scsiback_gnttab_data_map(ring_req, pending_req)) {
++			if (scsiback_gnttab_data_map(&ring_req, pending_req)) {
+ 				scsiback_fast_flush_area(pending_req);
+ 				scsiback_do_resp_with_sense(NULL,
+ 					DRIVER_ERROR << 24, 0, pending_req);
+@@ -767,7 +765,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
+ 			break;
+ 		case VSCSIIF_ACT_SCSI_ABORT:
+ 			scsiback_device_action(pending_req, TMR_ABORT_TASK,
+-				ring_req->ref_rqid);
++				ring_req.ref_rqid);
+ 			break;
+ 		case VSCSIIF_ACT_SCSI_RESET:
+ 			scsiback_device_action(pending_req, TMR_LUN_RESET, 0);
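+The scsiback change turns ring_req from a pointer into the shared ring into a by-value copy, so the frontend can no longer rewrite fields such as act between the backend's validation and its use (a time-of-check/time-of-use hole on shared memory). A userspace sketch of the snapshot-then-validate pattern; struct request and handle() are made-up names:
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+struct request {
+	uint8_t  act;
+	uint32_t len;
+};
+
+/* Pretend this array lives in a page shared with an untrusted peer. */
+static struct request shared_ring[1];
+
+static int handle(unsigned int idx)
+{
+	struct request req;
+
+	/* Snapshot first: every later check and use sees the same bytes. */
+	memcpy(&req, &shared_ring[idx], sizeof(req));
+
+	if (req.len > 4096)
+		return -1;	/* validate the local copy ... */
+
+	/* ... and use only the local copy as well */
+	printf("act=%u len=%u\n", (unsigned)req.act, (unsigned)req.len);
+	return 0;
+}
+
+int main(void)
+{
+	shared_ring[0].act = 1;
+	shared_ring[0].len = 512;
+	return handle(0) ? 1 : 0;
+}
+
+Every check and every later use must read the local snapshot; touching the shared copy again would reopen the race.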
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index 02b16910f4c9..995986b8e36b 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -645,11 +645,12 @@ out:
+ 
+ static unsigned long randomize_stack_top(unsigned long stack_top)
+ {
+-	unsigned int random_variable = 0;
++	unsigned long random_variable = 0;
+ 
+ 	if ((current->flags & PF_RANDOMIZE) &&
+ 		!(current->personality & ADDR_NO_RANDOMIZE)) {
+-		random_variable = get_random_int() & STACK_RND_MASK;
++		random_variable = (unsigned long) get_random_int();
++		random_variable &= STACK_RND_MASK;
+ 		random_variable <<= PAGE_SHIFT;
+ 	}
+ #ifdef CONFIG_STACK_GROWSUP
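+The binfmt_elf fix is needed because random_variable was an unsigned int: masking with STACK_RND_MASK keeps up to 22 bits on x86_64, and the subsequent << PAGE_SHIFT pushes the value to 34 bits, so the top bits silently vanished in the 32-bit type. A small demo of that truncation, assuming x86_64-like constants:
+
+#include <stdio.h>
+
+#define PAGE_SHIFT     12
+#define STACK_RND_MASK 0x3fffffUL	/* 22 bits, as on x86_64 */
+
+int main(void)
+{
+	unsigned int  narrow = STACK_RND_MASK;	/* old type */
+	unsigned long wide   = STACK_RND_MASK;	/* fixed type */
+
+	narrow <<= PAGE_SHIFT;	/* 22 + 12 = 34 bits: high bits lost */
+	wide   <<= PAGE_SHIFT;
+
+	printf("narrow: %#x\n", narrow);	/* 0xfffff000 */
+	printf("wide:   %#lx\n", wide);		/* 0x3fffff000 */
+	return 0;
+}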
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index 14a72ed14ef7..f54511dd287e 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -2609,32 +2609,23 @@ static int key_search(struct extent_buffer *b, struct btrfs_key *key,
+ 	return 0;
+ }
+ 
+-int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *found_path,
++int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
+ 		u64 iobjectid, u64 ioff, u8 key_type,
+ 		struct btrfs_key *found_key)
+ {
+ 	int ret;
+ 	struct btrfs_key key;
+ 	struct extent_buffer *eb;
+-	struct btrfs_path *path;
++
++	ASSERT(path);
+ 
+ 	key.type = key_type;
+ 	key.objectid = iobjectid;
+ 	key.offset = ioff;
+ 
+-	if (found_path == NULL) {
+-		path = btrfs_alloc_path();
+-		if (!path)
+-			return -ENOMEM;
+-	} else
+-		path = found_path;
+-
+ 	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
+-	if ((ret < 0) || (found_key == NULL)) {
+-		if (path != found_path)
+-			btrfs_free_path(path);
++	if ((ret < 0) || (found_key == NULL))
+ 		return ret;
+-	}
+ 
+ 	eb = path->nodes[0];
+ 	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 8c63419a7f70..6f46c9b1f50c 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -1630,6 +1630,7 @@ struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
+ 				     bool check_ref)
+ {
+ 	struct btrfs_root *root;
++	struct btrfs_path *path;
+ 	int ret;
+ 
+ 	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
+@@ -1669,8 +1670,14 @@ again:
+ 	if (ret)
+ 		goto fail;
+ 
+-	ret = btrfs_find_item(fs_info->tree_root, NULL, BTRFS_ORPHAN_OBJECTID,
++	path = btrfs_alloc_path();
++	if (!path) {
++		ret = -ENOMEM;
++		goto fail;
++	}
++	ret = btrfs_find_item(fs_info->tree_root, path, BTRFS_ORPHAN_OBJECTID,
+ 			location->objectid, BTRFS_ORPHAN_ITEM_KEY, NULL);
++	btrfs_free_path(path);
+ 	if (ret < 0)
+ 		goto fail;
+ 	if (ret == 0)
+@@ -2498,7 +2505,7 @@ int open_ctree(struct super_block *sb,
+ 		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
+ 
+ 	if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
+-		printk(KERN_ERR "BTRFS: has skinny extents\n");
++		printk(KERN_INFO "BTRFS: has skinny extents\n");
+ 
+ 	/*
+ 	 * flag our filesystem as having big metadata blocks if
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 1a9585d4380a..f78e9dc5d574 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -488,8 +488,20 @@ insert:
+ 		src_item = (struct btrfs_inode_item *)src_ptr;
+ 		dst_item = (struct btrfs_inode_item *)dst_ptr;
+ 
+-		if (btrfs_inode_generation(eb, src_item) == 0)
++		if (btrfs_inode_generation(eb, src_item) == 0) {
++			struct extent_buffer *dst_eb = path->nodes[0];
++
++			if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
++			    S_ISREG(btrfs_inode_mode(dst_eb, dst_item))) {
++				struct btrfs_map_token token;
++				u64 ino_size = btrfs_inode_size(eb, src_item);
++
++				btrfs_init_map_token(&token);
++				btrfs_set_token_inode_size(dst_eb, dst_item,
++							   ino_size, &token);
++			}
+ 			goto no_copy;
++		}
+ 
+ 		if (overwrite_root &&
+ 		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
+@@ -1257,10 +1269,19 @@ static int insert_orphan_item(struct btrfs_trans_handle *trans,
+ 			      struct btrfs_root *root, u64 offset)
+ {
+ 	int ret;
+-	ret = btrfs_find_item(root, NULL, BTRFS_ORPHAN_OBJECTID,
++	struct btrfs_path *path;
++
++	path = btrfs_alloc_path();
++	if (!path)
++		return -ENOMEM;
++
++	ret = btrfs_find_item(root, path, BTRFS_ORPHAN_OBJECTID,
+ 			offset, BTRFS_ORPHAN_ITEM_KEY, NULL);
+ 	if (ret > 0)
+ 		ret = btrfs_insert_orphan_item(trans, root, offset);
++
++	btrfs_free_path(path);
++
+ 	return ret;
+ }
+ 
+@@ -3219,7 +3240,8 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans,
+ static void fill_inode_item(struct btrfs_trans_handle *trans,
+ 			    struct extent_buffer *leaf,
+ 			    struct btrfs_inode_item *item,
+-			    struct inode *inode, int log_inode_only)
++			    struct inode *inode, int log_inode_only,
++			    u64 logged_isize)
+ {
+ 	struct btrfs_map_token token;
+ 
+@@ -3232,7 +3254,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
+ 		 * to say 'update this inode with these values'
+ 		 */
+ 		btrfs_set_token_inode_generation(leaf, item, 0, &token);
+-		btrfs_set_token_inode_size(leaf, item, 0, &token);
++		btrfs_set_token_inode_size(leaf, item, logged_isize, &token);
+ 	} else {
+ 		btrfs_set_token_inode_generation(leaf, item,
+ 						 BTRFS_I(inode)->generation,
+@@ -3284,7 +3306,7 @@ static int log_inode_item(struct btrfs_trans_handle *trans,
+ 		return ret;
+ 	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ 				    struct btrfs_inode_item);
+-	fill_inode_item(trans, path->nodes[0], inode_item, inode, 0);
++	fill_inode_item(trans, path->nodes[0], inode_item, inode, 0, 0);
+ 	btrfs_release_path(path);
+ 	return 0;
+ }
+@@ -3293,7 +3315,8 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
+ 			       struct inode *inode,
+ 			       struct btrfs_path *dst_path,
+ 			       struct btrfs_path *src_path, u64 *last_extent,
+-			       int start_slot, int nr, int inode_only)
++			       int start_slot, int nr, int inode_only,
++			       u64 logged_isize)
+ {
+ 	unsigned long src_offset;
+ 	unsigned long dst_offset;
+@@ -3350,7 +3373,8 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
+ 						    dst_path->slots[0],
+ 						    struct btrfs_inode_item);
+ 			fill_inode_item(trans, dst_path->nodes[0], inode_item,
+-					inode, inode_only == LOG_INODE_EXISTS);
++					inode, inode_only == LOG_INODE_EXISTS,
++					logged_isize);
+ 		} else {
+ 			copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
+ 					   src_offset, ins_sizes[i]);
+@@ -3902,6 +3926,33 @@ process:
+ 	return ret;
+ }
+ 
++static int logged_inode_size(struct btrfs_root *log, struct inode *inode,
++			     struct btrfs_path *path, u64 *size_ret)
++{
++	struct btrfs_key key;
++	int ret;
++
++	key.objectid = btrfs_ino(inode);
++	key.type = BTRFS_INODE_ITEM_KEY;
++	key.offset = 0;
++
++	ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
++	if (ret < 0) {
++		return ret;
++	} else if (ret > 0) {
++		*size_ret = i_size_read(inode);
++	} else {
++		struct btrfs_inode_item *item;
++
++		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
++				      struct btrfs_inode_item);
++		*size_ret = btrfs_inode_size(path->nodes[0], item);
++	}
++
++	btrfs_release_path(path);
++	return 0;
++}
++
+ /* log a single inode in the tree log.
+  * At least one parent directory for this inode must exist in the tree
+  * or be logged already.
+@@ -3939,6 +3990,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
+ 	bool fast_search = false;
+ 	u64 ino = btrfs_ino(inode);
+ 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
++	u64 logged_isize = 0;
+ 
+ 	path = btrfs_alloc_path();
+ 	if (!path)
+@@ -3992,6 +4044,25 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
+ 			max_key_type = BTRFS_XATTR_ITEM_KEY;
+ 		ret = drop_objectid_items(trans, log, path, ino, max_key_type);
+ 	} else {
++		if (inode_only == LOG_INODE_EXISTS) {
++			/*
++			 * Make sure the new inode item we write to the log has
++			 * the same isize as the current one (if it exists).
++			 * This is necessary to prevent data loss after log
++			 * replay, and also to prevent doing a wrong expanding
++			 * truncate - e.g. create file, write 4K into offset
++			 * 0, fsync, write 4K into offset 4096, add hard link,
++			 * fsync some other file (to sync log), power fail - if
++			 * we use the inode's current i_size, after log replay
++			 * we get an 8Kb file, with the last 4Kb extent as a hole
++			 * (zeroes), as if an expanding truncate happened,
++			 * instead of getting a file of 4Kb only.
++			 */
++			err = logged_inode_size(log, inode, path,
++						&logged_isize);
++			if (err)
++				goto out_unlock;
++		}
+ 		if (test_and_clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+ 				       &BTRFS_I(inode)->runtime_flags)) {
+ 			clear_bit(BTRFS_INODE_COPY_EVERYTHING,
+@@ -4047,7 +4118,8 @@ again:
+ 		}
+ 
+ 		ret = copy_items(trans, inode, dst_path, path, &last_extent,
+-				 ins_start_slot, ins_nr, inode_only);
++				 ins_start_slot, ins_nr, inode_only,
++				 logged_isize);
+ 		if (ret < 0) {
+ 			err = ret;
+ 			goto out_unlock;
+@@ -4071,7 +4143,7 @@ next_slot:
+ 		if (ins_nr) {
+ 			ret = copy_items(trans, inode, dst_path, path,
+ 					 &last_extent, ins_start_slot,
+-					 ins_nr, inode_only);
++					 ins_nr, inode_only, logged_isize);
+ 			if (ret < 0) {
+ 				err = ret;
+ 				goto out_unlock;
+@@ -4092,7 +4164,8 @@ next_slot:
+ 	}
+ 	if (ins_nr) {
+ 		ret = copy_items(trans, inode, dst_path, path, &last_extent,
+-				 ins_start_slot, ins_nr, inode_only);
++				 ins_start_slot, ins_nr, inode_only,
++				 logged_isize);
+ 		if (ret < 0) {
+ 			err = ret;
+ 			goto out_unlock;
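+The comment in the LOG_INODE_EXISTS hunk above describes the losing sequence precisely; as a userspace sketch, the same steps look like this (a crash or power failure after the final fsync() is what exposes the bug, and error handling is elided):
+
+#define _XOPEN_SOURCE 500
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+
+int main(void)
+{
+	char buf[4096];
+	int fd, other;
+
+	memset(buf, 'a', sizeof(buf));
+
+	fd = open("victim", O_CREAT | O_WRONLY, 0644);
+	pwrite(fd, buf, sizeof(buf), 0);	/* 4K at offset 0 */
+	fsync(fd);				/* inode logged with isize 4K */
+	pwrite(fd, buf, sizeof(buf), 4096);	/* 4K at 4096, never fsynced */
+	link("victim", "victim-link");		/* inode gets logged again */
+
+	other = open("other", O_CREAT | O_WRONLY, 0644);
+	fsync(other);				/* syncs the log; power fails here */
+
+	close(other);
+	close(fd);
+	return 0;
+}
+
+Before the fix, log replay used the inode's in-memory i_size, so the file came back as 8Kb with the last 4Kb as a hole instead of the 4Kb that had actually been persisted.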
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 74c5f53595fb..fc29b2c91bef 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -4864,9 +4864,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
+ 	if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
+ 	    test_opt(sb, JOURNAL_CHECKSUM)) {
+ 		ext4_msg(sb, KERN_ERR, "changing journal_checksum "
+-			 "during remount not supported");
+-		err = -EINVAL;
+-		goto restore_opts;
++			 "during remount not supported; ignoring");
++		sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM;
+ 	}
+ 
+ 	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
+diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
+index 7654e87b0428..9ad5ba4b299b 100644
+--- a/fs/jffs2/scan.c
++++ b/fs/jffs2/scan.c
+@@ -510,6 +510,10 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
+ 				sumlen = c->sector_size - je32_to_cpu(sm->offset);
+ 				sumptr = buf + buf_size - sumlen;
+ 
++				/* sm->offset may be wrong but MAGIC may be right */
++				if (sumlen > c->sector_size)
++					goto full_scan;
++
+ 				/* Now, make sure the summary itself is available */
+ 				if (sumlen > buf_size) {
+ 					/* Need to kmalloc for this. */
+@@ -544,6 +548,7 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
+ 		}
+ 	}
+ 
++full_scan:
+ 	buf_ofs = jeb->offset;
+ 
+ 	if (!buf_size) {
+diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
+index 1cc6ec51e6b1..47a32b6d9b90 100644
+--- a/fs/lockd/mon.c
++++ b/fs/lockd/mon.c
+@@ -65,7 +65,7 @@ static inline struct sockaddr *nsm_addr(const struct nsm_handle *nsm)
+ 	return (struct sockaddr *)&nsm->sm_addr;
+ }
+ 
+-static struct rpc_clnt *nsm_create(struct net *net)
++static struct rpc_clnt *nsm_create(struct net *net, const char *nodename)
+ {
+ 	struct sockaddr_in sin = {
+ 		.sin_family		= AF_INET,
+@@ -77,6 +77,7 @@ static struct rpc_clnt *nsm_create(struct net *net)
+ 		.address		= (struct sockaddr *)&sin,
+ 		.addrsize		= sizeof(sin),
+ 		.servername		= "rpc.statd",
++		.nodename		= nodename,
+ 		.program		= &nsm_program,
+ 		.version		= NSM_VERSION,
+ 		.authflavor		= RPC_AUTH_NULL,
+@@ -102,7 +103,7 @@ out:
+ 	return clnt;
+ }
+ 
+-static struct rpc_clnt *nsm_client_get(struct net *net)
++static struct rpc_clnt *nsm_client_get(struct net *net, const char *nodename)
+ {
+ 	struct rpc_clnt	*clnt, *new;
+ 	struct lockd_net *ln = net_generic(net, lockd_net_id);
+@@ -111,7 +112,7 @@ static struct rpc_clnt *nsm_client_get(struct net *net)
+ 	if (clnt != NULL)
+ 		goto out;
+ 
+-	clnt = new = nsm_create(net);
++	clnt = new = nsm_create(net, nodename);
+ 	if (IS_ERR(clnt))
+ 		goto out;
+ 
+@@ -190,19 +191,23 @@ int nsm_monitor(const struct nlm_host *host)
+ 	struct nsm_res	res;
+ 	int		status;
+ 	struct rpc_clnt *clnt;
++	const char *nodename = NULL;
+ 
+ 	dprintk("lockd: nsm_monitor(%s)\n", nsm->sm_name);
+ 
+ 	if (nsm->sm_monitored)
+ 		return 0;
+ 
++	if (host->h_rpcclnt)
++		nodename = host->h_rpcclnt->cl_nodename;
++
+ 	/*
+ 	 * Choose whether to record the caller_name or IP address of
+ 	 * this peer in the local rpc.statd's database.
+ 	 */
+ 	nsm->sm_mon_name = nsm_use_hostnames ? nsm->sm_name : nsm->sm_addrbuf;
+ 
+-	clnt = nsm_client_get(host->net);
++	clnt = nsm_client_get(host->net, nodename);
+ 	if (IS_ERR(clnt)) {
+ 		status = PTR_ERR(clnt);
+ 		dprintk("lockd: failed to create NSM upcall transport, "
+diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
+index b8fb3a4ef649..351be9205bf8 100644
+--- a/fs/nfs/callback.c
++++ b/fs/nfs/callback.c
+@@ -128,22 +128,24 @@ nfs41_callback_svc(void *vrqstp)
+ 		if (try_to_freeze())
+ 			continue;
+ 
+-		prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
++		prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_UNINTERRUPTIBLE);
+ 		spin_lock_bh(&serv->sv_cb_lock);
+ 		if (!list_empty(&serv->sv_cb_list)) {
+ 			req = list_first_entry(&serv->sv_cb_list,
+ 					struct rpc_rqst, rq_bc_list);
+ 			list_del(&req->rq_bc_list);
+ 			spin_unlock_bh(&serv->sv_cb_lock);
++			finish_wait(&serv->sv_cb_waitq, &wq);
+ 			dprintk("Invoking bc_svc_process()\n");
+ 			error = bc_svc_process(serv, req, rqstp);
+ 			dprintk("bc_svc_process() returned w/ error code= %d\n",
+ 				error);
+ 		} else {
+ 			spin_unlock_bh(&serv->sv_cb_lock);
+-			schedule();
++			/* schedule_timeout to game the hung task watchdog */
++			schedule_timeout(60 * HZ);
++			finish_wait(&serv->sv_cb_waitq, &wq);
+ 		}
+-		finish_wait(&serv->sv_cb_waitq, &wq);
+ 	}
+ 	return 0;
+ }
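+The callback thread now waits in TASK_UNINTERRUPTIBLE but only in 60-second slices, so the hung-task watchdog (which flags tasks stuck too long in uninterruptible sleep) never triggers while the kthread legitimately idles. The same bounded-wait shape in userspace, with a pthreads condition variable standing in for the waitqueue:
+
+#include <pthread.h>
+#include <stdbool.h>
+#include <time.h>
+
+static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
+static bool have_work;
+
+static void wait_for_work(void)
+{
+	struct timespec ts;
+
+	pthread_mutex_lock(&lock);
+	while (!have_work) {
+		clock_gettime(CLOCK_REALTIME, &ts);
+		ts.tv_sec += 60;	/* wake at least once a minute */
+		pthread_cond_timedwait(&cond, &lock, &ts);
+	}
+	have_work = false;
+	pthread_mutex_unlock(&lock);
+}
+
+int main(void)
+{
+	have_work = true;	/* pretend work was already queued */
+	wait_for_work();
+	return 0;
+}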
+diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
+index f4ccfe6521ec..02f8d09e119f 100644
+--- a/fs/nfs/callback_xdr.c
++++ b/fs/nfs/callback_xdr.c
+@@ -464,8 +464,10 @@ static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp,
+ 
+ 		for (i = 0; i < args->csa_nrclists; i++) {
+ 			status = decode_rc_list(xdr, &args->csa_rclists[i]);
+-			if (status)
++			if (status) {
++				args->csa_nrclists = i;
+ 				goto out_free;
++			}
+ 		}
+ 	}
+ 	status = 0;
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index 294692ff83b1..a094b0c34ac3 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -242,7 +242,7 @@ static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
+ void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
+ 			      struct nfs_direct_req *dreq)
+ {
+-	cinfo->lock = &dreq->lock;
++	cinfo->lock = &dreq->inode->i_lock;
+ 	cinfo->mds = &dreq->mds_cinfo;
+ 	cinfo->ds = &dreq->ds_cinfo;
+ 	cinfo->dreq = dreq;
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index b6f34bfa6fe8..b84efe4b6247 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -375,7 +375,7 @@ extern struct rpc_stat nfs_rpcstat;
+ 
+ extern int __init register_nfs_fs(void);
+ extern void __exit unregister_nfs_fs(void);
+-extern void nfs_sb_active(struct super_block *sb);
++extern bool nfs_sb_active(struct super_block *sb);
+ extern void nfs_sb_deactive(struct super_block *sb);
+ 
+ /* namespace.c */
+@@ -493,6 +493,26 @@ extern int nfs41_walk_client_list(struct nfs_client *clp,
+ 				struct nfs_client **result,
+ 				struct rpc_cred *cred);
+ 
++static inline struct inode *nfs_igrab_and_active(struct inode *inode)
++{
++	inode = igrab(inode);
++	if (inode != NULL && !nfs_sb_active(inode->i_sb)) {
++		iput(inode);
++		inode = NULL;
++	}
++	return inode;
++}
++
++static inline void nfs_iput_and_deactive(struct inode *inode)
++{
++	if (inode != NULL) {
++		struct super_block *sb = inode->i_sb;
++
++		iput(inode);
++		nfs_sb_deactive(sb);
++	}
++}
++
+ /*
+  * Determine the device name as a string
+  */
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index c347705b0161..89f6827302de 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5137,9 +5137,13 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
+ static void nfs4_delegreturn_release(void *calldata)
+ {
+ 	struct nfs4_delegreturndata *data = calldata;
++	struct inode *inode = data->inode;
+ 
+-	if (data->roc)
+-		pnfs_roc_release(data->inode);
++	if (inode) {
++		if (data->roc)
++			pnfs_roc_release(inode);
++		nfs_iput_and_deactive(inode);
++	}
+ 	kfree(calldata);
+ }
+ 
+@@ -5196,9 +5200,9 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co
+ 	nfs_fattr_init(data->res.fattr);
+ 	data->timestamp = jiffies;
+ 	data->rpc_status = 0;
+-	data->inode = inode;
+-	data->roc = list_empty(&NFS_I(inode)->open_files) ?
+-		    pnfs_roc(inode) : false;
++	data->inode = nfs_igrab_and_active(inode);
++	if (data->inode)
++		data->roc = nfs4_roc(inode);
+ 
+ 	task_setup_data.callback_data = data;
+ 	msg.rpc_argp = &data->args;
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index 0a5dda4d85c2..883ee88e5f5e 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -1445,19 +1445,19 @@ pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *r
+ {
+ 	u64 rd_size = req->wb_bytes;
+ 
+-	WARN_ON_ONCE(pgio->pg_lseg != NULL);
+-
+-	if (pgio->pg_dreq == NULL)
+-		rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
+-	else
+-		rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);
+-
+-	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
+-					   req->wb_context,
+-					   req_offset(req),
+-					   rd_size,
+-					   IOMODE_READ,
+-					   GFP_KERNEL);
++	if (pgio->pg_lseg == NULL) {
++		if (pgio->pg_dreq == NULL)
++			rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
++		else
++			rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);
++
++		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
++						   req->wb_context,
++						   req_offset(req),
++						   rd_size,
++						   IOMODE_READ,
++						   GFP_KERNEL);
++	}
+ 	/* If no lseg, fall back to read through mds */
+ 	if (pgio->pg_lseg == NULL)
+ 		nfs_pageio_reset_read_mds(pgio);
+@@ -1469,14 +1469,13 @@ void
+ pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
+ 			   struct nfs_page *req, u64 wb_size)
+ {
+-	WARN_ON_ONCE(pgio->pg_lseg != NULL);
+-
+-	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
+-					   req->wb_context,
+-					   req_offset(req),
+-					   wb_size,
+-					   IOMODE_RW,
+-					   GFP_NOFS);
++	if (pgio->pg_lseg == NULL)
++		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
++						   req->wb_context,
++						   req_offset(req),
++						   wb_size,
++						   IOMODE_RW,
++						   GFP_NOFS);
+ 	/* If no lseg, fall back to write through mds */
+ 	if (pgio->pg_lseg == NULL)
+ 		nfs_pageio_reset_write_mds(pgio);
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index 31a11b0e885d..368d9395d2e7 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -405,12 +405,15 @@ void __exit unregister_nfs_fs(void)
+ 	unregister_filesystem(&nfs_fs_type);
+ }
+ 
+-void nfs_sb_active(struct super_block *sb)
++bool nfs_sb_active(struct super_block *sb)
+ {
+ 	struct nfs_server *server = NFS_SB(sb);
+ 
+-	if (atomic_inc_return(&server->active) == 1)
+-		atomic_inc(&sb->s_active);
++	if (!atomic_inc_not_zero(&sb->s_active))
++		return false;
++	if (atomic_inc_return(&server->active) != 1)
++		atomic_dec(&sb->s_active);
++	return true;
+ }
+ EXPORT_SYMBOL_GPL(nfs_sb_active);
+ 
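+nfs_sb_active() is rewritten around atomic_inc_not_zero() so that a superblock whose s_active count has already dropped to zero can never be revived by a racing caller; the function now reports whether it actually got the reference. The try-get idiom rendered with C11 atomics (try_get() is an illustrative name):
+
+#include <stdatomic.h>
+#include <stdbool.h>
+#include <stdio.h>
+
+/* Take a reference only if the count is still non-zero. */
+static bool try_get(atomic_int *refs)
+{
+	int old = atomic_load(refs);
+
+	while (old != 0) {
+		if (atomic_compare_exchange_weak(refs, &old, old + 1))
+			return true;	/* won the race: ref taken */
+	}
+	return false;			/* object already dying */
+}
+
+int main(void)
+{
+	atomic_int refs;
+
+	atomic_init(&refs, 1);
+	printf("live: %s\n", try_get(&refs) ? "got ref" : "refused");
+
+	atomic_init(&refs, 0);	/* simulate teardown already complete */
+	printf("dead: %s\n", try_get(&refs) ? "got ref" : "refused");
+	return 0;
+}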
+diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
+index 10b653930ee2..465223b7592e 100644
+--- a/fs/ocfs2/quota_local.c
++++ b/fs/ocfs2/quota_local.c
+@@ -701,8 +701,8 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
+ 	/* We don't need the lock and we have to acquire quota file locks
+ 	 * which will later depend on this lock */
+ 	mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
+-	info->dqi_maxblimit = 0x7fffffffffffffffLL;
+-	info->dqi_maxilimit = 0x7fffffffffffffffLL;
++	info->dqi_max_spc_limit = 0x7fffffffffffffffLL;
++	info->dqi_max_ino_limit = 0x7fffffffffffffffLL;
+ 	oinfo = kmalloc(sizeof(struct ocfs2_mem_dqinfo), GFP_NOFS);
+ 	if (!oinfo) {
+ 		mlog(ML_ERROR, "failed to allocate memory for ocfs2 quota"
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 246eae84b13b..88f9b8352742 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -1069,7 +1069,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+ 	struct vm_area_struct *vma;
+ 	struct pagemapread *pm = walk->private;
+ 	spinlock_t *ptl;
+-	pte_t *pte;
++	pte_t *pte, *orig_pte;
+ 	int err = 0;
+ 
+ 	/* find the first VMA at or above 'addr' */
+@@ -1130,15 +1130,19 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+ 		BUG_ON(is_vm_hugetlb_page(vma));
+ 
+ 		/* Addresses in the VMA. */
+-		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
++		orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
++		for (; addr < min(end, vma->vm_end); pte++, addr += PAGE_SIZE) {
+ 			pagemap_entry_t pme;
+-			pte = pte_offset_map(pmd, addr);
++
+ 			pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
+-			pte_unmap(pte);
+ 			err = add_to_pagemap(addr, &pme, pm);
+ 			if (err)
+-				return err;
++				break;
+ 		}
++		pte_unmap_unlock(orig_pte, ptl);
++
++		if (err)
++			return err;
+ 
+ 		if (addr == end)
+ 			break;
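+The pagemap hunk replaces a per-entry pte_offset_map()/pte_unmap() pair, which could also return to the caller on error with the mapping still live, with a single pte_offset_map_lock() around the whole loop; errors now break out and are returned only after the unlock. The general shape, with a mutex standing in for the pte lock:
+
+#include <pthread.h>
+#include <stdio.h>
+
+static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
+static int table[16];
+
+static int visit(int v) { return v < 0 ? -1 : 0; }
+
+static int walk_range(int start, int end)
+{
+	int i, err = 0;
+
+	pthread_mutex_lock(&table_lock);	/* lock once, not per entry */
+	for (i = start; i < end; i++) {
+		err = visit(table[i]);
+		if (err)
+			break;			/* never return with the lock held */
+	}
+	pthread_mutex_unlock(&table_lock);
+
+	return err;
+}
+
+int main(void)
+{
+	table[3] = -1;
+	printf("walk: %d\n", walk_range(0, 8));
+	return 0;
+}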
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index 69df5b239844..ca035a2fce81 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -2385,16 +2385,6 @@ out:
+ }
+ EXPORT_SYMBOL(dquot_quota_on_mount);
+ 
+-static inline qsize_t qbtos(qsize_t blocks)
+-{
+-	return blocks << QIF_DQBLKSIZE_BITS;
+-}
+-
+-static inline qsize_t stoqb(qsize_t space)
+-{
+-	return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
+-}
+-
+ /* Generic routine for getting common part of quota structure */
+ static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
+ {
+@@ -2444,13 +2434,13 @@ static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
+ 		return -EINVAL;
+ 
+ 	if (((di->d_fieldmask & QC_SPC_SOFT) &&
+-	     stoqb(di->d_spc_softlimit) > dqi->dqi_maxblimit) ||
++	     di->d_spc_softlimit > dqi->dqi_max_spc_limit) ||
+ 	    ((di->d_fieldmask & QC_SPC_HARD) &&
+-	     stoqb(di->d_spc_hardlimit) > dqi->dqi_maxblimit) ||
++	     di->d_spc_hardlimit > dqi->dqi_max_spc_limit) ||
+ 	    ((di->d_fieldmask & QC_INO_SOFT) &&
+-	     (di->d_ino_softlimit > dqi->dqi_maxilimit)) ||
++	     (di->d_ino_softlimit > dqi->dqi_max_ino_limit)) ||
+ 	    ((di->d_fieldmask & QC_INO_HARD) &&
+-	     (di->d_ino_hardlimit > dqi->dqi_maxilimit)))
++	     (di->d_ino_hardlimit > dqi->dqi_max_ino_limit)))
+ 		return -ERANGE;
+ 
+ 	spin_lock(&dq_data_lock);
+diff --git a/fs/quota/quota_v1.c b/fs/quota/quota_v1.c
+index 469c6848b322..8fe79beced5c 100644
+--- a/fs/quota/quota_v1.c
++++ b/fs/quota/quota_v1.c
+@@ -169,8 +169,8 @@ static int v1_read_file_info(struct super_block *sb, int type)
+ 	}
+ 	ret = 0;
+ 	/* limits are stored as unsigned 32-bit data */
+-	dqopt->info[type].dqi_maxblimit = 0xffffffff;
+-	dqopt->info[type].dqi_maxilimit = 0xffffffff;
++	dqopt->info[type].dqi_max_spc_limit = 0xffffffffULL << QUOTABLOCK_BITS;
++	dqopt->info[type].dqi_max_ino_limit = 0xffffffff;
+ 	dqopt->info[type].dqi_igrace =
+ 			dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME;
+ 	dqopt->info[type].dqi_bgrace =
+diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
+index 02751ec695c5..d1a8054bba9a 100644
+--- a/fs/quota/quota_v2.c
++++ b/fs/quota/quota_v2.c
+@@ -117,12 +117,12 @@ static int v2_read_file_info(struct super_block *sb, int type)
+ 	qinfo = info->dqi_priv;
+ 	if (version == 0) {
+ 		/* limits are stored as unsigned 32-bit data */
+-		info->dqi_maxblimit = 0xffffffff;
+-		info->dqi_maxilimit = 0xffffffff;
++		info->dqi_max_spc_limit = 0xffffffffULL << QUOTABLOCK_BITS;
++		info->dqi_max_ino_limit = 0xffffffff;
+ 	} else {
+-		/* used space is stored as unsigned 64-bit value */
+-		info->dqi_maxblimit = 0xffffffffffffffffULL;	/* 2^64-1 */
+-		info->dqi_maxilimit = 0xffffffffffffffffULL;
++		/* used space is stored as unsigned 64-bit value in bytes */
++		info->dqi_max_spc_limit = 0xffffffffffffffffULL; /* 2^64-1 */
++		info->dqi_max_ino_limit = 0xffffffffffffffffULL;
+ 	}
+ 	info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
+ 	info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index 5bc71d9a674a..7b72b7dd8906 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -1288,6 +1288,7 @@ static int udf_read_inode(struct inode *inode, bool hidden_inode)
+ 	struct kernel_lb_addr *iloc = &iinfo->i_location;
+ 	unsigned int link_count;
+ 	unsigned int indirections = 0;
++	int bs = inode->i_sb->s_blocksize;
+ 	int ret = -EIO;
+ 
+ reread:
+@@ -1374,38 +1375,35 @@ reread:
+ 	if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
+ 		iinfo->i_efe = 1;
+ 		iinfo->i_use = 0;
+-		ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
++		ret = udf_alloc_i_data(inode, bs -
+ 					sizeof(struct extendedFileEntry));
+ 		if (ret)
+ 			goto out;
+ 		memcpy(iinfo->i_ext.i_data,
+ 		       bh->b_data + sizeof(struct extendedFileEntry),
+-		       inode->i_sb->s_blocksize -
+-					sizeof(struct extendedFileEntry));
++		       bs - sizeof(struct extendedFileEntry));
+ 	} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
+ 		iinfo->i_efe = 0;
+ 		iinfo->i_use = 0;
+-		ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
+-						sizeof(struct fileEntry));
++		ret = udf_alloc_i_data(inode, bs - sizeof(struct fileEntry));
+ 		if (ret)
+ 			goto out;
+ 		memcpy(iinfo->i_ext.i_data,
+ 		       bh->b_data + sizeof(struct fileEntry),
+-		       inode->i_sb->s_blocksize - sizeof(struct fileEntry));
++		       bs - sizeof(struct fileEntry));
+ 	} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
+ 		iinfo->i_efe = 0;
+ 		iinfo->i_use = 1;
+ 		iinfo->i_lenAlloc = le32_to_cpu(
+ 				((struct unallocSpaceEntry *)bh->b_data)->
+ 				 lengthAllocDescs);
+-		ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
++		ret = udf_alloc_i_data(inode, bs -
+ 					sizeof(struct unallocSpaceEntry));
+ 		if (ret)
+ 			goto out;
+ 		memcpy(iinfo->i_ext.i_data,
+ 		       bh->b_data + sizeof(struct unallocSpaceEntry),
+-		       inode->i_sb->s_blocksize -
+-					sizeof(struct unallocSpaceEntry));
++		       bs - sizeof(struct unallocSpaceEntry));
+ 		return 0;
+ 	}
+ 
+@@ -1489,6 +1487,15 @@ reread:
+ 	}
+ 	inode->i_generation = iinfo->i_unique;
+ 
++	/*
++	 * Sanity check length of allocation descriptors and extended attrs to
++	 * avoid integer overflows
++	 */
++	if (iinfo->i_lenEAttr > bs || iinfo->i_lenAlloc > bs)
++		goto out;
++	/* Now do exact checks */
++	if (udf_file_entry_alloc_offset(inode) + iinfo->i_lenAlloc > bs)
++		goto out;
+ 	/* Sanity checks for files in ICB so that we don't get confused later */
+ 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
+ 		/*
+@@ -1498,8 +1505,7 @@ reread:
+ 		if (iinfo->i_lenAlloc != inode->i_size)
+ 			goto out;
+ 		/* File in ICB has to fit in there... */
+-		if (inode->i_size > inode->i_sb->s_blocksize -
+-					udf_file_entry_alloc_offset(inode))
++		if (inode->i_size > bs - udf_file_entry_alloc_offset(inode))
+ 			goto out;
+ 	}
+ 
+diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
+index b5eb4743f75a..4e20fe7497b3 100644
+--- a/fs/xfs/libxfs/xfs_bmap.c
++++ b/fs/xfs/libxfs/xfs_bmap.c
+@@ -973,7 +973,11 @@ xfs_bmap_local_to_extents(
+ 	*firstblock = args.fsbno;
+ 	bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
+ 
+-	/* initialise the block and copy the data */
++	/*
++	 * Initialise the block and copy the data
++	 *
++	 * Note: init_fn must set the buffer log item type correctly!
++	 */
+ 	init_fn(tp, bp, ip, ifp);
+ 
+ 	/* account for the change in fork size and log everything */
+diff --git a/fs/xfs/libxfs/xfs_symlink_remote.c b/fs/xfs/libxfs/xfs_symlink_remote.c
+index c80c5236c3da..e7e26bd6468f 100644
+--- a/fs/xfs/libxfs/xfs_symlink_remote.c
++++ b/fs/xfs/libxfs/xfs_symlink_remote.c
+@@ -178,6 +178,8 @@ xfs_symlink_local_to_remote(
+ 	struct xfs_mount	*mp = ip->i_mount;
+ 	char			*buf;
+ 
++	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SYMLINK_BUF);
++
+ 	if (!xfs_sb_version_hascrc(&mp->m_sb)) {
+ 		bp->b_ops = NULL;
+ 		memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
+diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
+index 3f9bd58edec7..744352bf3240 100644
+--- a/fs/xfs/xfs_buf_item.c
++++ b/fs/xfs/xfs_buf_item.c
+@@ -319,6 +319,10 @@ xfs_buf_item_format(
+ 	ASSERT(atomic_read(&bip->bli_refcount) > 0);
+ 	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
+ 	       (bip->bli_flags & XFS_BLI_STALE));
++	ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
++	       (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF
++	        && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));
++
+ 
+ 	/*
+ 	 * If it is an inode buffer, transfer the in-memory state to the
+diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
+index 41f804e740d7..d745e1a9bf2e 100644
+--- a/fs/xfs/xfs_inode.c
++++ b/fs/xfs/xfs_inode.c
+@@ -1995,6 +1995,7 @@ xfs_iunlink(
+ 	agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
+ 	offset = offsetof(xfs_agi_t, agi_unlinked) +
+ 		(sizeof(xfs_agino_t) * bucket_index);
++	xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
+ 	xfs_trans_log_buf(tp, agibp, offset,
+ 			  (offset + sizeof(xfs_agino_t) - 1));
+ 	return 0;
+@@ -2086,6 +2087,7 @@ xfs_iunlink_remove(
+ 		agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
+ 		offset = offsetof(xfs_agi_t, agi_unlinked) +
+ 			(sizeof(xfs_agino_t) * bucket_index);
++		xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
+ 		xfs_trans_log_buf(tp, agibp, offset,
+ 				  (offset + sizeof(xfs_agino_t) - 1));
+ 	} else {
+diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
+index 79fb19dd9c83..79a62bb95fde 100644
+--- a/fs/xfs/xfs_qm.c
++++ b/fs/xfs/xfs_qm.c
+@@ -842,6 +842,11 @@ xfs_qm_reset_dqcounts(
+ 		 */
+ 		xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
+ 			    "xfs_quotacheck");
++		/*
++		 * Reset type in case we are reusing group quota file for
++		 * project quotas or vice versa
++		 */
++		ddq->d_flags = type;
+ 		ddq->d_bcount = 0;
+ 		ddq->d_icount = 0;
+ 		ddq->d_rtbcount = 0;
+diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
+index fa3135b9bf04..eb90cd59a0ec 100644
+--- a/fs/xfs/xfs_trans.c
++++ b/fs/xfs/xfs_trans.c
+@@ -472,6 +472,7 @@ xfs_trans_apply_sb_deltas(
+ 		whole = 1;
+ 	}
+ 
++	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
+ 	if (whole)
+ 		/*
+ 		 * Log the whole thing, the fields are noncontiguous.
+diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
+index 0a4d4ab6d9a9..75798412859a 100644
+--- a/fs/xfs/xfs_trans_buf.c
++++ b/fs/xfs/xfs_trans_buf.c
+@@ -327,9 +327,10 @@ xfs_trans_read_buf_map(
+ 		return -EIO;
+ 	}
+ 
+-	if (tp)
++	if (tp) {
+ 		_xfs_trans_bjoin(tp, bp, 1);
+-	trace_xfs_trans_read_buf(bp->b_fspriv);
++		trace_xfs_trans_read_buf(bp->b_fspriv);
++	}
+ 	*bpp = bp;
+ 	return 0;
+ 
+diff --git a/include/linux/compiler.h b/include/linux/compiler.h
+index 33063f872ee3..fa6a31441d04 100644
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -198,7 +198,7 @@ static __always_inline void data_access_exceeds_word_size(void)
+ {
+ }
+ 
+-static __always_inline void __read_once_size(volatile void *p, void *res, int size)
++static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
+ {
+ 	switch (size) {
+ 	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
+@@ -255,10 +255,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
+  */
+ 
+ #define READ_ONCE(x) \
+-	({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; })
++	({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
+ 
+ #define WRITE_ONCE(x, val) \
+-	({ typeof(x) __val; __val = val; __write_once_size(&x, &__val, sizeof(__val)); __val; })
++	({ typeof(x) __val = (val); __write_once_size(&(x), &__val, sizeof(__val)); __val; })
+ 
+ #endif /* __KERNEL__ */
+ 
+@@ -447,12 +447,23 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
+  * to make the compiler aware of ordering is to put the two invocations of
+  * ACCESS_ONCE() in different C statements.
+  *
+- * This macro does absolutely -nothing- to prevent the CPU from reordering,
+- * merging, or refetching absolutely anything at any time.  Its main intended
+- * use is to mediate communication between process-level code and irq/NMI
+- * handlers, all running on the same CPU.
++ * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
++ * on a union member will work as long as the size of the member matches the
++ * size of the union and the size is smaller than word size.
++ *
++ * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
++ * between process-level code and irq/NMI handlers, all running on the same CPU,
++ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
++ * mutilate accesses that either do not require ordering or that interact
++ * with an explicit memory barrier or atomic instruction that provides the
++ * required ordering.
++ *
++ * If possible use READ_ONCE/ASSIGN_ONCE instead.
+  */
+-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
++#define __ACCESS_ONCE(x) ({ \
++	 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
++	(volatile typeof(x) *)&(x); })
++#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
+ 
+ /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
+ #ifdef CONFIG_KPROBES
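+The new READ_ONCE()/WRITE_ONCE() funnel the access through a union and a size-switched helper, which is what lets them work on small aggregates as well as scalars, whereas the ACCESS_ONCE() volatile cast is only meaningful for scalar types (as the rewritten comment says). A userspace re-creation of the union trick, leaning on GCC statement expressions the way the kernel does; read_once_size() and struct pair are illustrative:
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+static inline void read_once_size(const volatile void *p, void *res, int size)
+{
+	switch (size) {
+	case 1: *(uint8_t  *)res = *(volatile uint8_t  *)p; break;
+	case 2: *(uint16_t *)res = *(volatile uint16_t *)p; break;
+	case 4: *(uint32_t *)res = *(volatile uint32_t *)p; break;
+	case 8: *(uint64_t *)res = *(volatile uint64_t *)p; break;
+	default: memcpy(res, (const void *)p, size);	/* not one access */
+	}
+}
+
+#define READ_ONCE(x) \
+	({ union { __typeof__(x) val; char c[1]; } u; \
+	   read_once_size(&(x), u.c, sizeof(x)); u.val; })
+
+struct pair { uint32_t a, b; };	/* 8 bytes: still a single access on 64-bit */
+
+int main(void)
+{
+	struct pair shared = { 1, 2 };
+	struct pair snap = READ_ONCE(shared);	/* works on an aggregate */
+	int flag = 3;
+
+	printf("%u %u %d\n", snap.a, snap.b, READ_ONCE(flag));
+	return 0;
+}
+
+The gup.c hunk later in this patch is an instance of exactly this: pmd_t is not a scalar, so ACCESS_ONCE(*pmdp) becomes READ_ONCE(*pmdp).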
+diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
+index 1c804b057fb1..7ee1774edee5 100644
+--- a/include/linux/fsnotify.h
++++ b/include/linux/fsnotify.h
+@@ -101,8 +101,10 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
+ 		new_dir_mask |= FS_ISDIR;
+ 	}
+ 
+-	fsnotify(old_dir, old_dir_mask, old_dir, FSNOTIFY_EVENT_INODE, old_name, fs_cookie);
+-	fsnotify(new_dir, new_dir_mask, new_dir, FSNOTIFY_EVENT_INODE, new_name, fs_cookie);
++	fsnotify(old_dir, old_dir_mask, source, FSNOTIFY_EVENT_INODE, old_name,
++		 fs_cookie);
++	fsnotify(new_dir, new_dir_mask, source, FSNOTIFY_EVENT_INODE, new_name,
++		 fs_cookie);
+ 
+ 	if (target)
+ 		fsnotify_link_count(target);
+diff --git a/include/linux/kdb.h b/include/linux/kdb.h
+index 75ae2e2631fc..a19bcf9e762e 100644
+--- a/include/linux/kdb.h
++++ b/include/linux/kdb.h
+@@ -156,8 +156,14 @@ typedef enum {
+ 	KDB_REASON_SYSTEM_NMI,	/* In NMI due to SYSTEM cmd; regs valid */
+ } kdb_reason_t;
+ 
++enum kdb_msgsrc {
++	KDB_MSGSRC_INTERNAL, /* direct call to kdb_printf() */
++	KDB_MSGSRC_PRINTK, /* trapped from printk() */
++};
++
+ extern int kdb_trap_printk;
+-extern __printf(1, 0) int vkdb_printf(const char *fmt, va_list args);
++extern __printf(2, 0) int vkdb_printf(enum kdb_msgsrc src, const char *fmt,
++				      va_list args);
+ extern __printf(1, 2) int kdb_printf(const char *, ...);
+ typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...);
+ 
+diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
+index 467c84efb596..06ebfa1f874b 100644
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -1342,7 +1342,7 @@ struct nfs_commit_completion_ops {
+ };
+ 
+ struct nfs_commit_info {
+-	spinlock_t			*lock;
++	spinlock_t			*lock;	/* inode->i_lock */
+ 	struct nfs_mds_commit_info	*mds;
+ 	struct pnfs_ds_commit_info	*ds;
+ 	struct nfs_direct_req		*dreq;	/* O_DIRECT request */
+diff --git a/include/linux/quota.h b/include/linux/quota.h
+index 097d7eb2441e..b86df497aba3 100644
+--- a/include/linux/quota.h
++++ b/include/linux/quota.h
+@@ -216,8 +216,8 @@ struct mem_dqinfo {
+ 	unsigned long dqi_flags;
+ 	unsigned int dqi_bgrace;
+ 	unsigned int dqi_igrace;
+-	qsize_t dqi_maxblimit;
+-	qsize_t dqi_maxilimit;
++	qsize_t dqi_max_spc_limit;
++	qsize_t dqi_max_ino_limit;
+ 	void *dqi_priv;
+ };
+ 
+diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
+index d86acc63b25f..598ba80ec30c 100644
+--- a/include/linux/sunrpc/clnt.h
++++ b/include/linux/sunrpc/clnt.h
+@@ -57,7 +57,7 @@ struct rpc_clnt {
+ 	const struct rpc_timeout *cl_timeout;	/* Timeout strategy */
+ 
+ 	int			cl_nodelen;	/* nodename length */
+-	char 			cl_nodename[UNX_MAXNODENAME];
++	char 			cl_nodename[UNX_MAXNODENAME+1];
+ 	struct rpc_pipe_dir_head cl_pipedir_objects;
+ 	struct rpc_clnt *	cl_parent;	/* Points to parent of clones */
+ 	struct rpc_rtt		cl_rtt_default;
+@@ -112,6 +112,7 @@ struct rpc_create_args {
+ 	struct sockaddr		*saddress;
+ 	const struct rpc_timeout *timeout;
+ 	const char		*servername;
++	const char		*nodename;
+ 	const struct rpc_program *program;
+ 	u32			prognumber;	/* overrides program->number */
+ 	u32			version;
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index f89c24a03bd9..058a7698d7e3 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -127,10 +127,6 @@ enum usb_interface_condition {
+  *	to the sysfs representation for that device.
+  * @pm_usage_cnt: PM usage counter for this interface
+  * @reset_ws: Used for scheduling resets from atomic context.
+- * @reset_running: set to 1 if the interface is currently running a
+- *      queued reset so that usb_cancel_queued_reset() doesn't try to
+- *      remove from the workqueue when running inside the worker
+- *      thread. See __usb_queue_reset_device().
+  * @resetting_device: USB core reset the device, so use alt setting 0 as
+  *	current; needs bandwidth alloc after reset.
+  *
+@@ -181,7 +177,6 @@ struct usb_interface {
+ 	unsigned needs_remote_wakeup:1;	/* driver requires remote wakeup */
+ 	unsigned needs_altsetting0:1;	/* switch to altsetting 0 is pending */
+ 	unsigned needs_binding:1;	/* needs delayed unbind/rebind */
+-	unsigned reset_running:1;
+ 	unsigned resetting_device:1;	/* true: bandwidth alloc after reset */
+ 
+ 	struct device dev;		/* interface specific device info */
+diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
+index 086bf13307e6..68b1e836dff1 100644
+--- a/include/linux/usb/hcd.h
++++ b/include/linux/usb/hcd.h
+@@ -146,6 +146,8 @@ struct usb_hcd {
+ 	unsigned		amd_resume_bug:1; /* AMD remote wakeup quirk */
+ 	unsigned		can_do_streams:1; /* HC supports streams */
+ 	unsigned		tpl_support:1; /* OTG & EH TPL support */
++	unsigned		cant_recv_wakeups:1;
++			/* wakeup requests from downstream aren't received */
+ 
+ 	unsigned int		irq;		/* irq allocated */
+ 	void __iomem		*regs;		/* device memory/io */
+@@ -453,6 +455,7 @@ extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
+ #endif /* CONFIG_PCI */
+ 
+ /* pci-ish (pdev null is ok) buffer alloc/mapping support */
++void usb_init_pool_max(void);
+ int hcd_buffer_create(struct usb_hcd *hcd);
+ void hcd_buffer_destroy(struct usb_hcd *hcd);
+ 
+diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
+index a6fd939f202d..3ebb168b9afc 100644
+--- a/include/net/cipso_ipv4.h
++++ b/include/net/cipso_ipv4.h
+@@ -121,13 +121,6 @@ extern int cipso_v4_rbm_strictvalid;
+ #endif
+ 
+ /*
+- * Helper Functions
+- */
+-
+-#define CIPSO_V4_OPTEXIST(x) (IPCB(x)->opt.cipso != 0)
+-#define CIPSO_V4_OPTPTR(x) (skb_network_header(x) + IPCB(x)->opt.cipso)
+-
+-/*
+  * DOI List Functions
+  */
+ 
+@@ -190,7 +183,7 @@ static inline int cipso_v4_doi_domhsh_remove(struct cipso_v4_doi *doi_def,
+ 
+ #ifdef CONFIG_NETLABEL
+ void cipso_v4_cache_invalidate(void);
+-int cipso_v4_cache_add(const struct sk_buff *skb,
++int cipso_v4_cache_add(const unsigned char *cipso_ptr,
+ 		       const struct netlbl_lsm_secattr *secattr);
+ #else
+ static inline void cipso_v4_cache_invalidate(void)
+@@ -198,7 +191,7 @@ static inline void cipso_v4_cache_invalidate(void)
+ 	return;
+ }
+ 
+-static inline int cipso_v4_cache_add(const struct sk_buff *skb,
++static inline int cipso_v4_cache_add(const unsigned char *cipso_ptr,
+ 				     const struct netlbl_lsm_secattr *secattr)
+ {
+ 	return 0;
+@@ -211,6 +204,8 @@ static inline int cipso_v4_cache_add(const struct sk_buff *skb,
+ 
+ #ifdef CONFIG_NETLABEL
+ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway);
++int cipso_v4_getattr(const unsigned char *cipso,
++		     struct netlbl_lsm_secattr *secattr);
+ int cipso_v4_sock_setattr(struct sock *sk,
+ 			  const struct cipso_v4_doi *doi_def,
+ 			  const struct netlbl_lsm_secattr *secattr);
+@@ -226,6 +221,7 @@ int cipso_v4_skbuff_setattr(struct sk_buff *skb,
+ int cipso_v4_skbuff_delattr(struct sk_buff *skb);
+ int cipso_v4_skbuff_getattr(const struct sk_buff *skb,
+ 			    struct netlbl_lsm_secattr *secattr);
++unsigned char *cipso_v4_optptr(const struct sk_buff *skb);
+ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option);
+ #else
+ static inline void cipso_v4_error(struct sk_buff *skb,
+@@ -235,6 +231,12 @@ static inline void cipso_v4_error(struct sk_buff *skb,
+ 	return;
+ }
+ 
++static inline int cipso_v4_getattr(const unsigned char *cipso,
++				   struct netlbl_lsm_secattr *secattr)
++{
++	return -ENOSYS;
++}
++
+ static inline int cipso_v4_sock_setattr(struct sock *sk,
+ 				      const struct cipso_v4_doi *doi_def,
+ 				      const struct netlbl_lsm_secattr *secattr)
+@@ -282,6 +284,11 @@ static inline int cipso_v4_skbuff_getattr(const struct sk_buff *skb,
+ 	return -ENOSYS;
+ }
+ 
++static inline unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
++{
++	return NULL;
++}
++
+ static inline int cipso_v4_validate(const struct sk_buff *skb,
+ 				    unsigned char **option)
+ {
+diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
+index 07ce18ca71e0..ac5c0f9c7a20 100644
+--- a/kernel/debug/debug_core.c
++++ b/kernel/debug/debug_core.c
+@@ -604,7 +604,7 @@ return_normal:
+ 		   online_cpus)
+ 		cpu_relax();
+ 	if (!time_left)
+-		pr_crit("KGDB: Timed out waiting for secondary CPUs.\n");
++		pr_crit("Timed out waiting for secondary CPUs.\n");
+ 
+ 	/*
+ 	 * At this point the primary processor is completely
+diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
+index 7c70812caea5..a550afb99ebe 100644
+--- a/kernel/debug/kdb/kdb_io.c
++++ b/kernel/debug/kdb/kdb_io.c
+@@ -548,7 +548,7 @@ static int kdb_search_string(char *searched, char *searchfor)
+ 	return 0;
+ }
+ 
+-int vkdb_printf(const char *fmt, va_list ap)
++int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
+ {
+ 	int diag;
+ 	int linecount;
+@@ -691,19 +691,20 @@ kdb_printit:
+ 	 * Write to all consoles.
+ 	 */
+ 	retlen = strlen(kdb_buffer);
++	cp = (char *) printk_skip_level(kdb_buffer);
+ 	if (!dbg_kdb_mode && kgdb_connected) {
+-		gdbstub_msg_write(kdb_buffer, retlen);
++		gdbstub_msg_write(cp, retlen - (cp - kdb_buffer));
+ 	} else {
+ 		if (dbg_io_ops && !dbg_io_ops->is_console) {
+-			len = retlen;
+-			cp = kdb_buffer;
++			len = retlen - (cp - kdb_buffer);
++			cp2 = cp;
+ 			while (len--) {
+-				dbg_io_ops->write_char(*cp);
+-				cp++;
++				dbg_io_ops->write_char(*cp2);
++				cp2++;
+ 			}
+ 		}
+ 		while (c) {
+-			c->write(c, kdb_buffer, retlen);
++			c->write(c, cp, retlen - (cp - kdb_buffer));
+ 			touch_nmi_watchdog();
+ 			c = c->next;
+ 		}
+@@ -711,7 +712,10 @@ kdb_printit:
+ 	if (logging) {
+ 		saved_loglevel = console_loglevel;
+ 		console_loglevel = CONSOLE_LOGLEVEL_SILENT;
+-		printk(KERN_INFO "%s", kdb_buffer);
++		if (printk_get_level(kdb_buffer) || src == KDB_MSGSRC_PRINTK)
++			printk("%s", kdb_buffer);
++		else
++			pr_info("%s", kdb_buffer);
+ 	}
+ 
+ 	if (KDB_STATE(PAGER)) {
+@@ -844,7 +848,7 @@ int kdb_printf(const char *fmt, ...)
+ 	int r;
+ 
+ 	va_start(ap, fmt);
+-	r = vkdb_printf(fmt, ap);
++	r = vkdb_printf(KDB_MSGSRC_INTERNAL, fmt, ap);
+ 	va_end(ap);
+ 
+ 	return r;
+diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
+index 7b40c5f07dce..60f6bb817f70 100644
+--- a/kernel/debug/kdb/kdb_main.c
++++ b/kernel/debug/kdb/kdb_main.c
+@@ -2256,7 +2256,7 @@ static int kdb_cpu(int argc, const char **argv)
+ 	/*
+ 	 * Validate cpunum
+ 	 */
+-	if ((cpunum > NR_CPUS) || !kgdb_info[cpunum].enter_kgdb)
++	if ((cpunum >= CONFIG_NR_CPUS) || !kgdb_info[cpunum].enter_kgdb)
+ 		return KDB_BADCPUNUM;
+ 
+ 	dbg_switch_cpu = cpunum;
+@@ -2583,7 +2583,7 @@ static int kdb_summary(int argc, const char **argv)
+ #define K(x) ((x) << (PAGE_SHIFT - 10))
+ 	kdb_printf("\nMemTotal:       %8lu kB\nMemFree:        %8lu kB\n"
+ 		   "Buffers:        %8lu kB\n",
+-		   val.totalram, val.freeram, val.bufferram);
++		   K(val.totalram), K(val.freeram), K(val.bufferram));
+ 	return 0;
+ }
+ 
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 02d6b6d28796..fae29e3ffbf0 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1811,7 +1811,7 @@ int vprintk_default(const char *fmt, va_list args)
+ 
+ #ifdef CONFIG_KGDB_KDB
+ 	if (unlikely(kdb_trap_printk)) {
+-		r = vkdb_printf(fmt, args);
++		r = vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args);
+ 		return r;
+ 	}
+ #endif
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index 501baa9ac1be..c497fcdf0d1e 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -656,9 +656,13 @@ static void run_ksoftirqd(unsigned int cpu)
+ 		 * in the task stack here.
+ 		 */
+ 		__do_softirq();
+-		rcu_note_context_switch();
+ 		local_irq_enable();
+ 		cond_resched();
++
++		preempt_disable();
++		rcu_note_context_switch();
++		preempt_enable();
++
+ 		return;
+ 	}
+ 	local_irq_enable();
+diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
+index 28bf91c60a0b..85fb3d632bd8 100644
+--- a/kernel/time/ntp.c
++++ b/kernel/time/ntp.c
+@@ -633,10 +633,14 @@ int ntp_validate_timex(struct timex *txc)
+ 	if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME)))
+ 		return -EPERM;
+ 
+-	if (txc->modes & ADJ_FREQUENCY) {
+-		if (LONG_MIN / PPM_SCALE > txc->freq)
++	/*
++	 * Check for potential multiplication overflows that can
++	 * only happen on 64-bit systems:
++	 */
++	if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) {
++		if (LLONG_MIN / PPM_SCALE > txc->freq)
+ 			return -EINVAL;
+-		if (LONG_MAX / PPM_SCALE < txc->freq)
++		if (LLONG_MAX / PPM_SCALE < txc->freq)
+ 			return -EINVAL;
+ 	}
+ 
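+The ntp fix keeps the multiplication of txc->freq by PPM_SCALE from overflowing by bounds-checking against LLONG_MAX / PPM_SCALE and LLONG_MIN / PPM_SCALE before multiplying, and only on 64-bit where the product can actually exceed the range. A worked userspace version of the divide-first overflow check; SCALE here is a stand-in constant, not the real PPM_SCALE value:
+
+#include <limits.h>
+#include <stdio.h>
+
+#define SCALE 65536LL	/* stand-in for PPM_SCALE; the real value differs */
+
+/* Returns 0 and stores freq * SCALE, or -1 if it would overflow. */
+static int scale_checked(long long freq, long long *out)
+{
+	if (freq > LLONG_MAX / SCALE || freq < LLONG_MIN / SCALE)
+		return -1;	/* the product would not fit in 64 bits */
+	*out = freq * SCALE;
+	return 0;
+}
+
+int main(void)
+{
+	long long v;
+
+	printf("small: %d\n", scale_checked(1000, &v));		/* accepted */
+	printf("huge:  %d\n", scale_checked(LLONG_MAX / 2, &v)); /* rejected */
+	return 0;
+}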
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 7a4104cb95cb..d2e151c83bd5 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -447,7 +447,10 @@ int ring_buffer_print_page_header(struct trace_seq *s)
+ struct rb_irq_work {
+ 	struct irq_work			work;
+ 	wait_queue_head_t		waiters;
++	wait_queue_head_t		full_waiters;
+ 	bool				waiters_pending;
++	bool				full_waiters_pending;
++	bool				wakeup_full;
+ };
+ 
+ /*
+@@ -529,6 +532,10 @@ static void rb_wake_up_waiters(struct irq_work *work)
+ 	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
+ 
+ 	wake_up_all(&rbwork->waiters);
++	if (rbwork->wakeup_full) {
++		rbwork->wakeup_full = false;
++		wake_up_all(&rbwork->full_waiters);
++	}
+ }
+ 
+ /**
+@@ -553,9 +560,11 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
+ 	 * data in any cpu buffer, or a specific buffer, put the
+ 	 * caller on the appropriate wait queue.
+ 	 */
+-	if (cpu == RING_BUFFER_ALL_CPUS)
++	if (cpu == RING_BUFFER_ALL_CPUS) {
+ 		work = &buffer->irq_work;
+-	else {
++		/* Full only makes sense on per cpu reads */
++		full = false;
++	} else {
+ 		if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ 			return -ENODEV;
+ 		cpu_buffer = buffer->buffers[cpu];
+@@ -564,7 +573,10 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
+ 
+ 
+ 	while (true) {
+-		prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
++		if (full)
++			prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
++		else
++			prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
+ 
+ 		/*
+ 		 * The events can happen in critical sections where
+@@ -586,7 +598,10 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
+ 		 * that is necessary is that the wake up happens after
+ 		 * a task has been queued. It's OK for spurious wake ups.
+ 		 */
+-		work->waiters_pending = true;
++		if (full)
++			work->full_waiters_pending = true;
++		else
++			work->waiters_pending = true;
+ 
+ 		if (signal_pending(current)) {
+ 			ret = -EINTR;
+@@ -615,7 +630,10 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
+ 		schedule();
+ 	}
+ 
+-	finish_wait(&work->waiters, &wait);
++	if (full)
++		finish_wait(&work->full_waiters, &wait);
++	else
++		finish_wait(&work->waiters, &wait);
+ 
+ 	return ret;
+ }
+@@ -1230,6 +1248,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
+ 	init_completion(&cpu_buffer->update_done);
+ 	init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
+ 	init_waitqueue_head(&cpu_buffer->irq_work.waiters);
++	init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);
+ 
+ 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
+ 			    GFP_KERNEL, cpu_to_node(cpu));
+@@ -2801,6 +2820,8 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
+ static __always_inline void
+ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
+ {
++	bool pagebusy;
++
+ 	if (buffer->irq_work.waiters_pending) {
+ 		buffer->irq_work.waiters_pending = false;
+ 		/* irq_work_queue() supplies its own memory barriers */
+@@ -2812,6 +2833,15 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
+ 		/* irq_work_queue() supplies its own memory barriers */
+ 		irq_work_queue(&cpu_buffer->irq_work.work);
+ 	}
++
++	pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
++
++	if (!pagebusy && cpu_buffer->irq_work.full_waiters_pending) {
++		cpu_buffer->irq_work.wakeup_full = true;
++		cpu_buffer->irq_work.full_waiters_pending = false;
++		/* irq_work_queue() supplies its own memory barriers */
++		irq_work_queue(&cpu_buffer->irq_work.work);
++	}
+ }
+ 
+ /**
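+The ring-buffer changes keep two waiter populations per buffer, one woken on any new data and one woken only when the reader page fills, each with its own pending flag so rb_wakeups() queues the irq work only for the class that is actually waiting. A condensed pthreads analogue of that double-waitqueue bookkeeping (the names mirror the patch, but the code is a sketch):
+
+#include <pthread.h>
+#include <stdbool.h>
+#include <stdio.h>
+
+static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t  any_data  = PTHREAD_COND_INITIALIZER;
+static pthread_cond_t  page_full = PTHREAD_COND_INITIALIZER;
+static bool waiters_pending, full_waiters_pending;
+
+/* Producer side: the shape of rb_wakeups(). */
+static void wakeup(bool page_is_full)
+{
+	pthread_mutex_lock(&lock);
+	if (waiters_pending) {
+		waiters_pending = false;
+		pthread_cond_broadcast(&any_data);	/* any write wakes these */
+	}
+	if (page_is_full && full_waiters_pending) {
+		full_waiters_pending = false;
+		pthread_cond_broadcast(&page_full);	/* only a full page wakes these */
+	}
+	pthread_mutex_unlock(&lock);
+}
+
+int main(void)
+{
+	waiters_pending = full_waiters_pending = true;
+	wakeup(false);	/* wakes the "any data" waiters only */
+	wakeup(true);	/* now the "full" waiters go too */
+	printf("pending: %d %d\n", waiters_pending, full_waiters_pending);
+	return 0;
+}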
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 4a9079b9f082..361a827b4962 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4942,7 +4942,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
+ 	*fpos += written;
+ 
+  out_unlock:
+-	for (i = 0; i < nr_pages; i++){
++	for (i = nr_pages - 1; i >= 0; i--) {
+ 		kunmap_atomic(map_page[i]);
+ 		put_page(pages[i]);
+ 	}
+diff --git a/mm/gup.c b/mm/gup.c
+index 8dd50ce6326f..9b2afbfe67e3 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -926,7 +926,7 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
+ 
+ 	pmdp = pmd_offset(&pud, addr);
+ 	do {
+-		pmd_t pmd = ACCESS_ONCE(*pmdp);
++		pmd_t pmd = READ_ONCE(*pmdp);
+ 
+ 		next = pmd_addr_end(addr, end);
+ 		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 85032de5e20f..c49586f40758 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3666,6 +3666,8 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ {
+ 	struct page *page;
+ 
++	if (!pmd_present(*pmd))
++		return NULL;
+ 	page = pte_page(*(pte_t *)pmd);
+ 	if (page)
+ 		page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index 693ce8bcd06e..1775dbfc84a8 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -7066,7 +7066,8 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+ 	 * However when using service discovery, the value 127 will be
+ 	 * returned when the RSSI is not available.
+ 	 */
+-	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi)
++	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
++	    link_type == ACL_LINK)
+ 		rssi = 0;
+ 
+ 	bacpy(&ev->addr.bdaddr, bdaddr);
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index b67749bb55bf..757ae32bda47 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -2303,8 +2303,12 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn,
+ 	 * implementations are not known of and in order to not over
+ 	 * complicate our implementation, simply pretend that we never
+ 	 * received an IRK for such a device.
++	 *
++	 * The Identity Address must also be a Static Random or Public
++	 * Address, which hci_is_identity_address() checks for.
+ 	 */
+-	if (!bacmp(&info->bdaddr, BDADDR_ANY)) {
++	if (!bacmp(&info->bdaddr, BDADDR_ANY) ||
++	    !hci_is_identity_address(&info->bdaddr, info->addr_type)) {
+ 		BT_ERR("Ignoring IRK with no identity address");
+ 		goto distribute;
+ 	}
+diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
+index 53299c7b0ca4..f693a2f8ac86 100644
+--- a/net/ceph/osd_client.c
++++ b/net/ceph/osd_client.c
+@@ -1048,14 +1048,24 @@ static void put_osd(struct ceph_osd *osd)
+  */
+ static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
+ {
+-	dout("__remove_osd %p\n", osd);
++	dout("%s %p osd%d\n", __func__, osd, osd->o_osd);
+ 	WARN_ON(!list_empty(&osd->o_requests));
+ 	WARN_ON(!list_empty(&osd->o_linger_requests));
+ 
+-	rb_erase(&osd->o_node, &osdc->osds);
+ 	list_del_init(&osd->o_osd_lru);
+-	ceph_con_close(&osd->o_con);
+-	put_osd(osd);
++	rb_erase(&osd->o_node, &osdc->osds);
++	RB_CLEAR_NODE(&osd->o_node);
++}
++
++static void remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
++{
++	dout("%s %p osd%d\n", __func__, osd, osd->o_osd);
++
++	if (!RB_EMPTY_NODE(&osd->o_node)) {
++		ceph_con_close(&osd->o_con);
++		__remove_osd(osdc, osd);
++		put_osd(osd);
++	}
+ }
+ 
+ static void remove_all_osds(struct ceph_osd_client *osdc)
+@@ -1065,7 +1075,7 @@ static void remove_all_osds(struct ceph_osd_client *osdc)
+ 	while (!RB_EMPTY_ROOT(&osdc->osds)) {
+ 		struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
+ 						struct ceph_osd, o_node);
+-		__remove_osd(osdc, osd);
++		remove_osd(osdc, osd);
+ 	}
+ 	mutex_unlock(&osdc->request_mutex);
+ }
+@@ -1106,7 +1116,7 @@ static void remove_old_osds(struct ceph_osd_client *osdc)
+ 	list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
+ 		if (time_before(jiffies, osd->lru_ttl))
+ 			break;
+-		__remove_osd(osdc, osd);
++		remove_osd(osdc, osd);
+ 	}
+ 	mutex_unlock(&osdc->request_mutex);
+ }
+@@ -1121,8 +1131,7 @@ static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
+ 	dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
+ 	if (list_empty(&osd->o_requests) &&
+ 	    list_empty(&osd->o_linger_requests)) {
+-		__remove_osd(osdc, osd);
+-
++		remove_osd(osdc, osd);
+ 		return -ENODEV;
+ 	}
+ 
+@@ -1926,6 +1935,7 @@ static void reset_changed_osds(struct ceph_osd_client *osdc)
+ {
+ 	struct rb_node *p, *n;
+ 
++	dout("%s %p\n", __func__, osdc);
+ 	for (p = rb_first(&osdc->osds); p; p = n) {
+ 		struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node);
+ 
+diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
+index 5160c710f2eb..e361ea6f3fc8 100644
+--- a/net/ipv4/cipso_ipv4.c
++++ b/net/ipv4/cipso_ipv4.c
+@@ -378,20 +378,18 @@ static int cipso_v4_cache_check(const unsigned char *key,
+  * negative values on failure.
+  *
+  */
+-int cipso_v4_cache_add(const struct sk_buff *skb,
++int cipso_v4_cache_add(const unsigned char *cipso_ptr,
+ 		       const struct netlbl_lsm_secattr *secattr)
+ {
+ 	int ret_val = -EPERM;
+ 	u32 bkt;
+ 	struct cipso_v4_map_cache_entry *entry = NULL;
+ 	struct cipso_v4_map_cache_entry *old_entry = NULL;
+-	unsigned char *cipso_ptr;
+ 	u32 cipso_ptr_len;
+ 
+ 	if (!cipso_v4_cache_enabled || cipso_v4_cache_bucketsize <= 0)
+ 		return 0;
+ 
+-	cipso_ptr = CIPSO_V4_OPTPTR(skb);
+ 	cipso_ptr_len = cipso_ptr[1];
+ 
+ 	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+@@ -1579,6 +1577,33 @@ static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def,
+ }
+ 
+ /**
++ * cipso_v4_optptr - Find the CIPSO option in the packet
++ * @skb: the packet
++ *
++ * Description:
++ * Parse the packet's IP header looking for a CIPSO option.  Returns a pointer
++ * to the start of the CIPSO option on success, NULL if one is not found.
++ *
++ */
++unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
++{
++	const struct iphdr *iph = ip_hdr(skb);
++	unsigned char *optptr = (unsigned char *)&(ip_hdr(skb)[1]);
++	int optlen;
++	int taglen;
++
++	for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 0; ) {
++		if (optptr[0] == IPOPT_CIPSO)
++			return optptr;
++		taglen = optptr[1];
++		optlen -= taglen;
++		optptr += taglen;
++	}
++
++	return NULL;
++}
++
++/**
+  * cipso_v4_validate - Validate a CIPSO option
+  * @option: the start of the option, on error it is set to point to the error
+  *
+@@ -2119,8 +2144,8 @@ void cipso_v4_req_delattr(struct request_sock *req)
+  * on success and negative values on failure.
+  *
+  */
+-static int cipso_v4_getattr(const unsigned char *cipso,
+-			    struct netlbl_lsm_secattr *secattr)
++int cipso_v4_getattr(const unsigned char *cipso,
++		     struct netlbl_lsm_secattr *secattr)
+ {
+ 	int ret_val = -ENOMSG;
+ 	u32 doi;
+@@ -2305,22 +2330,6 @@ int cipso_v4_skbuff_delattr(struct sk_buff *skb)
+ 	return 0;
+ }
+ 
+-/**
+- * cipso_v4_skbuff_getattr - Get the security attributes from the CIPSO option
+- * @skb: the packet
+- * @secattr: the security attributes
+- *
+- * Description:
+- * Parse the given packet's CIPSO option and return the security attributes.
+- * Returns zero on success and negative values on failure.
+- *
+- */
+-int cipso_v4_skbuff_getattr(const struct sk_buff *skb,
+-			    struct netlbl_lsm_secattr *secattr)
+-{
+-	return cipso_v4_getattr(CIPSO_V4_OPTPTR(skb), secattr);
+-}
+-
+ /*
+  * Setup Functions
+  */
+diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
+index a845cd4cf21e..28cddc85b700 100644
+--- a/net/netlabel/netlabel_kapi.c
++++ b/net/netlabel/netlabel_kapi.c
+@@ -1065,10 +1065,12 @@ int netlbl_skbuff_getattr(const struct sk_buff *skb,
+ 			  u16 family,
+ 			  struct netlbl_lsm_secattr *secattr)
+ {
++	unsigned char *ptr;
++
+ 	switch (family) {
+ 	case AF_INET:
+-		if (CIPSO_V4_OPTEXIST(skb) &&
+-		    cipso_v4_skbuff_getattr(skb, secattr) == 0)
++		ptr = cipso_v4_optptr(skb);
++		if (ptr && cipso_v4_getattr(ptr, secattr) == 0)
+ 			return 0;
+ 		break;
+ #if IS_ENABLED(CONFIG_IPV6)
+@@ -1094,7 +1096,7 @@ int netlbl_skbuff_getattr(const struct sk_buff *skb,
+  */
+ void netlbl_skbuff_err(struct sk_buff *skb, int error, int gateway)
+ {
+-	if (CIPSO_V4_OPTEXIST(skb))
++	if (cipso_v4_optptr(skb))
+ 		cipso_v4_error(skb, error, gateway);
+ }
+ 
+@@ -1126,11 +1128,14 @@ void netlbl_cache_invalidate(void)
+ int netlbl_cache_add(const struct sk_buff *skb,
+ 		     const struct netlbl_lsm_secattr *secattr)
+ {
++	unsigned char *ptr;
++
+ 	if ((secattr->flags & NETLBL_SECATTR_CACHE) == 0)
+ 		return -ENOMSG;
+ 
+-	if (CIPSO_V4_OPTEXIST(skb))
+-		return cipso_v4_cache_add(skb, secattr);
++	ptr = cipso_v4_optptr(skb);
++	if (ptr)
++		return cipso_v4_cache_add(ptr, secattr);
+ 
+ 	return -ENOMSG;
+ }
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 05da12a33945..3f5d4d48f0cb 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -286,10 +286,8 @@ static struct rpc_xprt *rpc_clnt_set_transport(struct rpc_clnt *clnt,
+ 
+ static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
+ {
+-	clnt->cl_nodelen = strlen(nodename);
+-	if (clnt->cl_nodelen > UNX_MAXNODENAME)
+-		clnt->cl_nodelen = UNX_MAXNODENAME;
+-	memcpy(clnt->cl_nodename, nodename, clnt->cl_nodelen);
++	clnt->cl_nodelen = strlcpy(clnt->cl_nodename,
++			nodename, sizeof(clnt->cl_nodename));
+ }
+ 
+ static int rpc_client_register(struct rpc_clnt *clnt,
+@@ -365,6 +363,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
+ 	const struct rpc_version *version;
+ 	struct rpc_clnt *clnt = NULL;
+ 	const struct rpc_timeout *timeout;
++	const char *nodename = args->nodename;
+ 	int err;
+ 
+ 	/* sanity check the name before trying to print it */
+@@ -420,8 +419,10 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
+ 
+ 	atomic_set(&clnt->cl_count, 1);
+ 
++	if (nodename == NULL)
++		nodename = utsname()->nodename;
+ 	/* save the nodename */
+-	rpc_clnt_set_nodename(clnt, utsname()->nodename);
++	rpc_clnt_set_nodename(clnt, nodename);
+ 
+ 	err = rpc_client_register(clnt, args->authflavor, args->client_name);
+ 	if (err)
+@@ -576,6 +577,7 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
+ 	if (xprt == NULL)
+ 		goto out_err;
+ 	args->servername = xprt->servername;
++	args->nodename = clnt->cl_nodename;
+ 
+ 	new = rpc_new_client(args, xprt, clnt);
+ 	if (IS_ERR(new)) {
+diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
+index 05202012bcfc..cf5770d8f49a 100644
+--- a/net/sunrpc/rpcb_clnt.c
++++ b/net/sunrpc/rpcb_clnt.c
+@@ -355,7 +355,8 @@ out:
+ 	return result;
+ }
+ 
+-static struct rpc_clnt *rpcb_create(struct net *net, const char *hostname,
++static struct rpc_clnt *rpcb_create(struct net *net, const char *nodename,
++				    const char *hostname,
+ 				    struct sockaddr *srvaddr, size_t salen,
+ 				    int proto, u32 version)
+ {
+@@ -365,6 +366,7 @@ static struct rpc_clnt *rpcb_create(struct net *net, const char *hostname,
+ 		.address	= srvaddr,
+ 		.addrsize	= salen,
+ 		.servername	= hostname,
++		.nodename	= nodename,
+ 		.program	= &rpcb_program,
+ 		.version	= version,
+ 		.authflavor	= RPC_AUTH_UNIX,
+@@ -740,7 +742,9 @@ void rpcb_getport_async(struct rpc_task *task)
+ 	dprintk("RPC: %5u %s: trying rpcbind version %u\n",
+ 		task->tk_pid, __func__, bind_version);
+ 
+-	rpcb_clnt = rpcb_create(xprt->xprt_net, xprt->servername, sap, salen,
++	rpcb_clnt = rpcb_create(xprt->xprt_net,
++				clnt->cl_nodename,
++				xprt->servername, sap, salen,
+ 				xprt->prot, bind_version);
+ 	if (IS_ERR(rpcb_clnt)) {
+ 		status = PTR_ERR(rpcb_clnt);
+diff --git a/security/smack/smack.h b/security/smack/smack.h
+index b828a379377c..b48359c0da32 100644
+--- a/security/smack/smack.h
++++ b/security/smack/smack.h
+@@ -298,6 +298,16 @@ static inline struct smack_known *smk_of_task(const struct task_smack *tsp)
+ 	return tsp->smk_task;
+ }
+ 
++static inline struct smack_known *smk_of_task_struct(const struct task_struct *t)
++{
++	struct smack_known *skp;
++
++	rcu_read_lock();
++	skp = smk_of_task(__task_cred(t)->security);
++	rcu_read_unlock();
++	return skp;
++}
++
+ /*
+  * Present a pointer to the forked smack label entry in an task blob.
+  */
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index f1b17a476e12..a7178773dde7 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -43,8 +43,6 @@
+ #include <linux/binfmts.h>
+ #include "smack.h"
+ 
+-#define task_security(task)	(task_cred_xxx((task), security))
+-
+ #define TRANS_TRUE	"TRUE"
+ #define TRANS_TRUE_SIZE	4
+ 
+@@ -120,7 +118,7 @@ static int smk_bu_current(char *note, struct smack_known *oskp,
+ static int smk_bu_task(struct task_struct *otp, int mode, int rc)
+ {
+ 	struct task_smack *tsp = current_security();
+-	struct task_smack *otsp = task_security(otp);
++	struct smack_known *smk_task = smk_of_task_struct(otp);
+ 	char acc[SMK_NUM_ACCESS_TYPE + 1];
+ 
+ 	if (rc <= 0)
+@@ -128,7 +126,7 @@ static int smk_bu_task(struct task_struct *otp, int mode, int rc)
+ 
+ 	smk_bu_mode(mode, acc);
+ 	pr_info("Smack Bringup: (%s %s %s) %s to %s\n",
+-		tsp->smk_task->smk_known, otsp->smk_task->smk_known, acc,
++		tsp->smk_task->smk_known, smk_task->smk_known, acc,
+ 		current->comm, otp->comm);
+ 	return 0;
+ }
+@@ -345,7 +343,8 @@ static int smk_ptrace_rule_check(struct task_struct *tracer,
+ 		saip = &ad;
+ 	}
+ 
+-	tsp = task_security(tracer);
++	rcu_read_lock();
++	tsp = __task_cred(tracer)->security;
+ 	tracer_known = smk_of_task(tsp);
+ 
+ 	if ((mode & PTRACE_MODE_ATTACH) &&
+@@ -365,11 +364,14 @@ static int smk_ptrace_rule_check(struct task_struct *tracer,
+ 				  tracee_known->smk_known,
+ 				  0, rc, saip);
+ 
++		rcu_read_unlock();
+ 		return rc;
+ 	}
+ 
+ 	/* In case of rule==SMACK_PTRACE_DEFAULT or mode==PTRACE_MODE_READ */
+ 	rc = smk_tskacc(tsp, tracee_known, smk_ptrace_mode(mode), saip);
++
++	rcu_read_unlock();
+ 	return rc;
+ }
+ 
+@@ -396,7 +398,7 @@ static int smack_ptrace_access_check(struct task_struct *ctp, unsigned int mode)
+ 	if (rc != 0)
+ 		return rc;
+ 
+-	skp = smk_of_task(task_security(ctp));
++	skp = smk_of_task_struct(ctp);
+ 
+ 	rc = smk_ptrace_rule_check(current, skp, mode, __func__);
+ 	return rc;
+@@ -1826,7 +1828,7 @@ static int smk_curacc_on_task(struct task_struct *p, int access,
+ 				const char *caller)
+ {
+ 	struct smk_audit_info ad;
+-	struct smack_known *skp = smk_of_task(task_security(p));
++	struct smack_known *skp = smk_of_task_struct(p);
+ 	int rc;
+ 
+ 	smk_ad_init(&ad, caller, LSM_AUDIT_DATA_TASK);
+@@ -1879,7 +1881,7 @@ static int smack_task_getsid(struct task_struct *p)
+  */
+ static void smack_task_getsecid(struct task_struct *p, u32 *secid)
+ {
+-	struct smack_known *skp = smk_of_task(task_security(p));
++	struct smack_known *skp = smk_of_task_struct(p);
+ 
+ 	*secid = skp->smk_secid;
+ }
+@@ -1986,7 +1988,7 @@ static int smack_task_kill(struct task_struct *p, struct siginfo *info,
+ {
+ 	struct smk_audit_info ad;
+ 	struct smack_known *skp;
+-	struct smack_known *tkp = smk_of_task(task_security(p));
++	struct smack_known *tkp = smk_of_task_struct(p);
+ 	int rc;
+ 
+ 	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
+@@ -2040,7 +2042,7 @@ static int smack_task_wait(struct task_struct *p)
+ static void smack_task_to_inode(struct task_struct *p, struct inode *inode)
+ {
+ 	struct inode_smack *isp = inode->i_security;
+-	struct smack_known *skp = smk_of_task(task_security(p));
++	struct smack_known *skp = smk_of_task_struct(p);
+ 
+ 	isp->smk_inode = skp;
+ }
+@@ -3200,7 +3202,7 @@ unlockandout:
+  */
+ static int smack_getprocattr(struct task_struct *p, char *name, char **value)
+ {
+-	struct smack_known *skp = smk_of_task(task_security(p));
++	struct smack_known *skp = smk_of_task_struct(p);
+ 	char *cp;
+ 	int slen;
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 65f1f4e18ea5..0c993f7f9181 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4844,6 +4844,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x18e6, "HP", ALC269_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED),
+ 	/* ALC282 */
++	SND_PCI_QUIRK(0x103c, 0x21f9, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x2210, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x2214, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x2236, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index 605d14003d25..6d36c5b78805 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -99,6 +99,7 @@ enum {
+ 	STAC_HP_ENVY_BASS,
+ 	STAC_HP_BNB13_EQ,
+ 	STAC_HP_ENVY_TS_BASS,
++	STAC_92HD83XXX_GPIO10_EAPD,
+ 	STAC_92HD83XXX_MODELS
+ };
+ 
+@@ -2141,6 +2142,19 @@ static void stac92hd83xxx_fixup_headset_jack(struct hda_codec *codec,
+ 		spec->headset_jack = 1;
+ }
+ 
++static void stac92hd83xxx_fixup_gpio10_eapd(struct hda_codec *codec,
++					    const struct hda_fixup *fix,
++					    int action)
++{
++	struct sigmatel_spec *spec = codec->spec;
++
++	if (action != HDA_FIXUP_ACT_PRE_PROBE)
++		return;
++	spec->eapd_mask = spec->gpio_mask = spec->gpio_dir =
++		spec->gpio_data = 0x10;
++	spec->eapd_switch = 0;
++}
++
+ static const struct hda_verb hp_bnb13_eq_verbs[] = {
+ 	/* 44.1KHz base */
+ 	{ 0x22, 0x7A6, 0x3E },
+@@ -2656,6 +2670,10 @@ static const struct hda_fixup stac92hd83xxx_fixups[] = {
+ 			{}
+ 		},
+ 	},
++	[STAC_92HD83XXX_GPIO10_EAPD] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = stac92hd83xxx_fixup_gpio10_eapd,
++	},
+ };
+ 
+ static const struct hda_model_fixup stac92hd83xxx_models[] = {
+@@ -2861,6 +2879,8 @@ static const struct snd_pci_quirk stac92hd83xxx_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x148a,
+ 		      "HP Mini", STAC_92HD83XXX_HP_LED),
+ 	SND_PCI_QUIRK_VENDOR(PCI_VENDOR_ID_HP, "HP", STAC_92HD83XXX_HP),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_TOSHIBA, 0xfa91,
++		      "Toshiba Satellite S50D", STAC_92HD83XXX_GPIO10_EAPD),
+ 	{} /* terminator */
+ };
+ 
+diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c
+index 6abc2ac8fffb..e76857277695 100644
+--- a/sound/pci/riptide/riptide.c
++++ b/sound/pci/riptide/riptide.c
+@@ -2030,32 +2030,43 @@ snd_riptide_joystick_probe(struct pci_dev *pci, const struct pci_device_id *id)
+ {
+ 	static int dev;
+ 	struct gameport *gameport;
++	int ret;
+ 
+ 	if (dev >= SNDRV_CARDS)
+ 		return -ENODEV;
++
+ 	if (!enable[dev]) {
+-		dev++;
+-		return -ENOENT;
++		ret = -ENOENT;
++		goto inc_dev;
+ 	}
+ 
+-	if (!joystick_port[dev++])
+-		return 0;
++	if (!joystick_port[dev]) {
++		ret = 0;
++		goto inc_dev;
++	}
+ 
+ 	gameport = gameport_allocate_port();
+-	if (!gameport)
+-		return -ENOMEM;
++	if (!gameport) {
++		ret = -ENOMEM;
++		goto inc_dev;
++	}
+ 	if (!request_region(joystick_port[dev], 8, "Riptide gameport")) {
+ 		snd_printk(KERN_WARNING
+ 			   "Riptide: cannot grab gameport 0x%x\n",
+ 			   joystick_port[dev]);
+ 		gameport_free_port(gameport);
+-		return -EBUSY;
++		ret = -EBUSY;
++		goto inc_dev;
+ 	}
+ 
+ 	gameport->io = joystick_port[dev];
+ 	gameport_register_port(gameport);
+ 	pci_set_drvdata(pci, gameport);
+-	return 0;
++
++	ret = 0;
++inc_dev:
++	dev++;
++	return ret;
+ }
+ 
+ static void snd_riptide_joystick_remove(struct pci_dev *pci)
+diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
+index 3342705a5715..13bc2011d497 100644
+--- a/sound/pci/rme9652/hdspm.c
++++ b/sound/pci/rme9652/hdspm.c
+@@ -6086,6 +6086,9 @@ static int snd_hdspm_playback_open(struct snd_pcm_substream *substream)
+ 		snd_pcm_hw_constraint_minmax(runtime,
+ 					     SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ 					     64, 8192);
++		snd_pcm_hw_constraint_minmax(runtime,
++					     SNDRV_PCM_HW_PARAM_PERIODS,
++					     2, 2);
+ 		break;
+ 	}
+ 
+@@ -6160,6 +6163,9 @@ static int snd_hdspm_capture_open(struct snd_pcm_substream *substream)
+ 		snd_pcm_hw_constraint_minmax(runtime,
+ 					     SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ 					     64, 8192);
++		snd_pcm_hw_constraint_minmax(runtime,
++					     SNDRV_PCM_HW_PARAM_PERIODS,
++					     2, 2);
+ 		break;
+ 	}
+ 
+diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
+index 8349f982a586..ef2c70e77d91 100644
+--- a/sound/soc/codecs/Kconfig
++++ b/sound/soc/codecs/Kconfig
+@@ -525,7 +525,7 @@ config SND_SOC_RT5677
+ 
+ config SND_SOC_RT5677_SPI
+ 	tristate
+-	default SND_SOC_RT5677
++	default SND_SOC_RT5677 && SPI
+ 
+ #Freescale sgtl5000 codec
+ config SND_SOC_SGTL5000
+diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
+index 8a0833de1665..1e574c85b9c8 100644
+--- a/sound/soc/codecs/rt5670.c
++++ b/sound/soc/codecs/rt5670.c
+@@ -2522,6 +2522,7 @@ static struct snd_soc_codec_driver soc_codec_dev_rt5670 = {
+ static const struct regmap_config rt5670_regmap = {
+ 	.reg_bits = 8,
+ 	.val_bits = 16,
++	.use_single_rw = true,
+ 	.max_register = RT5670_VENDOR_ID2 + 1 + (ARRAY_SIZE(rt5670_ranges) *
+ 					       RT5670_PR_SPACING),
+ 	.volatile_reg = rt5670_volatile_register,
+diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
+index 918ada9738b0..cc9098830ed8 100644
+--- a/sound/soc/codecs/rt5677.c
++++ b/sound/soc/codecs/rt5677.c
+@@ -702,6 +702,9 @@ static int rt5677_set_dsp_vad(struct snd_soc_codec *codec, bool on)
+ 	static bool activity;
+ 	int ret;
+ 
++	if (!IS_ENABLED(CONFIG_SND_SOC_RT5677_SPI))
++		return -ENXIO;
++
+ 	if (on && !activity) {
+ 		activity = true;
+ 
+diff --git a/sound/soc/davinci/Kconfig b/sound/soc/davinci/Kconfig
+index 8e948c63f3d9..2b81ca418d2a 100644
+--- a/sound/soc/davinci/Kconfig
++++ b/sound/soc/davinci/Kconfig
+@@ -58,13 +58,12 @@ choice
+ 	depends on MACH_DAVINCI_DM365_EVM
+ 
+ config SND_DM365_AIC3X_CODEC
+-	bool "Audio Codec - AIC3101"
++	tristate "Audio Codec - AIC3101"
+ 	help
+ 	  Say Y if you want to add support for AIC3101 audio codec
+ 
+ config SND_DM365_VOICE_CODEC
+ 	tristate "Voice Codec - CQ93VC"
+-	depends on SND_DAVINCI_SOC
+ 	select MFD_DAVINCI_VOICECODEC
+ 	select SND_DAVINCI_SOC_VCIF
+ 	select SND_SOC_CQ0093VC
+diff --git a/sound/soc/intel/sst/sst.h b/sound/soc/intel/sst/sst.h
+index 7f4bbfcbc6f5..562bc483d6b7 100644
+--- a/sound/soc/intel/sst/sst.h
++++ b/sound/soc/intel/sst/sst.h
+@@ -58,6 +58,7 @@ enum sst_algo_ops {
+ #define SST_BLOCK_TIMEOUT	1000
+ 
+ #define FW_SIGNATURE_SIZE	4
++#define FW_NAME_SIZE		32
+ 
+ /* stream states */
+ enum sst_stream_states {
+@@ -426,7 +427,7 @@ struct intel_sst_drv {
+ 	 * Holder for firmware name. Due to async call it needs to be
+ 	 * persistent till worker thread gets called
+ 	 */
+-	char firmware_name[20];
++	char firmware_name[FW_NAME_SIZE];
+ };
+ 
+ /* misc definitions */
+diff --git a/sound/soc/intel/sst/sst_acpi.c b/sound/soc/intel/sst/sst_acpi.c
+index b3360139c41a..51f83bad5319 100644
+--- a/sound/soc/intel/sst/sst_acpi.c
++++ b/sound/soc/intel/sst/sst_acpi.c
+@@ -47,7 +47,7 @@ struct sst_machines {
+ 	char board[32];
+ 	char machine[32];
+ 	void (*machine_quirk)(void);
+-	char firmware[32];
++	char firmware[FW_NAME_SIZE];
+ 	struct sst_platform_info *pdata;
+ 
+ };
+diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c
+index 396dbd51a64f..a9615a574546 100644
+--- a/sound/soc/pxa/mioa701_wm9713.c
++++ b/sound/soc/pxa/mioa701_wm9713.c
+@@ -81,7 +81,7 @@ static int rear_amp_power(struct snd_soc_codec *codec, int power)
+ static int rear_amp_event(struct snd_soc_dapm_widget *widget,
+ 			  struct snd_kcontrol *kctl, int event)
+ {
+-	struct snd_soc_codec *codec = widget->codec;
++	struct snd_soc_codec *codec = widget->dapm->card->rtd[0].codec;
+ 
+ 	return rear_amp_power(codec, SND_SOC_DAPM_EVENT_ON(event));
+ }
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index a7398412310b..a0795ba46ac5 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1122,6 +1122,7 @@ int snd_usb_select_mode_quirk(struct snd_usb_substream *subs,
+ 	int err;
+ 
+ 	switch (subs->stream->chip->usb_id) {
++	case USB_ID(0x154e, 0x1003): /* Denon DA-300USB */
+ 	case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */
+ 	case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */
+ 
+@@ -1201,6 +1202,7 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
+ 	    (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) {
+ 
+ 		switch (le16_to_cpu(dev->descriptor.idProduct)) {
++		case 0x1003: /* Denon DA300-USB */
+ 		case 0x3005: /* Marantz HD-DAC1 */
+ 		case 0x3006: /* Marantz SA-14S1 */
+ 			mdelay(20);
+@@ -1262,6 +1264,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+ 
+ 	/* Denon/Marantz devices with USB DAC functionality */
+ 	switch (chip->usb_id) {
++	case USB_ID(0x154e, 0x1003): /* Denon DA300-USB */
+ 	case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */
+ 	case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */
+ 		if (fp->altsetting == 2)
+diff --git a/tools/perf/util/cloexec.c b/tools/perf/util/cloexec.c
+index 47b78b3f0325..6da965bdbc2c 100644
+--- a/tools/perf/util/cloexec.c
++++ b/tools/perf/util/cloexec.c
+@@ -25,6 +25,10 @@ static int perf_flag_probe(void)
+ 	if (cpu < 0)
+ 		cpu = 0;
+ 
++	/*
++	 * Using -1 for the pid is a workaround to avoid gratuitous jump label
++	 * changes.
++	 */
+ 	while (1) {
+ 		/* check cloexec flag */
+ 		fd = sys_perf_event_open(&attr, pid, cpu, -1,
+@@ -47,16 +51,24 @@ static int perf_flag_probe(void)
+ 		  err, strerror_r(err, sbuf, sizeof(sbuf)));
+ 
+ 	/* not supported, confirm error related to PERF_FLAG_FD_CLOEXEC */
+-	fd = sys_perf_event_open(&attr, pid, cpu, -1, 0);
++	while (1) {
++		fd = sys_perf_event_open(&attr, pid, cpu, -1, 0);
++		if (fd < 0 && pid == -1 && errno == EACCES) {
++			pid = 0;
++			continue;
++		}
++		break;
++	}
+ 	err = errno;
+ 
++	if (fd >= 0)
++		close(fd);
++
+ 	if (WARN_ONCE(fd < 0 && err != EBUSY,
+ 		      "perf_event_open(..., 0) failed unexpectedly with error %d (%s)\n",
+ 		      err, strerror_r(err, sbuf, sizeof(sbuf))))
+ 		return -1;
+ 
+-	close(fd);
+-
+ 	return 0;
+ }
+ 



* [gentoo-commits] proj/linux-patches:3.19 commit in: /
@ 2015-03-15 18:05 Mike Pagano
  0 siblings, 0 replies; 19+ messages in thread
From: Mike Pagano @ 2015-03-15 18:05 UTC (permalink / raw
  To: gentoo-commits

commit:     4fe9a0402cb9c9366ec6570ebc12569ac414ca6a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Mar 15 18:05:20 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Mar 15 18:05:20 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4fe9a040

Patch to add an option to the Gentoo menu that enables CGROUPS for cgroup, IPC_NS for ipc-sandbox, and NET_NS for network-sandbox.
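
As a rough illustration (option names are taken from the patch below; the rest
of the fragment depends on what else the kernel configuration selects), a
kernel built with the new menu entry enabled should end up with at least the
following in its .config:

	CONFIG_GENTOO_LINUX=y
	CONFIG_GENTOO_LINUX_PORTAGE=y
	CONFIG_CGROUPS=y
	CONFIG_NAMESPACES=y
	CONFIG_IPC_NS=y
	CONFIG_NET_NS=y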

 4567_distro-Gentoo-Kconfig.patch | 38 ++++++++++++++++++++++++++++++--------
 1 file changed, 30 insertions(+), 8 deletions(-)

diff --git a/4567_distro-Gentoo-Kconfig.patch b/4567_distro-Gentoo-Kconfig.patch
index 690454a..c7af596 100644
--- a/4567_distro-Gentoo-Kconfig.patch
+++ b/4567_distro-Gentoo-Kconfig.patch
@@ -1,5 +1,5 @@
---- a/Kconfig	2014-04-02 09:45:05.389224541 -0400
-+++ b/Kconfig	2014-04-02 09:45:39.269224273 -0400
+--- a/Kconfig
++++ b/Kconfig
 @@ -8,4 +8,6 @@ config SRCARCH
  	string
  	option env="SRCARCH"
@@ -7,9 +7,9 @@
 +source "distro/Kconfig"
 +
  source "arch/$SRCARCH/Kconfig"
---- a/distro/Kconfig	1969-12-31 19:00:00.000000000 -0500
-+++ b/distro/Kconfig	2015-01-02 13:54:45.589830665 -0500
-@@ -0,0 +1,109 @@
+--- /dev/null
++++ b/distro/Kconfig
+@@ -0,0 +1,131 @@
 +menu "Gentoo Linux"
 +
 +config GENTOO_LINUX
@@ -30,7 +30,7 @@
 +
 +	depends on GENTOO_LINUX
 +	default y if GENTOO_LINUX
-+	
++
 +	select DEVTMPFS
 +	select TMPFS
 +
@@ -51,7 +51,29 @@
 +		boot process; if not available, it causes sysfs and udev to malfunction.
 +
 +		To ensure Gentoo Linux boots, it is best to leave this setting enabled;
-+		if you run a custom setup, you could consider whether to disable this. 
++		if you run a custom setup, you could consider whether to disable this.
++
++config GENTOO_LINUX_PORTAGE
++	bool "Select options required by Portage features"
++
++	depends on GENTOO_LINUX
++	default y if GENTOO_LINUX
++
++	select CGROUPS
++	select NAMESPACES
++	select IPC_NS
++	select NET_NS
++
++	help
++		This enables options required by various Portage FEATURES.
++		Currently this selects:
++
++		CGROUPS     (required for FEATURES=cgroup)
++		IPC_NS      (required for FEATURES=ipc-sandbox)
++		NET_NS      (required for FEATURES=network-sandbox)
++
++		It is highly recommended that you leave this enabled as these FEATURES
++		are, or will soon be, enabled by default.
 +
 +menu "Support for init systems, system and service managers"
 +	visible if GENTOO_LINUX
@@ -93,7 +115,7 @@
 +	select FHANDLE
 +	select INOTIFY_USER
 +	select NET
-+	select NET_NS 
++	select NET_NS
 +	select PROC_FS
 +	select SIGNALFD
 +	select SYSFS



* [gentoo-commits] proj/linux-patches:3.19 commit in: /
@ 2015-03-18 23:26 Mike Pagano
  0 siblings, 0 replies; 19+ messages in thread
From: Mike Pagano @ 2015-03-18 23:26 UTC (permalink / raw
  To: gentoo-commits

commit:     133d401378131c5ed1f2523156d4c95b757f1722
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Mar 18 23:26:42 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Mar 18 23:26:42 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=133d4013

Linux patch 3.19.2

 0000_README             |    4 +
 1001_linux-3.19.2.patch | 6977 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6981 insertions(+)

diff --git a/0000_README b/0000_README
index 87dcbc3..48e21ed 100644
--- a/0000_README
+++ b/0000_README
@@ -47,6 +47,10 @@ Patch:  1000_linux-3.19.1.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.19.1
 
+Patch:  1001_linux-3.19.2.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.19.2
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1001_linux-3.19.2.patch b/1001_linux-3.19.2.patch
new file mode 100644
index 0000000..f2ae059
--- /dev/null
+++ b/1001_linux-3.19.2.patch
@@ -0,0 +1,6977 @@
+diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
+index aee73e78c7d4..02f8331edb8b 100644
+--- a/Documentation/stable_kernel_rules.txt
++++ b/Documentation/stable_kernel_rules.txt
+@@ -32,18 +32,42 @@ Procedure for submitting patches to the -stable tree:
+  - If the patch covers files in net/ or drivers/net please follow netdev stable
+    submission guidelines as described in
+    Documentation/networking/netdev-FAQ.txt
+- - Send the patch, after verifying that it follows the above rules, to
+-   stable@vger.kernel.org.  You must note the upstream commit ID in the
+-   changelog of your submission, as well as the kernel version you wish
+-   it to be applied to.
+- - To have the patch automatically included in the stable tree, add the tag
++ - Security patches should not be handled (solely) by the -stable review
++   process but should follow the procedures in Documentation/SecurityBugs.
++
++For all other submissions, choose one of the following procedures:
++
++   --- Option 1 ---
++
++   To have the patch automatically included in the stable tree, add the tag
+      Cc: stable@vger.kernel.org
+    in the sign-off area. Once the patch is merged it will be applied to
+    the stable tree without anything else needing to be done by the author
+    or subsystem maintainer.
+- - If the patch requires other patches as prerequisites which can be
+-   cherry-picked, then this can be specified in the following format in
+-   the sign-off area:
++
++   --- Option 2 ---
++
++   After the patch has been merged to Linus' tree, send an email to
++   stable@vger.kernel.org containing the subject of the patch, the commit ID,
++   why you think it should be applied, and what kernel version you wish it to
++   be applied to.
++
++   --- Option 3 ---
++
++   Send the patch, after verifying that it follows the above rules, to
++   stable@vger.kernel.org.  You must note the upstream commit ID in the
++   changelog of your submission, as well as the kernel version you wish
++   it to be applied to.
++
++Option 1 is probably the easiest and most common. Options 2 and 3 are more
++useful if the patch isn't deemed worthy at the time it is applied to a public
++git tree (for instance, because it deserves more regression testing first).
++Option 3 is especially useful if the patch needs some special handling to apply
++to an older kernel (e.g., if API's have changed in the meantime).
++
++Additionally, some patches submitted via Option 1 may have additional patch
++prerequisites which can be cherry-picked. This can be specified in the following
++format in the sign-off area:
+ 
+      Cc: <stable@vger.kernel.org> # 3.3.x: a1f84a3: sched: Check for idle
+      Cc: <stable@vger.kernel.org> # 3.3.x: 1b9508f: sched: Rate-limit newidle
+@@ -57,13 +81,13 @@ Procedure for submitting patches to the -stable tree:
+      git cherry-pick fd21073
+      git cherry-pick <this commit>
+ 
++Following the submission:
++
+  - The sender will receive an ACK when the patch has been accepted into the
+    queue, or a NAK if the patch is rejected.  This response might take a few
+    days, according to the developer's schedules.
+  - If accepted, the patch will be added to the -stable queue, for review by
+    other developers and by the relevant subsystem maintainer.
+- - Security patches should not be sent to this alias, but instead to the
+-   documented security@kernel.org address.
+ 
+ 
+ Review cycle:
+diff --git a/Makefile b/Makefile
+index 688777b17869..e49665a2b5ac 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 19
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = Diseased Newt
+ 
+diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
+index 210fe97464c3..c750af161979 100644
+--- a/arch/arc/include/asm/processor.h
++++ b/arch/arc/include/asm/processor.h
+@@ -75,18 +75,19 @@ unsigned long thread_saved_pc(struct task_struct *t);
+ #define release_segments(mm)        do { } while (0)
+ 
+ #define KSTK_EIP(tsk)   (task_pt_regs(tsk)->ret)
++#define KSTK_ESP(tsk)   (task_pt_regs(tsk)->sp)
+ 
+ /*
+  * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode.
+  * Look in process.c for details of kernel stack layout
+  */
+-#define KSTK_ESP(tsk)   (tsk->thread.ksp)
++#define TSK_K_ESP(tsk)		(tsk->thread.ksp)
+ 
+-#define KSTK_REG(tsk, off)	(*((unsigned int *)(KSTK_ESP(tsk) + \
++#define TSK_K_REG(tsk, off)	(*((unsigned int *)(TSK_K_ESP(tsk) + \
+ 					sizeof(struct callee_regs) + off)))
+ 
+-#define KSTK_BLINK(tsk) KSTK_REG(tsk, 4)
+-#define KSTK_FP(tsk)    KSTK_REG(tsk, 0)
++#define TSK_K_BLINK(tsk)	TSK_K_REG(tsk, 4)
++#define TSK_K_FP(tsk)		TSK_K_REG(tsk, 0)
+ 
+ extern void start_thread(struct pt_regs * regs, unsigned long pc,
+ 			 unsigned long usp);
+diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
+index 9ce47cfe2303..fb98769b6a98 100644
+--- a/arch/arc/kernel/stacktrace.c
++++ b/arch/arc/kernel/stacktrace.c
+@@ -64,9 +64,9 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
+ 
+ 		frame_info->task = tsk;
+ 
+-		frame_info->regs.r27 = KSTK_FP(tsk);
+-		frame_info->regs.r28 = KSTK_ESP(tsk);
+-		frame_info->regs.r31 = KSTK_BLINK(tsk);
++		frame_info->regs.r27 = TSK_K_FP(tsk);
++		frame_info->regs.r28 = TSK_K_ESP(tsk);
++		frame_info->regs.r31 = TSK_K_BLINK(tsk);
+ 		frame_info->regs.r63 = (unsigned int)__switch_to;
+ 
+ 		/* In the prologue of __switch_to, first FP is saved on stack
+diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h
+index c1388d40663b..bd6437f67dc0 100644
+--- a/arch/mips/kvm/trace.h
++++ b/arch/mips/kvm/trace.h
+@@ -24,18 +24,18 @@ TRACE_EVENT(kvm_exit,
+ 	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
+ 	    TP_ARGS(vcpu, reason),
+ 	    TP_STRUCT__entry(
+-			__field(struct kvm_vcpu *, vcpu)
++			__field(unsigned long, pc)
+ 			__field(unsigned int, reason)
+ 	    ),
+ 
+ 	    TP_fast_assign(
+-			__entry->vcpu = vcpu;
++			__entry->pc = vcpu->arch.pc;
+ 			__entry->reason = reason;
+ 	    ),
+ 
+ 	    TP_printk("[%s]PC: 0x%08lx",
+ 		      kvm_mips_exit_types_str[__entry->reason],
+-		      __entry->vcpu->arch.pc)
++		      __entry->pc)
+ );
+ 
+ #endif /* _TRACE_KVM_H */
+diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h
+index f09a22fa1bd7..bfa8f8ac51fa 100644
+--- a/arch/powerpc/include/asm/pnv-pci.h
++++ b/arch/powerpc/include/asm/pnv-pci.h
+@@ -19,7 +19,7 @@ int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
+ int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num);
+ void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num);
+ int pnv_cxl_get_irq_count(struct pci_dev *dev);
+-struct device_node *pnv_pci_to_phb_node(struct pci_dev *dev);
++struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev);
+ 
+ #ifdef CONFIG_CXL_BASE
+ int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
+diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
+index fac88ed8a915..6a9a255d8058 100644
+--- a/arch/powerpc/platforms/powernv/pci-ioda.c
++++ b/arch/powerpc/platforms/powernv/pci-ioda.c
+@@ -1460,13 +1460,13 @@ static void set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq)
+ 
+ #ifdef CONFIG_CXL_BASE
+ 
+-struct device_node *pnv_pci_to_phb_node(struct pci_dev *dev)
++struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
+ {
+ 	struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ 
+-	return hose->dn;
++	return of_node_get(hose->dn);
+ }
+-EXPORT_SYMBOL(pnv_pci_to_phb_node);
++EXPORT_SYMBOL(pnv_pci_get_phb_node);
+ 
+ int pnv_phb_to_cxl(struct pci_dev *dev)
+ {
+diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
+index 5fa9770035dc..c9a6d68b8d62 100644
+--- a/arch/x86/include/asm/xsave.h
++++ b/arch/x86/include/asm/xsave.h
+@@ -82,18 +82,15 @@ static inline int xsave_state_booting(struct xsave_struct *fx, u64 mask)
+ 	if (boot_cpu_has(X86_FEATURE_XSAVES))
+ 		asm volatile("1:"XSAVES"\n\t"
+ 			"2:\n\t"
+-			: : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
++			     xstate_fault
++			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+ 			:   "memory");
+ 	else
+ 		asm volatile("1:"XSAVE"\n\t"
+ 			"2:\n\t"
+-			: : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
++			     xstate_fault
++			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+ 			:   "memory");
+-
+-	asm volatile(xstate_fault
+-		     : "0" (0)
+-		     : "memory");
+-
+ 	return err;
+ }
+ 
+@@ -112,18 +109,15 @@ static inline int xrstor_state_booting(struct xsave_struct *fx, u64 mask)
+ 	if (boot_cpu_has(X86_FEATURE_XSAVES))
+ 		asm volatile("1:"XRSTORS"\n\t"
+ 			"2:\n\t"
+-			: : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
++			     xstate_fault
++			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+ 			:   "memory");
+ 	else
+ 		asm volatile("1:"XRSTOR"\n\t"
+ 			"2:\n\t"
+-			: : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
++			     xstate_fault
++			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+ 			:   "memory");
+-
+-	asm volatile(xstate_fault
+-		     : "0" (0)
+-		     : "memory");
+-
+ 	return err;
+ }
+ 
+@@ -149,9 +143,9 @@ static inline int xsave_state(struct xsave_struct *fx, u64 mask)
+ 	 */
+ 	alternative_input_2(
+ 		"1:"XSAVE,
+-		"1:"XSAVEOPT,
++		XSAVEOPT,
+ 		X86_FEATURE_XSAVEOPT,
+-		"1:"XSAVES,
++		XSAVES,
+ 		X86_FEATURE_XSAVES,
+ 		[fx] "D" (fx), "a" (lmask), "d" (hmask) :
+ 		"memory");
+@@ -178,7 +172,7 @@ static inline int xrstor_state(struct xsave_struct *fx, u64 mask)
+ 	 */
+ 	alternative_input(
+ 		"1: " XRSTOR,
+-		"1: " XRSTORS,
++		XRSTORS,
+ 		X86_FEATURE_XSAVES,
+ 		"D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+ 		: "memory");
+diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
+index 9ebaf63ba182..4ee9a2315f80 100644
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -334,11 +334,14 @@ ENTRY(ret_from_fork)
+ 	testl $3, CS-ARGOFFSET(%rsp)		# from kernel_thread?
+ 	jz   1f
+ 
+-	testl $_TIF_IA32, TI_flags(%rcx)	# 32-bit compat task needs IRET
+-	jnz  int_ret_from_sys_call
+-
+-	RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
+-	jmp ret_from_sys_call			# go to the SYSRET fastpath
++	/*
++	 * By the time we get here, we have no idea whether our pt_regs,
++	 * ti flags, and ti status came from the 64-bit SYSCALL fast path,
++	 * the slow path, or one of the ia32entry paths.
++	 * Use int_ret_from_sys_call to return, since it can safely handle
++	 * all of the above.
++	 */
++	jmp  int_ret_from_sys_call
+ 
+ 1:
+ 	subq $REST_SKIP, %rsp	# leave space for volatiles
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index de12c1d379f1..b24c2d84dc20 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -4863,7 +4863,8 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
+ 		if (rc != X86EMUL_CONTINUE)
+ 			goto done;
+ 	}
+-	ctxt->dst.orig_val = ctxt->dst.val;
++	/* Copy full 64-bit value for CMPXCHG8B.  */
++	ctxt->dst.orig_val64 = ctxt->dst.val64;
+ 
+ special_insn:
+ 
+diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
+index 7d5880ded78a..e7d257aef9fc 100644
+--- a/drivers/acpi/acpi_lpss.c
++++ b/drivers/acpi/acpi_lpss.c
+@@ -65,6 +65,7 @@ struct lpss_private_data;
+ 
+ struct lpss_device_desc {
+ 	unsigned int flags;
++	const char *clk_con_id;
+ 	unsigned int prv_offset;
+ 	size_t prv_size_override;
+ 	void (*setup)(struct lpss_private_data *pdata);
+@@ -140,6 +141,7 @@ static struct lpss_device_desc lpt_i2c_dev_desc = {
+ 
+ static struct lpss_device_desc lpt_uart_dev_desc = {
+ 	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
++	.clk_con_id = "baudclk",
+ 	.prv_offset = 0x800,
+ 	.setup = lpss_uart_setup,
+ };
+@@ -156,6 +158,7 @@ static struct lpss_device_desc byt_pwm_dev_desc = {
+ 
+ static struct lpss_device_desc byt_uart_dev_desc = {
+ 	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
++	.clk_con_id = "baudclk",
+ 	.prv_offset = 0x800,
+ 	.setup = lpss_uart_setup,
+ };
+@@ -313,7 +316,7 @@ out:
+ 		return PTR_ERR(clk);
+ 
+ 	pdata->clk = clk;
+-	clk_register_clkdev(clk, NULL, devname);
++	clk_register_clkdev(clk, dev_desc->clk_con_id, devname);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
+index 032db459370f..3647ce71154c 100644
+--- a/drivers/acpi/video.c
++++ b/drivers/acpi/video.c
+@@ -2149,6 +2149,17 @@ EXPORT_SYMBOL(acpi_video_unregister_backlight);
+ 
+ static int __init acpi_video_init(void)
+ {
++	/*
++	 * Let the module load even if ACPI is disabled (e.g. due to
++	 * a broken BIOS) so that i915.ko can still be loaded on such
++	 * old systems without an AcpiOpRegion.
++	 *
++	 * acpi_video_register() will report -ENODEV later as well due
++	 * to acpi_disabled when i915.ko tries to register itself afterwards.
++	 */
++	if (acpi_disabled)
++		return 0;
++
+ 	dmi_check_system(video_dmi_table);
+ 
+ 	if (intel_opregion_present())
+diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
+index 51fd87fb7ba6..da00eeb95dad 100644
+--- a/drivers/clk/clk-gate.c
++++ b/drivers/clk/clk-gate.c
+@@ -128,7 +128,7 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
+ 	struct clk_init_data init;
+ 
+ 	if (clk_gate_flags & CLK_GATE_HIWORD_MASK) {
+-		if (bit_idx > 16) {
++		if (bit_idx > 15) {
+ 			pr_err("gate bit exceeds LOWORD field\n");
+ 			return ERR_PTR(-EINVAL);
+ 		}
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index d48ac71c6c8b..2f14f57507fb 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -343,13 +343,9 @@ unlock:
+ static void clk_debug_unregister(struct clk *clk)
+ {
+ 	mutex_lock(&clk_debug_lock);
+-	if (!clk->dentry)
+-		goto out;
+-
+ 	hlist_del_init(&clk->debug_node);
+ 	debugfs_remove_recursive(clk->dentry);
+ 	clk->dentry = NULL;
+-out:
+ 	mutex_unlock(&clk_debug_lock);
+ }
+ 
+diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
+index 570202582dcf..80f5693be6ee 100644
+--- a/drivers/clk/sunxi/clk-sunxi.c
++++ b/drivers/clk/sunxi/clk-sunxi.c
+@@ -413,6 +413,7 @@ static struct clk_factors_config sun6i_a31_pll1_config = {
+ 	.kwidth = 2,
+ 	.mshift = 0,
+ 	.mwidth = 2,
++	.n_start = 1,
+ };
+ 
+ static struct clk_factors_config sun8i_a23_pll1_config = {
+diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
+index 9037bebd69f7..f870aad57711 100644
+--- a/drivers/clk/zynq/clkc.c
++++ b/drivers/clk/zynq/clkc.c
+@@ -303,6 +303,7 @@ static void __init zynq_clk_setup(struct device_node *np)
+ 	clks[cpu_2x] = clk_register_gate(NULL, clk_output_name[cpu_2x],
+ 			"cpu_2x_div", CLK_IGNORE_UNUSED, SLCR_ARM_CLK_CTRL,
+ 			26, 0, &armclk_lock);
++	clk_prepare_enable(clks[cpu_2x]);
+ 
+ 	clk = clk_register_fixed_factor(NULL, "cpu_1x_div", "cpu_div", 0, 1,
+ 			4 + 2 * tmp);
+diff --git a/drivers/coresight/coresight-etm3x.c b/drivers/coresight/coresight-etm3x.c
+index d9e3ed6aa857..369cac00afaa 100644
+--- a/drivers/coresight/coresight-etm3x.c
++++ b/drivers/coresight/coresight-etm3x.c
+@@ -573,7 +573,8 @@ static ssize_t mode_store(struct device *dev,
+ 	if (drvdata->mode & ETM_MODE_STALL) {
+ 		if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
+ 			dev_warn(drvdata->dev, "stall mode not supported\n");
+-			return -EINVAL;
++			ret = -EINVAL;
++			goto err_unlock;
+ 		}
+ 		drvdata->ctrl |= ETMCR_STALL_MODE;
+ 	 } else
+@@ -582,7 +583,8 @@ static ssize_t mode_store(struct device *dev,
+ 	if (drvdata->mode & ETM_MODE_TIMESTAMP) {
+ 		if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
+ 			dev_warn(drvdata->dev, "timestamp not supported\n");
+-			return -EINVAL;
++			ret = -EINVAL;
++			goto err_unlock;
+ 		}
+ 		drvdata->ctrl |= ETMCR_TIMESTAMP_EN;
+ 	} else
+@@ -595,6 +597,10 @@ static ssize_t mode_store(struct device *dev,
+ 	spin_unlock(&drvdata->spinlock);
+ 
+ 	return size;
++
++err_unlock:
++	spin_unlock(&drvdata->spinlock);
++	return ret;
+ }
+ static DEVICE_ATTR_RW(mode);
+ 
+diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
+index 3c97c8fa8d02..8a37af923094 100644
+--- a/drivers/dma-buf/reservation.c
++++ b/drivers/dma-buf/reservation.c
+@@ -402,8 +402,6 @@ reservation_object_test_signaled_single(struct fence *passed_fence)
+ 	int ret = 1;
+ 
+ 	if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
+-		int ret;
+-
+ 		fence = fence_get_rcu(lfence);
+ 		if (!fence)
+ 			return -1;
+diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
+index c5f7b4e9eb6c..69fac068669f 100644
+--- a/drivers/firmware/dmi_scan.c
++++ b/drivers/firmware/dmi_scan.c
+@@ -78,7 +78,7 @@ static const char * __init dmi_string(const struct dmi_header *dm, u8 s)
+  *	We have to be cautious here. We have seen BIOSes with DMI pointers
+  *	pointing to completely the wrong place for example
+  */
+-static void dmi_table(u8 *buf, int len, int num,
++static void dmi_table(u8 *buf, u32 len, int num,
+ 		      void (*decode)(const struct dmi_header *, void *),
+ 		      void *private_data)
+ {
+@@ -93,12 +93,6 @@ static void dmi_table(u8 *buf, int len, int num,
+ 		const struct dmi_header *dm = (const struct dmi_header *)data;
+ 
+ 		/*
+-		 * 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0]
+-		 */
+-		if (dm->type == DMI_ENTRY_END_OF_TABLE)
+-			break;
+-
+-		/*
+ 		 *  We want to know the total length (formatted area and
+ 		 *  strings) before decoding to make sure we won't run off the
+ 		 *  table in dmi_decode or dmi_string
+@@ -108,13 +102,20 @@ static void dmi_table(u8 *buf, int len, int num,
+ 			data++;
+ 		if (data - buf < len - 1)
+ 			decode(dm, private_data);
++
++		/*
++		 * 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0]
++		 */
++		if (dm->type == DMI_ENTRY_END_OF_TABLE)
++			break;
++
+ 		data += 2;
+ 		i++;
+ 	}
+ }
+ 
+ static phys_addr_t dmi_base;
+-static u16 dmi_len;
++static u32 dmi_len;
+ static u16 dmi_num;
+ 
+ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
+diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
+index a920fec8fe88..5186eb01945a 100644
+--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
++++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
+@@ -170,12 +170,12 @@ again:
+ 		start = desc->phys_addr;
+ 		end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT);
+ 
+-		if ((start + size) > end || (start + size) > max)
+-			continue;
+-
+-		if (end - size > max)
++		if (end > max)
+ 			end = max;
+ 
++		if ((start + size) > end)
++			continue;
++
+ 		if (round_down(end - size, align) < start)
+ 			continue;
+ 
+diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
+index 018c29a26615..87b8e3b900d2 100644
+--- a/drivers/firmware/efi/runtime-map.c
++++ b/drivers/firmware/efi/runtime-map.c
+@@ -191,7 +191,7 @@ int __init efi_runtime_map_init(struct kobject *efi_kobj)
+ 
+ 	return 0;
+ out_add_entry:
+-	for (j = i - 1; j > 0; j--) {
++	for (j = i - 1; j >= 0; j--) {
+ 		entry = *(map_entries + j);
+ 		kobject_put(&entry->kobj);
+ 	}
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 9d7a7155bf02..0936b0f94826 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -2160,6 +2160,7 @@ struct drm_i915_cmd_table {
+ 				 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
+ #define IS_BDW_ULT(dev)		(IS_BROADWELL(dev) && \
+ 				 ((INTEL_DEVID(dev) & 0xf) == 0x6 ||	\
++				 (INTEL_DEVID(dev) & 0xf) == 0xb ||	\
+ 				 (INTEL_DEVID(dev) & 0xf) == 0xe))
+ #define IS_BDW_GT3(dev)		(IS_BROADWELL(dev) && \
+ 				 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
+diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
+index 4727a4e2c87c..ffe9072a1a5f 100644
+--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
++++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
+@@ -335,9 +335,10 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
+ 		return -EINVAL;
+ 	}
+ 
++	mutex_lock(&dev->struct_mutex);
+ 	if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) {
+-		drm_gem_object_unreference_unlocked(&obj->base);
+-		return -EBUSY;
++		ret = -EBUSY;
++		goto err;
+ 	}
+ 
+ 	if (args->tiling_mode == I915_TILING_NONE) {
+@@ -369,7 +370,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
+ 		}
+ 	}
+ 
+-	mutex_lock(&dev->struct_mutex);
+ 	if (args->tiling_mode != obj->tiling_mode ||
+ 	    args->stride != obj->stride) {
+ 		/* We need to rebind the object if its current allocation
+@@ -424,6 +424,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
+ 		obj->bit_17 = NULL;
+ 	}
+ 
++err:
+ 	drm_gem_object_unreference(&obj->base);
+ 	mutex_unlock(&dev->struct_mutex);
+ 
+diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
+index d182058383a9..1719078c763a 100644
+--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
++++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
+@@ -113,7 +113,10 @@ restart:
+ 			continue;
+ 
+ 		obj = mo->obj;
+-		drm_gem_object_reference(&obj->base);
++
++		if (!kref_get_unless_zero(&obj->base.refcount))
++			continue;
++
+ 		spin_unlock(&mn->lock);
+ 
+ 		cancel_userptr(obj);
+@@ -149,7 +152,20 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
+ 			it = interval_tree_iter_first(&mn->objects, start, end);
+ 		if (it != NULL) {
+ 			obj = container_of(it, struct i915_mmu_object, it)->obj;
+-			drm_gem_object_reference(&obj->base);
++
++			/* The mmu_object is released late when destroying the
++			 * GEM object so it is entirely possible to gain a
++			 * reference on an object in the process of being freed
++			 * since our serialisation is via the spinlock and not
++			 * the struct_mutex - and consequently use it after it
++			 * is freed and then double free it.
++			 */
++			if (!kref_get_unless_zero(&obj->base.refcount)) {
++				spin_unlock(&mn->lock);
++				serial = 0;
++				continue;
++			}
++
+ 			serial = mn->serial;
+ 		}
+ 		spin_unlock(&mn->lock);
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index b051a238baf9..1464bc1f8943 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -1884,6 +1884,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
+ 	u32 iir, gt_iir, pm_iir;
+ 	irqreturn_t ret = IRQ_NONE;
+ 
++	if (!intel_irqs_enabled(dev_priv))
++		return IRQ_NONE;
++
+ 	while (true) {
+ 		/* Find, clear, then process each source of interrupt */
+ 
+@@ -1928,6 +1931,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
+ 	u32 master_ctl, iir;
+ 	irqreturn_t ret = IRQ_NONE;
+ 
++	if (!intel_irqs_enabled(dev_priv))
++		return IRQ_NONE;
++
+ 	for (;;) {
+ 		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
+ 		iir = I915_READ(VLV_IIR);
+@@ -2200,6 +2206,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
+ 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
+ 	irqreturn_t ret = IRQ_NONE;
+ 
++	if (!intel_irqs_enabled(dev_priv))
++		return IRQ_NONE;
++
+ 	/* We get interrupts on unclaimed registers, so check for this before we
+ 	 * do any I915_{READ,WRITE}. */
+ 	intel_uncore_check_errors(dev);
+@@ -2271,6 +2280,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
+ 	enum pipe pipe;
+ 	u32 aux_mask = GEN8_AUX_CHANNEL_A;
+ 
++	if (!intel_irqs_enabled(dev_priv))
++		return IRQ_NONE;
++
+ 	if (IS_GEN9(dev))
+ 		aux_mask |=  GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
+ 			GEN9_AUX_CHANNEL_D;
+@@ -3770,6 +3782,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
+ 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+ 
++	if (!intel_irqs_enabled(dev_priv))
++		return IRQ_NONE;
++
+ 	iir = I915_READ16(IIR);
+ 	if (iir == 0)
+ 		return IRQ_NONE;
+@@ -3950,6 +3965,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
+ 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+ 	int pipe, ret = IRQ_NONE;
+ 
++	if (!intel_irqs_enabled(dev_priv))
++		return IRQ_NONE;
++
+ 	iir = I915_READ(IIR);
+ 	do {
+ 		bool irq_received = (iir & ~flip_mask) != 0;
+@@ -4172,6 +4190,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
+ 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+ 
++	if (!intel_irqs_enabled(dev_priv))
++		return IRQ_NONE;
++
+ 	iir = I915_READ(IIR);
+ 
+ 	for (;;) {
+@@ -4523,6 +4544,7 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
+ {
+ 	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
+ 	dev_priv->pm.irqs_enabled = false;
++	synchronize_irq(dev_priv->dev->irq);
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index e7a16f119a29..30d4eb300be0 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -13019,6 +13019,9 @@ static struct intel_quirk intel_quirks[] = {
+ 
+ 	/* HP Chromebook 14 (Celeron 2955U) */
+ 	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
++
++	/* Dell Chromebook 11 */
++	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
+ };
+ 
+ static void intel_init_quirks(struct drm_device *dev)
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index 5cecc20efa71..9f6c8971855c 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -3515,8 +3515,6 @@ intel_dp_link_down(struct intel_dp *intel_dp)
+ 	enum port port = intel_dig_port->port;
+ 	struct drm_device *dev = intel_dig_port->base.base.dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+-	struct intel_crtc *intel_crtc =
+-		to_intel_crtc(intel_dig_port->base.base.crtc);
+ 	uint32_t DP = intel_dp->DP;
+ 
+ 	if (WARN_ON(HAS_DDI(dev)))
+@@ -3541,8 +3539,6 @@ intel_dp_link_down(struct intel_dp *intel_dp)
+ 
+ 	if (HAS_PCH_IBX(dev) &&
+ 	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
+-		struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+-
+ 		/* Hardware workaround: leaving our transcoder select
+ 		 * set to transcoder B while it's off will prevent the
+ 		 * corresponding HDMI output on transcoder A.
+@@ -3553,18 +3549,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
+ 		 */
+ 		DP &= ~DP_PIPEB_SELECT;
+ 		I915_WRITE(intel_dp->output_reg, DP);
+-
+-		/* Changes to enable or select take place the vblank
+-		 * after being written.
+-		 */
+-		if (WARN_ON(crtc == NULL)) {
+-			/* We should never try to disable a port without a crtc
+-			 * attached. For paranoia keep the code around for a
+-			 * bit. */
+-			POSTING_READ(intel_dp->output_reg);
+-			msleep(50);
+-		} else
+-			intel_wait_for_vblank(dev, intel_crtc->pipe);
++		POSTING_READ(intel_dp->output_reg);
+ 	}
+ 
+ 	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
+diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
+index 77af512d2d35..b16f2cd15cab 100644
+--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
++++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
+@@ -282,16 +282,6 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+ 	return ret;
+ }
+ 
+-static bool
+-__cpu_fifo_underrun_reporting_enabled(struct drm_i915_private *dev_priv,
+-				      enum pipe pipe)
+-{
+-	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+-
+-	return !intel_crtc->cpu_fifo_underrun_disabled;
+-}
+-
+ /**
+  * intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state
+  * @dev_priv: i915 device instance
+@@ -352,9 +342,15 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+ 					 enum pipe pipe)
+ {
++	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
++
++	/* We may be called too early in init, thanks BIOS! */
++	if (crtc == NULL)
++		return;
++
+ 	/* GMCH can't disable fifo underruns, filter them. */
+ 	if (HAS_GMCH_DISPLAY(dev_priv->dev) &&
+-	    !__cpu_fifo_underrun_reporting_enabled(dev_priv, pipe))
++	    to_intel_crtc(crtc)->cpu_fifo_underrun_disabled)
+ 		return;
+ 
+ 	if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false))
+diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
+index e588376227ea..5ebe805bb627 100644
+--- a/drivers/gpu/drm/i915/intel_lrc.c
++++ b/drivers/gpu/drm/i915/intel_lrc.c
+@@ -1237,15 +1237,17 @@ static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
+ 
+ 	cmd = MI_FLUSH_DW + 1;
+ 
+-	if (ring == &dev_priv->ring[VCS]) {
+-		if (invalidate_domains & I915_GEM_GPU_DOMAINS)
+-			cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
+-				MI_FLUSH_DW_STORE_INDEX |
+-				MI_FLUSH_DW_OP_STOREDW;
+-	} else {
+-		if (invalidate_domains & I915_GEM_DOMAIN_RENDER)
+-			cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
+-				MI_FLUSH_DW_OP_STOREDW;
++	/* We always require a command barrier so that subsequent
++	 * commands, such as breadcrumb interrupts, are strictly ordered
++	 * wrt the contents of the write cache being flushed to memory
++	 * (and thus being coherent from the CPU).
++	 */
++	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
++
++	if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
++		cmd |= MI_INVALIDATE_TLB;
++		if (ring == &dev_priv->ring[VCS])
++			cmd |= MI_INVALIDATE_BSD;
+ 	}
+ 
+ 	intel_logical_ring_emit(ringbuf, cmd);
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index bf814a64582a..e6d359371971 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -4625,7 +4625,10 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
+ 					&ddcc_status);
+ 		if (0 == ret)
+ 			dev_priv->rps.efficient_freq =
+-				(ddcc_status >> 8) & 0xff;
++				clamp_t(u8,
++					((ddcc_status >> 8) & 0xff),
++					dev_priv->rps.min_freq,
++					dev_priv->rps.max_freq);
+ 	}
+ 
+ 	/* Preserve min/max settings in case of re-init */
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index c7bc93d28d84..a335b842b78b 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -2178,6 +2178,14 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
+ 	cmd = MI_FLUSH_DW;
+ 	if (INTEL_INFO(ring->dev)->gen >= 8)
+ 		cmd += 1;
++
++	/* We always require a command barrier so that subsequent
++	 * commands, such as breadcrumb interrupts, are strictly ordered
++	 * wrt the contents of the write cache being flushed to memory
++	 * (and thus being coherent from the CPU).
++	 */
++	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
++
+ 	/*
+ 	 * Bspec vol 1c.5 - video engine command streamer:
+ 	 * "If ENABLED, all TLBs will be invalidated once the flush
+@@ -2185,8 +2193,8 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
+ 	 * Post-Sync Operation field is a value of 1h or 3h."
+ 	 */
+ 	if (invalidate & I915_GEM_GPU_DOMAINS)
+-		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
+-			MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
++		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
++
+ 	intel_ring_emit(ring, cmd);
+ 	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+ 	if (INTEL_INFO(ring->dev)->gen >= 8) {
+@@ -2282,6 +2290,14 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
+ 	cmd = MI_FLUSH_DW;
+ 	if (INTEL_INFO(ring->dev)->gen >= 8)
+ 		cmd += 1;
++
++	/* We always require a command barrier so that subsequent
++	 * commands, such as breadcrumb interrupts, are strictly ordered
++	 * wrt the contents of the write cache being flushed to memory
++	 * (and thus being coherent from the CPU).
++	 */
++	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
++
+ 	/*
+ 	 * Bspec vol 1c.3 - blitter engine command streamer:
+ 	 * "If ENABLED, all TLBs will be invalidated once the flush
+@@ -2289,8 +2305,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
+ 	 * Post-Sync Operation field is a value of 1h or 3h."
+ 	 */
+ 	if (invalidate & I915_GEM_DOMAIN_RENDER)
+-		cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
+-			MI_FLUSH_DW_OP_STOREDW;
++		cmd |= MI_INVALIDATE_TLB;
+ 	intel_ring_emit(ring, cmd);
+ 	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+ 	if (INTEL_INFO(ring->dev)->gen >= 8) {
+diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
+index 01d841ea3140..731b10a09aa0 100644
+--- a/drivers/gpu/drm/i915/intel_sideband.c
++++ b/drivers/gpu/drm/i915/intel_sideband.c
+@@ -82,7 +82,7 @@ u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr)
+ 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+ 
+ 	mutex_lock(&dev_priv->dpio_lock);
+-	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
++	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
+ 			SB_CRRDDA_NP, addr, &val);
+ 	mutex_unlock(&dev_priv->dpio_lock);
+ 
+@@ -94,7 +94,7 @@ void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
+ 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+ 
+ 	mutex_lock(&dev_priv->dpio_lock);
+-	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
++	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
+ 			SB_CRWRDA_NP, addr, &val);
+ 	mutex_unlock(&dev_priv->dpio_lock);
+ }
+@@ -103,7 +103,7 @@ u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
+ {
+ 	u32 val = 0;
+ 
+-	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
++	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
+ 			SB_CRRDDA_NP, reg, &val);
+ 
+ 	return val;
+@@ -111,7 +111,7 @@ u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
+ 
+ void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+ {
+-	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_BUNIT,
++	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
+ 			SB_CRWRDA_NP, reg, &val);
+ }
+ 
+@@ -122,7 +122,7 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
+ 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+ 
+ 	mutex_lock(&dev_priv->dpio_lock);
+-	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_NC,
++	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_NC,
+ 			SB_CRRDDA_NP, addr, &val);
+ 	mutex_unlock(&dev_priv->dpio_lock);
+ 
+@@ -132,56 +132,56 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
+ u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
+ {
+ 	u32 val = 0;
+-	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
++	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
+ 			SB_CRRDDA_NP, reg, &val);
+ 	return val;
+ }
+ 
+ void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+ {
+-	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
++	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
+ 			SB_CRWRDA_NP, reg, &val);
+ }
+ 
+ u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg)
+ {
+ 	u32 val = 0;
+-	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
++	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
+ 			SB_CRRDDA_NP, reg, &val);
+ 	return val;
+ }
+ 
+ void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+ {
+-	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
++	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
+ 			SB_CRWRDA_NP, reg, &val);
+ }
+ 
+ u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg)
+ {
+ 	u32 val = 0;
+-	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
++	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
+ 			SB_CRRDDA_NP, reg, &val);
+ 	return val;
+ }
+ 
+ void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+ {
+-	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
++	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
+ 			SB_CRWRDA_NP, reg, &val);
+ }
+ 
+ u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
+ {
+ 	u32 val = 0;
+-	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
++	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
+ 			SB_CRRDDA_NP, reg, &val);
+ 	return val;
+ }
+ 
+ void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+ {
+-	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
++	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
+ 			SB_CRWRDA_NP, reg, &val);
+ }
+ 
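All of the ValleyView sideband accesses above now target PCI device 0, function 0 on the internal IOSF bus rather than device 2 (the GPU itself). PCI_DEVFN() just packs a 5-bit slot number and a 3-bit function number into one byte; the encoding below matches include/uapi/linux/pci.h:

	#include <stdio.h>

	#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))
	#define PCI_SLOT(devfn)		(((devfn) >> 3) & 0x1f)
	#define PCI_FUNC(devfn)		((devfn) & 0x07)

	int main(void)
	{
		/* The old and new IOSF target addresses used above: */
		printf("devfn(2,0)=0x%02x devfn(0,0)=0x%02x\n",
		       PCI_DEVFN(2, 0), PCI_DEVFN(0, 0));	/* 0x10 vs 0x00 */
		return 0;
	}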
+diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
+index db42a670f995..5bf825dfaa09 100644
+--- a/drivers/gpu/drm/radeon/atombios_dp.c
++++ b/drivers/gpu/drm/radeon/atombios_dp.c
+@@ -623,10 +623,8 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
+ 		drm_dp_dpcd_writeb(dp_info->aux,
+ 				   DP_DOWNSPREAD_CTRL, 0);
+ 
+-	if ((dp_info->connector->connector_type == DRM_MODE_CONNECTOR_eDP) &&
+-	    (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) {
++	if (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)
+ 		drm_dp_dpcd_writeb(dp_info->aux, DP_EDP_CONFIGURATION_SET, 1);
+-	}
+ 
+ 	/* set the lane count on the sink */
+ 	tmp = dp_info->dp_lane_count;
+diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
+index 64fdae558d36..de9a56205f0a 100644
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -3904,7 +3904,21 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
+ 	struct radeon_ring *ring = &rdev->ring[fence->ring];
+ 	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+ 
+-	/* EVENT_WRITE_EOP - flush caches, send int */
++	/* Workaround for cache flush problems. First send a dummy EOP
++	 * event down the pipe with seq one below.
++	 */
++	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
++	radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
++				 EOP_TC_ACTION_EN |
++				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
++				 EVENT_INDEX(5)));
++	radeon_ring_write(ring, addr & 0xfffffffc);
++	radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
++				DATA_SEL(1) | INT_SEL(0));
++	radeon_ring_write(ring, fence->seq - 1);
++	radeon_ring_write(ring, 0);
++
++	/* Then send the real EOP event down the pipe. */
+ 	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+ 	radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
+ 				 EOP_TC_ACTION_EN |
+@@ -7334,7 +7348,6 @@ int cik_irq_set(struct radeon_device *rdev)
+ 	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
+ 	u32 grbm_int_cntl = 0;
+ 	u32 dma_cntl, dma_cntl1;
+-	u32 thermal_int;
+ 
+ 	if (!rdev->irq.installed) {
+ 		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
+@@ -7364,13 +7377,6 @@ int cik_irq_set(struct radeon_device *rdev)
+ 
+ 	cp_m1p0 = RREG32(CP_ME1_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+ 
+-	if (rdev->flags & RADEON_IS_IGP)
+-		thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) &
+-			~(THERM_INTH_MASK | THERM_INTL_MASK);
+-	else
+-		thermal_int = RREG32_SMC(CG_THERMAL_INT) &
+-			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
+-
+ 	/* enable CP interrupts on all rings */
+ 	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
+ 		DRM_DEBUG("cik_irq_set: sw int gfx\n");
+@@ -7474,14 +7480,6 @@ int cik_irq_set(struct radeon_device *rdev)
+ 		hpd6 |= DC_HPDx_INT_EN;
+ 	}
+ 
+-	if (rdev->irq.dpm_thermal) {
+-		DRM_DEBUG("dpm thermal\n");
+-		if (rdev->flags & RADEON_IS_IGP)
+-			thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
+-		else
+-			thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
+-	}
+-
+ 	WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+ 
+ 	WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl);
+@@ -7528,11 +7526,6 @@ int cik_irq_set(struct radeon_device *rdev)
+ 	WREG32(DC_HPD5_INT_CONTROL, hpd5);
+ 	WREG32(DC_HPD6_INT_CONTROL, hpd6);
+ 
+-	if (rdev->flags & RADEON_IS_IGP)
+-		WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
+-	else
+-		WREG32_SMC(CG_THERMAL_INT, thermal_int);
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
+index e3e9c10cfba9..85a109e1e56b 100644
+--- a/drivers/gpu/drm/radeon/kv_dpm.c
++++ b/drivers/gpu/drm/radeon/kv_dpm.c
+@@ -1169,6 +1169,19 @@ void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
+ 	}
+ }
+ 
++static void kv_enable_thermal_int(struct radeon_device *rdev, bool enable)
++{
++	u32 thermal_int;
++
++	thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL);
++	if (enable)
++		thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
++	else
++		thermal_int &= ~(THERM_INTH_MASK | THERM_INTL_MASK);
++	WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
++
++}
++
+ int kv_dpm_enable(struct radeon_device *rdev)
+ {
+ 	struct kv_power_info *pi = kv_get_pi(rdev);
+@@ -1280,8 +1293,7 @@ int kv_dpm_late_enable(struct radeon_device *rdev)
+ 			DRM_ERROR("kv_set_thermal_temperature_range failed\n");
+ 			return ret;
+ 		}
+-		rdev->irq.dpm_thermal = true;
+-		radeon_irq_set(rdev);
++		kv_enable_thermal_int(rdev, true);
+ 	}
+ 
+ 	/* powerdown unused blocks for now */
+@@ -1312,6 +1324,7 @@ void kv_dpm_disable(struct radeon_device *rdev)
+ 	kv_stop_dpm(rdev);
+ 	kv_enable_ulv(rdev, false);
+ 	kv_reset_am(rdev);
++	kv_enable_thermal_int(rdev, false);
+ 
+ 	kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+ }
+diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
+index aea48c89b241..88b2c36f8e1f 100644
+--- a/drivers/gpu/drm/radeon/ni.c
++++ b/drivers/gpu/drm/radeon/ni.c
+@@ -1085,12 +1085,12 @@ static void cayman_gpu_init(struct radeon_device *rdev)
+ 
+ 	if ((rdev->config.cayman.max_backends_per_se == 1) &&
+ 	    (rdev->flags & RADEON_IS_IGP)) {
+-		if ((disabled_rb_mask & 3) == 1) {
+-			/* RB0 disabled, RB1 enabled */
+-			tmp = 0x11111111;
+-		} else {
++		if ((disabled_rb_mask & 3) == 2) {
+ 			/* RB1 disabled, RB0 enabled */
+ 			tmp = 0x00000000;
++		} else {
++			/* RB0 disabled, RB1 enabled */
++			tmp = 0x11111111;
+ 		}
+ 	} else {
+ 		tmp = gb_addr_config & NUM_PIPES_MASK;
+diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
+index 843b65f46ece..fa2154493cf1 100644
+--- a/drivers/gpu/drm/radeon/r600_dpm.c
++++ b/drivers/gpu/drm/radeon/r600_dpm.c
+@@ -188,7 +188,7 @@ u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
+ 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ 			radeon_crtc = to_radeon_crtc(crtc);
+ 			if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
+-				vrefresh = radeon_crtc->hw_mode.vrefresh;
++				vrefresh = drm_mode_vrefresh(&radeon_crtc->hw_mode);
+ 				break;
+ 			}
+ 		}
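The stored hw_mode.vrefresh field can legitimately be zero here, so the fix derives the refresh rate from the timings instead. Ignoring the interlace, doublescan and vscan corrections the real drm_mode_vrefresh() applies, the computation reduces to pixel clock over pixels per frame; a simplified sketch with an assumed 1080p60 CEA timing:

	#include <stdio.h>

	/* Reduced model of struct drm_display_mode; clock is in kHz. */
	struct display_mode {
		int clock;	/* pixel clock, kHz */
		int htotal;	/* pixels per line, incl. blanking */
		int vtotal;	/* lines per frame, incl. blanking */
	};

	/* Simplified drm_mode_vrefresh(): frames/s = pixels/s / pixels/frame.
	 * The real helper also folds in interlace, doublescan and vscan. */
	static int mode_vrefresh(const struct display_mode *m)
	{
		if (!m->htotal || !m->vtotal)
			return 0;
		return (m->clock * 1000 + m->htotal * m->vtotal / 2) /
		       (m->htotal * m->vtotal);	/* rounded to nearest Hz */
	}

	int main(void)
	{
		struct display_mode m = { .clock = 148500, .htotal = 2200,
					  .vtotal = 1125 };
		printf("%d Hz\n", mode_vrefresh(&m));	/* -> 60 */
		return 0;
	}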
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index dbc94f300297..fc1b3f34cf18 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -3289,6 +3289,7 @@ int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
+ 
+ 	args.in.ucVoltageType = VOLTAGE_TYPE_VDDC;
+ 	args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
++	args.in.usVoltageLevel = cpu_to_le16(virtual_voltage_id);
+ 	args.in.ulSCLKFreq =
+ 		cpu_to_le32(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk);
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
+index 6b670b0bc47b..3a297037cc17 100644
+--- a/drivers/gpu/drm/radeon/radeon_encoders.c
++++ b/drivers/gpu/drm/radeon/radeon_encoders.c
+@@ -179,9 +179,12 @@ static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder,
+ 		    (rdev->pdev->subsystem_vendor == 0x1734) &&
+ 		    (rdev->pdev->subsystem_device == 0x1107))
+ 			use_bl = false;
++/* Older PPC macs use on-GPU backlight controller */
++#ifndef CONFIG_PPC_PMAC
+ 		/* disable native backlight control on older asics */
+ 		else if (rdev->family < CHIP_R600)
+ 			use_bl = false;
++#endif
+ 		else
+ 			use_bl = true;
+ 	}
+diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
+index 86fc56434b28..040d2847f8e8 100644
+--- a/drivers/gpu/drm/radeon/radeon_object.c
++++ b/drivers/gpu/drm/radeon/radeon_object.c
+@@ -238,6 +238,18 @@ int radeon_bo_create(struct radeon_device *rdev,
+ 	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
+ 	 */
+ 	bo->flags &= ~RADEON_GEM_GTT_WC;
++#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
++	/* Don't try to enable write-combining when it can't work, or things
++	 * may be slow
++	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
++	 */
++
++#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
++	 thanks to write-combining
++
++	DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
++		      "better performance thanks to write-combining\n");
++	bo->flags &= ~RADEON_GEM_GTT_WC;
+ #endif
+ 
+ 	radeon_ttm_placement_from_domain(bo, domain);
+diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
+index d4f827593dfa..d8c9fb713ebb 100644
+--- a/drivers/gpu/drm/tegra/drm.c
++++ b/drivers/gpu/drm/tegra/drm.c
+@@ -190,7 +190,7 @@ static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
+ 	if (err < 0)
+ 		return err;
+ 
+-	err = get_user(dest->target.offset, &src->cmdbuf.offset);
++	err = get_user(dest->target.offset, &src->target.offset);
+ 	if (err < 0)
+ 		return err;
+ 
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index 9505605b6e22..294d3aea132e 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -1104,6 +1104,23 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
+ 		return;
+ 	}
+ 
++	/*
++	 * Ignore reports for absolute data if the data didn't change. This is
++	 * not only an optimization but also fixes 'dead' key reports. Some
++	 * RollOver implementations for localized keys (like BACKSLASH/PIPE; HID
++	 * 0x31 and 0x32) report multiple keys, even though a localized keyboard
++	 * can only have one of them physically available. The 'dead' keys
++	 * report constant 0. As all map to the same keycode, they'd confuse
++	 * the input layer. If we filter the 'dead' keys on the HID level, we
++	 * skip the keycode translation and only forward real events.
++	 */
++	if (!(field->flags & (HID_MAIN_ITEM_RELATIVE |
++	                      HID_MAIN_ITEM_BUFFERED_BYTE)) &&
++			      (field->flags & HID_MAIN_ITEM_VARIABLE) &&
++	    usage->usage_index < field->maxusage &&
++	    value == field->value[usage->usage_index])
++		return;
++
+ 	/* report the usage code as scancode if the key status has changed */
+ 	if (usage->type == EV_KEY && !!test_bit(usage->code, input->key) != value)
+ 		input_event(input, EV_MSC, MSC_SCAN, usage->hid);
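The added filter drops a report when an absolute, variable usage carries the same value as before, which is exactly the 'dead' RollOver case the comment describes. Pulled out as a standalone predicate for clarity; the flag values are reproduced from include/linux/hid.h for the sketch and should be treated as illustrative:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define HID_MAIN_ITEM_VARIABLE		0x002
	#define HID_MAIN_ITEM_RELATIVE		0x004
	#define HID_MAIN_ITEM_BUFFERED_BYTE	0x100

	/* A field is "absolute data" when it is variable but neither relative
	 * nor a buffered byte stream; an unchanged value then carries no event. */
	static bool is_ignorable_repeat(uint32_t flags, unsigned usage_index,
					unsigned maxusage, int32_t value,
					const int32_t *last_values)
	{
		if (flags & (HID_MAIN_ITEM_RELATIVE | HID_MAIN_ITEM_BUFFERED_BYTE))
			return false;	/* deltas/streams must pass through */
		if (!(flags & HID_MAIN_ITEM_VARIABLE))
			return false;	/* array items are handled elsewhere */
		return usage_index < maxusage && value == last_values[usage_index];
	}

	int main(void)
	{
		int32_t last[4] = { 0, 0, 0, 0 };

		/* A 'dead' RollOver key reporting a constant 0 gets filtered: */
		printf("%d\n", is_ignorable_repeat(HID_MAIN_ITEM_VARIABLE,
						   2, 4, 0, last));	/* -> 1 */
		return 0;
	}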
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index ac7447c7b82e..e7f35857462c 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -760,6 +760,12 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
+ 			input_report_key(input, BTN_7, (data[4] & 0x40));  /* Left   */
+ 			input_report_key(input, BTN_8, (data[4] & 0x80));  /* Down   */
+ 			input_report_key(input, BTN_0, (data[3] & 0x01));  /* Center */
++
++			if (data[4] | (data[3] & 0x01)) {
++				input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
++			} else {
++				input_report_abs(input, ABS_MISC, 0);
++			}
+ 		} else if (features->type >= INTUOS5S && features->type <= INTUOSPL) {
+ 			int i;
+ 
+diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
+index 51672256072b..b96c636470ef 100644
+--- a/drivers/iio/adc/mcp3422.c
++++ b/drivers/iio/adc/mcp3422.c
+@@ -58,20 +58,11 @@
+ 		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ 	}
+ 
+-/* LSB is in nV to eliminate floating point */
+-static const u32 rates_to_lsb[] = {1000000, 250000, 62500, 15625};
+-
+-/*
+- *  scales calculated as:
+- *  rates_to_lsb[sample_rate] / (1 << pga);
+- *  pga is 1 for 0, 2
+- */
+-
+ static const int mcp3422_scales[4][4] = {
+-	{ 1000000, 250000, 62500, 15625 },
+-	{ 500000 , 125000, 31250, 7812 },
+-	{ 250000 , 62500 , 15625, 3906 },
+-	{ 125000 , 31250 , 7812 , 1953 } };
++	{ 1000000, 500000, 250000, 125000 },
++	{ 250000 , 125000, 62500 , 31250  },
++	{ 62500  , 31250 , 15625 , 7812   },
++	{ 15625  , 7812  , 3906  , 1953   } };
+ 
+ /* Constant msleep times for data acquisitions */
+ static const int mcp3422_read_times[4] = {
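The corrected table is row-indexed by sample rate and column-indexed by PGA setting: each entry is the per-rate LSB size in nV divided by the gain (1, 2, 4 or 8). The old table had those two axes confused. A small generator that reproduces the new values:

	#include <stdio.h>

	/* LSB per sample rate in nV, as in the removed rates_to_lsb[]. */
	static const unsigned rates_to_lsb[4] = { 1000000, 250000, 62500, 15625 };

	int main(void)
	{
		/* scale[rate][pga] = rates_to_lsb[rate] / gain, gain = 1 << pga */
		for (int rate = 0; rate < 4; rate++) {
			for (int pga = 0; pga < 4; pga++)
				printf("%7u ", rates_to_lsb[rate] >> pga);
			printf("\n");
		}
		return 0;
	}

Run, this prints exactly the four rows of the corrected mcp3422_scales table.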
+diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
+index f57562aa396f..15c73e20272d 100644
+--- a/drivers/iio/dac/ad5686.c
++++ b/drivers/iio/dac/ad5686.c
+@@ -322,7 +322,7 @@ static int ad5686_probe(struct spi_device *spi)
+ 	st = iio_priv(indio_dev);
+ 	spi_set_drvdata(spi, indio_dev);
+ 
+-	st->reg = devm_regulator_get(&spi->dev, "vcc");
++	st->reg = devm_regulator_get_optional(&spi->dev, "vcc");
+ 	if (!IS_ERR(st->reg)) {
+ 		ret = regulator_enable(st->reg);
+ 		if (ret)
+diff --git a/drivers/iio/humidity/si7020.c b/drivers/iio/humidity/si7020.c
+index b54164677b89..fa3b809aff5e 100644
+--- a/drivers/iio/humidity/si7020.c
++++ b/drivers/iio/humidity/si7020.c
+@@ -45,12 +45,12 @@ static int si7020_read_raw(struct iio_dev *indio_dev,
+ 			   struct iio_chan_spec const *chan, int *val,
+ 			   int *val2, long mask)
+ {
+-	struct i2c_client *client = iio_priv(indio_dev);
++	struct i2c_client **client = iio_priv(indio_dev);
+ 	int ret;
+ 
+ 	switch (mask) {
+ 	case IIO_CHAN_INFO_RAW:
+-		ret = i2c_smbus_read_word_data(client,
++		ret = i2c_smbus_read_word_data(*client,
+ 					       chan->type == IIO_TEMP ?
+ 					       SI7020CMD_TEMP_HOLD :
+ 					       SI7020CMD_RH_HOLD);
+@@ -126,7 +126,7 @@ static int si7020_probe(struct i2c_client *client,
+ 	/* Wait the maximum power-up time after software reset. */
+ 	msleep(15);
+ 
+-	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*client));
++	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ 	if (!indio_dev)
+ 		return -ENOMEM;
+ 
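The si7020 driver keeps only a pointer to the i2c_client in the IIO private area, so iio_priv() yields a pointer to that pointer, and the allocation size must be the size of a pointer, not of the client; the two hunks above fix both sides of that contract. A toy model of the pattern, with iio_priv() reduced to returning the private buffer:

	#include <stdio.h>
	#include <stdlib.h>

	struct i2c_client { int addr; };

	/* Toy stand-in: the IIO core reserves a private area after the
	 * device and iio_priv() hands it back. */
	struct iio_dev { char priv[32]; };

	static void *iio_priv(struct iio_dev *indio_dev) { return indio_dev->priv; }

	int main(void)
	{
		struct i2c_client client = { .addr = 0x40 };
		struct iio_dev *indio_dev = calloc(1, sizeof(*indio_dev));

		/* probe: the private area holds a *pointer*, not the client,
		 * so only sizeof(*data) bytes are actually needed. */
		struct i2c_client **data = iio_priv(indio_dev);
		*data = &client;

		/* read_raw: fetch it back through the same double pointer. */
		struct i2c_client **stored = iio_priv(indio_dev);
		printf("client addr 0x%02x\n", (*stored)->addr);

		free(indio_dev);
		return 0;
	}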
+diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
+index b70873de04ea..fa795dcd5f75 100644
+--- a/drivers/iio/imu/adis16400_core.c
++++ b/drivers/iio/imu/adis16400_core.c
+@@ -26,6 +26,7 @@
+ #include <linux/list.h>
+ #include <linux/module.h>
+ #include <linux/debugfs.h>
++#include <linux/bitops.h>
+ 
+ #include <linux/iio/iio.h>
+ #include <linux/iio/sysfs.h>
+@@ -414,7 +415,7 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
+ 		mutex_unlock(&indio_dev->mlock);
+ 		if (ret)
+ 			return ret;
+-		val16 = ((val16 & 0xFFF) << 4) >> 4;
++		val16 = sign_extend32(val16, 11);
+ 		*val = val16;
+ 		return IIO_VAL_INT;
+ 	case IIO_CHAN_INFO_OFFSET:
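sign_extend32(val16, 11) treats bit 11 as the sign bit of a 12-bit two's-complement field, replacing the open-coded shift pair. The helper below has the same shape as the kernel's version in include/linux/bitops.h:

	#include <stdint.h>
	#include <stdio.h>

	/* 'index' is the bit position of the sign bit (11 for 12-bit data). */
	static int32_t sign_extend32(uint32_t value, int index)
	{
		uint8_t shift = 31 - index;

		/* Shift the sign bit up to bit 31, then arithmetic-shift back. */
		return (int32_t)(value << shift) >> shift;
	}

	int main(void)
	{
		uint16_t raw = 0xF85;	/* 12-bit two's complement: -123 */
		printf("%d\n", sign_extend32(raw, 11));	/* -> -123 */
		return 0;
	}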
+diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
+index 56a4b7ca7ee3..45d67e9228d7 100644
+--- a/drivers/infiniband/core/ucma.c
++++ b/drivers/infiniband/core/ucma.c
+@@ -1124,6 +1124,9 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
+ 	if (!optlen)
+ 		return -EINVAL;
+ 
++	memset(&sa_path, 0, sizeof(sa_path));
++	sa_path.vlan_id = 0xffff;
++
+ 	ib_sa_unpack_path(path_data->path_rec, &sa_path);
+ 	ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
+ 	if (ret)
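ib_sa_unpack_path() fills only the fields present in the packed wire format, so without the memset anything else in sa_path, vlan_id included, would be whatever happened to be on the stack. The zero-then-default idiom, reduced to a toy record (the field names here are invented for the sketch):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct path_rec {
		uint16_t dlid;
		uint16_t vlan_id;	/* not carried in the wire format */
		uint8_t  hop_limit;
	};

	/* Stand-in for ib_sa_unpack_path(): fills only the wire fields. */
	static void unpack_path(const uint8_t *wire, struct path_rec *rec)
	{
		rec->dlid = (uint16_t)(wire[0] | (wire[1] << 8));
		rec->hop_limit = wire[2];
		/* rec->vlan_id deliberately untouched */
	}

	int main(void)
	{
		uint8_t wire[3] = { 0x34, 0x12, 64 };
		struct path_rec rec;

		memset(&rec, 0, sizeof(rec));	/* no stack garbage survives */
		rec.vlan_id = 0xffff;		/* explicit "no VLAN" default */
		unpack_path(wire, &rec);
		printf("dlid=0x%x vlan=0x%x hop=%u\n",
		       rec.dlid, rec.vlan_id, rec.hop_limit);
		return 0;
	}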
+diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
+index 6095872549e7..8b8cc6fa0ab0 100644
+--- a/drivers/infiniband/core/umem_odp.c
++++ b/drivers/infiniband/core/umem_odp.c
+@@ -294,7 +294,8 @@ int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem)
+ 	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
+ 		rbt_ib_umem_insert(&umem->odp_data->interval_tree,
+ 				   &context->umem_tree);
+-	if (likely(!atomic_read(&context->notifier_count)))
++	if (likely(!atomic_read(&context->notifier_count)) ||
++	    context->odp_mrs_count == 1)
+ 		umem->odp_data->mn_counters_active = true;
+ 	else
+ 		list_add(&umem->odp_data->no_private_counters,
+diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
+index b7943ff16ed3..6c52e72f8d2c 100644
+--- a/drivers/infiniband/core/uverbs_cmd.c
++++ b/drivers/infiniband/core/uverbs_cmd.c
+@@ -2091,20 +2091,21 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
+ 	if (qp->real_qp == qp) {
+ 		ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
+ 		if (ret)
+-			goto out;
++			goto release_qp;
+ 		ret = qp->device->modify_qp(qp, attr,
+ 			modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
+ 	} else {
+ 		ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
+ 	}
+ 
+-	put_qp_read(qp);
+-
+ 	if (ret)
+-		goto out;
++		goto release_qp;
+ 
+ 	ret = in_len;
+ 
++release_qp:
++	put_qp_read(qp);
++
+ out:
+ 	kfree(attr);
+ 
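The bug was that the early error paths jumped straight to 'out' and skipped put_qp_read(), leaking the reference taken when the QP was looked up; the fix routes every exit after the lookup through 'release_qp'. The general one-label-per-resource shape, with malloc/free standing in for the reference counting:

	#include <stdlib.h>

	static int do_modify(void)
	{
		int ret;
		void *attr = malloc(64);	/* resource 1 */
		if (!attr)
			return -12;		/* -ENOMEM */

		void *qp = malloc(16);		/* resource 2: the "qp ref" */
		if (!qp) {
			ret = -22;		/* -EINVAL, say */
			goto out;		/* qp never taken: skip its label */
		}

		ret = 0;			/* ... real work; any failure ... */
		if (ret)
			goto release_qp;	/* ... must still drop the ref */

		/* Success also falls through and releases, exactly as the
		 * fixed ib_uverbs_modify_qp() does. */
	release_qp:
		free(qp);			/* put_qp_read() equivalent */
	out:
		free(attr);			/* kfree(attr) equivalent */
		return ret;
	}

	int main(void) { return do_modify() ? 1 : 0; }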
+diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
+index 9117b7a2d5f8..0b280b1c98df 100644
+--- a/drivers/infiniband/hw/mlx4/main.c
++++ b/drivers/infiniband/hw/mlx4/main.c
+@@ -1222,8 +1222,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
+ 	u64 reg_id;
+ 	struct mlx4_ib_steering *ib_steering = NULL;
+-	enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
+-		MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
++	enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
+ 
+ 	if (mdev->dev->caps.steering_mode ==
+ 	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
+@@ -1236,8 +1235,10 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ 				    !!(mqp->flags &
+ 				       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
+ 				    prot, &reg_id);
+-	if (err)
++	if (err) {
++		pr_err("multicast attach op failed, err %d\n", err);
+ 		goto err_malloc;
++	}
+ 
+ 	err = add_gid_entry(ibqp, gid);
+ 	if (err)
+@@ -1285,8 +1286,7 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ 	struct net_device *ndev;
+ 	struct mlx4_ib_gid_entry *ge;
+ 	u64 reg_id = 0;
+-	enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
+-		MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
++	enum mlx4_protocol prot =  MLX4_PROT_IB_IPV6;
+ 
+ 	if (mdev->dev->caps.steering_mode ==
+ 	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
+diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
+index cf000b7ad64f..c880329b4d64 100644
+--- a/drivers/infiniband/hw/mlx4/qp.c
++++ b/drivers/infiniband/hw/mlx4/qp.c
+@@ -1674,8 +1674,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
+ 			    qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI ||
+ 			    qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) {
+ 				err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
+-				if (err)
+-					return -EINVAL;
++				if (err) {
++					err = -EINVAL;
++					goto out;
++				}
+ 				if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
+ 					dev->qp1_proxy[qp->port - 1] = qp;
+ 			}
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 03bf81211a54..b1eda4a602a8 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -997,7 +997,7 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
+ 	struct ib_device_attr *dprops = NULL;
+ 	struct ib_port_attr *pprops = NULL;
+ 	struct mlx5_general_caps *gen;
+-	int err = 0;
++	int err = -ENOMEM;
+ 	int port;
+ 
+ 	gen = &dev->mdev->caps.gen;
+diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
+index c00ae093b6f8..b218254ee41b 100644
+--- a/drivers/infiniband/hw/qib/qib.h
++++ b/drivers/infiniband/hw/qib/qib.h
+@@ -1082,12 +1082,6 @@ struct qib_devdata {
+ 	/* control high-level access to EEPROM */
+ 	struct mutex eep_lock;
+ 	uint64_t traffic_wds;
+-	/* active time is kept in seconds, but logged in hours */
+-	atomic_t active_time;
+-	/* Below are nominal shadow of EEPROM, new since last EEPROM update */
+-	uint8_t eep_st_errs[QIB_EEP_LOG_CNT];
+-	uint8_t eep_st_new_errs[QIB_EEP_LOG_CNT];
+-	uint16_t eep_hrs;
+ 	/*
+ 	 * masks for which bits of errs, hwerrs that cause
+ 	 * each of the counters to increment.
+@@ -1309,8 +1303,7 @@ int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
+ int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
+ 		    const void *buffer, int len);
+ void qib_get_eeprom_info(struct qib_devdata *);
+-int qib_update_eeprom_log(struct qib_devdata *dd);
+-void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr);
++#define qib_inc_eeprom_err(dd, eidx, incr)
+ void qib_dump_lookup_output_queue(struct qib_devdata *);
+ void qib_force_pio_avail_update(struct qib_devdata *);
+ void qib_clear_symerror_on_linkup(unsigned long opaque);
+diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c
+index 4d5d71aaa2b4..e2280b07df02 100644
+--- a/drivers/infiniband/hw/qib/qib_eeprom.c
++++ b/drivers/infiniband/hw/qib/qib_eeprom.c
+@@ -267,190 +267,9 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
+ 			"Board SN %s did not pass functional test: %s\n",
+ 			dd->serial, ifp->if_comment);
+ 
+-	memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT);
+-	/*
+-	 * Power-on (actually "active") hours are kept as little-endian value
+-	 * in EEPROM, but as seconds in a (possibly as small as 24-bit)
+-	 * atomic_t while running.
+-	 */
+-	atomic_set(&dd->active_time, 0);
+-	dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8);
+-
+ done:
+ 	vfree(buf);
+ 
+ bail:;
+ }
+ 
+-/**
+- * qib_update_eeprom_log - copy active-time and error counters to eeprom
+- * @dd: the qlogic_ib device
+- *
+- * Although the time is kept as seconds in the qib_devdata struct, it is
+- * rounded to hours for re-write, as we have only 16 bits in EEPROM.
+- * First-cut code reads whole (expected) struct qib_flash, modifies,
+- * re-writes. Future direction: read/write only what we need, assuming
+- * that the EEPROM had to have been "good enough" for driver init, and
+- * if not, we aren't making it worse.
+- *
+- */
+-int qib_update_eeprom_log(struct qib_devdata *dd)
+-{
+-	void *buf;
+-	struct qib_flash *ifp;
+-	int len, hi_water;
+-	uint32_t new_time, new_hrs;
+-	u8 csum;
+-	int ret, idx;
+-	unsigned long flags;
+-
+-	/* first, check if we actually need to do anything. */
+-	ret = 0;
+-	for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
+-		if (dd->eep_st_new_errs[idx]) {
+-			ret = 1;
+-			break;
+-		}
+-	}
+-	new_time = atomic_read(&dd->active_time);
+-
+-	if (ret == 0 && new_time < 3600)
+-		goto bail;
+-
+-	/*
+-	 * The quick-check above determined that there is something worthy
+-	 * of logging, so get current contents and do a more detailed idea.
+-	 * read full flash, not just currently used part, since it may have
+-	 * been written with a newer definition
+-	 */
+-	len = sizeof(struct qib_flash);
+-	buf = vmalloc(len);
+-	ret = 1;
+-	if (!buf) {
+-		qib_dev_err(dd,
+-			"Couldn't allocate memory to read %u bytes from eeprom for logging\n",
+-			len);
+-		goto bail;
+-	}
+-
+-	/* Grab semaphore and read current EEPROM. If we get an
+-	 * error, let go, but if not, keep it until we finish write.
+-	 */
+-	ret = mutex_lock_interruptible(&dd->eep_lock);
+-	if (ret) {
+-		qib_dev_err(dd, "Unable to acquire EEPROM for logging\n");
+-		goto free_bail;
+-	}
+-	ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len);
+-	if (ret) {
+-		mutex_unlock(&dd->eep_lock);
+-		qib_dev_err(dd, "Unable read EEPROM for logging\n");
+-		goto free_bail;
+-	}
+-	ifp = (struct qib_flash *)buf;
+-
+-	csum = flash_csum(ifp, 0);
+-	if (csum != ifp->if_csum) {
+-		mutex_unlock(&dd->eep_lock);
+-		qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
+-			    csum, ifp->if_csum);
+-		ret = 1;
+-		goto free_bail;
+-	}
+-	hi_water = 0;
+-	spin_lock_irqsave(&dd->eep_st_lock, flags);
+-	for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
+-		int new_val = dd->eep_st_new_errs[idx];
+-		if (new_val) {
+-			/*
+-			 * If we have seen any errors, add to EEPROM values
+-			 * We need to saturate at 0xFF (255) and we also
+-			 * would need to adjust the checksum if we were
+-			 * trying to minimize EEPROM traffic
+-			 * Note that we add to actual current count in EEPROM,
+-			 * in case it was altered while we were running.
+-			 */
+-			new_val += ifp->if_errcntp[idx];
+-			if (new_val > 0xFF)
+-				new_val = 0xFF;
+-			if (ifp->if_errcntp[idx] != new_val) {
+-				ifp->if_errcntp[idx] = new_val;
+-				hi_water = offsetof(struct qib_flash,
+-						    if_errcntp) + idx;
+-			}
+-			/*
+-			 * update our shadow (used to minimize EEPROM
+-			 * traffic), to match what we are about to write.
+-			 */
+-			dd->eep_st_errs[idx] = new_val;
+-			dd->eep_st_new_errs[idx] = 0;
+-		}
+-	}
+-	/*
+-	 * Now update active-time. We would like to round to the nearest hour
+-	 * but unless atomic_t are sure to be proper signed ints we cannot,
+-	 * because we need to account for what we "transfer" to EEPROM and
+-	 * if we log an hour at 31 minutes, then we would need to set
+-	 * active_time to -29 to accurately count the _next_ hour.
+-	 */
+-	if (new_time >= 3600) {
+-		new_hrs = new_time / 3600;
+-		atomic_sub((new_hrs * 3600), &dd->active_time);
+-		new_hrs += dd->eep_hrs;
+-		if (new_hrs > 0xFFFF)
+-			new_hrs = 0xFFFF;
+-		dd->eep_hrs = new_hrs;
+-		if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) {
+-			ifp->if_powerhour[0] = new_hrs & 0xFF;
+-			hi_water = offsetof(struct qib_flash, if_powerhour);
+-		}
+-		if ((new_hrs >> 8) != ifp->if_powerhour[1]) {
+-			ifp->if_powerhour[1] = new_hrs >> 8;
+-			hi_water = offsetof(struct qib_flash, if_powerhour) + 1;
+-		}
+-	}
+-	/*
+-	 * There is a tiny possibility that we could somehow fail to write
+-	 * the EEPROM after updating our shadows, but problems from holding
+-	 * the spinlock too long are a much bigger issue.
+-	 */
+-	spin_unlock_irqrestore(&dd->eep_st_lock, flags);
+-	if (hi_water) {
+-		/* we made some change to the data, uopdate cksum and write */
+-		csum = flash_csum(ifp, 1);
+-		ret = eeprom_write_with_enable(dd, 0, buf, hi_water + 1);
+-	}
+-	mutex_unlock(&dd->eep_lock);
+-	if (ret)
+-		qib_dev_err(dd, "Failed updating EEPROM\n");
+-
+-free_bail:
+-	vfree(buf);
+-bail:
+-	return ret;
+-}
+-
+-/**
+- * qib_inc_eeprom_err - increment one of the four error counters
+- * that are logged to EEPROM.
+- * @dd: the qlogic_ib device
+- * @eidx: 0..3, the counter to increment
+- * @incr: how much to add
+- *
+- * Each counter is 8-bits, and saturates at 255 (0xFF). They
+- * are copied to the EEPROM (aka flash) whenever qib_update_eeprom_log()
+- * is called, but it can only be called in a context that allows sleep.
+- * This function can be called even at interrupt level.
+- */
+-void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr)
+-{
+-	uint new_val;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&dd->eep_st_lock, flags);
+-	new_val = dd->eep_st_new_errs[eidx] + incr;
+-	if (new_val > 255)
+-		new_val = 255;
+-	dd->eep_st_new_errs[eidx] = new_val;
+-	spin_unlock_irqrestore(&dd->eep_st_lock, flags);
+-}
+diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
+index d68266ac7619..f7f49a6c34b0 100644
+--- a/drivers/infiniband/hw/qib/qib_iba6120.c
++++ b/drivers/infiniband/hw/qib/qib_iba6120.c
+@@ -2681,8 +2681,6 @@ static void qib_get_6120_faststats(unsigned long opaque)
+ 	spin_lock_irqsave(&dd->eep_st_lock, flags);
+ 	traffic_wds -= dd->traffic_wds;
+ 	dd->traffic_wds += traffic_wds;
+-	if (traffic_wds  >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
+-		atomic_add(5, &dd->active_time); /* S/B #define */
+ 	spin_unlock_irqrestore(&dd->eep_st_lock, flags);
+ 
+ 	qib_chk_6120_errormask(dd);
+diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
+index 7dec89fdc124..f5fa106e1992 100644
+--- a/drivers/infiniband/hw/qib/qib_iba7220.c
++++ b/drivers/infiniband/hw/qib/qib_iba7220.c
+@@ -3297,8 +3297,6 @@ static void qib_get_7220_faststats(unsigned long opaque)
+ 	spin_lock_irqsave(&dd->eep_st_lock, flags);
+ 	traffic_wds -= dd->traffic_wds;
+ 	dd->traffic_wds += traffic_wds;
+-	if (traffic_wds  >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
+-		atomic_add(5, &dd->active_time); /* S/B #define */
+ 	spin_unlock_irqrestore(&dd->eep_st_lock, flags);
+ done:
+ 	mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
+diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
+index a7eb32517a04..23ca2aca1ad6 100644
+--- a/drivers/infiniband/hw/qib/qib_iba7322.c
++++ b/drivers/infiniband/hw/qib/qib_iba7322.c
+@@ -5178,8 +5178,6 @@ static void qib_get_7322_faststats(unsigned long opaque)
+ 		spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
+ 		traffic_wds -= ppd->dd->traffic_wds;
+ 		ppd->dd->traffic_wds += traffic_wds;
+-		if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
+-			atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
+ 		spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
+ 		if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
+ 						QIB_IB_QDR) &&
+diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
+index 729da39c49ed..738269b46d83 100644
+--- a/drivers/infiniband/hw/qib/qib_init.c
++++ b/drivers/infiniband/hw/qib/qib_init.c
+@@ -931,7 +931,6 @@ static void qib_shutdown_device(struct qib_devdata *dd)
+ 		qib_free_pportdata(ppd);
+ 	}
+ 
+-	qib_update_eeprom_log(dd);
+ }
+ 
+ /**
+diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
+index 3c8e4e3caca6..b9ccbda7817d 100644
+--- a/drivers/infiniband/hw/qib/qib_sysfs.c
++++ b/drivers/infiniband/hw/qib/qib_sysfs.c
+@@ -611,28 +611,6 @@ bail:
+ 	return ret < 0 ? ret : count;
+ }
+ 
+-static ssize_t show_logged_errs(struct device *device,
+-				struct device_attribute *attr, char *buf)
+-{
+-	struct qib_ibdev *dev =
+-		container_of(device, struct qib_ibdev, ibdev.dev);
+-	struct qib_devdata *dd = dd_from_dev(dev);
+-	int idx, count;
+-
+-	/* force consistency with actual EEPROM */
+-	if (qib_update_eeprom_log(dd) != 0)
+-		return -ENXIO;
+-
+-	count = 0;
+-	for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
+-		count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c",
+-				   dd->eep_st_errs[idx],
+-				   idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' ');
+-	}
+-
+-	return count;
+-}
+-
+ /*
+  * Dump tempsense regs. in decimal, to ease shell-scripts.
+  */
+@@ -679,7 +657,6 @@ static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
+ static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
+ static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
+ static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
+-static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
+ static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
+ static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
+ static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
+@@ -693,7 +670,6 @@ static struct device_attribute *qib_attributes[] = {
+ 	&dev_attr_nfreectxts,
+ 	&dev_attr_serial,
+ 	&dev_attr_boardversion,
+-	&dev_attr_logged_errors,
+ 	&dev_attr_tempsense,
+ 	&dev_attr_localbus_info,
+ 	&dev_attr_chip_reset,
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
+index 5ce26817e7e1..b47aea1094b2 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
+@@ -654,7 +654,9 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
+ 			   enum dma_data_direction dma_dir);
+ 
+ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
+-			      struct iser_data_buf *data);
++			      struct iser_data_buf *data,
++			      enum dma_data_direction dir);
++
+ int  iser_initialize_task_headers(struct iscsi_task *task,
+ 			struct iser_tx_desc *tx_desc);
+ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index 3821633f1065..20e859a6f1a6 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -320,9 +320,6 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
+ 	struct ib_conn *ib_conn = &iser_conn->ib_conn;
+ 	struct iser_device *device = ib_conn->device;
+ 
+-	if (!iser_conn->rx_descs)
+-		goto free_login_buf;
+-
+ 	if (device->iser_free_rdma_reg_res)
+ 		device->iser_free_rdma_reg_res(ib_conn);
+ 
+@@ -334,7 +331,6 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
+ 	/* make sure we never redo any unmapping */
+ 	iser_conn->rx_descs = NULL;
+ 
+-free_login_buf:
+ 	iser_free_login_buf(iser_conn);
+ }
+ 
+@@ -714,19 +710,23 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+ 		device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
+ 		if (is_rdma_data_aligned)
+ 			iser_dma_unmap_task_data(iser_task,
+-						 &iser_task->data[ISER_DIR_IN]);
++						 &iser_task->data[ISER_DIR_IN],
++						 DMA_FROM_DEVICE);
+ 		if (prot_count && is_rdma_prot_aligned)
+ 			iser_dma_unmap_task_data(iser_task,
+-						 &iser_task->prot[ISER_DIR_IN]);
++						 &iser_task->prot[ISER_DIR_IN],
++						 DMA_FROM_DEVICE);
+ 	}
+ 
+ 	if (iser_task->dir[ISER_DIR_OUT]) {
+ 		device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
+ 		if (is_rdma_data_aligned)
+ 			iser_dma_unmap_task_data(iser_task,
+-						 &iser_task->data[ISER_DIR_OUT]);
++						 &iser_task->data[ISER_DIR_OUT],
++						 DMA_TO_DEVICE);
+ 		if (prot_count && is_rdma_prot_aligned)
+ 			iser_dma_unmap_task_data(iser_task,
+-						 &iser_task->prot[ISER_DIR_OUT]);
++						 &iser_task->prot[ISER_DIR_OUT],
++						 DMA_TO_DEVICE);
+ 	}
+ }
+diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
+index abce9339333f..341040bf0984 100644
+--- a/drivers/infiniband/ulp/iser/iser_memory.c
++++ b/drivers/infiniband/ulp/iser/iser_memory.c
+@@ -332,12 +332,13 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
+ }
+ 
+ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
+-			      struct iser_data_buf *data)
++			      struct iser_data_buf *data,
++			      enum dma_data_direction dir)
+ {
+ 	struct ib_device *dev;
+ 
+ 	dev = iser_task->iser_conn->ib_conn.device->ib_device;
+-	ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
++	ib_dma_unmap_sg(dev, data->buf, data->size, dir);
+ }
+ 
+ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
+@@ -357,7 +358,9 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
+ 		iser_data_buf_dump(mem, ibdev);
+ 
+ 	/* unmap the command data before accessing it */
+-	iser_dma_unmap_task_data(iser_task, mem);
++	iser_dma_unmap_task_data(iser_task, mem,
++				 (cmd_dir == ISER_DIR_OUT) ?
++				 DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ 
+ 	/* allocate copy buf, if we are writing, copy the */
+ 	/* unaligned scatterlist, dma map the copy        */
+diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
+index 695a2704bd43..f3e21abc20a6 100644
+--- a/drivers/infiniband/ulp/iser/iser_verbs.c
++++ b/drivers/infiniband/ulp/iser/iser_verbs.c
+@@ -600,16 +600,16 @@ void iser_release_work(struct work_struct *work)
+ /**
+  * iser_free_ib_conn_res - release IB related resources
+  * @iser_conn: iser connection struct
+- * @destroy_device: indicator if we need to try to release
+- *     the iser device (only iscsi shutdown and DEVICE_REMOVAL
+- *     will use this.
++ * @destroy: indicator if we need to try to release the
++ *     iser device and memory regoins pool (only iscsi
++ *     iser device and memory regions pool (only iscsi
++ *     shutdown and DEVICE_REMOVAL will use this).
+  *
+  * This routine is called with the iser state mutex held
+  * so the cm_id removal is out of here. It is Safe to
+  * be invoked multiple times.
+  */
+ static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
+-				  bool destroy_device)
++				  bool destroy)
+ {
+ 	struct ib_conn *ib_conn = &iser_conn->ib_conn;
+ 	struct iser_device *device = ib_conn->device;
+@@ -617,17 +617,20 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
+ 	iser_info("freeing conn %p cma_id %p qp %p\n",
+ 		  iser_conn, ib_conn->cma_id, ib_conn->qp);
+ 
+-	iser_free_rx_descriptors(iser_conn);
+-
+ 	if (ib_conn->qp != NULL) {
+ 		ib_conn->comp->active_qps--;
+ 		rdma_destroy_qp(ib_conn->cma_id);
+ 		ib_conn->qp = NULL;
+ 	}
+ 
+-	if (destroy_device && device != NULL) {
+-		iser_device_try_release(device);
+-		ib_conn->device = NULL;
++	if (destroy) {
++		if (iser_conn->rx_descs)
++			iser_free_rx_descriptors(iser_conn);
++
++		if (device != NULL) {
++			iser_device_try_release(device);
++			ib_conn->device = NULL;
++		}
+ 	}
+ }
+ 
+@@ -840,7 +843,7 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
+ }
+ 
+ static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
+-				 bool destroy_device)
++				 bool destroy)
+ {
+ 	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
+ 
+@@ -850,7 +853,7 @@ static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
+ 	 * and flush errors.
+ 	 */
+ 	iser_disconnected_handler(cma_id);
+-	iser_free_ib_conn_res(iser_conn, destroy_device);
++	iser_free_ib_conn_res(iser_conn, destroy);
+ 	complete(&iser_conn->ib_completion);
+ };
+ 
+diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
+index c09359db3a90..37de0173b6d2 100644
+--- a/drivers/md/dm-io.c
++++ b/drivers/md/dm-io.c
+@@ -290,6 +290,12 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
+ 	unsigned short logical_block_size = queue_logical_block_size(q);
+ 	sector_t num_sectors;
+ 
++	/* Reject unsupported discard requests */
++	if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) {
++		dec_count(io, region, -EOPNOTSUPP);
++		return;
++	}
++
+ 	/*
+ 	 * where->count may be zero if rw holds a flush and we need to
+ 	 * send a zero-sized flush.
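Without this guard a REQ_DISCARD bio could reach a queue whose driver does not implement discard at all; failing the region early with -EOPNOTSUPP lets the caller cope (the dm-raid1 hunk below takes the same line for mirrored writes). The shape of the check, with a placeholder flag bit:

	#include <stdbool.h>
	#include <stdio.h>

	#define REQ_DISCARD	(1u << 7)	/* placeholder bit for the sketch */
	#define EOPNOTSUPP	95

	struct queue { bool discard_supported; };

	/* Mirror of the guard added above: reject discards the queue can't do. */
	static int submit_region(struct queue *q, unsigned rw)
	{
		if ((rw & REQ_DISCARD) && !q->discard_supported)
			return -EOPNOTSUPP;	/* complete region with an error */
		/* ... build and submit the bio ... */
		return 0;
	}

	int main(void)
	{
		struct queue q = { .discard_supported = false };
		printf("%d\n", submit_region(&q, REQ_DISCARD));	/* -> -95 */
		return 0;
	}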
+diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
+index 7dfdb5c746d6..089d62751f7f 100644
+--- a/drivers/md/dm-raid1.c
++++ b/drivers/md/dm-raid1.c
+@@ -604,6 +604,15 @@ static void write_callback(unsigned long error, void *context)
+ 		return;
+ 	}
+ 
++	/*
++	 * If the bio is discard, return an error, but do not
++	 * degrade the array.
++	 */
++	if (bio->bi_rw & REQ_DISCARD) {
++		bio_endio(bio, -EOPNOTSUPP);
++		return;
++	}
++
+ 	for (i = 0; i < ms->nr_mirrors; i++)
+ 		if (test_bit(i, &error))
+ 			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index 864b03f47727..8b204ae216ab 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -1432,8 +1432,6 @@ out:
+ 		full_bio->bi_private = pe->full_bio_private;
+ 		atomic_inc(&full_bio->bi_remaining);
+ 	}
+-	free_pending_exception(pe);
+-
+ 	increment_pending_exceptions_done_count();
+ 
+ 	up_write(&s->lock);
+@@ -1450,6 +1448,8 @@ out:
+ 	}
+ 
+ 	retry_origin_bios(s, origin_bios);
++
++	free_pending_exception(pe);
+ }
+ 
+ static void commit_callback(void *context, int success)
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 2caf5b374649..64b10e006f9c 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -2462,7 +2462,7 @@ int dm_setup_md_queue(struct mapped_device *md)
+ 	return 0;
+ }
+ 
+-static struct mapped_device *dm_find_md(dev_t dev)
++struct mapped_device *dm_get_md(dev_t dev)
+ {
+ 	struct mapped_device *md;
+ 	unsigned minor = MINOR(dev);
+@@ -2473,12 +2473,15 @@ static struct mapped_device *dm_find_md(dev_t dev)
+ 	spin_lock(&_minor_lock);
+ 
+ 	md = idr_find(&_minor_idr, minor);
+-	if (md && (md == MINOR_ALLOCED ||
+-		   (MINOR(disk_devt(dm_disk(md))) != minor) ||
+-		   dm_deleting_md(md) ||
+-		   test_bit(DMF_FREEING, &md->flags))) {
+-		md = NULL;
+-		goto out;
++	if (md) {
++		if ((md == MINOR_ALLOCED ||
++		     (MINOR(disk_devt(dm_disk(md))) != minor) ||
++		     dm_deleting_md(md) ||
++		     test_bit(DMF_FREEING, &md->flags))) {
++			md = NULL;
++			goto out;
++		}
++		dm_get(md);
+ 	}
+ 
+ out:
+@@ -2486,16 +2489,6 @@ out:
+ 
+ 	return md;
+ }
+-
+-struct mapped_device *dm_get_md(dev_t dev)
+-{
+-	struct mapped_device *md = dm_find_md(dev);
+-
+-	if (md)
+-		dm_get(md);
+-
+-	return md;
+-}
+ EXPORT_SYMBOL_GPL(dm_get_md);
+ 
+ void *dm_get_mdptr(struct mapped_device *md)
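Merging dm_find_md() into dm_get_md() moves the dm_get() inside _minor_lock, closing the window in which another CPU could free the device between the unlocked lookup and the reference bump. A reduced model of lookup-plus-pin under one lock (a fixed array stands in for the minor IDR; link with -pthread on older toolchains):

	#include <pthread.h>
	#include <stdio.h>

	struct mapped_device {
		int refcount;
		int minor;
	};

	static pthread_mutex_t minor_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct mapped_device table[4];

	/* Lookup and reference acquisition under one lock: no window in which
	 * the device can disappear between "found it" and "pinned it". */
	static struct mapped_device *get_md(int minor)
	{
		struct mapped_device *md = NULL;

		pthread_mutex_lock(&minor_lock);
		if (minor >= 0 && minor < 4 && table[minor].refcount > 0) {
			md = &table[minor];
			md->refcount++;		/* dm_get() while still locked */
		}
		pthread_mutex_unlock(&minor_lock);
		return md;
	}

	int main(void)
	{
		table[1] = (struct mapped_device){ .refcount = 1, .minor = 1 };
		struct mapped_device *md = get_md(1);
		printf("%s, refcount=%d\n", md ? "found" : "missing",
		       md ? md->refcount : 0);
		return 0;
	}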
+diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
+index 28078f8894a5..f3856c3eb63b 100644
+--- a/drivers/misc/cxl/cxl.h
++++ b/drivers/misc/cxl/cxl.h
+@@ -481,6 +481,7 @@ void cxl_release_one_irq(struct cxl *adapter, int hwirq);
+ int cxl_alloc_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter, unsigned int num);
+ void cxl_release_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter);
+ int cxl_setup_irq(struct cxl *adapter, unsigned int hwirq, unsigned int virq);
++int cxl_update_image_control(struct cxl *adapter);
+ 
+ /* common == phyp + powernv */
+ struct cxl_process_element_common {
+diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
+index c294925f73ee..bfbe3c8ae7d6 100644
+--- a/drivers/misc/cxl/irq.c
++++ b/drivers/misc/cxl/irq.c
+@@ -167,6 +167,7 @@ static irqreturn_t cxl_irq(int irq, void *data, struct cxl_irq_info *irq_info)
+ 		}
+ 
+ 		cxl_ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
++		return IRQ_HANDLED;
+ 	}
+ 	if (dsisr & CXL_PSL_DSISR_An_OC)
+ 		pr_devel("CXL interrupt: OS Context Warning\n");
+diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
+index 0f2cc9f8b4db..eee4fd606dc1 100644
+--- a/drivers/misc/cxl/pci.c
++++ b/drivers/misc/cxl/pci.c
+@@ -316,7 +316,7 @@ static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev
+ 	u64 psl_dsnctl;
+ 	u64 chipid;
+ 
+-	if (!(np = pnv_pci_to_phb_node(dev)))
++	if (!(np = pnv_pci_get_phb_node(dev)))
+ 		return -ENODEV;
+ 
+ 	while (np && !(prop = of_get_property(np, "ibm,chip-id", NULL)))
+@@ -361,6 +361,41 @@ int cxl_setup_irq(struct cxl *adapter, unsigned int hwirq,
+ 	return pnv_cxl_ioda_msi_setup(dev, hwirq, virq);
+ }
+ 
++int cxl_update_image_control(struct cxl *adapter)
++{
++	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
++	int rc;
++	int vsec;
++	u8 image_state;
++
++	if (!(vsec = find_cxl_vsec(dev))) {
++		dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
++		return -ENODEV;
++	}
++
++	if ((rc = CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state))) {
++		dev_err(&dev->dev, "failed to read image state: %i\n", rc);
++		return rc;
++	}
++
++	if (adapter->perst_loads_image)
++		image_state |= CXL_VSEC_PERST_LOADS_IMAGE;
++	else
++		image_state &= ~CXL_VSEC_PERST_LOADS_IMAGE;
++
++	if (adapter->perst_select_user)
++		image_state |= CXL_VSEC_PERST_SELECT_USER;
++	else
++		image_state &= ~CXL_VSEC_PERST_SELECT_USER;
++
++	if ((rc = CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, image_state))) {
++		dev_err(&dev->dev, "failed to update image control: %i\n", rc);
++		return rc;
++	}
++
++	return 0;
++}
++
+ int cxl_alloc_one_irq(struct cxl *adapter)
+ {
+ 	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
+@@ -770,8 +805,8 @@ static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
+ 	CXL_READ_VSEC_BASE_IMAGE(dev, vsec, &adapter->base_image);
+ 	CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state);
+ 	adapter->user_image_loaded = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
+-	adapter->perst_loads_image = !!(image_state & CXL_VSEC_PERST_LOADS_IMAGE);
+-	adapter->perst_select_user = !!(image_state & CXL_VSEC_PERST_SELECT_USER);
++	adapter->perst_loads_image = true;
++	adapter->perst_select_user = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
+ 
+ 	CXL_READ_VSEC_NAFUS(dev, vsec, &adapter->slices);
+ 	CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, &afu_desc_off);
+@@ -879,6 +914,9 @@ static struct cxl *cxl_init_adapter(struct pci_dev *dev)
+ 	if ((rc = cxl_vsec_looks_ok(adapter, dev)))
+ 		goto err2;
+ 
++	if ((rc = cxl_update_image_control(adapter)))
++		goto err2;
++
+ 	if ((rc = cxl_map_adapter_regs(adapter, dev)))
+ 		goto err2;
+ 
+diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
+index 9306219d5675..6ad049a08e4d 100644
+--- a/drivers/misc/mei/init.c
++++ b/drivers/misc/mei/init.c
+@@ -341,6 +341,8 @@ void mei_stop(struct mei_device *dev)
+ 
+ 	dev->dev_state = MEI_DEV_POWER_DOWN;
+ 	mei_reset(dev);
++	/* move device to disabled state unconditionally */
++	dev->dev_state = MEI_DEV_DISABLED;
+ 
+ 	mutex_unlock(&dev->device_lock);
+ 
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+index ff83c46bc389..6befde61c203 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -487,6 +487,7 @@ enum bcmgenet_stat_type {
+ 	BCMGENET_STAT_MIB_TX,
+ 	BCMGENET_STAT_RUNT,
+ 	BCMGENET_STAT_MISC,
++	BCMGENET_STAT_SOFT,
+ };
+ 
+ struct bcmgenet_stats {
+@@ -515,6 +516,7 @@ struct bcmgenet_stats {
+ #define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
+ #define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
+ #define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
++#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)
+ 
+ #define STAT_GENET_MISC(str, m, offset) { \
+ 	.stat_string = str, \
+@@ -614,9 +616,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
+ 			UMAC_RBUF_OVFL_CNT),
+ 	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
+ 	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
+-	STAT_GENET_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+-	STAT_GENET_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
+-	STAT_GENET_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
++	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
++	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
++	STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
+ };
+ 
+ #define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)
+@@ -668,6 +670,7 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
+ 		s = &bcmgenet_gstrings_stats[i];
+ 		switch (s->type) {
+ 		case BCMGENET_STAT_NETDEV:
++		case BCMGENET_STAT_SOFT:
+ 			continue;
+ 		case BCMGENET_STAT_MIB_RX:
+ 		case BCMGENET_STAT_MIB_TX:
+@@ -971,13 +974,14 @@ static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
+ }
+ 
+ /* Unlocked version of the reclaim routine */
+-static void __bcmgenet_tx_reclaim(struct net_device *dev,
+-				  struct bcmgenet_tx_ring *ring)
++static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
++					  struct bcmgenet_tx_ring *ring)
+ {
+ 	struct bcmgenet_priv *priv = netdev_priv(dev);
+ 	int last_tx_cn, last_c_index, num_tx_bds;
+ 	struct enet_cb *tx_cb_ptr;
+ 	struct netdev_queue *txq;
++	unsigned int pkts_compl = 0;
+ 	unsigned int bds_compl;
+ 	unsigned int c_index;
+ 
+@@ -1005,6 +1009,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
+ 		tx_cb_ptr = ring->cbs + last_c_index;
+ 		bds_compl = 0;
+ 		if (tx_cb_ptr->skb) {
++			pkts_compl++;
+ 			bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
+ 			dev->stats.tx_bytes += tx_cb_ptr->skb->len;
+ 			dma_unmap_single(&dev->dev,
+@@ -1028,23 +1033,45 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
+ 		last_c_index &= (num_tx_bds - 1);
+ 	}
+ 
+-	if (ring->free_bds > (MAX_SKB_FRAGS + 1))
+-		ring->int_disable(priv, ring);
+-
+-	if (netif_tx_queue_stopped(txq))
+-		netif_tx_wake_queue(txq);
++	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
++		if (netif_tx_queue_stopped(txq))
++			netif_tx_wake_queue(txq);
++	}
+ 
+ 	ring->c_index = c_index;
++
++	return pkts_compl;
+ }
+ 
+-static void bcmgenet_tx_reclaim(struct net_device *dev,
++static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
+ 				struct bcmgenet_tx_ring *ring)
+ {
++	unsigned int released;
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&ring->lock, flags);
+-	__bcmgenet_tx_reclaim(dev, ring);
++	released = __bcmgenet_tx_reclaim(dev, ring);
+ 	spin_unlock_irqrestore(&ring->lock, flags);
++
++	return released;
++}
++
++static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
++{
++	struct bcmgenet_tx_ring *ring =
++		container_of(napi, struct bcmgenet_tx_ring, napi);
++	unsigned int work_done = 0;
++
++	work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);
++
++	if (work_done == 0) {
++		napi_complete(napi);
++		ring->int_enable(ring->priv, ring);
++
++		return 0;
++	}
++
++	return budget;
+ }
+ 
+ static void bcmgenet_tx_reclaim_all(struct net_device *dev)
+@@ -1302,10 +1329,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	bcmgenet_tdma_ring_writel(priv, ring->index,
+ 				  ring->prod_index, TDMA_PROD_INDEX);
+ 
+-	if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) {
++	if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
+ 		netif_tx_stop_queue(txq);
+-		ring->int_enable(priv, ring);
+-	}
+ 
+ out:
+ 	spin_unlock_irqrestore(&ring->lock, flags);
+@@ -1621,6 +1646,7 @@ static int init_umac(struct bcmgenet_priv *priv)
+ 	struct device *kdev = &priv->pdev->dev;
+ 	int ret;
+ 	u32 reg, cpu_mask_clear;
++	int index;
+ 
+ 	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
+ 
+@@ -1647,7 +1673,7 @@ static int init_umac(struct bcmgenet_priv *priv)
+ 
+ 	bcmgenet_intr_disable(priv);
+ 
+-	cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE;
++	cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_TXDMA_BDONE;
+ 
+ 	dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
+ 
+@@ -1674,6 +1700,10 @@ static int init_umac(struct bcmgenet_priv *priv)
+ 
+ 	bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
+ 
++	for (index = 0; index < priv->hw_params->tx_queues; index++)
++		bcmgenet_intrl2_1_writel(priv, (1 << index),
++					 INTRL2_CPU_MASK_CLEAR);
++
+ 	/* Enable rx/tx engine.*/
+ 	dev_dbg(kdev, "done init umac\n");
+ 
+@@ -1693,6 +1723,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
+ 	unsigned int first_bd;
+ 
+ 	spin_lock_init(&ring->lock);
++	ring->priv = priv;
++	netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
+ 	ring->index = index;
+ 	if (index == DESC_INDEX) {
+ 		ring->queue = 0;
+@@ -1738,6 +1770,17 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
+ 				  TDMA_WRITE_PTR);
+ 	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
+ 				  DMA_END_ADDR);
++
++	napi_enable(&ring->napi);
++}
++
++static void bcmgenet_fini_tx_ring(struct bcmgenet_priv *priv,
++				  unsigned int index)
++{
++	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
++
++	napi_disable(&ring->napi);
++	netif_napi_del(&ring->napi);
+ }
+ 
+ /* Initialize a RDMA ring */
+@@ -1907,7 +1950,7 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
+ 	return ret;
+ }
+ 
+-static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
++static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+ {
+ 	int i;
+ 
+@@ -1926,6 +1969,18 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+ 	kfree(priv->tx_cbs);
+ }
+ 
++static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
++{
++	int i;
++
++	bcmgenet_fini_tx_ring(priv, DESC_INDEX);
++
++	for (i = 0; i < priv->hw_params->tx_queues; i++)
++		bcmgenet_fini_tx_ring(priv, i);
++
++	__bcmgenet_fini_dma(priv);
++}
++
+ /* init_edma: Initialize DMA control register */
+ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
+ {
+@@ -1952,7 +2007,7 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
+ 	priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
+ 			       GFP_KERNEL);
+ 	if (!priv->tx_cbs) {
+-		bcmgenet_fini_dma(priv);
++		__bcmgenet_fini_dma(priv);
+ 		return -ENOMEM;
+ 	}
+ 
+@@ -1975,9 +2030,6 @@ static int bcmgenet_poll(struct napi_struct *napi, int budget)
+ 			struct bcmgenet_priv, napi);
+ 	unsigned int work_done;
+ 
+-	/* tx reclaim */
+-	bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
+-
+ 	work_done = bcmgenet_desc_rx(priv, budget);
+ 
+ 	/* Advancing our consumer index*/
+@@ -2022,28 +2074,34 @@ static void bcmgenet_irq_task(struct work_struct *work)
+ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
+ {
+ 	struct bcmgenet_priv *priv = dev_id;
++	struct bcmgenet_tx_ring *ring;
+ 	unsigned int index;
+ 
+ 	/* Save irq status for bottom-half processing. */
+ 	priv->irq1_stat =
+ 		bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
+-		~priv->int1_mask;
++		~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
+ 	/* clear interrupts */
+ 	bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
+ 
+ 	netif_dbg(priv, intr, priv->dev,
+ 		  "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
++
+ 	/* Check the MBDONE interrupts.
+ 	 * packet is done, reclaim descriptors
+ 	 */
+-	if (priv->irq1_stat & 0x0000ffff) {
+-		index = 0;
+-		for (index = 0; index < 16; index++) {
+-			if (priv->irq1_stat & (1 << index))
+-				bcmgenet_tx_reclaim(priv->dev,
+-						    &priv->tx_rings[index]);
++	for (index = 0; index < priv->hw_params->tx_queues; index++) {
++		if (!(priv->irq1_stat & BIT(index)))
++			continue;
++
++		ring = &priv->tx_rings[index];
++
++		if (likely(napi_schedule_prep(&ring->napi))) {
++			ring->int_disable(priv, ring);
++			__napi_schedule(&ring->napi);
+ 		}
+ 	}
++
+ 	return IRQ_HANDLED;
+ }
+ 
+@@ -2075,8 +2133,12 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
+ 	}
+ 	if (priv->irq0_stat &
+ 			(UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
+-		/* Tx reclaim */
+-		bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
++		struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX];
++
++		if (likely(napi_schedule_prep(&ring->napi))) {
++			ring->int_disable(priv, ring);
++			__napi_schedule(&ring->napi);
++		}
+ 	}
+ 	if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
+ 				UMAC_IRQ_PHY_DET_F |
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+index b36ddec0cc0a..0d370d168aee 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+@@ -520,6 +520,7 @@ struct bcmgenet_hw_params {
+ 
+ struct bcmgenet_tx_ring {
+ 	spinlock_t	lock;		/* ring lock */
++	struct napi_struct napi;	/* NAPI per tx queue */
+ 	unsigned int	index;		/* ring index */
+ 	unsigned int	queue;		/* queue index */
+ 	struct enet_cb	*cbs;		/* tx ring buffer control block*/
+@@ -534,6 +535,7 @@ struct bcmgenet_tx_ring {
+ 			   struct bcmgenet_tx_ring *);
+ 	void (*int_disable)(struct bcmgenet_priv *priv,
+ 			    struct bcmgenet_tx_ring *);
++	struct bcmgenet_priv *priv;
+ };
+ 
+ /* device context */
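
The bcmgenet hunks above move tx reclaim out of hard-IRQ context into one NAPI
instance per tx ring: the ISRs now mask the ring's interrupt and schedule
polling, and the poll callback re-enables the interrupt only once reclaim finds
nothing left to do. A minimal sketch of that handshake — sketch_ring,
ring_reclaim() and the int_enable/int_disable hooks are stand-ins for the
driver's own types and helpers, not bcmgenet code:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct sketch_ring {
	struct napi_struct napi;
	void (*int_enable)(struct sketch_ring *ring);
	void (*int_disable)(struct sketch_ring *ring);
};

unsigned int ring_reclaim(struct sketch_ring *ring); /* frees done descriptors */

/* hard IRQ: mask this ring's interrupt, defer the work to softirq */
static irqreturn_t sketch_isr(int irq, void *dev_id)
{
	struct sketch_ring *ring = dev_id;

	if (likely(napi_schedule_prep(&ring->napi))) {
		ring->int_disable(ring);
		__napi_schedule(&ring->napi);
	}
	return IRQ_HANDLED;
}

/* NAPI poll: reclaim; re-arm the interrupt only when fully drained */
static int sketch_tx_poll(struct napi_struct *napi, int budget)
{
	struct sketch_ring *ring =
		container_of(napi, struct sketch_ring, napi);

	if (ring_reclaim(ring) == 0) {
		napi_complete(napi);
		ring->int_enable(ring);
		return 0;
	}
	return budget;	/* more work pending: stay scheduled */
}
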
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index 14a1c5cec3a5..2e2cf80e7135 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -5067,8 +5067,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
+ 	RTL_W8(ChipCmd, CmdReset);
+ 
+ 	rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
+-
+-	netdev_reset_queue(tp->dev);
+ }
+ 
+ static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
+@@ -7089,8 +7087,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+ 
+ 	txd->opts2 = cpu_to_le32(opts[1]);
+ 
+-	netdev_sent_queue(dev, skb->len);
+-
+ 	skb_tx_timestamp(skb);
+ 
+ 	/* Force memory writes to complete before releasing descriptor */
+@@ -7192,7 +7188,6 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
+ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
+ {
+ 	unsigned int dirty_tx, tx_left;
+-	unsigned int bytes_compl = 0, pkts_compl = 0;
+ 
+ 	dirty_tx = tp->dirty_tx;
+ 	smp_rmb();
+@@ -7216,8 +7211,10 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
+ 		rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
+ 				     tp->TxDescArray + entry);
+ 		if (status & LastFrag) {
+-			pkts_compl++;
+-			bytes_compl += tx_skb->skb->len;
++			u64_stats_update_begin(&tp->tx_stats.syncp);
++			tp->tx_stats.packets++;
++			tp->tx_stats.bytes += tx_skb->skb->len;
++			u64_stats_update_end(&tp->tx_stats.syncp);
+ 			dev_kfree_skb_any(tx_skb->skb);
+ 			tx_skb->skb = NULL;
+ 		}
+@@ -7226,13 +7223,6 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
+ 	}
+ 
+ 	if (tp->dirty_tx != dirty_tx) {
+-		netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
+-
+-		u64_stats_update_begin(&tp->tx_stats.syncp);
+-		tp->tx_stats.packets += pkts_compl;
+-		tp->tx_stats.bytes += bytes_compl;
+-		u64_stats_update_end(&tp->tx_stats.syncp);
+-
+ 		tp->dirty_tx = dirty_tx;
+ 		/* Sync with rtl8169_start_xmit:
+ 		 * - publish dirty_tx ring index (write barrier)
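
With the byte-queue-limits calls removed from r8169 above, tx packet/byte
counters are bumped per completed frame directly under the ring's
u64_stats_sync. The begin/end brackets are what keeps 64-bit counters readable
on 32-bit hosts, where such an update is not atomic; readers simply retry until
they catch a stable snapshot. A self-contained sketch of the pairing (struct
and function names here are illustrative, not r8169's):

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct sketch_stats {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;
};

/* writer side: bracket every multi-word update */
static void sketch_account(struct sketch_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	s->packets++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}

/* reader side: loop until a consistent snapshot is observed */
static void sketch_read(struct sketch_stats *s, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry_irq(&s->syncp, start));
}
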
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index 04283fe0e6a7..20f54d5f8409 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -1392,6 +1392,9 @@ static void sh_eth_dev_exit(struct net_device *ndev)
+ 	msleep(2); /* max frame time at 10 Mbps < 1250 us */
+ 	sh_eth_get_stats(ndev);
+ 	sh_eth_reset(ndev);
++
++	/* Set MAC address again */
++	update_mac_address(ndev);
+ }
+ 
+ /* free Tx skb function */
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 919f4fccc322..4d050ee0f872 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -654,11 +654,14 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
+ 	} /* else everything is zero */
+ }
+ 
++/* Neighbour code makes some assumptions about HH_DATA_MOD alignment */
++#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN)
++
+ /* Get packet from user space buffer */
+ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ 				struct iov_iter *from, int noblock)
+ {
+-	int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
++	int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE);
+ 	struct sk_buff *skb;
+ 	struct macvlan_dev *vlan;
+ 	unsigned long total_len = iov_iter_count(from);
+@@ -722,7 +725,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ 			linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
+ 	}
+ 
+-	skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
++	skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
+ 				linear, noblock, &err);
+ 	if (!skb)
+ 		goto err;
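
The larger reserve is chosen so the Ethernet header ends on a HH_DATA_MOD
boundary, which is the layout the neighbour cache's hard-header fast path
assumes. With the stock definitions from <linux/netdevice.h> (paraphrased
below), the arithmetic works out as a simple worked example:

/*
 *   #define HH_DATA_MOD		16
 *   #define HH_DATA_OFF(__len)	(HH_DATA_MOD - ((__len) & (HH_DATA_MOD - 1)))
 *
 * so for ETH_HLEN == 14:
 *
 *   MACVTAP_RESERVE = HH_DATA_OFF(14) = 16 - (14 & 15) = 2
 *
 * i.e. 2 bytes of headroom + a 14-byte Ethernet header = 16 bytes,
 * leaving the data after the hard header HH_DATA_MOD-aligned.
 */
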
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index 767cd110f496..dc1f6f07326a 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -236,6 +236,25 @@ static inline unsigned int phy_find_valid(unsigned int idx, u32 features)
+ }
+ 
+ /**
++ * phy_check_valid - check if there is a valid PHY setting which matches
++ *		     speed, duplex, and feature mask
++ * @speed: speed to match
++ * @duplex: duplex to match
++ * @features: A mask of the valid settings
++ *
++ * Description: Returns true if there is a valid setting, false otherwise.
++ */
++static inline bool phy_check_valid(int speed, int duplex, u32 features)
++{
++	unsigned int idx;
++
++	idx = phy_find_valid(phy_find_setting(speed, duplex), features);
++
++	return settings[idx].speed == speed && settings[idx].duplex == duplex &&
++		(settings[idx].setting & features);
++}
++
++/**
+  * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
+  * @phydev: the target phy_device struct
+  *
+@@ -1042,7 +1061,6 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
+ 		int eee_lp, eee_cap, eee_adv;
+ 		u32 lp, cap, adv;
+ 		int status;
+-		unsigned int idx;
+ 
+ 		/* Read phy status to properly get the right settings */
+ 		status = phy_read_status(phydev);
+@@ -1074,8 +1092,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
+ 
+ 		adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
+ 		lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
+-		idx = phy_find_setting(phydev->speed, phydev->duplex);
+-		if (!(lp & adv & settings[idx].setting))
++		if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
+ 			goto eee_exit_err;
+ 
+ 		if (clk_stop_enable) {
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index f7ff493f1e73..2c087efed473 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -42,9 +42,7 @@
+ 
+ static struct team_port *team_port_get_rcu(const struct net_device *dev)
+ {
+-	struct team_port *port = rcu_dereference(dev->rx_handler_data);
+-
+-	return team_port_exists(dev) ? port : NULL;
++	return rcu_dereference(dev->rx_handler_data);
+ }
+ 
+ static struct team_port *team_port_get_rtnl(const struct net_device *dev)
+@@ -1738,11 +1736,11 @@ static int team_set_mac_address(struct net_device *dev, void *p)
+ 	if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
+ 		return -EADDRNOTAVAIL;
+ 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+-	rcu_read_lock();
+-	list_for_each_entry_rcu(port, &team->port_list, list)
++	mutex_lock(&team->lock);
++	list_for_each_entry(port, &team->port_list, list)
+ 		if (team->ops.port_change_dev_addr)
+ 			team->ops.port_change_dev_addr(team, port);
+-	rcu_read_unlock();
++	mutex_unlock(&team->lock);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
+index 3d18bb0eee85..1bfe0fcaccf5 100644
+--- a/drivers/net/usb/plusb.c
++++ b/drivers/net/usb/plusb.c
+@@ -134,6 +134,11 @@ static const struct usb_device_id	products [] = {
+ }, {
+ 	USB_DEVICE(0x050d, 0x258a),     /* Belkin F5U258/F5U279 (PL-25A1) */
+ 	.driver_info =  (unsigned long) &prolific_info,
++}, {
++	USB_DEVICE(0x3923, 0x7825),     /* National Instruments USB
++					 * Host-to-Host Cable
++					 */
++	.driver_info =  (unsigned long) &prolific_info,
+ },
+ 
+ 	{ },		// END
+diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
+index a3399c4f13a9..b9b651ea9851 100644
+--- a/drivers/net/wireless/ath/ath5k/reset.c
++++ b/drivers/net/wireless/ath/ath5k/reset.c
+@@ -478,7 +478,7 @@ ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
+ 	regval = ioread32(reg);
+ 	iowrite32(regval | val, reg);
+ 	regval = ioread32(reg);
+-	usleep_range(100, 150);
++	udelay(100);	/* NB: should be atomic */
+ 
+ 	/* Bring BB/MAC out of reset */
+ 	iowrite32(regval & ~val, reg);
+diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
+index 88471d3d98cd..60dc36c865b5 100644
+--- a/drivers/of/of_pci.c
++++ b/drivers/of/of_pci.c
+@@ -140,6 +140,7 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
+ 			unsigned char busno, unsigned char bus_max,
+ 			struct list_head *resources, resource_size_t *io_base)
+ {
++	struct pci_host_bridge_window *window;
+ 	struct resource *res;
+ 	struct resource *bus_range;
+ 	struct of_pci_range range;
+@@ -225,7 +226,10 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
+ conversion_failed:
+ 	kfree(res);
+ parse_failed:
++	list_for_each_entry(window, resources, list)
++		kfree(window->res);
+ 	pci_free_resource_list(resources);
++	kfree(bus_range);
+ 	return err;
+ }
+ EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources);
+diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
+index 52f2b9404fe0..448f10986c28 100644
+--- a/drivers/pinctrl/freescale/pinctrl-imx.c
++++ b/drivers/pinctrl/freescale/pinctrl-imx.c
+@@ -437,7 +437,7 @@ static void imx_pinconf_dbg_show(struct pinctrl_dev *pctldev,
+ 	const struct imx_pin_reg *pin_reg = &info->pin_regs[pin_id];
+ 	unsigned long config;
+ 
+-	if (!pin_reg || !pin_reg->conf_reg) {
++	if (!pin_reg || pin_reg->conf_reg == -1) {
+ 		seq_printf(s, "N/A");
+ 		return;
+ 	}
+diff --git a/drivers/pinctrl/freescale/pinctrl-imx25.c b/drivers/pinctrl/freescale/pinctrl-imx25.c
+index 8d1013a040c9..faf635654312 100644
+--- a/drivers/pinctrl/freescale/pinctrl-imx25.c
++++ b/drivers/pinctrl/freescale/pinctrl-imx25.c
+@@ -27,150 +27,148 @@
+ 
+ enum imx25_pads {
+ 	MX25_PAD_RESERVE0 = 1,
+-	MX25_PAD_RESERVE1 = 2,
+-	MX25_PAD_A10 = 3,
+-	MX25_PAD_A13 = 4,
+-	MX25_PAD_A14 = 5,
+-	MX25_PAD_A15 = 6,
+-	MX25_PAD_A16 = 7,
+-	MX25_PAD_A17 = 8,
+-	MX25_PAD_A18 = 9,
+-	MX25_PAD_A19 = 10,
+-	MX25_PAD_A20 = 11,
+-	MX25_PAD_A21 = 12,
+-	MX25_PAD_A22 = 13,
+-	MX25_PAD_A23 = 14,
+-	MX25_PAD_A24 = 15,
+-	MX25_PAD_A25 = 16,
+-	MX25_PAD_EB0 = 17,
+-	MX25_PAD_EB1 = 18,
+-	MX25_PAD_OE = 19,
+-	MX25_PAD_CS0 = 20,
+-	MX25_PAD_CS1 = 21,
+-	MX25_PAD_CS4 = 22,
+-	MX25_PAD_CS5 = 23,
+-	MX25_PAD_NF_CE0 = 24,
+-	MX25_PAD_ECB = 25,
+-	MX25_PAD_LBA = 26,
+-	MX25_PAD_BCLK = 27,
+-	MX25_PAD_RW = 28,
+-	MX25_PAD_NFWE_B = 29,
+-	MX25_PAD_NFRE_B = 30,
+-	MX25_PAD_NFALE = 31,
+-	MX25_PAD_NFCLE = 32,
+-	MX25_PAD_NFWP_B = 33,
+-	MX25_PAD_NFRB = 34,
+-	MX25_PAD_D15 = 35,
+-	MX25_PAD_D14 = 36,
+-	MX25_PAD_D13 = 37,
+-	MX25_PAD_D12 = 38,
+-	MX25_PAD_D11 = 39,
+-	MX25_PAD_D10 = 40,
+-	MX25_PAD_D9 = 41,
+-	MX25_PAD_D8 = 42,
+-	MX25_PAD_D7 = 43,
+-	MX25_PAD_D6 = 44,
+-	MX25_PAD_D5 = 45,
+-	MX25_PAD_D4 = 46,
+-	MX25_PAD_D3 = 47,
+-	MX25_PAD_D2 = 48,
+-	MX25_PAD_D1 = 49,
+-	MX25_PAD_D0 = 50,
+-	MX25_PAD_LD0 = 51,
+-	MX25_PAD_LD1 = 52,
+-	MX25_PAD_LD2 = 53,
+-	MX25_PAD_LD3 = 54,
+-	MX25_PAD_LD4 = 55,
+-	MX25_PAD_LD5 = 56,
+-	MX25_PAD_LD6 = 57,
+-	MX25_PAD_LD7 = 58,
+-	MX25_PAD_LD8 = 59,
+-	MX25_PAD_LD9 = 60,
+-	MX25_PAD_LD10 = 61,
+-	MX25_PAD_LD11 = 62,
+-	MX25_PAD_LD12 = 63,
+-	MX25_PAD_LD13 = 64,
+-	MX25_PAD_LD14 = 65,
+-	MX25_PAD_LD15 = 66,
+-	MX25_PAD_HSYNC = 67,
+-	MX25_PAD_VSYNC = 68,
+-	MX25_PAD_LSCLK = 69,
+-	MX25_PAD_OE_ACD = 70,
+-	MX25_PAD_CONTRAST = 71,
+-	MX25_PAD_PWM = 72,
+-	MX25_PAD_CSI_D2 = 73,
+-	MX25_PAD_CSI_D3 = 74,
+-	MX25_PAD_CSI_D4 = 75,
+-	MX25_PAD_CSI_D5 = 76,
+-	MX25_PAD_CSI_D6 = 77,
+-	MX25_PAD_CSI_D7 = 78,
+-	MX25_PAD_CSI_D8 = 79,
+-	MX25_PAD_CSI_D9 = 80,
+-	MX25_PAD_CSI_MCLK = 81,
+-	MX25_PAD_CSI_VSYNC = 82,
+-	MX25_PAD_CSI_HSYNC = 83,
+-	MX25_PAD_CSI_PIXCLK = 84,
+-	MX25_PAD_I2C1_CLK = 85,
+-	MX25_PAD_I2C1_DAT = 86,
+-	MX25_PAD_CSPI1_MOSI = 87,
+-	MX25_PAD_CSPI1_MISO = 88,
+-	MX25_PAD_CSPI1_SS0 = 89,
+-	MX25_PAD_CSPI1_SS1 = 90,
+-	MX25_PAD_CSPI1_SCLK = 91,
+-	MX25_PAD_CSPI1_RDY = 92,
+-	MX25_PAD_UART1_RXD = 93,
+-	MX25_PAD_UART1_TXD = 94,
+-	MX25_PAD_UART1_RTS = 95,
+-	MX25_PAD_UART1_CTS = 96,
+-	MX25_PAD_UART2_RXD = 97,
+-	MX25_PAD_UART2_TXD = 98,
+-	MX25_PAD_UART2_RTS = 99,
+-	MX25_PAD_UART2_CTS = 100,
+-	MX25_PAD_SD1_CMD = 101,
+-	MX25_PAD_SD1_CLK = 102,
+-	MX25_PAD_SD1_DATA0 = 103,
+-	MX25_PAD_SD1_DATA1 = 104,
+-	MX25_PAD_SD1_DATA2 = 105,
+-	MX25_PAD_SD1_DATA3 = 106,
+-	MX25_PAD_KPP_ROW0 = 107,
+-	MX25_PAD_KPP_ROW1 = 108,
+-	MX25_PAD_KPP_ROW2 = 109,
+-	MX25_PAD_KPP_ROW3 = 110,
+-	MX25_PAD_KPP_COL0 = 111,
+-	MX25_PAD_KPP_COL1 = 112,
+-	MX25_PAD_KPP_COL2 = 113,
+-	MX25_PAD_KPP_COL3 = 114,
+-	MX25_PAD_FEC_MDC = 115,
+-	MX25_PAD_FEC_MDIO = 116,
+-	MX25_PAD_FEC_TDATA0 = 117,
+-	MX25_PAD_FEC_TDATA1 = 118,
+-	MX25_PAD_FEC_TX_EN = 119,
+-	MX25_PAD_FEC_RDATA0 = 120,
+-	MX25_PAD_FEC_RDATA1 = 121,
+-	MX25_PAD_FEC_RX_DV = 122,
+-	MX25_PAD_FEC_TX_CLK = 123,
+-	MX25_PAD_RTCK = 124,
+-	MX25_PAD_DE_B = 125,
+-	MX25_PAD_GPIO_A = 126,
+-	MX25_PAD_GPIO_B = 127,
+-	MX25_PAD_GPIO_C = 128,
+-	MX25_PAD_GPIO_D = 129,
+-	MX25_PAD_GPIO_E = 130,
+-	MX25_PAD_GPIO_F = 131,
+-	MX25_PAD_EXT_ARMCLK = 132,
+-	MX25_PAD_UPLL_BYPCLK = 133,
+-	MX25_PAD_VSTBY_REQ = 134,
+-	MX25_PAD_VSTBY_ACK = 135,
+-	MX25_PAD_POWER_FAIL  = 136,
+-	MX25_PAD_CLKO = 137,
+-	MX25_PAD_BOOT_MODE0 = 138,
+-	MX25_PAD_BOOT_MODE1 = 139,
++	MX25_PAD_A10 = 2,
++	MX25_PAD_A13 = 3,
++	MX25_PAD_A14 = 4,
++	MX25_PAD_A15 = 5,
++	MX25_PAD_A16 = 6,
++	MX25_PAD_A17 = 7,
++	MX25_PAD_A18 = 8,
++	MX25_PAD_A19 = 9,
++	MX25_PAD_A20 = 10,
++	MX25_PAD_A21 = 11,
++	MX25_PAD_A22 = 12,
++	MX25_PAD_A23 = 13,
++	MX25_PAD_A24 = 14,
++	MX25_PAD_A25 = 15,
++	MX25_PAD_EB0 = 16,
++	MX25_PAD_EB1 = 17,
++	MX25_PAD_OE = 18,
++	MX25_PAD_CS0 = 19,
++	MX25_PAD_CS1 = 20,
++	MX25_PAD_CS4 = 21,
++	MX25_PAD_CS5 = 22,
++	MX25_PAD_NF_CE0 = 23,
++	MX25_PAD_ECB = 24,
++	MX25_PAD_LBA = 25,
++	MX25_PAD_BCLK = 26,
++	MX25_PAD_RW = 27,
++	MX25_PAD_NFWE_B = 28,
++	MX25_PAD_NFRE_B = 29,
++	MX25_PAD_NFALE = 30,
++	MX25_PAD_NFCLE = 31,
++	MX25_PAD_NFWP_B = 32,
++	MX25_PAD_NFRB = 33,
++	MX25_PAD_D15 = 34,
++	MX25_PAD_D14 = 35,
++	MX25_PAD_D13 = 36,
++	MX25_PAD_D12 = 37,
++	MX25_PAD_D11 = 38,
++	MX25_PAD_D10 = 39,
++	MX25_PAD_D9 = 40,
++	MX25_PAD_D8 = 41,
++	MX25_PAD_D7 = 42,
++	MX25_PAD_D6 = 43,
++	MX25_PAD_D5 = 44,
++	MX25_PAD_D4 = 45,
++	MX25_PAD_D3 = 46,
++	MX25_PAD_D2 = 47,
++	MX25_PAD_D1 = 48,
++	MX25_PAD_D0 = 49,
++	MX25_PAD_LD0 = 50,
++	MX25_PAD_LD1 = 51,
++	MX25_PAD_LD2 = 52,
++	MX25_PAD_LD3 = 53,
++	MX25_PAD_LD4 = 54,
++	MX25_PAD_LD5 = 55,
++	MX25_PAD_LD6 = 56,
++	MX25_PAD_LD7 = 57,
++	MX25_PAD_LD8 = 58,
++	MX25_PAD_LD9 = 59,
++	MX25_PAD_LD10 = 60,
++	MX25_PAD_LD11 = 61,
++	MX25_PAD_LD12 = 62,
++	MX25_PAD_LD13 = 63,
++	MX25_PAD_LD14 = 64,
++	MX25_PAD_LD15 = 65,
++	MX25_PAD_HSYNC = 66,
++	MX25_PAD_VSYNC = 67,
++	MX25_PAD_LSCLK = 68,
++	MX25_PAD_OE_ACD = 69,
++	MX25_PAD_CONTRAST = 70,
++	MX25_PAD_PWM = 71,
++	MX25_PAD_CSI_D2 = 72,
++	MX25_PAD_CSI_D3 = 73,
++	MX25_PAD_CSI_D4 = 74,
++	MX25_PAD_CSI_D5 = 75,
++	MX25_PAD_CSI_D6 = 76,
++	MX25_PAD_CSI_D7 = 77,
++	MX25_PAD_CSI_D8 = 78,
++	MX25_PAD_CSI_D9 = 79,
++	MX25_PAD_CSI_MCLK = 80,
++	MX25_PAD_CSI_VSYNC = 81,
++	MX25_PAD_CSI_HSYNC = 82,
++	MX25_PAD_CSI_PIXCLK = 83,
++	MX25_PAD_I2C1_CLK = 84,
++	MX25_PAD_I2C1_DAT = 85,
++	MX25_PAD_CSPI1_MOSI = 86,
++	MX25_PAD_CSPI1_MISO = 87,
++	MX25_PAD_CSPI1_SS0 = 88,
++	MX25_PAD_CSPI1_SS1 = 89,
++	MX25_PAD_CSPI1_SCLK = 90,
++	MX25_PAD_CSPI1_RDY = 91,
++	MX25_PAD_UART1_RXD = 92,
++	MX25_PAD_UART1_TXD = 93,
++	MX25_PAD_UART1_RTS = 94,
++	MX25_PAD_UART1_CTS = 95,
++	MX25_PAD_UART2_RXD = 96,
++	MX25_PAD_UART2_TXD = 97,
++	MX25_PAD_UART2_RTS = 98,
++	MX25_PAD_UART2_CTS = 99,
++	MX25_PAD_SD1_CMD = 100,
++	MX25_PAD_SD1_CLK = 101,
++	MX25_PAD_SD1_DATA0 = 102,
++	MX25_PAD_SD1_DATA1 = 103,
++	MX25_PAD_SD1_DATA2 = 104,
++	MX25_PAD_SD1_DATA3 = 105,
++	MX25_PAD_KPP_ROW0 = 106,
++	MX25_PAD_KPP_ROW1 = 107,
++	MX25_PAD_KPP_ROW2 = 108,
++	MX25_PAD_KPP_ROW3 = 109,
++	MX25_PAD_KPP_COL0 = 110,
++	MX25_PAD_KPP_COL1 = 111,
++	MX25_PAD_KPP_COL2 = 112,
++	MX25_PAD_KPP_COL3 = 113,
++	MX25_PAD_FEC_MDC = 114,
++	MX25_PAD_FEC_MDIO = 115,
++	MX25_PAD_FEC_TDATA0 = 116,
++	MX25_PAD_FEC_TDATA1 = 117,
++	MX25_PAD_FEC_TX_EN = 118,
++	MX25_PAD_FEC_RDATA0 = 119,
++	MX25_PAD_FEC_RDATA1 = 120,
++	MX25_PAD_FEC_RX_DV = 121,
++	MX25_PAD_FEC_TX_CLK = 122,
++	MX25_PAD_RTCK = 123,
++	MX25_PAD_DE_B = 124,
++	MX25_PAD_GPIO_A = 125,
++	MX25_PAD_GPIO_B = 126,
++	MX25_PAD_GPIO_C = 127,
++	MX25_PAD_GPIO_D = 128,
++	MX25_PAD_GPIO_E = 129,
++	MX25_PAD_GPIO_F = 130,
++	MX25_PAD_EXT_ARMCLK = 131,
++	MX25_PAD_UPLL_BYPCLK = 132,
++	MX25_PAD_VSTBY_REQ = 133,
++	MX25_PAD_VSTBY_ACK = 134,
++	MX25_PAD_POWER_FAIL  = 135,
++	MX25_PAD_CLKO = 136,
++	MX25_PAD_BOOT_MODE0 = 137,
++	MX25_PAD_BOOT_MODE1 = 138,
+ };
+ 
+ /* Pad names for the pinmux subsystem */
+ static const struct pinctrl_pin_desc imx25_pinctrl_pads[] = {
+ 	IMX_PINCTRL_PIN(MX25_PAD_RESERVE0),
+-	IMX_PINCTRL_PIN(MX25_PAD_RESERVE1),
+ 	IMX_PINCTRL_PIN(MX25_PAD_A10),
+ 	IMX_PINCTRL_PIN(MX25_PAD_A13),
+ 	IMX_PINCTRL_PIN(MX25_PAD_A14),
+diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
+index f3193406776c..9cc047bc763b 100644
+--- a/drivers/scsi/be2iscsi/be_main.c
++++ b/drivers/scsi/be2iscsi/be_main.c
+@@ -586,7 +586,6 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
+ 			"beiscsi_hba_alloc - iscsi_host_alloc failed\n");
+ 		return NULL;
+ 	}
+-	shost->dma_boundary = pcidev->dma_mask;
+ 	shost->max_id = BE2_MAX_SESSIONS;
+ 	shost->max_channel = 0;
+ 	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index 763bffe23517..dbf8e777d850 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -546,7 +546,7 @@ static ssize_t
+ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
+ {
+ 	sg_io_hdr_t *hp = &srp->header;
+-	int err = 0;
++	int err = 0, err2;
+ 	int len;
+ 
+ 	if (count < SZ_SG_IO_HDR) {
+@@ -575,8 +575,8 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
+ 		goto err_out;
+ 	}
+ err_out:
+-	err = sg_finish_rem_req(srp);
+-	return (0 == err) ? count : err;
++	err2 = sg_finish_rem_req(srp);
++	return err ? : err2 ? : count;
+ }
+ 
+ static ssize_t
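
The new return statement in sg_new_read() leans on GCC's binary ?: extension,
where a ?: b evaluates to a when a is nonzero and to b otherwise — so the first
error encountered wins, and the byte count is returned only when both the read
and the request teardown succeeded. An equivalence sketch (not driver code):

/* report the read error first, then the teardown error, else count */
static int pick_result(int err, int err2, int count)
{
	/* identical to: err ? err : (err2 ? err2 : count) */
	return err ?: err2 ?: count;
}
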
+diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
+index 7702664d7ed3..289ad016d925 100644
+--- a/drivers/scsi/wd719x.c
++++ b/drivers/scsi/wd719x.c
+@@ -870,6 +870,7 @@ fail_free_params:
+ }
+ 
+ static struct scsi_host_template wd719x_template = {
++	.module				= THIS_MODULE,
+ 	.name				= "Western Digital 719x",
+ 	.queuecommand			= wd719x_queuecommand,
+ 	.eh_abort_handler		= wd719x_abort,
+diff --git a/drivers/staging/comedi/comedi_compat32.c b/drivers/staging/comedi/comedi_compat32.c
+index 5a4c74f703b3..03a2d0784419 100644
+--- a/drivers/staging/comedi/comedi_compat32.c
++++ b/drivers/staging/comedi/comedi_compat32.c
+@@ -262,7 +262,7 @@ static int compat_cmd(struct file *file, unsigned long arg)
+ {
+ 	struct comedi_cmd __user *cmd;
+ 	struct comedi32_cmd_struct __user *cmd32;
+-	int rc;
++	int rc, err;
+ 
+ 	cmd32 = compat_ptr(arg);
+ 	cmd = compat_alloc_user_space(sizeof(*cmd));
+@@ -271,7 +271,15 @@ static int compat_cmd(struct file *file, unsigned long arg)
+ 	if (rc)
+ 		return rc;
+ 
+-	return translated_ioctl(file, COMEDI_CMD, (unsigned long)cmd);
++	rc = translated_ioctl(file, COMEDI_CMD, (unsigned long)cmd);
++	if (rc == -EAGAIN) {
++		/* Special case: copy cmd back to user. */
++		err = put_compat_cmd(cmd32, cmd);
++		if (err)
++			rc = err;
++	}
++
++	return rc;
+ }
+ 
+ /* Handle 32-bit COMEDI_CMDTEST ioctl. */
+diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
+index eddb7ace43df..569310a9135b 100644
+--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
++++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
+@@ -439,6 +439,29 @@ static const struct comedi_lrange ai_ranges_64xx = {
+ 	}
+ };
+ 
++static const uint8_t ai_range_code_64xx[8] = {
++	0x0, 0x1, 0x2, 0x3,	/* bipolar 10, 5, 2.5, 1.25 */
++	0x8, 0x9, 0xa, 0xb	/* unipolar 10, 5, 2.5, 1.25 */
++};
++
++/* analog input ranges for 64-Mx boards */
++static const struct comedi_lrange ai_ranges_64_mx = {
++	7, {
++		BIP_RANGE(5),
++		BIP_RANGE(2.5),
++		BIP_RANGE(1.25),
++		BIP_RANGE(0.625),
++		UNI_RANGE(5),
++		UNI_RANGE(2.5),
++		UNI_RANGE(1.25)
++	}
++};
++
++static const uint8_t ai_range_code_64_mx[7] = {
++	0x0, 0x1, 0x2, 0x3,	/* bipolar 5, 2.5, 1.25, 0.625 */
++	0x9, 0xa, 0xb		/* unipolar 5, 2.5, 1.25 */
++};
++
+ /* analog input ranges for 60xx boards */
+ static const struct comedi_lrange ai_ranges_60xx = {
+ 	4, {
+@@ -449,6 +472,10 @@ static const struct comedi_lrange ai_ranges_60xx = {
+ 	}
+ };
+ 
++static const uint8_t ai_range_code_60xx[4] = {
++	0x0, 0x1, 0x4, 0x7	/* bipolar 10, 5, 0.5, 0.05 */
++};
++
+ /* analog input ranges for 6030, etc boards */
+ static const struct comedi_lrange ai_ranges_6030 = {
+ 	14, {
+@@ -469,6 +496,11 @@ static const struct comedi_lrange ai_ranges_6030 = {
+ 	}
+ };
+ 
++static const uint8_t ai_range_code_6030[14] = {
++	0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, /* bip 10, 5, 2, 1, 0.5, 0.2, 0.1 */
++	0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf  /* uni 10, 5, 2, 1, 0.5, 0.2, 0.1 */
++};
++
+ /* analog input ranges for 6052, etc boards */
+ static const struct comedi_lrange ai_ranges_6052 = {
+ 	15, {
+@@ -490,6 +522,11 @@ static const struct comedi_lrange ai_ranges_6052 = {
+ 	}
+ };
+ 
++static const uint8_t ai_range_code_6052[15] = {
++	0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,	/* bipolar 10 ... 0.05 */
++	0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf	/* unipolar 10 ... 0.1 */
++};
++
+ /* analog input ranges for 4020 board */
+ static const struct comedi_lrange ai_ranges_4020 = {
+ 	2, {
+@@ -593,6 +630,7 @@ struct pcidas64_board {
+ 	int ai_bits;		/*  analog input resolution */
+ 	int ai_speed;		/*  fastest conversion period in ns */
+ 	const struct comedi_lrange *ai_range_table;
++	const uint8_t *ai_range_code;
+ 	int ao_nchan;		/*  number of analog out channels */
+ 	int ao_bits;		/*  analog output resolution */
+ 	int ao_scan_speed;	/*  analog output scan speed */
+@@ -651,6 +689,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_scan_speed	= 10000,
+ 		.layout		= LAYOUT_64XX,
+ 		.ai_range_table	= &ai_ranges_64xx,
++		.ai_range_code	= ai_range_code_64xx,
+ 		.ao_range_table	= &ao_ranges_64xx,
+ 		.ao_range_code	= ao_range_code_64xx,
+ 		.ai_fifo	= &ai_fifo_64xx,
+@@ -666,6 +705,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_scan_speed	= 10000,
+ 		.layout		= LAYOUT_64XX,
+ 		.ai_range_table	= &ai_ranges_64xx,
++		.ai_range_code	= ai_range_code_64xx,
+ 		.ao_range_table	= &ao_ranges_64xx,
+ 		.ao_range_code	= ao_range_code_64xx,
+ 		.ai_fifo	= &ai_fifo_64xx,
+@@ -680,7 +720,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_bits	= 16,
+ 		.ao_scan_speed	= 10000,
+ 		.layout		= LAYOUT_64XX,
+-		.ai_range_table	= &ai_ranges_64xx,
++		.ai_range_table	= &ai_ranges_64_mx,
++		.ai_range_code	= ai_range_code_64_mx,
+ 		.ao_range_table	= &ao_ranges_64xx,
+ 		.ao_range_code	= ao_range_code_64xx,
+ 		.ai_fifo	= &ai_fifo_64xx,
+@@ -695,7 +736,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_bits	= 16,
+ 		.ao_scan_speed	= 10000,
+ 		.layout		= LAYOUT_64XX,
+-		.ai_range_table	= &ai_ranges_64xx,
++		.ai_range_table	= &ai_ranges_64_mx,
++		.ai_range_code	= ai_range_code_64_mx,
+ 		.ao_range_table	= &ao_ranges_64xx,
+ 		.ao_range_code	= ao_range_code_64xx,
+ 		.ai_fifo	= &ai_fifo_64xx,
+@@ -710,7 +752,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_bits	= 16,
+ 		.ao_scan_speed	= 10000,
+ 		.layout		= LAYOUT_64XX,
+-		.ai_range_table	= &ai_ranges_64xx,
++		.ai_range_table	= &ai_ranges_64_mx,
++		.ai_range_code	= ai_range_code_64_mx,
+ 		.ao_range_table	= &ao_ranges_64xx,
+ 		.ao_range_code	= ao_range_code_64xx,
+ 		.ai_fifo	= &ai_fifo_64xx,
+@@ -725,6 +768,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_bits	= 16,
+ 		.layout		= LAYOUT_60XX,
+ 		.ai_range_table	= &ai_ranges_60xx,
++		.ai_range_code	= ai_range_code_60xx,
+ 		.ao_range_table	= &range_bipolar10,
+ 		.ao_range_code	= ao_range_code_60xx,
+ 		.ai_fifo	= &ai_fifo_60xx,
+@@ -740,6 +784,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_scan_speed	= 100000,
+ 		.layout		= LAYOUT_60XX,
+ 		.ai_range_table	= &ai_ranges_60xx,
++		.ai_range_code	= ai_range_code_60xx,
+ 		.ao_range_table	= &range_bipolar10,
+ 		.ao_range_code	= ao_range_code_60xx,
+ 		.ai_fifo	= &ai_fifo_60xx,
+@@ -754,6 +799,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_scan_speed	= 100000,
+ 		.layout		= LAYOUT_60XX,
+ 		.ai_range_table	= &ai_ranges_60xx,
++		.ai_range_code	= ai_range_code_60xx,
+ 		.ao_range_table	= &range_bipolar10,
+ 		.ao_range_code	= ao_range_code_60xx,
+ 		.ai_fifo	= &ai_fifo_60xx,
+@@ -769,6 +815,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_scan_speed	= 100000,
+ 		.layout		= LAYOUT_60XX,
+ 		.ai_range_table	= &ai_ranges_60xx,
++		.ai_range_code	= ai_range_code_60xx,
+ 		.ao_range_table	= &range_bipolar10,
+ 		.ao_range_code	= ao_range_code_60xx,
+ 		.ai_fifo	= &ai_fifo_60xx,
+@@ -784,6 +831,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_scan_speed	= 10000,
+ 		.layout		= LAYOUT_60XX,
+ 		.ai_range_table	= &ai_ranges_6030,
++		.ai_range_code	= ai_range_code_6030,
+ 		.ao_range_table	= &ao_ranges_6030,
+ 		.ao_range_code	= ao_range_code_6030,
+ 		.ai_fifo	= &ai_fifo_60xx,
+@@ -799,6 +847,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_scan_speed	= 10000,
+ 		.layout		= LAYOUT_60XX,
+ 		.ai_range_table	= &ai_ranges_6030,
++		.ai_range_code	= ai_range_code_6030,
+ 		.ao_range_table	= &ao_ranges_6030,
+ 		.ao_range_code	= ao_range_code_6030,
+ 		.ai_fifo	= &ai_fifo_60xx,
+@@ -812,6 +861,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_nchan	= 0,
+ 		.layout		= LAYOUT_60XX,
+ 		.ai_range_table	= &ai_ranges_6030,
++		.ai_range_code	= ai_range_code_6030,
+ 		.ai_fifo	= &ai_fifo_60xx,
+ 		.has_8255	= 0,
+ 	},
+@@ -823,6 +873,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_nchan	= 0,
+ 		.layout		= LAYOUT_60XX,
+ 		.ai_range_table	= &ai_ranges_6030,
++		.ai_range_code	= ai_range_code_6030,
+ 		.ai_fifo	= &ai_fifo_60xx,
+ 		.has_8255	= 0,
+ 	},
+@@ -835,6 +886,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_scan_speed	= 0,
+ 		.layout		= LAYOUT_60XX,
+ 		.ai_range_table	= &ai_ranges_60xx,
++		.ai_range_code	= ai_range_code_60xx,
+ 		.ai_fifo	= &ai_fifo_60xx,
+ 		.has_8255	= 0,
+ 	},
+@@ -848,6 +900,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_scan_speed	= 100000,
+ 		.layout		= LAYOUT_60XX,
+ 		.ai_range_table	= &ai_ranges_60xx,
++		.ai_range_code	= ai_range_code_60xx,
+ 		.ao_range_table	= &range_bipolar10,
+ 		.ao_range_code	= ao_range_code_60xx,
+ 		.ai_fifo	= &ai_fifo_60xx,
+@@ -863,6 +916,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_scan_speed	= 100000,
+ 		.layout		= LAYOUT_60XX,
+ 		.ai_range_table	= &ai_ranges_60xx,
++		.ai_range_code	= ai_range_code_60xx,
+ 		.ao_range_table	= &range_bipolar10,
+ 		.ao_range_code	= ao_range_code_60xx,
+ 		.ai_fifo	= &ai_fifo_60xx,
+@@ -878,6 +932,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_scan_speed	= 1000,
+ 		.layout		= LAYOUT_60XX,
+ 		.ai_range_table	= &ai_ranges_6052,
++		.ai_range_code	= ai_range_code_6052,
+ 		.ao_range_table	= &ao_ranges_6030,
+ 		.ao_range_code	= ao_range_code_6030,
+ 		.ai_fifo	= &ai_fifo_60xx,
+@@ -893,6 +948,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_scan_speed	= 3333,
+ 		.layout		= LAYOUT_60XX,
+ 		.ai_range_table	= &ai_ranges_6052,
++		.ai_range_code	= ai_range_code_6052,
+ 		.ao_range_table	= &ao_ranges_6030,
+ 		.ao_range_code	= ao_range_code_6030,
+ 		.ai_fifo	= &ai_fifo_60xx,
+@@ -908,6 +964,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_scan_speed	= 1000,
+ 		.layout		= LAYOUT_60XX,
+ 		.ai_range_table	= &ai_ranges_6052,
++		.ai_range_code	= ai_range_code_6052,
+ 		.ao_range_table	= &ao_ranges_6030,
+ 		.ao_range_code	= ao_range_code_6030,
+ 		.ai_fifo	= &ai_fifo_60xx,
+@@ -923,6 +980,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_scan_speed	= 1000,
+ 		.layout		= LAYOUT_60XX,
+ 		.ai_range_table	= &ai_ranges_6052,
++		.ai_range_code	= ai_range_code_6052,
+ 		.ao_range_table	= &ao_ranges_6030,
+ 		.ao_range_code	= ao_range_code_6030,
+ 		.ai_fifo	= &ai_fifo_60xx,
+@@ -957,6 +1015,7 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_scan_speed	= 10000,
+ 		.layout		= LAYOUT_64XX,
+ 		.ai_range_table	= &ai_ranges_64xx,
++		.ai_range_code	= ai_range_code_64xx,
+ 		.ai_fifo	= ai_fifo_64xx,
+ 		.has_8255	= 1,
+ 	},
+@@ -968,7 +1027,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_nchan	= 0,
+ 		.ao_scan_speed	= 10000,
+ 		.layout		= LAYOUT_64XX,
+-		.ai_range_table	= &ai_ranges_64xx,
++		.ai_range_table	= &ai_ranges_64_mx,
++		.ai_range_code	= ai_range_code_64_mx,
+ 		.ai_fifo	= ai_fifo_64xx,
+ 		.has_8255	= 1,
+ 	},
+@@ -980,7 +1040,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_nchan	= 0,
+ 		.ao_scan_speed	= 10000,
+ 		.layout		= LAYOUT_64XX,
+-		.ai_range_table	= &ai_ranges_64xx,
++		.ai_range_table	= &ai_ranges_64_mx,
++		.ai_range_code	= ai_range_code_64_mx,
+ 		.ai_fifo	= ai_fifo_64xx,
+ 		.has_8255	= 1,
+ 	},
+@@ -992,7 +1053,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_nchan	= 0,
+ 		.ao_scan_speed	= 10000,
+ 		.layout		= LAYOUT_64XX,
+-		.ai_range_table	= &ai_ranges_64xx,
++		.ai_range_table	= &ai_ranges_64_mx,
++		.ai_range_code	= ai_range_code_64_mx,
+ 		.ai_fifo	= ai_fifo_64xx,
+ 		.has_8255	= 1,
+ 	},
+@@ -1004,7 +1066,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_nchan	= 2,
+ 		.ao_scan_speed	= 10000,
+ 		.layout		= LAYOUT_64XX,
+-		.ai_range_table	= &ai_ranges_64xx,
++		.ai_range_table	= &ai_ranges_64_mx,
++		.ai_range_code	= ai_range_code_64_mx,
+ 		.ai_fifo	= ai_fifo_64xx,
+ 		.has_8255	= 1,
+ 	},
+@@ -1016,7 +1079,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_nchan	= 2,
+ 		.ao_scan_speed	= 10000,
+ 		.layout		= LAYOUT_64XX,
+-		.ai_range_table	= &ai_ranges_64xx,
++		.ai_range_table	= &ai_ranges_64_mx,
++		.ai_range_code	= ai_range_code_64_mx,
+ 		.ai_fifo	= ai_fifo_64xx,
+ 		.has_8255	= 1,
+ 	},
+@@ -1028,7 +1092,8 @@ static const struct pcidas64_board pcidas64_boards[] = {
+ 		.ao_nchan	= 2,
+ 		.ao_scan_speed	= 10000,
+ 		.layout		= LAYOUT_64XX,
+-		.ai_range_table	= &ai_ranges_64xx,
++		.ai_range_table	= &ai_ranges_64_mx,
++		.ai_range_code	= ai_range_code_64_mx,
+ 		.ai_fifo	= ai_fifo_64xx,
+ 		.has_8255	= 1,
+ 	},
+@@ -1115,45 +1180,8 @@ static unsigned int ai_range_bits_6xxx(const struct comedi_device *dev,
+ 				       unsigned int range_index)
+ {
+ 	const struct pcidas64_board *thisboard = dev->board_ptr;
+-	const struct comedi_krange *range =
+-		&thisboard->ai_range_table->range[range_index];
+-	unsigned int bits = 0;
+ 
+-	switch (range->max) {
+-	case 10000000:
+-		bits = 0x000;
+-		break;
+-	case 5000000:
+-		bits = 0x100;
+-		break;
+-	case 2000000:
+-	case 2500000:
+-		bits = 0x200;
+-		break;
+-	case 1000000:
+-	case 1250000:
+-		bits = 0x300;
+-		break;
+-	case 500000:
+-		bits = 0x400;
+-		break;
+-	case 200000:
+-	case 250000:
+-		bits = 0x500;
+-		break;
+-	case 100000:
+-		bits = 0x600;
+-		break;
+-	case 50000:
+-		bits = 0x700;
+-		break;
+-	default:
+-		dev_err(dev->class_dev, "bug! in %s\n", __func__);
+-		break;
+-	}
+-	if (range->min == 0)
+-		bits += 0x900;
+-	return bits;
++	return thisboard->ai_range_code[range_index] << 8;
+ }
+ 
+ static unsigned int hw_revision(const struct comedi_device *dev,
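
The switch on range->max that ai_range_bits_6xxx() used to carry is replaced
above by a per-board lookup table, which is also what lets the -64/Mx boards
get their own range list instead of the 64xx one they previously shared. One
spot check of the equivalence against the deleted switch, using the 60xx table:

/*
 * ai_ranges_60xx index 2 is the bipolar 0.5 V range (range->max ==
 * 500000), for which the old switch produced bits = 0x400; the new
 * lookup gives ai_range_code_60xx[2] << 8 == 0x4 << 8 == 0x400.
 */
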
+diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
+index f053535385bf..351339ccaad6 100644
+--- a/drivers/staging/iio/adc/mxs-lradc.c
++++ b/drivers/staging/iio/adc/mxs-lradc.c
+@@ -214,11 +214,17 @@ struct mxs_lradc {
+ 	unsigned long		is_divided;
+ 
+ 	/*
+-	 * Touchscreen LRADC channels receives a private slot in the CTRL4
+-	 * register, the slot #7. Therefore only 7 slots instead of 8 in the
+-	 * CTRL4 register can be mapped to LRADC channels when using the
+-	 * touchscreen.
+-	 *
++	 * When the touchscreen is enabled, we give it two private virtual
++	 * channels: #6 and #7. This means that only 6 virtual channels (instead
++	 * of 8) will be available for buffered capture.
++	 */
++#define TOUCHSCREEN_VCHANNEL1		7
++#define TOUCHSCREEN_VCHANNEL2		6
++#define BUFFER_VCHANS_LIMITED		0x3f
++#define BUFFER_VCHANS_ALL		0xff
++	u8			buffer_vchans;
++
++	/*
+ 	 * Furthermore, certain LRADC channels are shared between touchscreen
+ 	 * and/or touch-buttons and generic LRADC block. Therefore when using
+ 	 * either of these, these channels are not available for the regular
+@@ -342,6 +348,9 @@ struct mxs_lradc {
+ #define	LRADC_CTRL4				0x140
+ #define	LRADC_CTRL4_LRADCSELECT_MASK(n)		(0xf << ((n) * 4))
+ #define	LRADC_CTRL4_LRADCSELECT_OFFSET(n)	((n) * 4)
++#define	LRADC_CTRL4_LRADCSELECT(n, x) \
++				(((x) << LRADC_CTRL4_LRADCSELECT_OFFSET(n)) & \
++				LRADC_CTRL4_LRADCSELECT_MASK(n))
+ 
+ #define LRADC_RESOLUTION			12
+ #define LRADC_SINGLE_SAMPLE_MASK		((1 << LRADC_RESOLUTION) - 1)
+@@ -416,6 +425,14 @@ static bool mxs_lradc_check_touch_event(struct mxs_lradc *lradc)
+ 					LRADC_STATUS_TOUCH_DETECT_RAW);
+ }
+ 
++static void mxs_lradc_map_channel(struct mxs_lradc *lradc, unsigned vch,
++				  unsigned ch)
++{
++	mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(vch),
++				LRADC_CTRL4);
++	mxs_lradc_reg_set(lradc, LRADC_CTRL4_LRADCSELECT(vch, ch), LRADC_CTRL4);
++}
++
+ static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch)
+ {
+ 	/*
+@@ -443,12 +460,8 @@ static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch)
+ 		LRADC_DELAY_DELAY(lradc->over_sample_delay - 1),
+ 			LRADC_DELAY(3));
+ 
+-	mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(2) |
+-			LRADC_CTRL1_LRADC_IRQ(3) | LRADC_CTRL1_LRADC_IRQ(4) |
+-			LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1);
++	mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(ch), LRADC_CTRL1);
+ 
+-	/* wake us again, when the complete conversion is done */
+-	mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(ch), LRADC_CTRL1);
+ 	/*
+ 	 * after changing the touchscreen plates setting
+ 	 * the signals need some initial time to settle. Start the
+@@ -502,12 +515,8 @@ static void mxs_lradc_setup_ts_pressure(struct mxs_lradc *lradc, unsigned ch1,
+ 		LRADC_DELAY_DELAY(lradc->over_sample_delay - 1),
+ 					LRADC_DELAY(3));
+ 
+-	mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(2) |
+-			LRADC_CTRL1_LRADC_IRQ(3) | LRADC_CTRL1_LRADC_IRQ(4) |
+-			LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1);
++	mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(ch2), LRADC_CTRL1);
+ 
+-	/* wake us again, when the conversions are done */
+-	mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(ch2), LRADC_CTRL1);
+ 	/*
+ 	 * after changing the touchscreen plates setting
+ 	 * the signals need some initial time to settle. Start the
+@@ -573,36 +582,6 @@ static unsigned mxs_lradc_read_ts_pressure(struct mxs_lradc *lradc,
+ #define TS_CH_XM 4
+ #define TS_CH_YM 5
+ 
+-static int mxs_lradc_read_ts_channel(struct mxs_lradc *lradc)
+-{
+-	u32 reg;
+-	int val;
+-
+-	reg = readl(lradc->base + LRADC_CTRL1);
+-
+-	/* only channels 3 to 5 are of interest here */
+-	if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_YP)) {
+-		mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_YP) |
+-			LRADC_CTRL1_LRADC_IRQ(TS_CH_YP), LRADC_CTRL1);
+-		val = mxs_lradc_read_raw_channel(lradc, TS_CH_YP);
+-	} else if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_XM)) {
+-		mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_XM) |
+-			LRADC_CTRL1_LRADC_IRQ(TS_CH_XM), LRADC_CTRL1);
+-		val = mxs_lradc_read_raw_channel(lradc, TS_CH_XM);
+-	} else if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_YM)) {
+-		mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_YM) |
+-			LRADC_CTRL1_LRADC_IRQ(TS_CH_YM), LRADC_CTRL1);
+-		val = mxs_lradc_read_raw_channel(lradc, TS_CH_YM);
+-	} else {
+-		return -EIO;
+-	}
+-
+-	mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2));
+-	mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3));
+-
+-	return val;
+-}
+-
+ /*
+  * YP(open)--+-------------+
+  *           |             |--+
+@@ -646,7 +625,8 @@ static void mxs_lradc_prepare_x_pos(struct mxs_lradc *lradc)
+ 	mxs_lradc_reg_set(lradc, mxs_lradc_drive_x_plate(lradc), LRADC_CTRL0);
+ 
+ 	lradc->cur_plate = LRADC_SAMPLE_X;
+-	mxs_lradc_setup_ts_channel(lradc, TS_CH_YP);
++	mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_YP);
++	mxs_lradc_setup_ts_channel(lradc, TOUCHSCREEN_VCHANNEL1);
+ }
+ 
+ /*
+@@ -667,7 +647,8 @@ static void mxs_lradc_prepare_y_pos(struct mxs_lradc *lradc)
+ 	mxs_lradc_reg_set(lradc, mxs_lradc_drive_y_plate(lradc), LRADC_CTRL0);
+ 
+ 	lradc->cur_plate = LRADC_SAMPLE_Y;
+-	mxs_lradc_setup_ts_channel(lradc, TS_CH_XM);
++	mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_XM);
++	mxs_lradc_setup_ts_channel(lradc, TOUCHSCREEN_VCHANNEL1);
+ }
+ 
+ /*
+@@ -688,7 +669,10 @@ static void mxs_lradc_prepare_pressure(struct mxs_lradc *lradc)
+ 	mxs_lradc_reg_set(lradc, mxs_lradc_drive_pressure(lradc), LRADC_CTRL0);
+ 
+ 	lradc->cur_plate = LRADC_SAMPLE_PRESSURE;
+-	mxs_lradc_setup_ts_pressure(lradc, TS_CH_XP, TS_CH_YM);
++	mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_YM);
++	mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL2, TS_CH_XP);
++	mxs_lradc_setup_ts_pressure(lradc, TOUCHSCREEN_VCHANNEL2,
++						TOUCHSCREEN_VCHANNEL1);
+ }
+ 
+ static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc)
+@@ -701,6 +685,19 @@ static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc)
+ 	mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1);
+ }
+ 
++static void mxs_lradc_start_touch_event(struct mxs_lradc *lradc)
++{
++	mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN,
++				LRADC_CTRL1);
++	mxs_lradc_reg_set(lradc,
++		LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1), LRADC_CTRL1);
++	/*
++	 * start with the Y-pos, because it uses nearly the same plate
++	 * settings as the touch detection
++	 */
++	mxs_lradc_prepare_y_pos(lradc);
++}
++
+ static void mxs_lradc_report_ts_event(struct mxs_lradc *lradc)
+ {
+ 	input_report_abs(lradc->ts_input, ABS_X, lradc->ts_x_pos);
+@@ -718,10 +715,12 @@ static void mxs_lradc_complete_touch_event(struct mxs_lradc *lradc)
+ 	 * start a dummy conversion to burn time to settle the signals
+ 	 * note: we are not interested in the conversion's value
+ 	 */
+-	mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(5));
+-	mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1);
+-	mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(5), LRADC_CTRL1);
+-	mxs_lradc_reg_wrt(lradc, LRADC_DELAY_TRIGGER(1 << 5) |
++	mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(TOUCHSCREEN_VCHANNEL1));
++	mxs_lradc_reg_clear(lradc,
++		LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) |
++		LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2), LRADC_CTRL1);
++	mxs_lradc_reg_wrt(lradc,
++		LRADC_DELAY_TRIGGER(1 << TOUCHSCREEN_VCHANNEL1) |
+ 		LRADC_DELAY_KICK | LRADC_DELAY_DELAY(10), /* waste 5 ms */
+ 			LRADC_DELAY(2));
+ }
+@@ -753,59 +752,45 @@ static void mxs_lradc_finish_touch_event(struct mxs_lradc *lradc, bool valid)
+ 
+ 	/* if it is released, wait for the next touch via IRQ */
+ 	lradc->cur_plate = LRADC_TOUCH;
+-	mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ, LRADC_CTRL1);
++	mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2));
++	mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3));
++	mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ |
++		LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1) |
++		LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1), LRADC_CTRL1);
+ 	mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1);
+ }
+ 
+ /* touchscreen's state machine */
+ static void mxs_lradc_handle_touch(struct mxs_lradc *lradc)
+ {
+-	int val;
+-
+ 	switch (lradc->cur_plate) {
+ 	case LRADC_TOUCH:
+-		/*
+-		 * start with the Y-pos, because it uses nearly the same plate
+-		 * settings like the touch detection
+-		 */
+-		if (mxs_lradc_check_touch_event(lradc)) {
+-			mxs_lradc_reg_clear(lradc,
+-					LRADC_CTRL1_TOUCH_DETECT_IRQ_EN,
+-					LRADC_CTRL1);
+-			mxs_lradc_prepare_y_pos(lradc);
+-		}
++		if (mxs_lradc_check_touch_event(lradc))
++			mxs_lradc_start_touch_event(lradc);
+ 		mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ,
+ 					LRADC_CTRL1);
+ 		return;
+ 
+ 	case LRADC_SAMPLE_Y:
+-		val = mxs_lradc_read_ts_channel(lradc);
+-		if (val < 0) {
+-			mxs_lradc_enable_touch_detection(lradc); /* re-start */
+-			return;
+-		}
+-		lradc->ts_y_pos = val;
++		lradc->ts_y_pos = mxs_lradc_read_raw_channel(lradc,
++							TOUCHSCREEN_VCHANNEL1);
+ 		mxs_lradc_prepare_x_pos(lradc);
+ 		return;
+ 
+ 	case LRADC_SAMPLE_X:
+-		val = mxs_lradc_read_ts_channel(lradc);
+-		if (val < 0) {
+-			mxs_lradc_enable_touch_detection(lradc); /* re-start */
+-			return;
+-		}
+-		lradc->ts_x_pos = val;
++		lradc->ts_x_pos = mxs_lradc_read_raw_channel(lradc,
++							TOUCHSCREEN_VCHANNEL1);
+ 		mxs_lradc_prepare_pressure(lradc);
+ 		return;
+ 
+ 	case LRADC_SAMPLE_PRESSURE:
+-		lradc->ts_pressure =
+-			mxs_lradc_read_ts_pressure(lradc, TS_CH_XP, TS_CH_YM);
++		lradc->ts_pressure = mxs_lradc_read_ts_pressure(lradc,
++							TOUCHSCREEN_VCHANNEL2,
++							TOUCHSCREEN_VCHANNEL1);
+ 		mxs_lradc_complete_touch_event(lradc);
+ 		return;
+ 
+ 	case LRADC_SAMPLE_VALID:
+-		val = mxs_lradc_read_ts_channel(lradc); /* ignore the value */
+ 		mxs_lradc_finish_touch_event(lradc, 1);
+ 		break;
+ 	}
+@@ -837,9 +822,9 @@ static int mxs_lradc_read_single(struct iio_dev *iio_dev, int chan, int *val)
+ 	 * used if doing raw sampling.
+ 	 */
+ 	if (lradc->soc == IMX28_LRADC)
+-		mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK,
++		mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(0),
+ 			LRADC_CTRL1);
+-	mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0);
++	mxs_lradc_reg_clear(lradc, 0x1, LRADC_CTRL0);
+ 
+ 	/* Enable / disable the divider per requirement */
+ 	if (test_bit(chan, &lradc->is_divided))
+@@ -1083,9 +1068,8 @@ static void mxs_lradc_disable_ts(struct mxs_lradc *lradc)
+ {
+ 	/* stop all interrupts from firing */
+ 	mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN |
+-		LRADC_CTRL1_LRADC_IRQ_EN(2) | LRADC_CTRL1_LRADC_IRQ_EN(3) |
+-		LRADC_CTRL1_LRADC_IRQ_EN(4) | LRADC_CTRL1_LRADC_IRQ_EN(5),
+-		LRADC_CTRL1);
++		LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1) |
++		LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL2), LRADC_CTRL1);
+ 
+ 	/* Power-down touchscreen touch-detect circuitry. */
+ 	mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0);
+@@ -1151,26 +1135,31 @@ static irqreturn_t mxs_lradc_handle_irq(int irq, void *data)
+ 	struct iio_dev *iio = data;
+ 	struct mxs_lradc *lradc = iio_priv(iio);
+ 	unsigned long reg = readl(lradc->base + LRADC_CTRL1);
++	uint32_t clr_irq = mxs_lradc_irq_mask(lradc);
+ 	const uint32_t ts_irq_mask =
+ 		LRADC_CTRL1_TOUCH_DETECT_IRQ |
+-		LRADC_CTRL1_LRADC_IRQ(2) |
+-		LRADC_CTRL1_LRADC_IRQ(3) |
+-		LRADC_CTRL1_LRADC_IRQ(4) |
+-		LRADC_CTRL1_LRADC_IRQ(5);
++		LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) |
++		LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2);
+ 
+ 	if (!(reg & mxs_lradc_irq_mask(lradc)))
+ 		return IRQ_NONE;
+ 
+-	if (lradc->use_touchscreen && (reg & ts_irq_mask))
++	if (lradc->use_touchscreen && (reg & ts_irq_mask)) {
+ 		mxs_lradc_handle_touch(lradc);
+ 
+-	if (iio_buffer_enabled(iio))
+-		iio_trigger_poll(iio->trig);
+-	else if (reg & LRADC_CTRL1_LRADC_IRQ(0))
++		/* Make sure we don't clear the next conversion's interrupt. */
++		clr_irq &= ~(LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) |
++				LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2));
++	}
++
++	if (iio_buffer_enabled(iio)) {
++		if (reg & lradc->buffer_vchans)
++			iio_trigger_poll(iio->trig);
++	} else if (reg & LRADC_CTRL1_LRADC_IRQ(0)) {
+ 		complete(&lradc->completion);
++	}
+ 
+-	mxs_lradc_reg_clear(lradc, reg & mxs_lradc_irq_mask(lradc),
+-			LRADC_CTRL1);
++	mxs_lradc_reg_clear(lradc, reg & clr_irq, LRADC_CTRL1);
+ 
+ 	return IRQ_HANDLED;
+ }
+@@ -1282,9 +1271,10 @@ static int mxs_lradc_buffer_preenable(struct iio_dev *iio)
+ 	}
+ 
+ 	if (lradc->soc == IMX28_LRADC)
+-		mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK,
+-							LRADC_CTRL1);
+-	mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0);
++		mxs_lradc_reg_clear(lradc,
++			lradc->buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET,
++			LRADC_CTRL1);
++	mxs_lradc_reg_clear(lradc, lradc->buffer_vchans, LRADC_CTRL0);
+ 
+ 	for_each_set_bit(chan, iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS) {
+ 		ctrl4_set |= chan << LRADC_CTRL4_LRADCSELECT_OFFSET(ofs);
+@@ -1317,10 +1307,11 @@ static int mxs_lradc_buffer_postdisable(struct iio_dev *iio)
+ 	mxs_lradc_reg_clear(lradc, LRADC_DELAY_TRIGGER_LRADCS_MASK |
+ 					LRADC_DELAY_KICK, LRADC_DELAY(0));
+ 
+-	mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0);
++	mxs_lradc_reg_clear(lradc, lradc->buffer_vchans, LRADC_CTRL0);
+ 	if (lradc->soc == IMX28_LRADC)
+-		mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK,
+-					LRADC_CTRL1);
++		mxs_lradc_reg_clear(lradc,
++			lradc->buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET,
++			LRADC_CTRL1);
+ 
+ 	kfree(lradc->buffer);
+ 	mutex_unlock(&lradc->lock);
+@@ -1346,7 +1337,7 @@ static bool mxs_lradc_validate_scan_mask(struct iio_dev *iio,
+ 	if (lradc->use_touchbutton)
+ 		rsvd_chans++;
+ 	if (lradc->use_touchscreen)
+-		rsvd_chans++;
++		rsvd_chans += 2;
+ 
+ 	/* Test for attempts to map channels with special mode of operation. */
+ 	if (bitmap_intersects(mask, &rsvd_mask, LRADC_MAX_TOTAL_CHANS))
+@@ -1406,6 +1397,13 @@ static const struct iio_chan_spec mxs_lradc_chan_spec[] = {
+ 		.channel = 8,
+ 		.scan_type = {.sign = 'u', .realbits = 18, .storagebits = 32,},
+ 	},
++	/* Hidden channel to keep indexes */
++	{
++		.type = IIO_TEMP,
++		.indexed = 1,
++		.scan_index = -1,
++		.channel = 9,
++	},
+ 	MXS_ADC_CHAN(10, IIO_VOLTAGE),	/* VDDIO */
+ 	MXS_ADC_CHAN(11, IIO_VOLTAGE),	/* VTH */
+ 	MXS_ADC_CHAN(12, IIO_VOLTAGE),	/* VDDA */
+@@ -1558,6 +1556,11 @@ static int mxs_lradc_probe(struct platform_device *pdev)
+ 
+ 	touch_ret = mxs_lradc_probe_touchscreen(lradc, node);
+ 
++	if (touch_ret == 0)
++		lradc->buffer_vchans = BUFFER_VCHANS_LIMITED;
++	else
++		lradc->buffer_vchans = BUFFER_VCHANS_ALL;
++
+ 	/* Grab all IRQ sources */
+ 	for (i = 0; i < of_cfg->irq_count; i++) {
+ 		lradc->irq[i] = platform_get_irq(pdev, i);
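
CTRL4 packs one 4-bit physical-channel selector per virtual LRADC slot, and
the new LRADC_CTRL4_LRADCSELECT(n, x) macro builds the field that
mxs_lradc_map_channel() writes after clearing the slot's nibble. Mapping
virtual channel 7 to physical channel 3 (TS_CH_YP), for example:

/*
 *   LRADC_CTRL4_LRADCSELECT_MASK(7) = 0xf << 28 = 0xf0000000
 *   LRADC_CTRL4_LRADCSELECT(7, 3)   = (3 << 28) & 0xf0000000
 *                                   = 0x30000000
 *
 * clear the nibble, then set the selector -- two register writes that
 * retarget virtual channel 7 at the YP plate.
 */
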
+diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
+index 283cf786ef98..2de6fb8cee8d 100644
+--- a/drivers/target/target_core_pr.c
++++ b/drivers/target/target_core_pr.c
+@@ -1874,8 +1874,8 @@ static int core_scsi3_update_aptpl_buf(
+ 		}
+ 
+ 		if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
+-			pr_err("Unable to update renaming"
+-				" APTPL metadata\n");
++			pr_err("Unable to update renaming APTPL metadata,"
++			       " reallocating larger buffer\n");
+ 			ret = -EMSGSIZE;
+ 			goto out;
+ 		}
+@@ -1892,8 +1892,8 @@ static int core_scsi3_update_aptpl_buf(
+ 			lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
+ 
+ 		if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
+-			pr_err("Unable to update renaming"
+-				" APTPL metadata\n");
++			pr_err("Unable to update renaming APTPL metadata,"
++			       " reallocating larger buffer\n");
+ 			ret = -EMSGSIZE;
+ 			goto out;
+ 		}
+@@ -1956,7 +1956,7 @@ static int __core_scsi3_write_aptpl_to_file(
+ static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, bool aptpl)
+ {
+ 	unsigned char *buf;
+-	int rc;
++	int rc, len = PR_APTPL_BUF_LEN;
+ 
+ 	if (!aptpl) {
+ 		char *null_buf = "No Registrations or Reservations\n";
+@@ -1970,25 +1970,26 @@ static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, b
+ 
+ 		return 0;
+ 	}
+-
+-	buf = kzalloc(PR_APTPL_BUF_LEN, GFP_KERNEL);
++retry:
++	buf = vzalloc(len);
+ 	if (!buf)
+ 		return TCM_OUT_OF_RESOURCES;
+ 
+-	rc = core_scsi3_update_aptpl_buf(dev, buf, PR_APTPL_BUF_LEN);
++	rc = core_scsi3_update_aptpl_buf(dev, buf, len);
+ 	if (rc < 0) {
+-		kfree(buf);
+-		return TCM_OUT_OF_RESOURCES;
++		vfree(buf);
++		len *= 2;
++		goto retry;
+ 	}
+ 
+ 	rc = __core_scsi3_write_aptpl_to_file(dev, buf);
+ 	if (rc != 0) {
+ 		pr_err("SPC-3 PR: Could not update APTPL\n");
+-		kfree(buf);
++		vfree(buf);
+ 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ 	}
+ 	dev->t10_pr.pr_aptpl_active = 1;
+-	kfree(buf);
++	vfree(buf);
+ 	pr_debug("SPC-3 PR: Set APTPL Bit Activated\n");
+ 	return 0;
+ }
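
core_scsi3_update_and_write_aptpl() now treats -EMSGSIZE as "double the buffer
and try again" rather than as a hard failure, and switches to vzalloc() so a
large APTPL buffer no longer needs physically contiguous pages. The same
grow-and-retry shape as a self-contained userspace illustration (snprintf
reports the would-be length, so a result >= len means the buffer was too
small):

#include <stdio.h>
#include <stdlib.h>

static char *format_grow(const char *src)
{
	size_t len = 64;
	char *buf;

	for (;;) {
		buf = calloc(1, len);
		if (!buf)
			return NULL;
		if ((size_t)snprintf(buf, len, "payload: %s", src) < len)
			return buf;	/* it fit */
		free(buf);		/* too small: double and retry */
		len *= 2;
	}
}
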
+diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
+index cd4bed7b2757..36b471389169 100644
+--- a/drivers/target/target_core_sbc.c
++++ b/drivers/target/target_core_sbc.c
+@@ -251,6 +251,8 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
+ static sense_reason_t
+ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
+ {
++	struct se_device *dev = cmd->se_dev;
++	sector_t end_lba = dev->transport->get_blocks(dev) + 1;
+ 	unsigned int sectors = sbc_get_write_same_sectors(cmd);
+ 
+ 	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
+@@ -264,6 +266,16 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
+ 			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
+ 		return TCM_INVALID_CDB_FIELD;
+ 	}
++	/*
++	 * Sanity check for LBA wrap and request past end of device.
++	 */
++	if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
++	    ((cmd->t_task_lba + sectors) > end_lba)) {
++		pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n",
++		       (unsigned long long)end_lba, cmd->t_task_lba, sectors);
++		return TCM_ADDRESS_OUT_OF_RANGE;
++	}
++
+ 	/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
+ 	if (flags[0] & 0x10) {
+ 		pr_warn("WRITE SAME with ANCHOR not supported\n");
+@@ -955,7 +967,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
+ 		unsigned long long end_lba;
+ check_lba:
+ 		end_lba = dev->transport->get_blocks(dev) + 1;
+-		if (cmd->t_task_lba + sectors > end_lba) {
++		if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
++		    ((cmd->t_task_lba + sectors) > end_lba)) {
+ 			pr_err("cmd exceeds last lba %llu "
+ 				"(lba %llu, sectors %u)\n",
+ 				end_lba, cmd->t_task_lba, sectors);
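
Both the WRITE SAME setup and the generic check_lba path now reject a command
whose LBA range wraps: in unsigned arithmetic an overflowed sum is smaller than
either operand, so t_task_lba + sectors < t_task_lba is precisely the wrap
test, and an LBA near the top of the 64-bit space can no longer slip past the
plain end-of-device comparison. The check in isolation:

#include <stdbool.h>
#include <stdint.h>

/* end_lba here mirrors the driver's get_blocks(dev) + 1 convention */
static bool lba_range_ok(uint64_t lba, uint32_t sectors, uint64_t end_lba)
{
	if (lba + sectors < lba)	/* 64-bit wraparound */
		return false;
	return lba + sectors <= end_lba;
}
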
+diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
+index 11c66856ba2f..ea1a8dabd5dc 100644
+--- a/drivers/tty/serial/8250/8250_core.c
++++ b/drivers/tty/serial/8250/8250_core.c
+@@ -2107,8 +2107,8 @@ int serial8250_do_startup(struct uart_port *port)
+ 	/*
+ 	 * Clear the interrupt registers.
+ 	 */
+-	if (serial_port_in(port, UART_LSR) & UART_LSR_DR)
+-		serial_port_in(port, UART_RX);
++	serial_port_in(port, UART_LSR);
++	serial_port_in(port, UART_RX);
+ 	serial_port_in(port, UART_IIR);
+ 	serial_port_in(port, UART_MSR);
+ 
+@@ -2269,8 +2269,8 @@ dont_test_tx_en:
+ 	 * saved flags to avoid getting false values from polling
+ 	 * routines or the previous session.
+ 	 */
+-	if (serial_port_in(port, UART_LSR) & UART_LSR_DR)
+-		serial_port_in(port, UART_RX);
++	serial_port_in(port, UART_LSR);
++	serial_port_in(port, UART_RX);
+ 	serial_port_in(port, UART_IIR);
+ 	serial_port_in(port, UART_MSR);
+ 	up->lsr_saved_flags = 0;
+@@ -2363,8 +2363,7 @@ void serial8250_do_shutdown(struct uart_port *port)
+ 	 * Read data port to reset things, and then unlink from
+ 	 * the IRQ chain.
+ 	 */
+-	if (serial_port_in(port, UART_LSR) & UART_LSR_DR)
+-		serial_port_in(port, UART_RX);
++	serial_port_in(port, UART_RX);
+ 	serial8250_rpm_put(up);
+ 
+ 	del_timer_sync(&up->timer);
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 51f066aa375e..2bb4dfc02873 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -1028,8 +1028,8 @@ EXPORT_SYMBOL(start_tty);
+ /* We limit tty time update visibility to every 8 seconds or so. */
+ static void tty_update_time(struct timespec *time)
+ {
+-	unsigned long sec = get_seconds() & ~7;
+-	if ((long)(sec - time->tv_sec) > 0)
++	unsigned long sec = get_seconds();
++	if (abs(sec - time->tv_sec) & ~7)
+ 		time->tv_sec = sec;
+ }
+ 
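
The rewritten check keys on the magnitude of the difference rather than its
sign: abs(sec - time->tv_sec) & ~7 is nonzero exactly when the stored stamp is
at least 8 seconds away from the current time in either direction, so the
timestamp now also recovers after the clock is stepped backwards. A few worked
values:

/*
 *   delta =  5:   5 & ~7 == 0      -> no update (within the 8 s window)
 *   delta = 12:  12 & ~7 == 8      -> update
 *   delta = -9:  abs(-9) & ~7 == 8 -> update, unlike the old signed test
 */
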
+diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
+index 1787fa4d9448..552076b25091 100644
+--- a/drivers/tty/tty_ioctl.c
++++ b/drivers/tty/tty_ioctl.c
+@@ -217,11 +217,17 @@ void tty_wait_until_sent(struct tty_struct *tty, long timeout)
+ #endif
+ 	if (!timeout)
+ 		timeout = MAX_SCHEDULE_TIMEOUT;
++
+ 	if (wait_event_interruptible_timeout(tty->write_wait,
+-			!tty_chars_in_buffer(tty), timeout) >= 0) {
+-		if (tty->ops->wait_until_sent)
+-			tty->ops->wait_until_sent(tty, timeout);
++			!tty_chars_in_buffer(tty), timeout) < 0) {
++		return;
+ 	}
++
++	if (timeout == MAX_SCHEDULE_TIMEOUT)
++		timeout = 0;
++
++	if (tty->ops->wait_until_sent)
++		tty->ops->wait_until_sent(tty, timeout);
+ }
+ EXPORT_SYMBOL(tty_wait_until_sent);
+ 
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index 0b59731c3021..e500243803d8 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -501,6 +501,7 @@ static void async_completed(struct urb *urb)
+ 	as->status = urb->status;
+ 	signr = as->signr;
+ 	if (signr) {
++		memset(&sinfo, 0, sizeof(sinfo));
+ 		sinfo.si_signo = as->signr;
+ 		sinfo.si_errno = as->status;
+ 		sinfo.si_code = SI_ASYNCIO;
+@@ -2371,6 +2372,7 @@ static void usbdev_remove(struct usb_device *udev)
+ 		wake_up_all(&ps->wait);
+ 		list_del_init(&ps->list);
+ 		if (ps->discsignr) {
++			memset(&sinfo, 0, sizeof(sinfo));
+ 			sinfo.si_signo = ps->discsignr;
+ 			sinfo.si_errno = EPIPE;
+ 			sinfo.si_code = SI_ASYNCIO;
+diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
+index 172d64e585b6..52e0c4e5e48e 100644
+--- a/drivers/usb/dwc3/dwc3-omap.c
++++ b/drivers/usb/dwc3/dwc3-omap.c
+@@ -205,6 +205,18 @@ static void dwc3_omap_write_irq0_set(struct dwc3_omap *omap, u32 value)
+ 						omap->irq0_offset, value);
+ }
+ 
++static void dwc3_omap_write_irqmisc_clr(struct dwc3_omap *omap, u32 value)
++{
++	dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_CLR_MISC +
++						omap->irqmisc_offset, value);
++}
++
++static void dwc3_omap_write_irq0_clr(struct dwc3_omap *omap, u32 value)
++{
++	dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_CLR_0 -
++						omap->irq0_offset, value);
++}
++
+ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
+ 	enum omap_dwc3_vbus_id_status status)
+ {
+@@ -345,9 +357,23 @@ static void dwc3_omap_enable_irqs(struct dwc3_omap *omap)
+ 
+ static void dwc3_omap_disable_irqs(struct dwc3_omap *omap)
+ {
++	u32			reg;
++
+ 	/* disable all IRQs */
+-	dwc3_omap_write_irqmisc_set(omap, 0x00);
+-	dwc3_omap_write_irq0_set(omap, 0x00);
++	reg = USBOTGSS_IRQO_COREIRQ_ST;
++	dwc3_omap_write_irq0_clr(omap, reg);
++
++	reg = (USBOTGSS_IRQMISC_OEVT |
++			USBOTGSS_IRQMISC_DRVVBUS_RISE |
++			USBOTGSS_IRQMISC_CHRGVBUS_RISE |
++			USBOTGSS_IRQMISC_DISCHRGVBUS_RISE |
++			USBOTGSS_IRQMISC_IDPULLUP_RISE |
++			USBOTGSS_IRQMISC_DRVVBUS_FALL |
++			USBOTGSS_IRQMISC_CHRGVBUS_FALL |
++			USBOTGSS_IRQMISC_DISCHRGVBUS_FALL |
++			USBOTGSS_IRQMISC_IDPULLUP_FALL);
++
++	dwc3_omap_write_irqmisc_clr(omap, reg);
+ }
+ 
+ static u64 dwc3_omap_dma_mask = DMA_BIT_MASK(32);
+diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
+index 75648145dc1b..c42765b3a060 100644
+--- a/drivers/usb/gadget/configfs.c
++++ b/drivers/usb/gadget/configfs.c
+@@ -1161,7 +1161,6 @@ static ssize_t interf_grp_compatible_id_store(struct usb_os_desc *desc,
+ 	if (desc->opts_mutex)
+ 		mutex_lock(desc->opts_mutex);
+ 	memcpy(desc->ext_compat_id, page, l);
+-	desc->ext_compat_id[l] = '\0';
+ 
+ 	if (desc->opts_mutex)
+ 		mutex_unlock(desc->opts_mutex);
+@@ -1192,7 +1191,6 @@ static ssize_t interf_grp_sub_compatible_id_store(struct usb_os_desc *desc,
+ 	if (desc->opts_mutex)
+ 		mutex_lock(desc->opts_mutex);
+ 	memcpy(desc->ext_compat_id + 8, page, l);
+-	desc->ext_compat_id[l + 8] = '\0';
+ 
+ 	if (desc->opts_mutex)
+ 		mutex_unlock(desc->opts_mutex);
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 7f76c8a12f89..fd53c9ebd662 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -37,6 +37,9 @@
+ 
+ #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI	0x8c31
+ #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI	0x9c31
++#define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI		0x22b5
++#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI		0xa12f
++#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI	0x9d2f
+ 
+ static const char hcd_name[] = "xhci_hcd";
+ 
+@@ -133,6 +136,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 		pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
+ 		xhci->quirks |= XHCI_SPURIOUS_REBOOT;
+ 	}
++	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
++		(pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
++		 pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
++		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) {
++		xhci->quirks |= XHCI_PME_STUCK_QUIRK;
++	}
+ 	if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+ 			pdev->device == PCI_DEVICE_ID_EJ168) {
+ 		xhci->quirks |= XHCI_RESET_ON_RESUME;
+@@ -159,6 +168,21 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 				"QUIRK: Resetting on resume");
+ }
+ 
++/*
++ * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
++ * the Internal PME flag bit in the vendor-specific PMCTRL register at offset 0x80a4
++ */
++static void xhci_pme_quirk(struct xhci_hcd *xhci)
++{
++	u32 val;
++	void __iomem *reg;
++
++	reg = (void __iomem *) xhci->cap_regs + 0x80a4;
++	val = readl(reg);
++	writel(val | BIT(28), reg);
++	readl(reg);
++}
++
+ /* called during probe() after chip reset completes */
+ static int xhci_pci_setup(struct usb_hcd *hcd)
+ {
+@@ -283,6 +307,9 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
+ 	if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
+ 		pdev->no_d3cold = true;
+ 
++	if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
++		xhci_pme_quirk(xhci);
++
+ 	return xhci_suspend(xhci, do_wakeup);
+ }
+ 
+@@ -313,6 +340,9 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL)
+ 		usb_enable_intel_xhci_ports(pdev);
+ 
++	if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
++		xhci_pme_quirk(xhci);
++
+ 	retval = xhci_resume(xhci, hibernated);
+ 	return retval;
+ }
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index 08d402b15482..0e11d61408ff 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -83,16 +83,6 @@ static int xhci_plat_probe(struct platform_device *pdev)
+ 	if (irq < 0)
+ 		return -ENODEV;
+ 
+-
+-	if (of_device_is_compatible(pdev->dev.of_node,
+-				    "marvell,armada-375-xhci") ||
+-	    of_device_is_compatible(pdev->dev.of_node,
+-				    "marvell,armada-380-xhci")) {
+-		ret = xhci_mvebu_mbus_init_quirk(pdev);
+-		if (ret)
+-			return ret;
+-	}
+-
+ 	/* Initialize dma_mask and coherent_dma_mask to 32-bits */
+ 	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ 	if (ret)
+@@ -127,6 +117,15 @@ static int xhci_plat_probe(struct platform_device *pdev)
+ 			goto put_hcd;
+ 	}
+ 
++	if (of_device_is_compatible(pdev->dev.of_node,
++				    "marvell,armada-375-xhci") ||
++	    of_device_is_compatible(pdev->dev.of_node,
++				    "marvell,armada-380-xhci")) {
++		ret = xhci_mvebu_mbus_init_quirk(pdev);
++		if (ret)
++			goto disable_clk;
++	}
++
+ 	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ 	if (ret)
+ 		goto disable_clk;
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index e692e769c50c..2a924d500d8a 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1946,7 +1946,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ 	if (event_trb != ep_ring->dequeue) {
+ 		/* The event was for the status stage */
+ 		if (event_trb == td->last_trb) {
+-			if (td->urb->actual_length != 0) {
++			if (td->urb_length_set) {
+ 				/* Don't overwrite a previously set error code
+ 				 */
+ 				if ((*status == -EINPROGRESS || *status == 0) &&
+@@ -1960,7 +1960,13 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ 					td->urb->transfer_buffer_length;
+ 			}
+ 		} else {
+-		/* Maybe the event was for the data stage? */
++			/*
++			 * Maybe the event was for the data stage? If so, update
++			 * the actual_length of the URB now and flag it as set,
++			 * so that it is not overwritten in the event for the
++			 * last TRB.
++			 */
++			td->urb_length_set = true;
+ 			td->urb->actual_length =
+ 				td->urb->transfer_buffer_length -
+ 				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index cc7c5bb7cbcf..ab09b1ae5f83 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1,3 +1,4 @@
++
+ /*
+  * xHCI host controller driver
+  *
+@@ -88,9 +89,10 @@ struct xhci_cap_regs {
+ #define HCS_IST(p)		(((p) >> 0) & 0xf)
+ /* bits 4:7, max number of Event Ring segments */
+ #define HCS_ERST_MAX(p)		(((p) >> 4) & 0xf)
++/* bits 21:25 Hi 5 bits of Scratchpad buffers SW must allocate for the HW */
+ /* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
+-/* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */
+-#define HCS_MAX_SCRATCHPAD(p)   (((p) >> 27) & 0x1f)
++/* bits 27:31 Lo 5 bits of Scratchpad buffers SW must allocate for the HW */
++#define HCS_MAX_SCRATCHPAD(p)   ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))
+ 
+ /* HCSPARAMS3 - hcs_params3 - bitmasks */
+ /* bits 0:7, Max U1 to U0 latency for the roothub ports */
+@@ -1288,6 +1290,8 @@ struct xhci_td {
+ 	struct xhci_segment	*start_seg;
+ 	union xhci_trb		*first_trb;
+ 	union xhci_trb		*last_trb;
++	/* actual_length of the URB has already been set */
++	bool			urb_length_set;
+ };
+ 
+ /* xHCI command default timeout value */
+@@ -1560,6 +1564,7 @@ struct xhci_hcd {
+ #define XHCI_SPURIOUS_WAKEUP	(1 << 18)
+ /* For controllers with a broken beyond repair streams implementation */
+ #define XHCI_BROKEN_STREAMS	(1 << 19)
++#define XHCI_PME_STUCK_QUIRK	(1 << 20)
+ 	unsigned int		num_active_eps;
+ 	unsigned int		limit_active_eps;
+ 	/* There are two roothubs to keep track of bus suspend info for */
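The widened HCS_MAX_SCRATCHPAD() in this xhci.h hunk assembles a 10-bit count from two fields of the parameter register: bits 21:25 carry the high five bits and bits 27:31 the low five. (p >> 16) & 0x3e0 lands bits 21:25 in result bits 5:9, and (p >> 27) & 0x1f supplies result bits 0:4. A compilable check of the arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    #define HCS_MAX_SCRATCHPAD(p)   ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))

    int main(void)
    {
        /* high field = 2, low field = 3 => expect 2 * 32 + 3 = 67 */
        uint32_t p = (2u << 21) | (3u << 27);

        printf("%u\n", HCS_MAX_SCRATCHPAD(p)); /* prints 67 */
        return 0;
    }
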
+diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c
+index 9374bd2aba20..6f91eb9ae81a 100644
+--- a/drivers/usb/serial/bus.c
++++ b/drivers/usb/serial/bus.c
+@@ -51,6 +51,7 @@ static int usb_serial_device_probe(struct device *dev)
+ {
+ 	struct usb_serial_driver *driver;
+ 	struct usb_serial_port *port;
++	struct device *tty_dev;
+ 	int retval = 0;
+ 	int minor;
+ 
+@@ -75,12 +76,20 @@ static int usb_serial_device_probe(struct device *dev)
+ 	retval = device_create_file(dev, &dev_attr_port_number);
+ 	if (retval) {
+ 		if (driver->port_remove)
+-			retval = driver->port_remove(port);
++			driver->port_remove(port);
+ 		goto exit_with_autopm;
+ 	}
+ 
+ 	minor = port->minor;
+-	tty_register_device(usb_serial_tty_driver, minor, dev);
++	tty_dev = tty_register_device(usb_serial_tty_driver, minor, dev);
++	if (IS_ERR(tty_dev)) {
++		retval = PTR_ERR(tty_dev);
++		device_remove_file(dev, &dev_attr_port_number);
++		if (driver->port_remove)
++			driver->port_remove(port);
++		goto exit_with_autopm;
++	}
++
+ 	dev_info(&port->serial->dev->dev,
+ 		 "%s converter now attached to ttyUSB%d\n",
+ 		 driver->description, minor);
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index f40c856ff758..84ce2d74894c 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -147,6 +147,8 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */
+ 	{ USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus Architectural Dimmer */
+ 	{ USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */
++	{ USB_DEVICE(0x16C0, 0x09B0) }, /* Lunatico Seletek */
++	{ USB_DEVICE(0x16C0, 0x09B1) }, /* Lunatico Seletek */
+ 	{ USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
+ 	{ USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
+ 	{ USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 1ebb351b9e9a..3086dec0ef53 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -799,6 +799,8 @@ static const struct usb_device_id id_table_combined[] = {
+ 	{ USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) },
++	{ USB_DEVICE(FTDI_VID, CYBER_CORTEX_AV_PID),
++		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ 	{ USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID),
+ 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ 	{ USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID),
+@@ -978,6 +980,23 @@ static const struct usb_device_id id_table_combined[] = {
+ 	{ USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
+ 	/* GE Healthcare devices */
+ 	{ USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) },
++	/* Active Research (Actisense) devices */
++	{ USB_DEVICE(FTDI_VID, ACTISENSE_NDC_PID) },
++	{ USB_DEVICE(FTDI_VID, ACTISENSE_USG_PID) },
++	{ USB_DEVICE(FTDI_VID, ACTISENSE_NGT_PID) },
++	{ USB_DEVICE(FTDI_VID, ACTISENSE_NGW_PID) },
++	{ USB_DEVICE(FTDI_VID, ACTISENSE_D9AC_PID) },
++	{ USB_DEVICE(FTDI_VID, ACTISENSE_D9AD_PID) },
++	{ USB_DEVICE(FTDI_VID, ACTISENSE_D9AE_PID) },
++	{ USB_DEVICE(FTDI_VID, ACTISENSE_D9AF_PID) },
++	{ USB_DEVICE(FTDI_VID, CHETCO_SEAGAUGE_PID) },
++	{ USB_DEVICE(FTDI_VID, CHETCO_SEASWITCH_PID) },
++	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_NMEA2000_PID) },
++	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ETHERNET_PID) },
++	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_WIFI_PID) },
++	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) },
++	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) },
++	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) },
+ 	{ }					/* Terminating entry */
+ };
+ 
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index e52409c9be99..56b1b55c4751 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -38,6 +38,9 @@
+ 
+ #define FTDI_LUMEL_PD12_PID	0x6002
+ 
++/* Cyber Cortex AV by Fabulous Silicon (http://fabuloussilicon.com) */
++#define CYBER_CORTEX_AV_PID	0x8698
++
+ /*
+  * Marvell OpenRD Base, Client
+  * http://www.open-rd.org
+@@ -1438,3 +1441,23 @@
+  */
+ #define GE_HEALTHCARE_VID		0x1901
+ #define GE_HEALTHCARE_NEMO_TRACKER_PID	0x0015
++
++/*
++ * Active Research (Actisense) devices
++ */
++#define ACTISENSE_NDC_PID		0xD9A8 /* NDC USB Serial Adapter */
++#define ACTISENSE_USG_PID		0xD9A9 /* USG USB Serial Adapter */
++#define ACTISENSE_NGT_PID		0xD9AA /* NGT NMEA2000 Interface */
++#define ACTISENSE_NGW_PID		0xD9AB /* NGW NMEA2000 Gateway */
++#define ACTISENSE_D9AC_PID		0xD9AC /* Actisense Reserved */
++#define ACTISENSE_D9AD_PID		0xD9AD /* Actisense Reserved */
++#define ACTISENSE_D9AE_PID		0xD9AE /* Actisense Reserved */
++#define ACTISENSE_D9AF_PID		0xD9AF /* Actisense Reserved */
++#define CHETCO_SEAGAUGE_PID		0xA548 /* SeaGauge USB Adapter */
++#define CHETCO_SEASWITCH_PID		0xA549 /* SeaSwitch USB Adapter */
++#define CHETCO_SEASMART_NMEA2000_PID	0xA54A /* SeaSmart NMEA2000 Gateway */
++#define CHETCO_SEASMART_ETHERNET_PID	0xA54B /* SeaSmart Ethernet Gateway */
++#define CHETCO_SEASMART_WIFI_PID	0xA5AC /* SeaSmart Wifi Gateway */
++#define CHETCO_SEASMART_DISPLAY_PID	0xA5AD /* SeaSmart NMEA2000 Display */
++#define CHETCO_SEASMART_LITE_PID	0xA5AE /* SeaSmart Lite USB Adapter */
++#define CHETCO_SEASMART_ANALOG_PID	0xA5AF /* SeaSmart Analog Adapter */
+diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
+index ccf1df7c4b80..54e170dd3dad 100644
+--- a/drivers/usb/serial/generic.c
++++ b/drivers/usb/serial/generic.c
+@@ -258,7 +258,8 @@ void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout)
+ 	 * character or at least one jiffy.
+ 	 */
+ 	period = max_t(unsigned long, (10 * HZ / bps), 1);
+-	period = min_t(unsigned long, period, timeout);
++	if (timeout)
++		period = min_t(unsigned long, period, timeout);
+ 
+ 	dev_dbg(&port->dev, "%s - timeout = %u ms, period = %u ms\n",
+ 					__func__, jiffies_to_msecs(timeout),
+@@ -268,7 +269,7 @@ void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout)
+ 		schedule_timeout_interruptible(period);
+ 		if (signal_pending(current))
+ 			break;
+-		if (time_after(jiffies, expire))
++		if (timeout && time_after(jiffies, expire))
+ 			break;
+ 	}
+ }
+diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c
+index ab1d690274ae..460a40669967 100644
+--- a/drivers/usb/serial/mxuport.c
++++ b/drivers/usb/serial/mxuport.c
+@@ -1284,7 +1284,8 @@ static int mxuport_open(struct tty_struct *tty, struct usb_serial_port *port)
+ 	}
+ 
+ 	/* Initial port termios */
+-	mxuport_set_termios(tty, port, NULL);
++	if (tty)
++		mxuport_set_termios(tty, port, NULL);
+ 
+ 	/*
+ 	 * TODO: use RQ_VENDOR_GET_MSR, once we know what it
+diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
+index 475723c006f9..19842370a07f 100644
+--- a/drivers/usb/serial/usb-serial.c
++++ b/drivers/usb/serial/usb-serial.c
+@@ -940,8 +940,9 @@ static int usb_serial_probe(struct usb_interface *interface,
+ 		port = serial->port[i];
+ 		if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL))
+ 			goto probe_error;
+-		buffer_size = max_t(int, serial->type->bulk_out_size,
+-						usb_endpoint_maxp(endpoint));
++		buffer_size = serial->type->bulk_out_size;
++		if (!buffer_size)
++			buffer_size = usb_endpoint_maxp(endpoint);
+ 		port->bulk_out_size = buffer_size;
+ 		port->bulk_out_endpointAddress = endpoint->bEndpointAddress;
+ 
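Together with the usb/serial.h comment change further down, this redefines bulk_out_size from a minimum to an exact request: a non-zero per-driver value is now used as-is, even when it is smaller than the endpoint's maximum packet size, and only 0 falls back to the endpoint size. A one-function sketch of the new rule (names illustrative):

    #include <stdio.h>

    static int bulk_out_buffer_size(int driver_size, int ep_maxp)
    {
        /* non-zero driver value wins outright; 0 means "use endpoint size" */
        return driver_size ? driver_size : ep_maxp;
    }

    int main(void)
    {
        printf("%d\n", bulk_out_buffer_size(0, 512));  /* 512: endpoint size */
        printf("%d\n", bulk_out_buffer_size(64, 512)); /* 64: smaller now allowed */
        return 0;
    }
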
+diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
+index dbc00e56c7f5..82570425fdfe 100644
+--- a/drivers/usb/storage/unusual_uas.h
++++ b/drivers/usb/storage/unusual_uas.h
+@@ -113,6 +113,13 @@ UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_NO_ATA_1X),
+ 
++/* Reported-by: Tom Arild Naess <tanaess@gmail.com> */
++UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999,
++		"JMicron",
++		"JMS539",
++		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++		US_FL_NO_REPORT_OPCODES),
++
+ /* Reported-by: Claudio Bizzarri <claudio.bizzarri@gmail.com> */
+ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
+ 		"JMicron",
+diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
+index aaf96cb25452..ac7d921ed984 100644
+--- a/fs/autofs4/dev-ioctl.c
++++ b/fs/autofs4/dev-ioctl.c
+@@ -95,7 +95,7 @@ static int check_dev_ioctl_version(int cmd, struct autofs_dev_ioctl *param)
+  */
+ static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *in)
+ {
+-	struct autofs_dev_ioctl tmp;
++	struct autofs_dev_ioctl tmp, *res;
+ 
+ 	if (copy_from_user(&tmp, in, sizeof(tmp)))
+ 		return ERR_PTR(-EFAULT);
+@@ -106,7 +106,11 @@ static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *i
+ 	if (tmp.size > (PATH_MAX + sizeof(tmp)))
+ 		return ERR_PTR(-ENAMETOOLONG);
+ 
+-	return memdup_user(in, tmp.size);
++	res = memdup_user(in, tmp.size);
++	if (!IS_ERR(res))
++		res->size = tmp.size;
++
++	return res;
+ }
+ 
+ static inline void free_dev_ioctl(struct autofs_dev_ioctl *param)
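The copy_dev_ioctl() change above defends against a time-of-check/time-of-use race: the size is validated from the first copy, but memdup_user() re-reads the header from userspace, which may have changed in between. Writing the validated tmp.size back into the kernel copy ensures later code never trusts a length the check did not see. A userspace model of the pattern (all names invented):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct hdr { unsigned int size; char data[]; };

    /* user_buf models userspace memory that can change between the two reads */
    static struct hdr *copy_ioctl(const void *user_buf)
    {
        struct hdr tmp, *res;

        memcpy(&tmp, user_buf, sizeof(tmp)); /* first read: header only */
        if (tmp.size < sizeof(tmp) || tmp.size > 4096)
            return NULL;                     /* validate exactly once */

        res = malloc(tmp.size);
        if (!res)
            return NULL;
        memcpy(res, user_buf, tmp.size);     /* second read: full buffer */
        res->size = tmp.size;                /* the fix: keep the checked size */
        return res;
    }

    int main(void)
    {
        char buf[64] = { 0 };
        struct hdr *h;

        ((struct hdr *)buf)->size = 16;
        h = copy_ioctl(buf);
        printf("%u\n", h ? h->size : 0); /* 16 */
        free(h);
        return 0;
    }
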
+diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
+index dbb5b7212ce1..7ba355b8d4ac 100644
+--- a/fs/autofs4/root.c
++++ b/fs/autofs4/root.c
+@@ -108,7 +108,7 @@ static int autofs4_dir_open(struct inode *inode, struct file *file)
+ 	struct dentry *dentry = file->f_path.dentry;
+ 	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+ 
+-	DPRINTK("file=%p dentry=%p %pD", file, dentry, dentry);
++	DPRINTK("file=%p dentry=%p %pd", file, dentry, dentry);
+ 
+ 	if (autofs4_oz_mode(sbi))
+ 		goto out;
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index e4090259569b..5529ca84496f 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -1811,22 +1811,10 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
+ 	mutex_unlock(&inode->i_mutex);
+ 
+ 	/*
+-	 * we want to make sure fsync finds this change
+-	 * but we haven't joined a transaction running right now.
+-	 *
+-	 * Later on, someone is sure to update the inode and get the
+-	 * real transid recorded.
+-	 *
+-	 * We set last_trans now to the fs_info generation + 1,
+-	 * this will either be one more than the running transaction
+-	 * or the generation used for the next transaction if there isn't
+-	 * one running right now.
+-	 *
+ 	 * We also have to set last_sub_trans to the current log transid,
+ 	 * otherwise subsequent syncs to a file that's been synced in this
+ 	 * transaction will appear to have already occurred.
+ 	 */
+-	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
+ 	BTRFS_I(inode)->last_sub_trans = root->log_transid;
+ 	if (num_written > 0) {
+ 		err = generic_write_sync(file, pos, num_written);
+@@ -1959,25 +1947,37 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ 	atomic_inc(&root->log_batch);
+ 
+ 	/*
+-	 * check the transaction that last modified this inode
+-	 * and see if its already been committed
+-	 */
+-	if (!BTRFS_I(inode)->last_trans) {
+-		mutex_unlock(&inode->i_mutex);
+-		goto out;
+-	}
+-
+-	/*
+-	 * if the last transaction that changed this file was before
+-	 * the current transaction, we can bail out now without any
+-	 * syncing
++	 * If the last transaction that changed this file was before the current
++	 * transaction and we have the full sync flag set in our inode, we can
++	 * bail out now without any syncing.
++	 *
++	 * Note that we can't bail out if the full sync flag isn't set. This is
++	 * because when the full sync flag is set we start all ordered extents
++	 * and wait for them to fully complete - when they complete they update
++	 * the inode's last_trans field through:
++	 *
++	 *     btrfs_finish_ordered_io() ->
++	 *         btrfs_update_inode_fallback() ->
++	 *             btrfs_update_inode() ->
++	 *                 btrfs_set_inode_last_trans()
++	 *
++	 * So we are sure that last_trans is up to date and can do this check to
++	 * bail out safely. For the fast path, when the full sync flag is not
++	 * set in our inode, we cannot do this because we only start our ordered
++	 * extents and don't wait for them to complete (that is when
++	 * btrfs_finish_ordered_io runs), so here at this point their last_trans
++	 * value might be less than or equal to fs_info->last_trans_committed,
++	 * and setting a speculative last_trans for an inode when a buffered
++	 * write is made (such as fs_info->generation + 1 for example) would not
++	 * be reliable since after setting the value and before fsync is called
++	 * any number of transactions can start and commit (transaction kthread
++	 * commits the current transaction periodically), and a transaction
++	 * commit does not start nor waits for ordered extents to complete.
+ 	 */
+ 	smp_mb();
+ 	if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
+-	    BTRFS_I(inode)->last_trans <=
+-	    root->fs_info->last_trans_committed) {
+-		BTRFS_I(inode)->last_trans = 0;
+-
++	    (full_sync && BTRFS_I(inode)->last_trans <=
++	     root->fs_info->last_trans_committed)) {
+ 		/*
+ 		 * We've had everything committed since the last time we were
+ 		 * modified so clear this flag in case it was set for whatever
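The long comment above boils down to one predicate: the fsync fast path may bail out only when the inode is already in the log, or when a full sync was requested (so last_trans is known to be current) and nothing changed since the last committed transaction. Distilled into standalone form (field names shortened for illustration):

    #include <stdio.h>

    static int can_skip_fsync(int in_log, int full_sync,
                              unsigned long long last_trans,
                              unsigned long long last_committed)
    {
        /* only a full sync waits for ordered extents, making last_trans
         * trustworthy enough to compare against the committed generation */
        return in_log || (full_sync && last_trans <= last_committed);
    }

    int main(void)
    {
        printf("%d\n", can_skip_fsync(0, 1, 5, 7)); /* 1: committed already */
        printf("%d\n", can_skip_fsync(0, 0, 5, 7)); /* 0: fast path must sync */
        return 0;
    }
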
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 8bf326affb94..35911f057a77 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -7208,7 +7208,6 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
+ 	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
+ 	     em->block_start != EXTENT_MAP_HOLE)) {
+ 		int type;
+-		int ret;
+ 		u64 block_start, orig_start, orig_block_len, ram_bytes;
+ 
+ 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
+diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
+index 534544e08f76..157cc54fc634 100644
+--- a/fs/btrfs/ordered-data.c
++++ b/fs/btrfs/ordered-data.c
+@@ -452,9 +452,7 @@ void btrfs_get_logged_extents(struct inode *inode,
+ 			continue;
+ 		if (entry_end(ordered) <= start)
+ 			break;
+-		if (!list_empty(&ordered->log_list))
+-			continue;
+-		if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
++		if (test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
+ 			continue;
+ 		list_add(&ordered->log_list, logged_list);
+ 		atomic_inc(&ordered->refs);
+@@ -511,8 +509,7 @@ void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
+ 		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
+ 						   &ordered->flags));
+ 
+-		if (!test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
+-			list_add_tail(&ordered->trans_list, &trans->ordered);
++		list_add_tail(&ordered->trans_list, &trans->ordered);
+ 		spin_lock_irq(&log->log_extents_lock[index]);
+ 	}
+ 	spin_unlock_irq(&log->log_extents_lock[index]);
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index f78e9dc5d574..069ab24badaa 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -1010,7 +1010,7 @@ again:
+ 		base = btrfs_item_ptr_offset(leaf, path->slots[0]);
+ 
+ 		while (cur_offset < item_size) {
+-			extref = (struct btrfs_inode_extref *)base + cur_offset;
++			extref = (struct btrfs_inode_extref *)(base + cur_offset);
+ 
+ 			victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
+ 
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index 05f2960ed7c3..6f0ce531e221 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -246,10 +246,19 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root)
+ 	return 0;
+ }
+ 
++static void debugfs_evict_inode(struct inode *inode)
++{
++	truncate_inode_pages_final(&inode->i_data);
++	clear_inode(inode);
++	if (S_ISLNK(inode->i_mode))
++		kfree(inode->i_private);
++}
++
+ static const struct super_operations debugfs_super_operations = {
+ 	.statfs		= simple_statfs,
+ 	.remount_fs	= debugfs_remount,
+ 	.show_options	= debugfs_show_options,
++	.evict_inode	= debugfs_evict_inode,
+ };
+ 
+ static int debug_fill_super(struct super_block *sb, void *data, int silent)
+@@ -466,23 +475,14 @@ static int __debugfs_remove(struct dentry *dentry, struct dentry *parent)
+ 	int ret = 0;
+ 
+ 	if (debugfs_positive(dentry)) {
+-		if (dentry->d_inode) {
+-			dget(dentry);
+-			switch (dentry->d_inode->i_mode & S_IFMT) {
+-			case S_IFDIR:
+-				ret = simple_rmdir(parent->d_inode, dentry);
+-				break;
+-			case S_IFLNK:
+-				kfree(dentry->d_inode->i_private);
+-				/* fall through */
+-			default:
+-				simple_unlink(parent->d_inode, dentry);
+-				break;
+-			}
+-			if (!ret)
+-				d_delete(dentry);
+-			dput(dentry);
+-		}
++		dget(dentry);
++		if (S_ISDIR(dentry->d_inode->i_mode))
++			ret = simple_rmdir(parent->d_inode, dentry);
++		else
++			simple_unlink(parent->d_inode, dentry);
++		if (!ret)
++			d_delete(dentry);
++		dput(dentry);
+ 	}
+ 	return ret;
+ }
+diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
+index 6f4e659f508f..6058c7c996d6 100644
+--- a/fs/ecryptfs/file.c
++++ b/fs/ecryptfs/file.c
+@@ -303,9 +303,22 @@ ecryptfs_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 	struct file *lower_file = ecryptfs_file_to_lower(file);
+ 	long rc = -ENOTTY;
+ 
+-	if (lower_file->f_op->unlocked_ioctl)
++	if (!lower_file->f_op->unlocked_ioctl)
++		return rc;
++
++	switch (cmd) {
++	case FITRIM:
++	case FS_IOC_GETFLAGS:
++	case FS_IOC_SETFLAGS:
++	case FS_IOC_GETVERSION:
++	case FS_IOC_SETVERSION:
+ 		rc = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg);
+-	return rc;
++		fsstack_copy_attr_all(file_inode(file), file_inode(lower_file));
++
++		return rc;
++	default:
++		return rc;
++	}
+ }
+ 
+ #ifdef CONFIG_COMPAT
+@@ -315,9 +328,22 @@ ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 	struct file *lower_file = ecryptfs_file_to_lower(file);
+ 	long rc = -ENOIOCTLCMD;
+ 
+-	if (lower_file->f_op->compat_ioctl)
++	if (!lower_file->f_op->compat_ioctl)
++		return rc;
++
++	switch (cmd) {
++	case FITRIM:
++	case FS_IOC32_GETFLAGS:
++	case FS_IOC32_SETFLAGS:
++	case FS_IOC32_GETVERSION:
++	case FS_IOC32_SETVERSION:
+ 		rc = lower_file->f_op->compat_ioctl(lower_file, cmd, arg);
+-	return rc;
++		fsstack_copy_attr_all(file_inode(file), file_inode(lower_file));
++
++		return rc;
++	default:
++		return rc;
++	}
+ }
+ #endif
+ 
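The rewritten ioctl handlers above switch from forwarding every command to the lower filesystem to passing through only an explicit whitelist (FITRIM and the flags/version get/set ioctls), re-syncing inode attributes after each forwarded call. The shape of the pattern in miniature (command values and names invented):

    #include <stdio.h>

    enum { TOY_FITRIM = 1, TOY_GETFLAGS, TOY_SETFLAGS, TOY_UNKNOWN };

    static long forward_ioctl(int cmd)
    {
        switch (cmd) {
        case TOY_FITRIM:
        case TOY_GETFLAGS:
        case TOY_SETFLAGS:
            /* would call the lower file's handler here, then copy
             * the lower inode's attributes back up */
            return 0;
        default:
            return -1; /* unknown commands are refused, not forwarded */
        }
    }

    int main(void)
    {
        printf("%ld %ld\n", forward_ioctl(TOY_FITRIM), forward_ioctl(TOY_UNKNOWN));
        return 0;
    }
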
+diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
+index 3088e2a38e30..7b3143064af1 100644
+--- a/fs/gfs2/acl.c
++++ b/fs/gfs2/acl.c
+@@ -73,7 +73,7 @@ int gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+ 
+ 	BUG_ON(name == NULL);
+ 
+-	if (acl->a_count > GFS2_ACL_MAX_ENTRIES(GFS2_SB(inode)))
++	if (acl && acl->a_count > GFS2_ACL_MAX_ENTRIES(GFS2_SB(inode)))
+ 		return -E2BIG;
+ 
+ 	if (type == ACL_TYPE_ACCESS) {
+diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
+index 7f3f60641344..4030b558b07e 100644
+--- a/fs/nfs/delegation.c
++++ b/fs/nfs/delegation.c
+@@ -177,8 +177,8 @@ void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred,
+ 				  &delegation->flags);
+ 			NFS_I(inode)->delegation_state = delegation->type;
+ 			spin_unlock(&delegation->lock);
+-			put_rpccred(oldcred);
+ 			rcu_read_unlock();
++			put_rpccred(oldcred);
+ 			trace_nfs4_reclaim_delegation(inode, res->delegation_type);
+ 		} else {
+ 			/* We appear to have raced with a delegation return. */
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index 9b0c55cb2a2e..4ad7fff9ccaf 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -469,6 +469,8 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
+ 	struct inode *inode;
+ 	int status;
+ 
++	if (!(entry->fattr->valid & NFS_ATTR_FATTR_FSID))
++		return;
+ 	if (filename.name[0] == '.') {
+ 		if (filename.len == 1)
+ 			return;
+@@ -479,6 +481,10 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
+ 
+ 	dentry = d_lookup(parent, &filename);
+ 	if (dentry != NULL) {
++		/* Is there a mountpoint here? If so, just exit */
++		if (!nfs_fsid_equal(&NFS_SB(dentry->d_sb)->fsid,
++					&entry->fattr->fsid))
++			goto out;
+ 		if (nfs_same_file(dentry, entry)) {
+ 			nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ 			status = nfs_refresh_inode(dentry->d_inode, entry->fattr);
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index c06a1ba80d73..1685b82a9ccd 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -1653,7 +1653,7 @@ __destroy_client(struct nfs4_client *clp)
+ 		nfs4_put_stid(&dp->dl_stid);
+ 	}
+ 	while (!list_empty(&clp->cl_revoked)) {
+-		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
++		dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
+ 		list_del_init(&dp->dl_recall_lru);
+ 		nfs4_put_stid(&dp->dl_stid);
+ 	}
+diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
+index b2e3ff347620..ecdbae19a766 100644
+--- a/fs/nilfs2/btree.c
++++ b/fs/nilfs2/btree.c
+@@ -31,6 +31,8 @@
+ #include "alloc.h"
+ #include "dat.h"
+ 
++static void __nilfs_btree_init(struct nilfs_bmap *bmap);
++
+ static struct nilfs_btree_path *nilfs_btree_alloc_path(void)
+ {
+ 	struct nilfs_btree_path *path;
+@@ -368,6 +370,34 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
+ 	return ret;
+ }
+ 
++/**
++ * nilfs_btree_root_broken - verify consistency of btree root node
++ * @node: btree root node to be examined
++ * @ino: inode number
++ *
++ * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
++ */
++static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
++				   unsigned long ino)
++{
++	int level, flags, nchildren;
++	int ret = 0;
++
++	level = nilfs_btree_node_get_level(node);
++	flags = nilfs_btree_node_get_flags(node);
++	nchildren = nilfs_btree_node_get_nchildren(node);
++
++	if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
++		     level > NILFS_BTREE_LEVEL_MAX ||
++		     nchildren < 0 ||
++		     nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) {
++		pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n",
++			ino, level, flags, nchildren);
++		ret = 1;
++	}
++	return ret;
++}
++
+ int nilfs_btree_broken_node_block(struct buffer_head *bh)
+ {
+ 	int ret;
+@@ -1713,7 +1743,7 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree,
+ 
+ 	/* convert and insert */
+ 	dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL;
+-	nilfs_btree_init(btree);
++	__nilfs_btree_init(btree);
+ 	if (nreq != NULL) {
+ 		nilfs_bmap_commit_alloc_ptr(btree, dreq, dat);
+ 		nilfs_bmap_commit_alloc_ptr(btree, nreq, dat);
+@@ -2294,12 +2324,23 @@ static const struct nilfs_bmap_operations nilfs_btree_ops_gc = {
+ 	.bop_gather_data	=	NULL,
+ };
+ 
+-int nilfs_btree_init(struct nilfs_bmap *bmap)
++static void __nilfs_btree_init(struct nilfs_bmap *bmap)
+ {
+ 	bmap->b_ops = &nilfs_btree_ops;
+ 	bmap->b_nchildren_per_block =
+ 		NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap));
+-	return 0;
++}
++
++int nilfs_btree_init(struct nilfs_bmap *bmap)
++{
++	int ret = 0;
++
++	__nilfs_btree_init(bmap);
++
++	if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap),
++				    bmap->b_inode->i_ino))
++		ret = -EIO;
++	return ret;
+ }
+ 
+ void nilfs_btree_init_gc(struct nilfs_bmap *bmap)
+diff --git a/fs/proc/generic.c b/fs/proc/generic.c
+index 7fea13229f33..b502bba0f9fd 100644
+--- a/fs/proc/generic.c
++++ b/fs/proc/generic.c
+@@ -19,7 +19,6 @@
+ #include <linux/mount.h>
+ #include <linux/init.h>
+ #include <linux/idr.h>
+-#include <linux/namei.h>
+ #include <linux/bitops.h>
+ #include <linux/spinlock.h>
+ #include <linux/completion.h>
+@@ -223,17 +222,6 @@ void proc_free_inum(unsigned int inum)
+ 	spin_unlock_irqrestore(&proc_inum_lock, flags);
+ }
+ 
+-static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
+-{
+-	nd_set_link(nd, __PDE_DATA(dentry->d_inode));
+-	return NULL;
+-}
+-
+-static const struct inode_operations proc_link_inode_operations = {
+-	.readlink	= generic_readlink,
+-	.follow_link	= proc_follow_link,
+-};
+-
+ /*
+  * Don't create negative dentries here, return -ENOENT by hand
+  * instead.
+diff --git a/fs/proc/inode.c b/fs/proc/inode.c
+index 8420a2f80811..3b0f8384ab21 100644
+--- a/fs/proc/inode.c
++++ b/fs/proc/inode.c
+@@ -23,6 +23,7 @@
+ #include <linux/slab.h>
+ #include <linux/mount.h>
+ #include <linux/magic.h>
++#include <linux/namei.h>
+ 
+ #include <asm/uaccess.h>
+ 
+@@ -393,6 +394,26 @@ static const struct file_operations proc_reg_file_ops_no_compat = {
+ };
+ #endif
+ 
++static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
++{
++	struct proc_dir_entry *pde = PDE(dentry->d_inode);
++	if (unlikely(!use_pde(pde)))
++		return ERR_PTR(-EINVAL);
++	nd_set_link(nd, pde->data);
++	return pde;
++}
++
++static void proc_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
++{
++	unuse_pde(p);
++}
++
++const struct inode_operations proc_link_inode_operations = {
++	.readlink	= generic_readlink,
++	.follow_link	= proc_follow_link,
++	.put_link	= proc_put_link,
++};
++
+ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
+ {
+ 	struct inode *inode = new_inode_pseudo(sb);
+diff --git a/fs/proc/internal.h b/fs/proc/internal.h
+index 6fcdba573e0f..c835b94c0cd3 100644
+--- a/fs/proc/internal.h
++++ b/fs/proc/internal.h
+@@ -200,6 +200,7 @@ struct pde_opener {
+ 	int closing;
+ 	struct completion *c;
+ };
++extern const struct inode_operations proc_link_inode_operations;
+ 
+ extern const struct inode_operations proc_pid_link_inode_operations;
+ 
+diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
+index 180ad0e6de21..d016dc57f007 100644
+--- a/include/drm/i915_pciids.h
++++ b/include/drm/i915_pciids.h
+@@ -214,9 +214,9 @@
+ 	INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info)
+ 
+ #define _INTEL_BDW_M_IDS(gt, info) \
+-	_INTEL_BDW_M(gt, 0x1602, info), /* ULT */ \
++	_INTEL_BDW_M(gt, 0x1602, info), /* Halo */ \
+ 	_INTEL_BDW_M(gt, 0x1606, info), /* ULT */ \
+-	_INTEL_BDW_M(gt, 0x160B, info), /* Iris */ \
++	_INTEL_BDW_M(gt, 0x160B, info), /* ULT */ \
+ 	_INTEL_BDW_M(gt, 0x160E, info) /* ULX */
+ 
+ #define _INTEL_BDW_D_IDS(gt, info) \
+diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
+index 9bb547c7bce7..704a1ab8240c 100644
+--- a/include/linux/usb/serial.h
++++ b/include/linux/usb/serial.h
+@@ -190,8 +190,7 @@ static inline void usb_set_serial_data(struct usb_serial *serial, void *data)
+  * @num_ports: the number of different ports this device will have.
+  * @bulk_in_size: minimum number of bytes to allocate for bulk-in buffer
+  *	(0 = end-point size)
+- * @bulk_out_size: minimum number of bytes to allocate for bulk-out buffer
+- *	(0 = end-point size)
++ * @bulk_out_size: bytes to allocate for bulk-out buffer (0 = end-point size)
+  * @calc_num_ports: pointer to a function to determine how many ports this
+  *	device has dynamically.  It will be called after the probe()
+  *	callback is called, but before attach()
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index 6e416f6d3e3c..fde3b593c3f2 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -671,8 +671,6 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
+ 	return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
+ }
+ 
+-u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst,
+-			struct in6_addr *src);
+ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
+ void ipv6_proxy_select_ident(struct sk_buff *skb);
+ 
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index 4a8795a87b9e..672150b6aaf5 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -407,7 +407,7 @@ struct t10_reservation {
+ 	/* Activate Persistence across Target Power Loss enabled
+ 	 * for SCSI device */
+ 	int pr_aptpl_active;
+-#define PR_APTPL_BUF_LEN			8192
++#define PR_APTPL_BUF_LEN			262144
+ 	u32 pr_generation;
+ 	spinlock_t registration_lock;
+ 	spinlock_t aptpl_reg_lock;
+diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
+index aece1346ceb7..4ad10baecd4d 100644
+--- a/include/trace/events/kmem.h
++++ b/include/trace/events/kmem.h
+@@ -268,11 +268,11 @@ TRACE_EVENT(mm_page_alloc_extfrag,
+ 
+ 	TP_PROTO(struct page *page,
+ 		int alloc_order, int fallback_order,
+-		int alloc_migratetype, int fallback_migratetype, int new_migratetype),
++		int alloc_migratetype, int fallback_migratetype),
+ 
+ 	TP_ARGS(page,
+ 		alloc_order, fallback_order,
+-		alloc_migratetype, fallback_migratetype, new_migratetype),
++		alloc_migratetype, fallback_migratetype),
+ 
+ 	TP_STRUCT__entry(
+ 		__field(	struct page *,	page			)
+@@ -289,7 +289,8 @@ TRACE_EVENT(mm_page_alloc_extfrag,
+ 		__entry->fallback_order		= fallback_order;
+ 		__entry->alloc_migratetype	= alloc_migratetype;
+ 		__entry->fallback_migratetype	= fallback_migratetype;
+-		__entry->change_ownership	= (new_migratetype == alloc_migratetype);
++		__entry->change_ownership	= (alloc_migratetype ==
++					get_pageblock_migratetype(page));
+ 	),
+ 
+ 	TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 7c98873a3077..41d53e515914 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1193,7 +1193,8 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ 	set_current_state(TASK_RUNNING);
+ 
+ 	if (unlikely(ret)) {
+-		remove_waiter(lock, &waiter);
++		if (rt_mutex_has_waiters(lock))
++			remove_waiter(lock, &waiter);
+ 		rt_mutex_handle_deadlock(ret, chwalk, &waiter);
+ 	}
+ 
+diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
+index 8a2e230fb86a..eae160dd669d 100644
+--- a/kernel/sched/auto_group.c
++++ b/kernel/sched/auto_group.c
+@@ -87,8 +87,7 @@ static inline struct autogroup *autogroup_create(void)
+ 	 * so we don't have to move tasks around upon policy change,
+ 	 * or flail around trying to allocate bandwidth on the fly.
+ 	 * A bandwidth exception in __sched_setscheduler() allows
+-	 * the policy change to proceed.  Thereafter, task_group()
+-	 * returns &root_task_group, so zero bandwidth is required.
++	 * the policy change to proceed.
+ 	 */
+ 	free_rt_sched_group(tg);
+ 	tg->rt_se = root_task_group.rt_se;
+@@ -115,9 +114,6 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
+ 	if (tg != &root_task_group)
+ 		return false;
+ 
+-	if (p->sched_class != &fair_sched_class)
+-		return false;
+-
+ 	/*
+ 	 * We can only assume the task group can't go away on us if
+ 	 * autogroup_move_group() can see us on ->thread_group list.
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 5eab11d4b747..44dfc8b46bd0 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -490,6 +490,11 @@ static __init void init_hrtick(void)
+  */
+ void hrtick_start(struct rq *rq, u64 delay)
+ {
++	/*
++	 * Don't schedule slices shorter than 10000ns; that just
++	 * doesn't make sense. Rely on vruntime for fairness.
++	 */
++	delay = max_t(u64, delay, 10000LL);
+ 	__hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
+ 			HRTIMER_MODE_REL_PINNED, 0);
+ }
+@@ -7613,6 +7618,12 @@ static inline int tg_has_rt_tasks(struct task_group *tg)
+ {
+ 	struct task_struct *g, *p;
+ 
++	/*
++	 * Autogroups do not have RT tasks; see autogroup_create().
++	 */
++	if (task_group_is_autogroup(tg))
++		return 0;
++
+ 	for_each_process_thread(g, p) {
+ 		if (rt_task(p) && task_group(p) == tg)
+ 			return 1;
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 137c7f69b264..88ea2d6e0031 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -1248,7 +1248,6 @@ static struct ctl_table vm_table[] = {
+ 		.maxlen		= sizeof(unsigned long),
+ 		.mode		= 0644,
+ 		.proc_handler	= hugetlb_sysctl_handler,
+-		.extra1		= &zero,
+ 	},
+ #ifdef CONFIG_NUMA
+ 	{
+@@ -1257,7 +1256,6 @@ static struct ctl_table vm_table[] = {
+ 		.maxlen         = sizeof(unsigned long),
+ 		.mode           = 0644,
+ 		.proc_handler   = &hugetlb_mempolicy_sysctl_handler,
+-		.extra1		= &zero,
+ 	},
+ #endif
+ 	 {
+@@ -1280,7 +1278,6 @@ static struct ctl_table vm_table[] = {
+ 		.maxlen		= sizeof(unsigned long),
+ 		.mode		= 0644,
+ 		.proc_handler	= hugetlb_overcommit_handler,
+-		.extra1		= &zero,
+ 	},
+ #endif
+ 	{
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 546e571e9d60..91357ff6d3a8 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -1015,8 +1015,10 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
+ 		low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn,
+ 								isolate_mode);
+ 
+-		if (!low_pfn || cc->contended)
++		if (!low_pfn || cc->contended) {
++			acct_isolated(zone, cc);
+ 			return ISOLATE_ABORT;
++		}
+ 
+ 		/*
+ 		 * Either we isolated something and proceed with migration. Or
+@@ -1088,7 +1090,7 @@ static int compact_finished(struct zone *zone, struct compact_control *cc,
+ 			return COMPACT_PARTIAL;
+ 
+ 		/* Job done if allocation would set block type */
+-		if (cc->order >= pageblock_order && area->nr_free)
++		if (order >= pageblock_order && area->nr_free)
+ 			return COMPACT_PARTIAL;
+ 	}
+ 
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index c49586f40758..267e41971100 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2657,9 +2657,10 @@ again:
+ 			goto unlock;
+ 
+ 		/*
+-		 * HWPoisoned hugepage is already unmapped and dropped reference
++		 * Migrating hugepage or HWPoisoned hugepage is already
++		 * unmapped and its refcount is dropped, so just clear pte here.
+ 		 */
+-		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
++		if (unlikely(!pte_present(pte))) {
+ 			huge_pte_clear(mm, address, ptep);
+ 			goto unlock;
+ 		}
+@@ -3134,6 +3135,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ 	struct page *pagecache_page = NULL;
+ 	struct hstate *h = hstate_vma(vma);
+ 	struct address_space *mapping;
++	int need_wait_lock = 0;
+ 
+ 	address &= huge_page_mask(h);
+ 
+@@ -3172,6 +3174,16 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ 	ret = 0;
+ 
+ 	/*
++	 * entry could be a migration/hwpoison entry at this point, so this
++	 * check prevents the kernel from proceeding below on the assumption
++	 * that we have an active hugepage in the pagecache. This goto expects
++	 * the 2nd page fault, where the is_hugetlb_entry_(migration|hwpoisoned)
++	 * check will properly handle it.
++	 */
++	if (!pte_present(entry))
++		goto out_mutex;
++
++	/*
+ 	 * If we are going to COW the mapping later, we examine the pending
+ 	 * reservations for this page now. This will ensure that any
+ 	 * allocations necessary to record that reservation occur outside the
+@@ -3190,30 +3202,31 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ 								vma, address);
+ 	}
+ 
++	ptl = huge_pte_lock(h, mm, ptep);
++
++	/* Check for a racing update before calling hugetlb_cow */
++	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
++		goto out_ptl;
++
+ 	/*
+ 	 * hugetlb_cow() requires page locks of pte_page(entry) and
+ 	 * pagecache_page, so here we need take the former one
+ 	 * when page != pagecache_page or !pagecache_page.
+-	 * Note that locking order is always pagecache_page -> page,
+-	 * so no worry about deadlock.
+ 	 */
+ 	page = pte_page(entry);
+-	get_page(page);
+ 	if (page != pagecache_page)
+-		lock_page(page);
+-
+-	ptl = huge_pte_lockptr(h, mm, ptep);
+-	spin_lock(ptl);
+-	/* Check for a racing update before calling hugetlb_cow */
+-	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
+-		goto out_ptl;
++		if (!trylock_page(page)) {
++			need_wait_lock = 1;
++			goto out_ptl;
++		}
+ 
++	get_page(page);
+ 
+ 	if (flags & FAULT_FLAG_WRITE) {
+ 		if (!huge_pte_write(entry)) {
+ 			ret = hugetlb_cow(mm, vma, address, ptep, entry,
+ 					pagecache_page, ptl);
+-			goto out_ptl;
++			goto out_put_page;
+ 		}
+ 		entry = huge_pte_mkdirty(entry);
+ 	}
+@@ -3221,7 +3234,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ 	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
+ 						flags & FAULT_FLAG_WRITE))
+ 		update_mmu_cache(vma, address, ptep);
+-
++out_put_page:
++	if (page != pagecache_page)
++		unlock_page(page);
++	put_page(page);
+ out_ptl:
+ 	spin_unlock(ptl);
+ 
+@@ -3229,12 +3245,17 @@ out_ptl:
+ 		unlock_page(pagecache_page);
+ 		put_page(pagecache_page);
+ 	}
+-	if (page != pagecache_page)
+-		unlock_page(page);
+-	put_page(page);
+-
+ out_mutex:
+ 	mutex_unlock(&htlb_fault_mutex_table[hash]);
++	/*
++	 * Generally it's safe to hold a refcount while waiting for the page
++	 * lock. But here we only wait to defer the next page fault and avoid a
++	 * busy loop; the page is not used after being unlocked before the
++	 * current page fault returns. So we are safe from accessing a freed
++	 * page, even if we wait here without taking a refcount.
++	 */
++	if (need_wait_lock)
++		wait_on_page_locked(page);
+ 	return ret;
+ }
+ 
+@@ -3364,7 +3385,26 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
+ 			spin_unlock(ptl);
+ 			continue;
+ 		}
+-		if (!huge_pte_none(huge_ptep_get(ptep))) {
++		pte = huge_ptep_get(ptep);
++		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
++			spin_unlock(ptl);
++			continue;
++		}
++		if (unlikely(is_hugetlb_entry_migration(pte))) {
++			swp_entry_t entry = pte_to_swp_entry(pte);
++
++			if (is_write_migration_entry(entry)) {
++				pte_t newpte;
++
++				make_migration_entry_read(&entry);
++				newpte = swp_entry_to_pte(entry);
++				set_huge_pte_at(mm, address, ptep, newpte);
++				pages++;
++			}
++			spin_unlock(ptl);
++			continue;
++		}
++		if (!huge_pte_none(pte)) {
+ 			pte = huge_ptep_get_and_clear(mm, address, ptep);
+ 			pte = pte_mkhuge(huge_pte_modify(pte, newprot));
+ 			pte = arch_make_huge_pte(pte, vma, NULL, 0);
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index feb803bf3443..20c29ddff17b 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1654,8 +1654,6 @@ static int __soft_offline_page(struct page *page, int flags)
+ 			 * setting PG_hwpoison.
+ 			 */
+ 			if (!is_free_buddy_page(page))
+-				lru_add_drain_all();
+-			if (!is_free_buddy_page(page))
+ 				drain_all_pages(page_zone(page));
+ 			SetPageHWPoison(page);
+ 			if (!is_free_buddy_page(page))
+diff --git a/mm/memory.c b/mm/memory.c
+index 2c3536cc6c63..6aa7822bb64d 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3561,7 +3561,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+ 	if (follow_phys(vma, addr, write, &prot, &phys_addr))
+ 		return -EINVAL;
+ 
+-	maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
++	maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
+ 	if (write)
+ 		memcpy_toio(maddr + offset, buf, len);
+ 	else
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 7f684d5a8087..e5cc3ca1d869 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -152,7 +152,7 @@ EXPORT_SYMBOL_GPL(vm_memory_committed);
+  */
+ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
+ {
+-	unsigned long free, allowed, reserve;
++	long free, allowed, reserve;
+ 
+ 	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
+ 			-(s64)vm_committed_as_batch * num_online_cpus(),
+@@ -220,7 +220,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
+ 	 */
+ 	if (mm) {
+ 		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
+-		allowed -= min(mm->total_vm / 32, reserve);
++		allowed -= min_t(long, mm->total_vm / 32, reserve);
+ 	}
+ 
+ 	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
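The switch from unsigned long to long above, with the matching min_t(long, ...) cast, matters because these quantities can legitimately go negative: with unsigned arithmetic a deficit wraps to a huge positive value and the overcommit check wrongly succeeds. The same change is repeated for the nommu variant just below. A toy demonstration:

    #include <stdio.h>

    int main(void)
    {
        unsigned long u_allowed = 100;
        long s_allowed = 100;

        u_allowed -= 200; /* wraps to a huge positive value */
        s_allowed -= 200; /* -100, as intended */

        printf("unsigned check passes: %d\n", 150UL < u_allowed); /* 1: bug */
        printf("signed check passes:   %d\n", 150L < s_allowed);  /* 0: fixed */
        return 0;
    }
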
+diff --git a/mm/nommu.c b/mm/nommu.c
+index 28bd8c4dff6f..ae5baae8e212 100644
+--- a/mm/nommu.c
++++ b/mm/nommu.c
+@@ -1189,11 +1189,9 @@ static int do_mmap_private(struct vm_area_struct *vma,
+ 	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
+ 		total = point;
+ 		kdebug("try to alloc exact %lu pages", total);
+-		base = alloc_pages_exact(len, GFP_KERNEL);
+-	} else {
+-		base = (void *)__get_free_pages(GFP_KERNEL, order);
+ 	}
+ 
++	base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL);
+ 	if (!base)
+ 		goto enomem;
+ 
+@@ -1895,7 +1893,7 @@ EXPORT_SYMBOL(unmap_mapping_range);
+  */
+ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
+ {
+-	unsigned long free, allowed, reserve;
++	long free, allowed, reserve;
+ 
+ 	vm_acct_memory(pages);
+ 
+@@ -1959,7 +1957,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
+ 	 */
+ 	if (mm) {
+ 		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
+-		allowed -= min(mm->total_vm / 32, reserve);
++		allowed -= min_t(long, mm->total_vm / 32, reserve);
+ 	}
+ 
+ 	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 8e20f9c2fa5a..8bbef06de720 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1138,8 +1138,8 @@ static void change_pageblock_range(struct page *pageblock_page,
+  * nor move CMA pages to different free lists. We don't want unmovable pages
+  * to be allocated from MIGRATE_CMA areas.
+  *
+- * Returns the new migratetype of the pageblock (or the same old migratetype
+- * if it was unchanged).
++ * Returns the allocation migratetype if free pages were stolen, or the
++ * fallback migratetype if it was decided not to steal.
+  */
+ static int try_to_steal_freepages(struct zone *zone, struct page *page,
+ 				  int start_type, int fallback_type)
+@@ -1170,12 +1170,10 @@ static int try_to_steal_freepages(struct zone *zone, struct page *page,
+ 
+ 		/* Claim the whole block if over half of it is free */
+ 		if (pages >= (1 << (pageblock_order-1)) ||
+-				page_group_by_mobility_disabled) {
+-
++				page_group_by_mobility_disabled)
+ 			set_pageblock_migratetype(page, start_type);
+-			return start_type;
+-		}
+ 
++		return start_type;
+ 	}
+ 
+ 	return fallback_type;
+@@ -1227,7 +1225,7 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
+ 			set_freepage_migratetype(page, new_type);
+ 
+ 			trace_mm_page_alloc_extfrag(page, order, current_order,
+-				start_migratetype, migratetype, new_type);
++				start_migratetype, migratetype);
+ 
+ 			return page;
+ 		}
+@@ -2382,8 +2380,15 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
+ 		if (high_zoneidx < ZONE_NORMAL)
+ 			goto out;
+ 		/* The OOM killer does not compensate for light reclaim */
+-		if (!(gfp_mask & __GFP_FS))
++		if (!(gfp_mask & __GFP_FS)) {
++			/*
++			 * XXX: Page reclaim didn't yield anything,
++			 * and the OOM killer can't be invoked, but
++			 * keep looping as per should_alloc_retry().
++			 */
++			*did_some_progress = 1;
+ 			goto out;
++		}
+ 		/*
+ 		 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
+ 		 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
+diff --git a/mm/vmstat.c b/mm/vmstat.c
+index 1284f89fca08..cdac77398880 100644
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -1450,7 +1450,7 @@ static void __init start_shepherd_timer(void)
+ 	int cpu;
+ 
+ 	for_each_possible_cpu(cpu)
+-		INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
++		INIT_DELAYED_WORK(per_cpu_ptr(&vmstat_work, cpu),
+ 			vmstat_update);
+ 
+ 	if (!alloc_cpumask_var(&cpu_stat_off, GFP_KERNEL))
+diff --git a/net/compat.c b/net/compat.c
+index 3236b4167a32..94d3d5e97883 100644
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -711,24 +711,18 @@ static unsigned char nas[21] = {
+ 
+ COMPAT_SYSCALL_DEFINE3(sendmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags)
+ {
+-	if (flags & MSG_CMSG_COMPAT)
+-		return -EINVAL;
+ 	return __sys_sendmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
+ }
+ 
+ COMPAT_SYSCALL_DEFINE4(sendmmsg, int, fd, struct compat_mmsghdr __user *, mmsg,
+ 		       unsigned int, vlen, unsigned int, flags)
+ {
+-	if (flags & MSG_CMSG_COMPAT)
+-		return -EINVAL;
+ 	return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
+ 			      flags | MSG_CMSG_COMPAT);
+ }
+ 
+ COMPAT_SYSCALL_DEFINE3(recvmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags)
+ {
+-	if (flags & MSG_CMSG_COMPAT)
+-		return -EINVAL;
+ 	return __sys_recvmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
+ }
+ 
+@@ -751,9 +745,6 @@ COMPAT_SYSCALL_DEFINE5(recvmmsg, int, fd, struct compat_mmsghdr __user *, mmsg,
+ 	int datagrams;
+ 	struct timespec ktspec;
+ 
+-	if (flags & MSG_CMSG_COMPAT)
+-		return -EINVAL;
+-
+ 	if (timeout == NULL)
+ 		return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
+ 				      flags | MSG_CMSG_COMPAT, NULL);
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 7fe82929f509..4ff46f8054d4 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -945,7 +945,7 @@ bool dev_valid_name(const char *name)
+ 		return false;
+ 
+ 	while (*name) {
+-		if (*name == '/' || isspace(*name))
++		if (*name == '/' || *name == ':' || isspace(*name))
+ 			return false;
+ 		name++;
+ 	}
+diff --git a/net/core/flow.c b/net/core/flow.c
+index a0348fde1fdf..1033725be40b 100644
+--- a/net/core/flow.c
++++ b/net/core/flow.c
+@@ -379,7 +379,7 @@ done:
+ static void flow_cache_flush_task(struct work_struct *work)
+ {
+ 	struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
+-						flow_cache_gc_work);
++						flow_cache_flush_work);
+ 	struct net *net = container_of(xfrm, struct net, xfrm);
+ 
+ 	flow_cache_flush(net);
+diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
+index 0c08062d1796..1e2f46a69d50 100644
+--- a/net/core/gen_stats.c
++++ b/net/core/gen_stats.c
+@@ -32,6 +32,9 @@ gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size)
+ 	return 0;
+ 
+ nla_put_failure:
++	kfree(d->xstats);
++	d->xstats = NULL;
++	d->xstats_len = 0;
+ 	spin_unlock_bh(d->lock);
+ 	return -1;
+ }
+@@ -305,7 +308,9 @@ int
+ gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
+ {
+ 	if (d->compat_xstats) {
+-		d->xstats = st;
++		d->xstats = kmemdup(st, len, GFP_ATOMIC);
++		if (!d->xstats)
++			goto err_out;
+ 		d->xstats_len = len;
+ 	}
+ 
+@@ -313,6 +318,11 @@ gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
+ 		return gnet_stats_copy(d, TCA_STATS_APP, st, len);
+ 
+ 	return 0;
++
++err_out:
++	d->xstats_len = 0;
++	spin_unlock_bh(d->lock);
++	return -1;
+ }
+ EXPORT_SYMBOL(gnet_stats_copy_app);
+ 
+@@ -345,6 +355,9 @@ gnet_stats_finish_copy(struct gnet_dump *d)
+ 			return -1;
+ 	}
+ 
++	kfree(d->xstats);
++	d->xstats = NULL;
++	d->xstats_len = 0;
+ 	spin_unlock_bh(d->lock);
+ 	return 0;
+ }
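The gen_stats changes above fix a lifetime bug: d->xstats used to point at the caller's buffer, which can be gone by the time the dump is finalized. Taking a private copy with kmemdup() and freeing it both on the failure path and when the dump finishes makes ownership unambiguous. The pattern reduced to plain C (kmemdup() approximated with malloc/memcpy, names invented):

    #include <stdlib.h>
    #include <string.h>

    struct dump { void *xstats; int xstats_len; };

    static int copy_app_stats(struct dump *d, const void *st, int len)
    {
        d->xstats = malloc(len); /* kmemdup() equivalent */
        if (!d->xstats) {
            d->xstats_len = 0;
            return -1;
        }
        memcpy(d->xstats, st, len);
        d->xstats_len = len;
        return 0;
    }

    static void finish_copy(struct dump *d)
    {
        free(d->xstats); /* released on every exit path */
        d->xstats = NULL;
        d->xstats_len = 0;
    }

    int main(void)
    {
        struct dump d = { 0 };
        int st[4] = { 1, 2, 3, 4 };

        copy_app_stats(&d, st, sizeof(st));
        finish_copy(&d);
        return 0;
    }
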
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index da934fc3faa8..352d183ecba3 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -1134,6 +1134,9 @@ static ssize_t pktgen_if_write(struct file *file,
+ 			return len;
+ 
+ 		i += len;
++		if ((value > 1) &&
++		    (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
++			return -ENOTSUPP;
+ 		pkt_dev->burst = value < 1 ? 1 : value;
+ 		sprintf(pg_result, "OK: burst=%d", pkt_dev->burst);
+ 		return count;
+@@ -2842,25 +2845,25 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
+ 	skb->dev = odev;
+ 	skb->pkt_type = PACKET_HOST;
+ 
++	pktgen_finalize_skb(pkt_dev, skb, datalen);
++
+ 	if (!(pkt_dev->flags & F_UDPCSUM)) {
+ 		skb->ip_summed = CHECKSUM_NONE;
+ 	} else if (odev->features & NETIF_F_V4_CSUM) {
+ 		skb->ip_summed = CHECKSUM_PARTIAL;
+ 		skb->csum = 0;
+-		udp4_hwcsum(skb, udph->source, udph->dest);
++		udp4_hwcsum(skb, iph->saddr, iph->daddr);
+ 	} else {
+-		__wsum csum = udp_csum(skb);
++		__wsum csum = skb_checksum(skb, skb_transport_offset(skb), datalen + 8, 0);
+ 
+ 		/* add protocol-dependent pseudo-header */
+-		udph->check = csum_tcpudp_magic(udph->source, udph->dest,
++		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
+ 						datalen + 8, IPPROTO_UDP, csum);
+ 
+ 		if (udph->check == 0)
+ 			udph->check = CSUM_MANGLED_0;
+ 	}
+ 
+-	pktgen_finalize_skb(pkt_dev, skb, datalen);
+-
+ #ifdef CONFIG_XFRM
+ 	if (!process_ipsec(pkt_dev, skb, protocol))
+ 		return NULL;
+@@ -2976,6 +2979,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
+ 	skb->dev = odev;
+ 	skb->pkt_type = PACKET_HOST;
+ 
++	pktgen_finalize_skb(pkt_dev, skb, datalen);
++
+ 	if (!(pkt_dev->flags & F_UDPCSUM)) {
+ 		skb->ip_summed = CHECKSUM_NONE;
+ 	} else if (odev->features & NETIF_F_V6_CSUM) {
+@@ -2984,7 +2989,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
+ 		skb->csum_offset = offsetof(struct udphdr, check);
+ 		udph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, 0);
+ 	} else {
+-		__wsum csum = udp_csum(skb);
++		__wsum csum = skb_checksum(skb, skb_transport_offset(skb), udplen, 0);
+ 
+ 		/* add protocol-dependent pseudo-header */
+ 		udph->check = csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, csum);
+@@ -2993,8 +2998,6 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
+ 			udph->check = CSUM_MANGLED_0;
+ 	}
+ 
+-	pktgen_finalize_skb(pkt_dev, skb, datalen);
+-
+ 	return skb;
+ }
+ 
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 446cbaf81185..76ec6c52e3a3 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -1237,18 +1237,12 @@ static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = {
+ };
+ 
+ static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
+-	[IFLA_VF_MAC]		= { .type = NLA_BINARY,
+-				    .len = sizeof(struct ifla_vf_mac) },
+-	[IFLA_VF_VLAN]		= { .type = NLA_BINARY,
+-				    .len = sizeof(struct ifla_vf_vlan) },
+-	[IFLA_VF_TX_RATE]	= { .type = NLA_BINARY,
+-				    .len = sizeof(struct ifla_vf_tx_rate) },
+-	[IFLA_VF_SPOOFCHK]	= { .type = NLA_BINARY,
+-				    .len = sizeof(struct ifla_vf_spoofchk) },
+-	[IFLA_VF_RATE]		= { .type = NLA_BINARY,
+-				    .len = sizeof(struct ifla_vf_rate) },
+-	[IFLA_VF_LINK_STATE]	= { .type = NLA_BINARY,
+-				    .len = sizeof(struct ifla_vf_link_state) },
++	[IFLA_VF_MAC]		= { .len = sizeof(struct ifla_vf_mac) },
++	[IFLA_VF_VLAN]		= { .len = sizeof(struct ifla_vf_vlan) },
++	[IFLA_VF_TX_RATE]	= { .len = sizeof(struct ifla_vf_tx_rate) },
++	[IFLA_VF_SPOOFCHK]	= { .len = sizeof(struct ifla_vf_spoofchk) },
++	[IFLA_VF_RATE]		= { .len = sizeof(struct ifla_vf_rate) },
++	[IFLA_VF_LINK_STATE]	= { .len = sizeof(struct ifla_vf_link_state) },
+ };
+ 
+ static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
+@@ -1280,7 +1274,6 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+ 	s_h = cb->args[0];
+ 	s_idx = cb->args[1];
+ 
+-	rcu_read_lock();
+ 	cb->seq = net->dev_base_seq;
+ 
+ 	/* A hack to preserve kernel<->userspace interface.
+@@ -1302,7 +1295,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+ 	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
+ 		idx = 0;
+ 		head = &net->dev_index_head[h];
+-		hlist_for_each_entry_rcu(dev, head, index_hlist) {
++		hlist_for_each_entry(dev, head, index_hlist) {
+ 			if (idx < s_idx)
+ 				goto cont;
+ 			err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
+@@ -1324,7 +1317,6 @@ cont:
+ 		}
+ 	}
+ out:
+-	rcu_read_unlock();
+ 	cb->args[1] = idx;
+ 	cb->args[0] = h;
+ 
+@@ -2130,8 +2122,16 @@ replay:
+ 			}
+ 		}
+ 		err = rtnl_configure_link(dev, ifm);
+-		if (err < 0)
+-			unregister_netdevice(dev);
++		if (err < 0) {
++			if (ops->newlink) {
++				LIST_HEAD(list_kill);
++
++				ops->dellink(dev, &list_kill);
++				unregister_netdevice_many(&list_kill);
++			} else {
++				unregister_netdevice(dev);
++			}
++		}
+ out:
+ 		put_net(dest_net);
+ 		return err;
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 395c15b82087..62c67bebcaf5 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3623,13 +3623,14 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
+ {
+ 	struct sk_buff_head *q = &sk->sk_error_queue;
+ 	struct sk_buff *skb, *skb_next;
++	unsigned long flags;
+ 	int err = 0;
+ 
+-	spin_lock_bh(&q->lock);
++	spin_lock_irqsave(&q->lock, flags);
+ 	skb = __skb_dequeue(q);
+ 	if (skb && (skb_next = skb_peek(q)))
+ 		err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
+-	spin_unlock_bh(&q->lock);
++	spin_unlock_irqrestore(&q->lock, flags);
+ 
+ 	sk->sk_err = err;
+ 	if (err)
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index e5b6d0ddcb58..145a50c4d566 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -659,27 +659,30 @@ EXPORT_SYMBOL(ip_defrag);
+ struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
+ {
+ 	struct iphdr iph;
++	int netoff;
+ 	u32 len;
+ 
+ 	if (skb->protocol != htons(ETH_P_IP))
+ 		return skb;
+ 
+-	if (!skb_copy_bits(skb, 0, &iph, sizeof(iph)))
++	netoff = skb_network_offset(skb);
++
++	if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0)
+ 		return skb;
+ 
+ 	if (iph.ihl < 5 || iph.version != 4)
+ 		return skb;
+ 
+ 	len = ntohs(iph.tot_len);
+-	if (skb->len < len || len < (iph.ihl * 4))
++	if (skb->len < netoff + len || len < (iph.ihl * 4))
+ 		return skb;
+ 
+ 	if (ip_is_fragment(&iph)) {
+ 		skb = skb_share_check(skb, GFP_ATOMIC);
+ 		if (skb) {
+-			if (!pskb_may_pull(skb, iph.ihl*4))
++			if (!pskb_may_pull(skb, netoff + iph.ihl * 4))
+ 				return skb;
+-			if (pskb_trim_rcsum(skb, len))
++			if (pskb_trim_rcsum(skb, netoff + len))
+ 				return skb;
+ 			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+ 			if (ip_defrag(skb, user))
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index c373c0708d97..2e2f687ef8a2 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -890,7 +890,8 @@ static int __ip_append_data(struct sock *sk,
+ 	cork->length += length;
+ 	if (((length > mtu) || (skb && skb_is_gso(skb))) &&
+ 	    (sk->sk_protocol == IPPROTO_UDP) &&
+-	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
++	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
++	    (sk->sk_type == SOCK_DGRAM)) {
+ 		err = ip_ufo_append_data(sk, queue, getfrag, from, length,
+ 					 hh_len, fragheaderlen, transhdrlen,
+ 					 maxfraglen, flags);
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 2a3720fb5a5f..0ae28f517a9b 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -259,6 +259,9 @@ int ping_init_sock(struct sock *sk)
+ 	kgid_t low, high;
+ 	int ret = 0;
+ 
++	if (sk->sk_family == AF_INET6)
++		sk->sk_ipv6only = 1;
++
+ 	inet_get_ping_group_range_net(net, &low, &high);
+ 	if (gid_lte(low, group) && gid_lte(group, high))
+ 		return 0;
+@@ -305,6 +308,11 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
+ 		if (addr_len < sizeof(*addr))
+ 			return -EINVAL;
+ 
++		if (addr->sin_family != AF_INET &&
++		    !(addr->sin_family == AF_UNSPEC &&
++		      addr->sin_addr.s_addr == htonl(INADDR_ANY)))
++			return -EAFNOSUPPORT;
++
+ 		pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n",
+ 			 sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port));
+ 
+@@ -330,7 +338,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
+ 			return -EINVAL;
+ 
+ 		if (addr->sin6_family != AF_INET6)
+-			return -EINVAL;
++			return -EAFNOSUPPORT;
+ 
+ 		pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n",
+ 			 sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port));
+@@ -716,7 +724,7 @@ static int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
+ 		if (msg->msg_namelen < sizeof(*usin))
+ 			return -EINVAL;
+ 		if (usin->sin_family != AF_INET)
+-			return -EINVAL;
++			return -EAFNOSUPPORT;
+ 		daddr = usin->sin_addr.s_addr;
+ 		/* no remote port */
+ 	} else {
+diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
+index 815c85e3b1e0..c73077280ad4 100644
+--- a/net/ipv4/tcp_fastopen.c
++++ b/net/ipv4/tcp_fastopen.c
+@@ -134,6 +134,7 @@ static bool tcp_fastopen_create_child(struct sock *sk,
+ 	struct tcp_sock *tp;
+ 	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
+ 	struct sock *child;
++	u32 end_seq;
+ 
+ 	req->num_retrans = 0;
+ 	req->num_timeout = 0;
+@@ -185,20 +186,35 @@ static bool tcp_fastopen_create_child(struct sock *sk,
+ 
+ 	/* Queue the data carried in the SYN packet. We need to first
+ 	 * bump skb's refcnt because the caller will attempt to free it.
++	 * Note that IPv6 might also have used skb_get() trick
++	 * in tcp_v6_conn_request() to keep this SYN around (treq->pktopts)
++	 * So we need to eventually get a clone of the packet,
++	 * before inserting it in sk_receive_queue.
+ 	 *
+ 	 * XXX (TFO) - we honor a zero-payload TFO request for now,
+ 	 * (any reason not to?) but no need to queue the skb since
+ 	 * there is no data. How about SYN+FIN?
+ 	 */
+-	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1) {
+-		skb = skb_get(skb);
+-		skb_dst_drop(skb);
+-		__skb_pull(skb, tcp_hdr(skb)->doff * 4);
+-		skb_set_owner_r(skb, child);
+-		__skb_queue_tail(&child->sk_receive_queue, skb);
+-		tp->syn_data_acked = 1;
++	end_seq = TCP_SKB_CB(skb)->end_seq;
++	if (end_seq != TCP_SKB_CB(skb)->seq + 1) {
++		struct sk_buff *skb2;
++
++		if (unlikely(skb_shared(skb)))
++			skb2 = skb_clone(skb, GFP_ATOMIC);
++		else
++			skb2 = skb_get(skb);
++
++		if (likely(skb2)) {
++			skb_dst_drop(skb2);
++			__skb_pull(skb2, tcp_hdrlen(skb));
++			skb_set_owner_r(skb2, child);
++			__skb_queue_tail(&child->sk_receive_queue, skb2);
++			tp->syn_data_acked = 1;
++		} else {
++			end_seq = TCP_SKB_CB(skb)->seq + 1;
++		}
+ 	}
+-	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
++	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = end_seq;
+ 	sk->sk_data_ready(sk);
+ 	bh_unlock_sock(child);
+ 	sock_put(child);
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index f7c8bbeb27b7..dac94195a4b9 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -4572,6 +4572,22 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
+ 	return 0;
+ }
+ 
++static const struct nla_policy inet6_af_policy[IFLA_INET6_MAX + 1] = {
++	[IFLA_INET6_ADDR_GEN_MODE]	= { .type = NLA_U8 },
++	[IFLA_INET6_TOKEN]		= { .len = sizeof(struct in6_addr) },
++};
++
++static int inet6_validate_link_af(const struct net_device *dev,
++				  const struct nlattr *nla)
++{
++	struct nlattr *tb[IFLA_INET6_MAX + 1];
++
++	if (dev && !__in6_dev_get(dev))
++		return -EAFNOSUPPORT;
++
++	return nla_parse_nested(tb, IFLA_INET6_MAX, nla, inet6_af_policy);
++}
++
+ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
+ {
+ 	int err = -EINVAL;
+@@ -5393,6 +5409,7 @@ static struct rtnl_af_ops inet6_ops = {
+ 	.family		  = AF_INET6,
+ 	.fill_link_af	  = inet6_fill_link_af,
+ 	.get_link_af_size = inet6_get_link_af_size,
++	.validate_link_af = inet6_validate_link_af,
+ 	.set_link_af	  = inet6_set_link_af,
+ };
+ 
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index d28f2a2efb32..3f5aa9959076 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1283,7 +1283,8 @@ emsgsize:
+ 	if (((length > mtu) ||
+ 	     (skb && skb_is_gso(skb))) &&
+ 	    (sk->sk_protocol == IPPROTO_UDP) &&
+-	    (rt->dst.dev->features & NETIF_F_UFO)) {
++	    (rt->dst.dev->features & NETIF_F_UFO) &&
++	    (sk->sk_type == SOCK_DGRAM)) {
+ 		err = ip6_ufo_append_data(sk, getfrag, from, length,
+ 					  hh_len, fragheaderlen,
+ 					  transhdrlen, mtu, flags, rt);
+diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
+index 54520a0bd5e3..74581f706c4d 100644
+--- a/net/ipv6/output_core.c
++++ b/net/ipv6/output_core.c
+@@ -9,7 +9,8 @@
+ #include <net/addrconf.h>
+ #include <net/secure_seq.h>
+ 
+-u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst, struct in6_addr *src)
++static u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst,
++			       struct in6_addr *src)
+ {
+ 	u32 hash, id;
+ 
+@@ -54,7 +55,7 @@ void ipv6_proxy_select_ident(struct sk_buff *skb)
+ 
+ 	id = __ipv6_select_ident(ip6_proxy_idents_hashrnd,
+ 				 &addrs[1], &addrs[0]);
+-	skb_shinfo(skb)->ip6_frag_id = id;
++	skb_shinfo(skb)->ip6_frag_id = htonl(id);
+ }
+ EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
+ 
+diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
+index 2d3148378a1f..fe7e3e403499 100644
+--- a/net/ipv6/ping.c
++++ b/net/ipv6/ping.c
+@@ -102,9 +102,10 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ 
+ 	if (msg->msg_name) {
+ 		DECLARE_SOCKADDR(struct sockaddr_in6 *, u, msg->msg_name);
+-		if (msg->msg_namelen < sizeof(struct sockaddr_in6) ||
+-		    u->sin6_family != AF_INET6) {
++		if (msg->msg_namelen < sizeof(*u))
+ 			return -EINVAL;
++		if (u->sin6_family != AF_INET6) {
++			return -EAFNOSUPPORT;
+ 		}
+ 		if (sk->sk_bound_dev_if &&
+ 		    sk->sk_bound_dev_if != u->sin6_scope_id) {
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 495965358d22..1528d8404cd2 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -141,7 +141,7 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
+ 	u32 *p = NULL;
+ 
+ 	if (!(rt->dst.flags & DST_HOST))
+-		return NULL;
++		return dst_cow_metrics_generic(dst, old);
+ 
+ 	peer = rt6_get_peer_create(rt);
+ 	if (peer) {
+diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
+index 40695b9751c1..4efe486baee6 100644
+--- a/net/irda/ircomm/ircomm_tty.c
++++ b/net/irda/ircomm/ircomm_tty.c
+@@ -798,7 +798,9 @@ static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout)
+ 	orig_jiffies = jiffies;
+ 
+ 	/* Set poll time to 200 ms */
+-	poll_time = IRDA_MIN(timeout, msecs_to_jiffies(200));
++	poll_time = msecs_to_jiffies(200);
++	if (timeout)
++		poll_time = min_t(unsigned long, timeout, poll_time);
+ 
+ 	spin_lock_irqsave(&self->spinlock, flags);
+ 	while (self->tx_skb && self->tx_skb->len) {
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 837a406a9dd6..bf4b84ff1c70 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -1054,8 +1054,6 @@ static void ieee80211_chswitch_post_beacon(struct ieee80211_sub_if_data *sdata)
+ 		sdata->csa_block_tx = false;
+ 	}
+ 
+-	cfg80211_ch_switch_notify(sdata->dev, &sdata->reserved_chandef);
+-
+ 	sdata->vif.csa_active = false;
+ 	ifmgd->csa_waiting_bcn = false;
+ 
+@@ -1067,6 +1065,8 @@ static void ieee80211_chswitch_post_beacon(struct ieee80211_sub_if_data *sdata)
+ 				     &ifmgd->csa_connection_drop_work);
+ 		return;
+ 	}
++
++	cfg80211_ch_switch_notify(sdata->dev, &sdata->reserved_chandef);
+ }
+ 
+ void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success)
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 058686a721a1..097821b1c9ca 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -566,6 +566,7 @@ ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx)
+ 		if (tx->sdata->control_port_no_encrypt)
+ 			info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ 		info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
++		info->flags |= IEEE80211_TX_CTL_USE_MINRATE;
+ 	}
+ 
+ 	return TX_CONTINUE;
+diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
+index 30dbe34915ae..a9faae89f955 100644
+--- a/net/netfilter/xt_recent.c
++++ b/net/netfilter/xt_recent.c
+@@ -43,29 +43,25 @@ MODULE_LICENSE("GPL");
+ MODULE_ALIAS("ipt_recent");
+ MODULE_ALIAS("ip6t_recent");
+ 
+-static unsigned int ip_list_tot __read_mostly = 100;
+-static unsigned int ip_list_hash_size __read_mostly;
+-static unsigned int ip_list_perms __read_mostly = 0644;
+-static unsigned int ip_list_uid __read_mostly;
+-static unsigned int ip_list_gid __read_mostly;
++static unsigned int ip_list_tot = 100;
++static unsigned int ip_pkt_list_tot = 20;
++static unsigned int ip_list_hash_size = 0;
++static unsigned int ip_list_perms = 0644;
++static unsigned int ip_list_uid = 0;
++static unsigned int ip_list_gid = 0;
+ module_param(ip_list_tot, uint, 0400);
++module_param(ip_pkt_list_tot, uint, 0400);
+ module_param(ip_list_hash_size, uint, 0400);
+ module_param(ip_list_perms, uint, 0400);
+ module_param(ip_list_uid, uint, S_IRUGO | S_IWUSR);
+ module_param(ip_list_gid, uint, S_IRUGO | S_IWUSR);
+ MODULE_PARM_DESC(ip_list_tot, "number of IPs to remember per list");
++MODULE_PARM_DESC(ip_pkt_list_tot, "number of packets per IP address to remember (max. 255)");
+ MODULE_PARM_DESC(ip_list_hash_size, "size of hash table used to look up IPs");
+ MODULE_PARM_DESC(ip_list_perms, "permissions on /proc/net/xt_recent/* files");
+ MODULE_PARM_DESC(ip_list_uid, "default owner of /proc/net/xt_recent/* files");
+ MODULE_PARM_DESC(ip_list_gid, "default owning group of /proc/net/xt_recent/* files");
+ 
+-/* retained for backwards compatibility */
+-static unsigned int ip_pkt_list_tot __read_mostly;
+-module_param(ip_pkt_list_tot, uint, 0400);
+-MODULE_PARM_DESC(ip_pkt_list_tot, "number of packets per IP address to remember (max. 255)");
+-
+-#define XT_RECENT_MAX_NSTAMPS	256
+-
+ struct recent_entry {
+ 	struct list_head	list;
+ 	struct list_head	lru_list;
+@@ -83,7 +79,6 @@ struct recent_table {
+ 	union nf_inet_addr	mask;
+ 	unsigned int		refcnt;
+ 	unsigned int		entries;
+-	u8			nstamps_max_mask;
+ 	struct list_head	lru_list;
+ 	struct list_head	iphash[0];
+ };
+@@ -95,8 +90,7 @@ struct recent_net {
+ #endif
+ };
+ 
+-static int recent_net_id __read_mostly;
+-
++static int recent_net_id;
+ static inline struct recent_net *recent_pernet(struct net *net)
+ {
+ 	return net_generic(net, recent_net_id);
+@@ -177,15 +171,12 @@ recent_entry_init(struct recent_table *t, const union nf_inet_addr *addr,
+ 		  u_int16_t family, u_int8_t ttl)
+ {
+ 	struct recent_entry *e;
+-	unsigned int nstamps_max = t->nstamps_max_mask;
+ 
+ 	if (t->entries >= ip_list_tot) {
+ 		e = list_entry(t->lru_list.next, struct recent_entry, lru_list);
+ 		recent_entry_remove(t, e);
+ 	}
+-
+-	nstamps_max += 1;
+-	e = kmalloc(sizeof(*e) + sizeof(e->stamps[0]) * nstamps_max,
++	e = kmalloc(sizeof(*e) + sizeof(e->stamps[0]) * ip_pkt_list_tot,
+ 		    GFP_ATOMIC);
+ 	if (e == NULL)
+ 		return NULL;
+@@ -206,7 +197,7 @@ recent_entry_init(struct recent_table *t, const union nf_inet_addr *addr,
+ 
+ static void recent_entry_update(struct recent_table *t, struct recent_entry *e)
+ {
+-	e->index &= t->nstamps_max_mask;
++	e->index %= ip_pkt_list_tot;
+ 	e->stamps[e->index++] = jiffies;
+ 	if (e->index > e->nstamps)
+ 		e->nstamps = e->index;
+@@ -335,7 +326,6 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
+ 	kuid_t uid;
+ 	kgid_t gid;
+ #endif
+-	unsigned int nstamp_mask;
+ 	unsigned int i;
+ 	int ret = -EINVAL;
+ 	size_t sz;
+@@ -359,33 +349,19 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
+ 		return -EINVAL;
+ 	if ((info->check_set & XT_RECENT_REAP) && !info->seconds)
+ 		return -EINVAL;
+-	if (info->hit_count >= XT_RECENT_MAX_NSTAMPS) {
+-		pr_info("hitcount (%u) is larger than allowed maximum (%u)\n",
+-			info->hit_count, XT_RECENT_MAX_NSTAMPS - 1);
++	if (info->hit_count > ip_pkt_list_tot) {
++		pr_info("hitcount (%u) is larger than "
++			"packets to be remembered (%u)\n",
++			info->hit_count, ip_pkt_list_tot);
+ 		return -EINVAL;
+ 	}
+ 	if (info->name[0] == '\0' ||
+ 	    strnlen(info->name, XT_RECENT_NAME_LEN) == XT_RECENT_NAME_LEN)
+ 		return -EINVAL;
+ 
+-	if (ip_pkt_list_tot && info->hit_count < ip_pkt_list_tot)
+-		nstamp_mask = roundup_pow_of_two(ip_pkt_list_tot) - 1;
+-	else if (info->hit_count)
+-		nstamp_mask = roundup_pow_of_two(info->hit_count) - 1;
+-	else
+-		nstamp_mask = 32 - 1;
+-
+ 	mutex_lock(&recent_mutex);
+ 	t = recent_table_lookup(recent_net, info->name);
+ 	if (t != NULL) {
+-		if (info->hit_count > t->nstamps_max_mask) {
+-			pr_info("hitcount (%u) is larger than packets to be remembered (%u) for table %s\n",
+-				info->hit_count, t->nstamps_max_mask + 1,
+-				info->name);
+-			ret = -EINVAL;
+-			goto out;
+-		}
+-
+ 		t->refcnt++;
+ 		ret = 0;
+ 		goto out;
+@@ -401,7 +377,6 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
+ 		goto out;
+ 	}
+ 	t->refcnt = 1;
+-	t->nstamps_max_mask = nstamp_mask;
+ 
+ 	memcpy(&t->mask, &info->mask, sizeof(t->mask));
+ 	strcpy(t->name, info->name);
+@@ -522,12 +497,9 @@ static void recent_seq_stop(struct seq_file *s, void *v)
+ static int recent_seq_show(struct seq_file *seq, void *v)
+ {
+ 	const struct recent_entry *e = v;
+-	struct recent_iter_state *st = seq->private;
+-	const struct recent_table *t = st->table;
+ 	unsigned int i;
+ 
+-	i = (e->index - 1) & t->nstamps_max_mask;
+-
++	i = (e->index - 1) % ip_pkt_list_tot;
+ 	if (e->family == NFPROTO_IPV4)
+ 		seq_printf(seq, "src=%pI4 ttl: %u last_seen: %lu oldest_pkt: %u",
+ 			   &e->addr.ip, e->ttl, e->stamps[i], e->index);
+@@ -745,9 +717,7 @@ static int __init recent_mt_init(void)
+ {
+ 	int err;
+ 
+-	BUILD_BUG_ON_NOT_POWER_OF_2(XT_RECENT_MAX_NSTAMPS);
+-
+-	if (!ip_list_tot || ip_pkt_list_tot >= XT_RECENT_MAX_NSTAMPS)
++	if (!ip_list_tot || !ip_pkt_list_tot || ip_pkt_list_tot > 255)
+ 		return -EINVAL;
+ 	ip_list_hash_size = 1 << fls(ip_list_tot);
+ 
+diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
+index b07349e82d78..58a6ef58c017 100644
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -2113,14 +2113,55 @@ static int __net_init ovs_init_net(struct net *net)
+ 	return 0;
+ }
+ 
+-static void __net_exit ovs_exit_net(struct net *net)
++static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
++					    struct list_head *head)
+ {
+-	struct datapath *dp, *dp_next;
+ 	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
++	struct datapath *dp;
++
++	list_for_each_entry(dp, &ovs_net->dps, list_node) {
++		int i;
++
++		for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
++			struct vport *vport;
++
++			hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
++				struct netdev_vport *netdev_vport;
++
++				if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
++					continue;
++
++				netdev_vport = netdev_vport_priv(vport);
++				if (dev_net(netdev_vport->dev) == dnet)
++					list_add(&vport->detach_list, head);
++			}
++		}
++	}
++}
++
++static void __net_exit ovs_exit_net(struct net *dnet)
++{
++	struct datapath *dp, *dp_next;
++	struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id);
++	struct vport *vport, *vport_next;
++	struct net *net;
++	LIST_HEAD(head);
+ 
+ 	ovs_lock();
+ 	list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
+ 		__dp_destroy(dp);
++
++	rtnl_lock();
++	for_each_net(net)
++		list_vports_from_net(net, dnet, &head);
++	rtnl_unlock();
++
++	/* Detach all vports from given namespace. */
++	list_for_each_entry_safe(vport, vport_next, &head, detach_list) {
++		list_del(&vport->detach_list);
++		ovs_dp_detach_port(vport);
++	}
++
+ 	ovs_unlock();
+ 
+ 	cancel_work_sync(&ovs_net->dp_notify_work);
+diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
+index 99c8e71d9e6c..8a057d7a86b6 100644
+--- a/net/openvswitch/vport.h
++++ b/net/openvswitch/vport.h
+@@ -103,6 +103,7 @@ struct vport_portids {
+  * @ops: Class structure.
+  * @percpu_stats: Points to per-CPU statistics used and maintained by vport
+  * @err_stats: Points to error statistics used and maintained by vport
++ * @detach_list: list used for detaching vport in net-exit call.
+  */
+ struct vport {
+ 	struct rcu_head rcu;
+@@ -117,6 +118,7 @@ struct vport {
+ 	struct pcpu_sw_netstats __percpu *percpu_stats;
+ 
+ 	struct vport_err_stats err_stats;
++	struct list_head detach_list;
+ };
+ 
+ /**
+diff --git a/net/sched/ematch.c b/net/sched/ematch.c
+index 6742200b1307..fbb7ebfc58c6 100644
+--- a/net/sched/ematch.c
++++ b/net/sched/ematch.c
+@@ -228,6 +228,7 @@ static int tcf_em_validate(struct tcf_proto *tp,
+ 				 * to replay the request.
+ 				 */
+ 				module_put(em->ops->owner);
++				em->ops = NULL;
+ 				err = -EAGAIN;
+ 			}
+ #endif
+diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
+index 651f49ab601f..9dd0ea8db463 100644
+--- a/net/sunrpc/backchannel_rqst.c
++++ b/net/sunrpc/backchannel_rqst.c
+@@ -309,12 +309,15 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
+ 	struct rpc_xprt *xprt = req->rq_xprt;
+ 	struct svc_serv *bc_serv = xprt->bc_serv;
+ 
++	spin_lock(&xprt->bc_pa_lock);
++	list_del(&req->rq_bc_pa_list);
++	spin_unlock(&xprt->bc_pa_lock);
++
+ 	req->rq_private_buf.len = copied;
+ 	set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
+ 
+ 	dprintk("RPC:       add callback request to list\n");
+ 	spin_lock(&bc_serv->sv_cb_lock);
+-	list_del(&req->rq_bc_pa_list);
+ 	list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
+ 	wake_up(&bc_serv->sv_cb_waitq);
+ 	spin_unlock(&bc_serv->sv_cb_lock);
+diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
+index 33fb105d4352..5199bb1a017e 100644
+--- a/net/sunrpc/cache.c
++++ b/net/sunrpc/cache.c
+@@ -921,7 +921,7 @@ static unsigned int cache_poll(struct file *filp, poll_table *wait,
+ 	poll_wait(filp, &queue_wait, wait);
+ 
+ 	/* alway allow write */
+-	mask = POLL_OUT | POLLWRNORM;
++	mask = POLLOUT | POLLWRNORM;
+ 
+ 	if (!rp)
+ 		return mask;
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index 095d9572ad2b..64d9863d6565 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -1546,6 +1546,8 @@ static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state)
+ 			if (! snd_pcm_playback_empty(substream)) {
+ 				snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING);
+ 				snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING);
++			} else {
++				runtime->status->state = SNDRV_PCM_STATE_SETUP;
+ 			}
+ 			break;
+ 		case SNDRV_PCM_STATE_RUNNING:
+diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
+index 0d580186ef1a..911341b2a897 100644
+--- a/sound/firewire/amdtp.c
++++ b/sound/firewire/amdtp.c
+@@ -78,7 +78,7 @@ static void pcm_period_tasklet(unsigned long data);
+ int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
+ 		      enum amdtp_stream_direction dir, enum cip_flags flags)
+ {
+-	s->unit = fw_unit_get(unit);
++	s->unit = unit;
+ 	s->direction = dir;
+ 	s->flags = flags;
+ 	s->context = ERR_PTR(-1);
+@@ -102,7 +102,6 @@ void amdtp_stream_destroy(struct amdtp_stream *s)
+ {
+ 	WARN_ON(amdtp_stream_running(s));
+ 	mutex_destroy(&s->mutex);
+-	fw_unit_put(s->unit);
+ }
+ EXPORT_SYMBOL(amdtp_stream_destroy);
+ 
+diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
+index fc19c99654aa..611b7dae7ee5 100644
+--- a/sound/firewire/bebob/bebob.c
++++ b/sound/firewire/bebob/bebob.c
+@@ -116,11 +116,22 @@ end:
+ 	return err;
+ }
+ 
++/*
++ * This module releases the FireWire unit data after all ALSA character devices
++ * are released by applications. This is for releasing stream data or finishing
++ * transactions safely. Thus at returning from .remove(), this module still keep
++ * references for the unit.
++ */
+ static void
+ bebob_card_free(struct snd_card *card)
+ {
+ 	struct snd_bebob *bebob = card->private_data;
+ 
++	snd_bebob_stream_destroy_duplex(bebob);
++	fw_unit_put(bebob->unit);
++
++	kfree(bebob->maudio_special_quirk);
++
+ 	if (bebob->card_index >= 0) {
+ 		mutex_lock(&devices_mutex);
+ 		clear_bit(bebob->card_index, devices_used);
+@@ -205,7 +216,7 @@ bebob_probe(struct fw_unit *unit,
+ 	card->private_free = bebob_card_free;
+ 
+ 	bebob->card = card;
+-	bebob->unit = unit;
++	bebob->unit = fw_unit_get(unit);
+ 	bebob->spec = spec;
+ 	mutex_init(&bebob->mutex);
+ 	spin_lock_init(&bebob->lock);
+@@ -306,10 +317,11 @@ static void bebob_remove(struct fw_unit *unit)
+ 	if (bebob == NULL)
+ 		return;
+ 
+-	kfree(bebob->maudio_special_quirk);
++	/* Awake bus-reset waiters. */
++	if (!completion_done(&bebob->bus_reset))
++		complete_all(&bebob->bus_reset);
+ 
+-	snd_bebob_stream_destroy_duplex(bebob);
+-	snd_card_disconnect(bebob->card);
++	/* No need to wait for releasing card object in this context. */
+ 	snd_card_free_when_closed(bebob->card);
+ }
+ 
+diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c
+index 0ebcabfdc7ce..98e4fc8121a1 100644
+--- a/sound/firewire/bebob/bebob_stream.c
++++ b/sound/firewire/bebob/bebob_stream.c
+@@ -410,8 +410,6 @@ break_both_connections(struct snd_bebob *bebob)
+ static void
+ destroy_both_connections(struct snd_bebob *bebob)
+ {
+-	break_both_connections(bebob);
+-
+ 	cmp_connection_destroy(&bebob->in_conn);
+ 	cmp_connection_destroy(&bebob->out_conn);
+ }
+@@ -712,22 +710,16 @@ void snd_bebob_stream_update_duplex(struct snd_bebob *bebob)
+ 	mutex_unlock(&bebob->mutex);
+ }
+ 
++/*
++ * This function should be called before starting streams or after stopping
++ * streams.
++ */
+ void snd_bebob_stream_destroy_duplex(struct snd_bebob *bebob)
+ {
+-	mutex_lock(&bebob->mutex);
+-
+-	amdtp_stream_pcm_abort(&bebob->rx_stream);
+-	amdtp_stream_pcm_abort(&bebob->tx_stream);
+-
+-	amdtp_stream_stop(&bebob->rx_stream);
+-	amdtp_stream_stop(&bebob->tx_stream);
+-
+ 	amdtp_stream_destroy(&bebob->rx_stream);
+ 	amdtp_stream_destroy(&bebob->tx_stream);
+ 
+ 	destroy_both_connections(bebob);
+-
+-	mutex_unlock(&bebob->mutex);
+ }
+ 
+ /*
+diff --git a/sound/firewire/dice/dice-stream.c b/sound/firewire/dice/dice-stream.c
+index fa9cf761b610..07dbd01d7a6b 100644
+--- a/sound/firewire/dice/dice-stream.c
++++ b/sound/firewire/dice/dice-stream.c
+@@ -311,14 +311,21 @@ end:
+ 	return err;
+ }
+ 
++/*
++ * This function should be called before starting streams or after stopping
++ * streams.
++ */
+ static void destroy_stream(struct snd_dice *dice, struct amdtp_stream *stream)
+ {
+-	amdtp_stream_destroy(stream);
++	struct fw_iso_resources *resources;
+ 
+ 	if (stream == &dice->tx_stream)
+-		fw_iso_resources_destroy(&dice->tx_resources);
++		resources = &dice->tx_resources;
+ 	else
+-		fw_iso_resources_destroy(&dice->rx_resources);
++		resources = &dice->rx_resources;
++
++	amdtp_stream_destroy(stream);
++	fw_iso_resources_destroy(resources);
+ }
+ 
+ int snd_dice_stream_init_duplex(struct snd_dice *dice)
+@@ -332,6 +339,8 @@ int snd_dice_stream_init_duplex(struct snd_dice *dice)
+ 		goto end;
+ 
+ 	err = init_stream(dice, &dice->rx_stream);
++	if (err < 0)
++		destroy_stream(dice, &dice->tx_stream);
+ end:
+ 	return err;
+ }
+@@ -340,10 +349,7 @@ void snd_dice_stream_destroy_duplex(struct snd_dice *dice)
+ {
+ 	snd_dice_transaction_clear_enable(dice);
+ 
+-	stop_stream(dice, &dice->tx_stream);
+ 	destroy_stream(dice, &dice->tx_stream);
+-
+-	stop_stream(dice, &dice->rx_stream);
+ 	destroy_stream(dice, &dice->rx_stream);
+ 
+ 	dice->substreams_counter = 0;
+diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c
+index 90d8f40ff727..70a111d7f428 100644
+--- a/sound/firewire/dice/dice.c
++++ b/sound/firewire/dice/dice.c
+@@ -226,11 +226,20 @@ static void dice_card_strings(struct snd_dice *dice)
+ 	strcpy(card->mixername, "DICE");
+ }
+ 
++/*
++ * This module releases the FireWire unit data after all ALSA character devices
++ * are released by applications. This is for releasing stream data or finishing
++ * transactions safely. Thus at returning from .remove(), this module still keep
++ * references for the unit.
++ */
+ static void dice_card_free(struct snd_card *card)
+ {
+ 	struct snd_dice *dice = card->private_data;
+ 
++	snd_dice_stream_destroy_duplex(dice);
+ 	snd_dice_transaction_destroy(dice);
++	fw_unit_put(dice->unit);
++
+ 	mutex_destroy(&dice->mutex);
+ }
+ 
+@@ -251,7 +260,7 @@ static int dice_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
+ 
+ 	dice = card->private_data;
+ 	dice->card = card;
+-	dice->unit = unit;
++	dice->unit = fw_unit_get(unit);
+ 	card->private_free = dice_card_free;
+ 
+ 	spin_lock_init(&dice->lock);
+@@ -305,10 +314,7 @@ static void dice_remove(struct fw_unit *unit)
+ {
+ 	struct snd_dice *dice = dev_get_drvdata(&unit->device);
+ 
+-	snd_card_disconnect(dice->card);
+-
+-	snd_dice_stream_destroy_duplex(dice);
+-
++	/* No need to wait for releasing card object in this context. */
+ 	snd_card_free_when_closed(dice->card);
+ }
+ 
+diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c
+index 3e2ed8e82cbc..2682e7e3e5c9 100644
+--- a/sound/firewire/fireworks/fireworks.c
++++ b/sound/firewire/fireworks/fireworks.c
+@@ -173,11 +173,23 @@ end:
+ 	return err;
+ }
+ 
++/*
++ * This module releases the FireWire unit data after all ALSA character devices
++ * are released by applications. This is for releasing stream data or finishing
++ * transactions safely. Thus at returning from .remove(), this module still keep
++ * references for the unit.
++ */
+ static void
+ efw_card_free(struct snd_card *card)
+ {
+ 	struct snd_efw *efw = card->private_data;
+ 
++	snd_efw_stream_destroy_duplex(efw);
++	snd_efw_transaction_remove_instance(efw);
++	fw_unit_put(efw->unit);
++
++	kfree(efw->resp_buf);
++
+ 	if (efw->card_index >= 0) {
+ 		mutex_lock(&devices_mutex);
+ 		clear_bit(efw->card_index, devices_used);
+@@ -185,7 +197,6 @@ efw_card_free(struct snd_card *card)
+ 	}
+ 
+ 	mutex_destroy(&efw->mutex);
+-	kfree(efw->resp_buf);
+ }
+ 
+ static int
+@@ -218,7 +229,7 @@ efw_probe(struct fw_unit *unit,
+ 	card->private_free = efw_card_free;
+ 
+ 	efw->card = card;
+-	efw->unit = unit;
++	efw->unit = fw_unit_get(unit);
+ 	mutex_init(&efw->mutex);
+ 	spin_lock_init(&efw->lock);
+ 	init_waitqueue_head(&efw->hwdep_wait);
+@@ -289,10 +300,7 @@ static void efw_remove(struct fw_unit *unit)
+ {
+ 	struct snd_efw *efw = dev_get_drvdata(&unit->device);
+ 
+-	snd_efw_stream_destroy_duplex(efw);
+-	snd_efw_transaction_remove_instance(efw);
+-
+-	snd_card_disconnect(efw->card);
++	/* No need to wait for releasing card object in this context. */
+ 	snd_card_free_when_closed(efw->card);
+ }
+ 
+diff --git a/sound/firewire/fireworks/fireworks_stream.c b/sound/firewire/fireworks/fireworks_stream.c
+index 4f440e163667..c55db1bddc80 100644
+--- a/sound/firewire/fireworks/fireworks_stream.c
++++ b/sound/firewire/fireworks/fireworks_stream.c
+@@ -100,17 +100,22 @@ end:
+ 	return err;
+ }
+ 
++/*
++ * This function should be called before starting the stream or after stopping
++ * the streams.
++ */
+ static void
+ destroy_stream(struct snd_efw *efw, struct amdtp_stream *stream)
+ {
+-	stop_stream(efw, stream);
+-
+-	amdtp_stream_destroy(stream);
++	struct cmp_connection *conn;
+ 
+ 	if (stream == &efw->tx_stream)
+-		cmp_connection_destroy(&efw->out_conn);
++		conn = &efw->out_conn;
+ 	else
+-		cmp_connection_destroy(&efw->in_conn);
++		conn = &efw->in_conn;
++
++	amdtp_stream_destroy(stream);
++	cmp_connection_destroy(&efw->out_conn);
+ }
+ 
+ static int
+@@ -319,12 +324,8 @@ void snd_efw_stream_update_duplex(struct snd_efw *efw)
+ 
+ void snd_efw_stream_destroy_duplex(struct snd_efw *efw)
+ {
+-	mutex_lock(&efw->mutex);
+-
+ 	destroy_stream(efw, &efw->rx_stream);
+ 	destroy_stream(efw, &efw->tx_stream);
+-
+-	mutex_unlock(&efw->mutex);
+ }
+ 
+ void snd_efw_stream_lock_changed(struct snd_efw *efw)
+diff --git a/sound/firewire/oxfw/oxfw-stream.c b/sound/firewire/oxfw/oxfw-stream.c
+index bda845afb470..e6757cd85724 100644
+--- a/sound/firewire/oxfw/oxfw-stream.c
++++ b/sound/firewire/oxfw/oxfw-stream.c
+@@ -171,9 +171,10 @@ static int start_stream(struct snd_oxfw *oxfw, struct amdtp_stream *stream,
+ 	}
+ 
+ 	/* Wait first packet */
+-	err = amdtp_stream_wait_callback(stream, CALLBACK_TIMEOUT);
+-	if (err < 0)
++	if (!amdtp_stream_wait_callback(stream, CALLBACK_TIMEOUT)) {
+ 		stop_stream(oxfw, stream);
++		err = -ETIMEDOUT;
++	}
+ end:
+ 	return err;
+ }
+@@ -337,6 +338,10 @@ void snd_oxfw_stream_stop_simplex(struct snd_oxfw *oxfw,
+ 	stop_stream(oxfw, stream);
+ }
+ 
++/*
++ * This function should be called before starting the stream or after stopping
++ * the streams.
++ */
+ void snd_oxfw_stream_destroy_simplex(struct snd_oxfw *oxfw,
+ 				     struct amdtp_stream *stream)
+ {
+@@ -347,8 +352,6 @@ void snd_oxfw_stream_destroy_simplex(struct snd_oxfw *oxfw,
+ 	else
+ 		conn = &oxfw->in_conn;
+ 
+-	stop_stream(oxfw, stream);
+-
+ 	amdtp_stream_destroy(stream);
+ 	cmp_connection_destroy(conn);
+ }
+diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
+index 60e5cad0531a..8c6ce019f437 100644
+--- a/sound/firewire/oxfw/oxfw.c
++++ b/sound/firewire/oxfw/oxfw.c
+@@ -104,11 +104,23 @@ end:
+ 	return err;
+ }
+ 
++/*
++ * This module releases the FireWire unit data after all ALSA character devices
++ * are released by applications. This is for releasing stream data or finishing
++ * transactions safely. Thus at returning from .remove(), this module still keep
++ * references for the unit.
++ */
+ static void oxfw_card_free(struct snd_card *card)
+ {
+ 	struct snd_oxfw *oxfw = card->private_data;
+ 	unsigned int i;
+ 
++	snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream);
++	if (oxfw->has_output)
++		snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream);
++
++	fw_unit_put(oxfw->unit);
++
+ 	for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; i++) {
+ 		kfree(oxfw->tx_stream_formats[i]);
+ 		kfree(oxfw->rx_stream_formats[i]);
+@@ -136,7 +148,7 @@ static int oxfw_probe(struct fw_unit *unit,
+ 	oxfw = card->private_data;
+ 	oxfw->card = card;
+ 	mutex_init(&oxfw->mutex);
+-	oxfw->unit = unit;
++	oxfw->unit = fw_unit_get(unit);
+ 	oxfw->device_info = (const struct device_info *)id->driver_data;
+ 	spin_lock_init(&oxfw->lock);
+ 	init_waitqueue_head(&oxfw->hwdep_wait);
+@@ -212,12 +224,7 @@ static void oxfw_remove(struct fw_unit *unit)
+ {
+ 	struct snd_oxfw *oxfw = dev_get_drvdata(&unit->device);
+ 
+-	snd_card_disconnect(oxfw->card);
+-
+-	snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream);
+-	if (oxfw->has_output)
+-		snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream);
+-
++	/* No need to wait for releasing card object in this context. */
+ 	snd_card_free_when_closed(oxfw->card);
+ }
+ 
+diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
+index 0cfc9c8c4b4e..c2aa3cd844e5 100644
+--- a/sound/pci/hda/hda_controller.c
++++ b/sound/pci/hda/hda_controller.c
+@@ -957,7 +957,6 @@ static int azx_alloc_cmd_io(struct azx *chip)
+ 		dev_err(chip->card->dev, "cannot allocate CORB/RIRB\n");
+ 	return err;
+ }
+-EXPORT_SYMBOL_GPL(azx_alloc_cmd_io);
+ 
+ static void azx_init_cmd_io(struct azx *chip)
+ {
+@@ -1022,7 +1021,6 @@ static void azx_init_cmd_io(struct azx *chip)
+ 	azx_writeb(chip, RIRBCTL, AZX_RBCTL_DMA_EN | AZX_RBCTL_IRQ_EN);
+ 	spin_unlock_irq(&chip->reg_lock);
+ }
+-EXPORT_SYMBOL_GPL(azx_init_cmd_io);
+ 
+ static void azx_free_cmd_io(struct azx *chip)
+ {
+@@ -1032,7 +1030,6 @@ static void azx_free_cmd_io(struct azx *chip)
+ 	azx_writeb(chip, CORBCTL, 0);
+ 	spin_unlock_irq(&chip->reg_lock);
+ }
+-EXPORT_SYMBOL_GPL(azx_free_cmd_io);
+ 
+ static unsigned int azx_command_addr(u32 cmd)
+ {
+@@ -1312,7 +1309,6 @@ static int azx_send_cmd(struct hda_bus *bus, unsigned int val)
+ 	else
+ 		return azx_corb_send_cmd(bus, val);
+ }
+-EXPORT_SYMBOL_GPL(azx_send_cmd);
+ 
+ /* get a response */
+ static unsigned int azx_get_response(struct hda_bus *bus,
+@@ -1326,7 +1322,6 @@ static unsigned int azx_get_response(struct hda_bus *bus,
+ 	else
+ 		return azx_rirb_get_response(bus, addr);
+ }
+-EXPORT_SYMBOL_GPL(azx_get_response);
+ 
+ #ifdef CONFIG_SND_HDA_DSP_LOADER
+ /*
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index d426a0bd6a5f..84bed149a41a 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2004,7 +2004,7 @@ static const struct pci_device_id azx_ids[] = {
+ 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
+ 	/* Panther Point */
+ 	{ PCI_DEVICE(0x8086, 0x1e20),
+-	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
++	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
+ 	/* Lynx Point */
+ 	{ PCI_DEVICE(0x8086, 0x8c20),
+ 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 0c993f7f9181..d3e2fc700c5d 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5114,6 +5114,13 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ 		{0x17, 0x40000000},
+ 		{0x1d, 0x40700001},
+ 		{0x21, 0x02211040}),
++	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
++		ALC255_STANDARD_PINS,
++		{0x12, 0x90a60170},
++		{0x14, 0x90170140},
++		{0x17, 0x40000000},
++		{0x1d, 0x40700001},
++		{0x21, 0x02211050}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
+ 		{0x12, 0x90a60130},
+ 		{0x13, 0x40000000},
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index 6d36c5b78805..87eff3173ce9 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -79,6 +79,7 @@ enum {
+ 	STAC_ALIENWARE_M17X,
+ 	STAC_92HD89XX_HP_FRONT_JACK,
+ 	STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK,
++	STAC_92HD73XX_ASUS_MOBO,
+ 	STAC_92HD73XX_MODELS
+ };
+ 
+@@ -1911,7 +1912,18 @@ static const struct hda_fixup stac92hd73xx_fixups[] = {
+ 	[STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK] = {
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = stac92hd89xx_hp_z1_g2_right_mic_jack_pin_configs,
+-	}
++	},
++	[STAC_92HD73XX_ASUS_MOBO] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			/* enable 5.1 and SPDIF out */
++			{ 0x0c, 0x01014411 },
++			{ 0x0d, 0x01014410 },
++			{ 0x0e, 0x01014412 },
++			{ 0x22, 0x014b1180 },
++			{ }
++		}
++	},
+ };
+ 
+ static const struct hda_model_fixup stac92hd73xx_models[] = {
+@@ -1923,6 +1935,7 @@ static const struct hda_model_fixup stac92hd73xx_models[] = {
+ 	{ .id = STAC_DELL_M6_BOTH, .name = "dell-m6" },
+ 	{ .id = STAC_DELL_EQ, .name = "dell-eq" },
+ 	{ .id = STAC_ALIENWARE_M17X, .name = "alienware" },
++	{ .id = STAC_92HD73XX_ASUS_MOBO, .name = "asus-mobo" },
+ 	{}
+ };
+ 
+@@ -1975,6 +1988,8 @@ static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = {
+ 				"HP Z1 G2", STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK),
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17,
+ 				"unknown HP", STAC_92HD89XX_HP_FRONT_JACK),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_ASUSTEK, 0x83f8, "ASUS AT4NM10",
++		      STAC_92HD73XX_ASUS_MOBO),
+ 	{} /* terminator */
+ };
+ 
+diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
+index 1e574c85b9c8..f4b869c638cc 100644
+--- a/sound/soc/codecs/rt5670.c
++++ b/sound/soc/codecs/rt5670.c
+@@ -223,7 +223,6 @@ static bool rt5670_volatile_register(struct device *dev, unsigned int reg)
+ 	case RT5670_ADC_EQ_CTRL1:
+ 	case RT5670_EQ_CTRL1:
+ 	case RT5670_ALC_CTRL_1:
+-	case RT5670_IRQ_CTRL1:
+ 	case RT5670_IRQ_CTRL2:
+ 	case RT5670_INT_IRQ_ST:
+ 	case RT5670_IL_CMD:
+diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c
+index f4b05bc23e4b..1343ecbf0bd5 100644
+--- a/sound/soc/omap/omap-pcm.c
++++ b/sound/soc/omap/omap-pcm.c
+@@ -201,7 +201,7 @@ static int omap_pcm_new(struct snd_soc_pcm_runtime *rtd)
+ 	struct snd_pcm *pcm = rtd->pcm;
+ 	int ret;
+ 
+-	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(64));
++	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ 	if (ret)
+ 		return ret;
+ 

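The sound/firewire hunks above all enforce one ordering rule, spelled out in the repeated comment blocks: take a reference on the fw_unit in .probe(), defer every teardown step to the card's private_free callback, and let .remove() merely schedule the free. A minimal C sketch of that pattern follows, using the bebob names from the hunks. This is an illustration only, not the literal driver code: the *_sketch function names are invented for this note, the real logic lives in sound/firewire/bebob/ and needs the kernel headers, and error handling plus the remaining probe steps are omitted.

/*
 * Sketch of the release ordering the sound/firewire hunks establish.
 * Assumes the usual ALSA/FireWire kernel headers; *_sketch names are
 * hypothetical, everything else mirrors the bebob hunks above.
 */
static void bebob_card_free_sketch(struct snd_card *card)
{
	struct snd_bebob *bebob = card->private_data;

	/* Runs only after the last ALSA character device is released,
	 * so streams and transactions can be torn down safely here. */
	snd_bebob_stream_destroy_duplex(bebob);
	fw_unit_put(bebob->unit);	/* drop the reference taken in probe */
}

static int bebob_probe_sketch(struct fw_unit *unit, struct snd_card *card)
{
	struct snd_bebob *bebob = card->private_data;

	bebob->unit = fw_unit_get(unit);	/* keep the unit past .remove() */
	card->private_free = bebob_card_free_sketch;
	return 0;
}

static void bebob_remove_sketch(struct fw_unit *unit)
{
	struct snd_bebob *bebob = dev_get_drvdata(&unit->device);

	/* No teardown here; defer until all file handles are closed. */
	snd_card_free_when_closed(bebob->card);
}

The same probe/free/remove split is applied verbatim to the dice, fireworks and oxfw drivers in the hunks above.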


* [gentoo-commits] proj/linux-patches:3.19 commit in: /
@ 2015-03-21 20:01 Mike Pagano
  0 siblings, 0 replies; 19+ messages in thread
From: Mike Pagano @ 2015-03-21 20:01 UTC (permalink / raw
  To: gentoo-commits

commit:     8629a8873e8eb7f6032c575ed4b4646c023ce8f7
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Mar 21 20:01:20 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Mar 21 20:01:20 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8629a887

Update gcc >= 4.9 optimization patch. See bug #544028.

 ...-additional-cpu-optimizations-for-gcc-4.9.patch | 67 +++++++++++++---------
 1 file changed, 41 insertions(+), 26 deletions(-)

diff --git a/5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch b/5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
index f931f75..c4efd06 100644
--- a/5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
+++ b/5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
@@ -18,13 +18,14 @@ should use the newer 'march=bonnell' flag for atom processors.
 I have made that change to this patch set as well.  See the following kernel
 bug report to see if I'm right: https://bugzilla.kernel.org/show_bug.cgi?id=77461
 
-This patch will expand the number of microarchitectures to include new
+This patch will expand the number of microarchitectures to include newer
 processors including: AMD K10-family, AMD Family 10h (Barcelona), AMD Family
 14h (Bobcat), AMD Family 15h (Bulldozer), AMD Family 15h (Piledriver), AMD
 Family 16h (Jaguar), Intel 1st Gen Core i3/i5/i7 (Nehalem), Intel 1.5 Gen Core
 i3/i5/i7 (Westmere), Intel 2nd Gen Core i3/i5/i7 (Sandybridge), Intel 3rd Gen
-Core i3/i5/i7 (Ivybridge), Intel 4th Gen Core i3/i5/i7 (Haswell), and Intel 5th
-Gen Core i3/i5/i7 (Broadwell). It also offers the compiler the 'native' flag.
+Core i3/i5/i7 (Ivybridge), Intel 4th Gen Core i3/i5/i7 (Haswell), Intel 5th
+Gen Core i3/i5/i7 (Broadwell), and the low power Silvermont series of Atom
+processors (Silvermont). It also offers the compiler the 'native' flag.
 
 Small but real speed increases are measurable using a make endpoint comparing
 a generic kernel to one built with one of the respective microarchs.
@@ -36,9 +37,9 @@ REQUIREMENTS
 linux version >=3.15
 gcc version >=4.9
 
---- a/arch/x86/include/asm/module.h	2014-08-03 18:25:02.000000000 -0400
-+++ b/arch/x86/include/asm/module.h	2014-09-13 09:37:16.721385247 -0400
-@@ -15,6 +15,20 @@
+--- a/arch/x86/include/asm/module.h	2014-06-16 16:44:27.000000000 -0400
++++ b/arch/x86/include/asm/module.h	2015-03-07 03:27:32.556672424 -0500
+@@ -15,6 +15,22 @@
  #define MODULE_PROC_FAMILY "586MMX "
  #elif defined CONFIG_MCORE2
  #define MODULE_PROC_FAMILY "CORE2 "
@@ -48,6 +49,8 @@ gcc version >=4.9
 +#define MODULE_PROC_FAMILY "NEHALEM "
 +#elif defined CONFIG_MWESTMERE
 +#define MODULE_PROC_FAMILY "WESTMERE "
++#elif defined CONFIG_MSILVERMONT
++#define MODULE_PROC_FAMILY "SILVERMONT "
 +#elif defined CONFIG_MSANDYBRIDGE
 +#define MODULE_PROC_FAMILY "SANDYBRIDGE "
 +#elif defined CONFIG_MIVYBRIDGE
@@ -59,7 +62,7 @@ gcc version >=4.9
  #elif defined CONFIG_MATOM
  #define MODULE_PROC_FAMILY "ATOM "
  #elif defined CONFIG_M686
-@@ -33,6 +47,20 @@
+@@ -33,6 +49,20 @@
  #define MODULE_PROC_FAMILY "K7 "
  #elif defined CONFIG_MK8
  #define MODULE_PROC_FAMILY "K8 "
@@ -80,8 +83,8 @@ gcc version >=4.9
  #elif defined CONFIG_MELAN
  #define MODULE_PROC_FAMILY "ELAN "
  #elif defined CONFIG_MCRUSOE
---- a/arch/x86/Kconfig.cpu	2014-08-03 18:25:02.000000000 -0400
-+++ b/arch/x86/Kconfig.cpu	2014-09-13 09:37:16.721385247 -0400
+--- a/arch/x86/Kconfig.cpu	2014-06-16 16:44:27.000000000 -0400
++++ b/arch/x86/Kconfig.cpu	2015-03-07 03:32:14.337713226 -0500
 @@ -137,9 +137,8 @@ config MPENTIUM4
  		-Paxville
  		-Dempsey
@@ -185,7 +188,7 @@ gcc version >=4.9
  	---help---
  
  	  Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
-@@ -260,14 +318,55 @@ config MCORE2
+@@ -260,14 +318,63 @@ config MCORE2
  	  family in /proc/cpuinfo. Newer ones have 6 and older ones 15
  	  (not a typo)
  
@@ -213,6 +216,14 @@ gcc version >=4.9
 +
 +	  Enables -march=westmere
 +
++config MSILVERMONT
++	bool "Intel Silvermont"
++	---help---
++
++	  Select this for the Intel Silvermont platform.
++
++	  Enables -march=silvermont
++
 +config MSANDYBRIDGE
 +	bool "Intel Sandy Bridge"
 +	---help---
@@ -247,7 +258,7 @@ gcc version >=4.9
  
  config GENERIC_CPU
  	bool "Generic-x86-64"
-@@ -276,6 +375,19 @@ config GENERIC_CPU
+@@ -276,6 +383,19 @@ config GENERIC_CPU
  	  Generic x86-64 CPU.
  	  Run equally well on all x86-64 CPUs.
  
@@ -267,53 +278,53 @@ gcc version >=4.9
  endchoice
  
  config X86_GENERIC
-@@ -300,7 +412,7 @@ config X86_INTERNODE_CACHE_SHIFT
+@@ -300,7 +420,7 @@ config X86_INTERNODE_CACHE_SHIFT
  config X86_L1_CACHE_SHIFT
  	int
  	default "7" if MPENTIUM4 || MPSC
 -	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
-+	default "6" if MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MPENTIUMM || MCORE2 || MNEHALEM || MWESTMERE || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || BROADWELL || MNATIVE || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
++	default "6" if MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MPENTIUMM || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || BROADWELL || MNATIVE || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
  	default "4" if MELAN || M486 || MGEODEGX1
  	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
  
-@@ -331,11 +443,11 @@ config X86_ALIGNMENT_16
+@@ -331,11 +451,11 @@ config X86_ALIGNMENT_16
  
  config X86_INTEL_USERCOPY
  	def_bool y
 -	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
-+	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK8SSE3 || MK7 || MEFFICEON || MCORE2 || MK10 || MBARCELONA || MNEHALEM || MWESTMERE || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MNATIVE
++	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK8SSE3 || MK7 || MEFFICEON || MCORE2 || MK10 || MBARCELONA || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MNATIVE
  
  config X86_USE_PPRO_CHECKSUM
  	def_bool y
 -	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
-+	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MK10 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MATOM || MNATIVE
++	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MK10 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MATOM || MNATIVE
  
  config X86_USE_3DNOW
  	def_bool y
-@@ -359,17 +471,17 @@ config X86_P6_NOP
+@@ -359,17 +479,17 @@ config X86_P6_NOP
  
  config X86_TSC
  	def_bool y
 -	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
-+	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MNATIVE || MATOM) || X86_64
++	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MNATIVE || MATOM) || X86_64
  
  config X86_CMPXCHG64
  	def_bool y
 -	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM
-+	depends on X86_PAE || X86_64 || MCORE2 || MNEHALEM || MWESTMERE || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM || MNATIVE
++	depends on X86_PAE || X86_64 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM || MNATIVE
  
  # this should be set for all -march=.. options where the compiler
  # generates cmov.
  config X86_CMOV
  	def_bool y
 -	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
-+	depends on (MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MK7 || MCORE2 || MNEHALEM || MWESTMERE || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MNATIVE || MATOM || MGEODE_LX)
++	depends on (MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MK7 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MNATIVE || MATOM || MGEODE_LX)
  
  config X86_MINIMUM_CPU_FAMILY
  	int
---- a/arch/x86/Makefile	2014-08-03 18:25:02.000000000 -0400
-+++ b/arch/x86/Makefile	2014-09-13 09:37:16.721385247 -0400
-@@ -92,13 +92,33 @@ else
+--- a/arch/x86/Makefile	2014-06-16 16:44:27.000000000 -0400
++++ b/arch/x86/Makefile	2015-03-07 03:33:27.650843211 -0500
+@@ -92,13 +92,35 @@ else
  	KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
  
          # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
@@ -337,6 +348,8 @@ gcc version >=4.9
 +                $(call cc-option,-march=nehalem,$(call cc-option,-mtune=nehalem))
 +        cflags-$(CONFIG_MWESTMERE) += \
 +                $(call cc-option,-march=westmere,$(call cc-option,-mtune=westmere))
++        cflags-$(CONFIG_MSILVERMONT) += \
++                $(call cc-option,-march=silvermont,$(call cc-option,-mtune=silvermont))
 +        cflags-$(CONFIG_MSANDYBRIDGE) += \
 +                $(call cc-option,-march=sandybridge,$(call cc-option,-mtune=sandybridge))
 +        cflags-$(CONFIG_MIVYBRIDGE) += \
@@ -350,8 +363,8 @@ gcc version >=4.9
          cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
          KBUILD_CFLAGS += $(cflags-y)
  
---- a/arch/x86/Makefile_32.cpu	2014-08-03 18:25:02.000000000 -0400
-+++ b/arch/x86/Makefile_32.cpu	2014-09-13 09:37:16.721385247 -0400
+--- a/arch/x86/Makefile_32.cpu	2014-06-16 16:44:27.000000000 -0400
++++ b/arch/x86/Makefile_32.cpu	2015-03-07 03:34:15.203586024 -0500
 @@ -23,7 +23,15 @@ cflags-$(CONFIG_MK6)		+= -march=k6
  # Please note, that patches that add -march=athlon-xp and friends are pointless.
  # They make zero difference whatsosever to performance at this time.
@@ -368,7 +381,7 @@ gcc version >=4.9
  cflags-$(CONFIG_MCRUSOE)	+= -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
  cflags-$(CONFIG_MEFFICEON)	+= -march=i686 $(call tune,pentium3) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
  cflags-$(CONFIG_MWINCHIPC6)	+= $(call cc-option,-march=winchip-c6,-march=i586)
-@@ -32,8 +40,14 @@ cflags-$(CONFIG_MCYRIXIII)	+= $(call cc-
+@@ -32,8 +40,15 @@ cflags-$(CONFIG_MCYRIXIII)	+= $(call cc-
  cflags-$(CONFIG_MVIAC3_2)	+= $(call cc-option,-march=c3-2,-march=i686)
  cflags-$(CONFIG_MVIAC7)		+= -march=i686
  cflags-$(CONFIG_MCORE2)		+= -march=i686 $(call tune,core2)
@@ -376,6 +389,7 @@ gcc version >=4.9
 -	$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
 +cflags-$(CONFIG_MNEHALEM)	+= -march=i686 $(call tune,nehalem)
 +cflags-$(CONFIG_MWESTMERE)	+= -march=i686 $(call tune,westmere)
++cflags-$(CONFIG_MSILVERMONT)	+= -march=i686 $(call tune,silvermont)
 +cflags-$(CONFIG_MSANDYBRIDGE)	+= -march=i686 $(call tune,sandybridge)
 +cflags-$(CONFIG_MIVYBRIDGE)	+= -march=i686 $(call tune,ivybridge)
 +cflags-$(CONFIG_MHASWELL)	+= -march=i686 $(call tune,haswell)
@@ -385,3 +399,4 @@ gcc version >=4.9
  
  # AMD Elan support
  cflags-$(CONFIG_MELAN)		+= -march=i486
+

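For context, the hunks above wire each new processor option through three places: a Kconfig choice entry in arch/x86/Kconfig.cpu, a cc-option guarded cflags line in arch/x86/Makefile that falls back from -march to -mtune when the compiler lacks the target, and a MODULE_PROC_FAMILY entry in asm/module.h. A condensed C-preprocessor sketch of that last chain, abridged to a few of the symbols visible in the hunks (the full chain is in the module.h hunk; nothing here is new beyond the abridgement):

/* Abridged sketch of the MODULE_PROC_FAMILY chain this patch extends.
 * Exactly one CONFIG_M* symbol is set, chosen via the Kconfig choice
 * group, so exactly one branch of the chain is compiled in. */
#if defined(CONFIG_MNEHALEM)
#define MODULE_PROC_FAMILY "NEHALEM "
#elif defined(CONFIG_MSILVERMONT)	/* new with this revision of the patch */
#define MODULE_PROC_FAMILY "SILVERMONT "
#elif defined(CONFIG_MSANDYBRIDGE)
#define MODULE_PROC_FAMILY "SANDYBRIDGE "
#endif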


* [gentoo-commits] proj/linux-patches:3.19 commit in: /
@ 2015-03-26 21:08 Mike Pagano
  0 siblings, 0 replies; 19+ messages in thread
From: Mike Pagano @ 2015-03-26 21:08 UTC (permalink / raw
  To: gentoo-commits

commit:     f4b3f29666a9f4fa68d6e2495d12384fb070c26e
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Mar 26 21:07:51 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Mar 26 21:07:51 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=f4b3f296

Linux patch 3.19.3

 0000_README             |    4 +
 1002_linux-3.19.3.patch | 4081 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4085 insertions(+)

diff --git a/0000_README b/0000_README
index 48e21ed..513fccd 100644
--- a/0000_README
+++ b/0000_README
@@ -51,6 +51,10 @@ Patch:  1001_linux-3.19.2.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.19.2
 
+Patch:  1002_linux-3.19.3.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.19.3
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1002_linux-3.19.3.patch b/1002_linux-3.19.3.patch
new file mode 100644
index 0000000..42e848b
--- /dev/null
+++ b/1002_linux-3.19.3.patch
@@ -0,0 +1,4081 @@
+diff --git a/Makefile b/Makefile
+index e49665a2b5ac..713bf263952f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 19
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Diseased Newt
+ 
+diff --git a/arch/arm/boot/dts/am33xx-clocks.dtsi b/arch/arm/boot/dts/am33xx-clocks.dtsi
+index 712edce7d6fb..071b56aa0c7e 100644
+--- a/arch/arm/boot/dts/am33xx-clocks.dtsi
++++ b/arch/arm/boot/dts/am33xx-clocks.dtsi
+@@ -99,7 +99,7 @@
+ 	ehrpwm0_tbclk: ehrpwm0_tbclk@44e10664 {
+ 		#clock-cells = <0>;
+ 		compatible = "ti,gate-clock";
+-		clocks = <&dpll_per_m2_ck>;
++		clocks = <&l4ls_gclk>;
+ 		ti,bit-shift = <0>;
+ 		reg = <0x0664>;
+ 	};
+@@ -107,7 +107,7 @@
+ 	ehrpwm1_tbclk: ehrpwm1_tbclk@44e10664 {
+ 		#clock-cells = <0>;
+ 		compatible = "ti,gate-clock";
+-		clocks = <&dpll_per_m2_ck>;
++		clocks = <&l4ls_gclk>;
+ 		ti,bit-shift = <1>;
+ 		reg = <0x0664>;
+ 	};
+@@ -115,7 +115,7 @@
+ 	ehrpwm2_tbclk: ehrpwm2_tbclk@44e10664 {
+ 		#clock-cells = <0>;
+ 		compatible = "ti,gate-clock";
+-		clocks = <&dpll_per_m2_ck>;
++		clocks = <&l4ls_gclk>;
+ 		ti,bit-shift = <2>;
+ 		reg = <0x0664>;
+ 	};
+diff --git a/arch/arm/boot/dts/am43xx-clocks.dtsi b/arch/arm/boot/dts/am43xx-clocks.dtsi
+index c7dc9dab93a4..cfb49686ab6a 100644
+--- a/arch/arm/boot/dts/am43xx-clocks.dtsi
++++ b/arch/arm/boot/dts/am43xx-clocks.dtsi
+@@ -107,7 +107,7 @@
+ 	ehrpwm0_tbclk: ehrpwm0_tbclk {
+ 		#clock-cells = <0>;
+ 		compatible = "ti,gate-clock";
+-		clocks = <&dpll_per_m2_ck>;
++		clocks = <&l4ls_gclk>;
+ 		ti,bit-shift = <0>;
+ 		reg = <0x0664>;
+ 	};
+@@ -115,7 +115,7 @@
+ 	ehrpwm1_tbclk: ehrpwm1_tbclk {
+ 		#clock-cells = <0>;
+ 		compatible = "ti,gate-clock";
+-		clocks = <&dpll_per_m2_ck>;
++		clocks = <&l4ls_gclk>;
+ 		ti,bit-shift = <1>;
+ 		reg = <0x0664>;
+ 	};
+@@ -123,7 +123,7 @@
+ 	ehrpwm2_tbclk: ehrpwm2_tbclk {
+ 		#clock-cells = <0>;
+ 		compatible = "ti,gate-clock";
+-		clocks = <&dpll_per_m2_ck>;
++		clocks = <&l4ls_gclk>;
+ 		ti,bit-shift = <2>;
+ 		reg = <0x0664>;
+ 	};
+@@ -131,7 +131,7 @@
+ 	ehrpwm3_tbclk: ehrpwm3_tbclk {
+ 		#clock-cells = <0>;
+ 		compatible = "ti,gate-clock";
+-		clocks = <&dpll_per_m2_ck>;
++		clocks = <&l4ls_gclk>;
+ 		ti,bit-shift = <4>;
+ 		reg = <0x0664>;
+ 	};
+@@ -139,7 +139,7 @@
+ 	ehrpwm4_tbclk: ehrpwm4_tbclk {
+ 		#clock-cells = <0>;
+ 		compatible = "ti,gate-clock";
+-		clocks = <&dpll_per_m2_ck>;
++		clocks = <&l4ls_gclk>;
+ 		ti,bit-shift = <5>;
+ 		reg = <0x0664>;
+ 	};
+@@ -147,7 +147,7 @@
+ 	ehrpwm5_tbclk: ehrpwm5_tbclk {
+ 		#clock-cells = <0>;
+ 		compatible = "ti,gate-clock";
+-		clocks = <&dpll_per_m2_ck>;
++		clocks = <&l4ls_gclk>;
+ 		ti,bit-shift = <6>;
+ 		reg = <0x0664>;
+ 	};
+diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi
+index 4bdcbd61ce47..99b09a44e269 100644
+--- a/arch/arm/boot/dts/dra7xx-clocks.dtsi
++++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi
+@@ -243,10 +243,18 @@
+ 		ti,invert-autoidle-bit;
+ 	};
+ 
++	dpll_core_byp_mux: dpll_core_byp_mux {
++		#clock-cells = <0>;
++		compatible = "ti,mux-clock";
++		clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
++		ti,bit-shift = <23>;
++		reg = <0x012c>;
++	};
++
+ 	dpll_core_ck: dpll_core_ck {
+ 		#clock-cells = <0>;
+ 		compatible = "ti,omap4-dpll-core-clock";
+-		clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
++		clocks = <&sys_clkin1>, <&dpll_core_byp_mux>;
+ 		reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>;
+ 	};
+ 
+@@ -309,10 +317,18 @@
+ 		clock-div = <1>;
+ 	};
+ 
++	dpll_dsp_byp_mux: dpll_dsp_byp_mux {
++		#clock-cells = <0>;
++		compatible = "ti,mux-clock";
++		clocks = <&sys_clkin1>, <&dsp_dpll_hs_clk_div>;
++		ti,bit-shift = <23>;
++		reg = <0x0240>;
++	};
++
+ 	dpll_dsp_ck: dpll_dsp_ck {
+ 		#clock-cells = <0>;
+ 		compatible = "ti,omap4-dpll-clock";
+-		clocks = <&sys_clkin1>, <&dsp_dpll_hs_clk_div>;
++		clocks = <&sys_clkin1>, <&dpll_dsp_byp_mux>;
+ 		reg = <0x0234>, <0x0238>, <0x0240>, <0x023c>;
+ 	};
+ 
+@@ -335,10 +351,18 @@
+ 		clock-div = <1>;
+ 	};
+ 
++	dpll_iva_byp_mux: dpll_iva_byp_mux {
++		#clock-cells = <0>;
++		compatible = "ti,mux-clock";
++		clocks = <&sys_clkin1>, <&iva_dpll_hs_clk_div>;
++		ti,bit-shift = <23>;
++		reg = <0x01ac>;
++	};
++
+ 	dpll_iva_ck: dpll_iva_ck {
+ 		#clock-cells = <0>;
+ 		compatible = "ti,omap4-dpll-clock";
+-		clocks = <&sys_clkin1>, <&iva_dpll_hs_clk_div>;
++		clocks = <&sys_clkin1>, <&dpll_iva_byp_mux>;
+ 		reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>;
+ 	};
+ 
+@@ -361,10 +385,18 @@
+ 		clock-div = <1>;
+ 	};
+ 
++	dpll_gpu_byp_mux: dpll_gpu_byp_mux {
++		#clock-cells = <0>;
++		compatible = "ti,mux-clock";
++		clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
++		ti,bit-shift = <23>;
++		reg = <0x02e4>;
++	};
++
+ 	dpll_gpu_ck: dpll_gpu_ck {
+ 		#clock-cells = <0>;
+ 		compatible = "ti,omap4-dpll-clock";
+-		clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
++		clocks = <&sys_clkin1>, <&dpll_gpu_byp_mux>;
+ 		reg = <0x02d8>, <0x02dc>, <0x02e4>, <0x02e0>;
+ 	};
+ 
+@@ -398,10 +430,18 @@
+ 		clock-div = <1>;
+ 	};
+ 
++	dpll_ddr_byp_mux: dpll_ddr_byp_mux {
++		#clock-cells = <0>;
++		compatible = "ti,mux-clock";
++		clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
++		ti,bit-shift = <23>;
++		reg = <0x021c>;
++	};
++
+ 	dpll_ddr_ck: dpll_ddr_ck {
+ 		#clock-cells = <0>;
+ 		compatible = "ti,omap4-dpll-clock";
+-		clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
++		clocks = <&sys_clkin1>, <&dpll_ddr_byp_mux>;
+ 		reg = <0x0210>, <0x0214>, <0x021c>, <0x0218>;
+ 	};
+ 
+@@ -416,10 +456,18 @@
+ 		ti,invert-autoidle-bit;
+ 	};
+ 
++	dpll_gmac_byp_mux: dpll_gmac_byp_mux {
++		#clock-cells = <0>;
++		compatible = "ti,mux-clock";
++		clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
++		ti,bit-shift = <23>;
++		reg = <0x02b4>;
++	};
++
+ 	dpll_gmac_ck: dpll_gmac_ck {
+ 		#clock-cells = <0>;
+ 		compatible = "ti,omap4-dpll-clock";
+-		clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>;
++		clocks = <&sys_clkin1>, <&dpll_gmac_byp_mux>;
+ 		reg = <0x02a8>, <0x02ac>, <0x02b4>, <0x02b0>;
+ 	};
+ 
+@@ -482,10 +530,18 @@
+ 		clock-div = <1>;
+ 	};
+ 
++	dpll_eve_byp_mux: dpll_eve_byp_mux {
++		#clock-cells = <0>;
++		compatible = "ti,mux-clock";
++		clocks = <&sys_clkin1>, <&eve_dpll_hs_clk_div>;
++		ti,bit-shift = <23>;
++		reg = <0x0290>;
++	};
++
+ 	dpll_eve_ck: dpll_eve_ck {
+ 		#clock-cells = <0>;
+ 		compatible = "ti,omap4-dpll-clock";
+-		clocks = <&sys_clkin1>, <&eve_dpll_hs_clk_div>;
++		clocks = <&sys_clkin1>, <&dpll_eve_byp_mux>;
+ 		reg = <0x0284>, <0x0288>, <0x0290>, <0x028c>;
+ 	};
+ 
+@@ -1249,10 +1305,18 @@
+ 		clock-div = <1>;
+ 	};
+ 
++	dpll_per_byp_mux: dpll_per_byp_mux {
++		#clock-cells = <0>;
++		compatible = "ti,mux-clock";
++		clocks = <&sys_clkin1>, <&per_dpll_hs_clk_div>;
++		ti,bit-shift = <23>;
++		reg = <0x014c>;
++	};
++
+ 	dpll_per_ck: dpll_per_ck {
+ 		#clock-cells = <0>;
+ 		compatible = "ti,omap4-dpll-clock";
+-		clocks = <&sys_clkin1>, <&per_dpll_hs_clk_div>;
++		clocks = <&sys_clkin1>, <&dpll_per_byp_mux>;
+ 		reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>;
+ 	};
+ 
+@@ -1275,10 +1339,18 @@
+ 		clock-div = <1>;
+ 	};
+ 
++	dpll_usb_byp_mux: dpll_usb_byp_mux {
++		#clock-cells = <0>;
++		compatible = "ti,mux-clock";
++		clocks = <&sys_clkin1>, <&usb_dpll_hs_clk_div>;
++		ti,bit-shift = <23>;
++		reg = <0x018c>;
++	};
++
+ 	dpll_usb_ck: dpll_usb_ck {
+ 		#clock-cells = <0>;
+ 		compatible = "ti,omap4-dpll-j-type-clock";
+-		clocks = <&sys_clkin1>, <&usb_dpll_hs_clk_div>;
++		clocks = <&sys_clkin1>, <&dpll_usb_byp_mux>;
+ 		reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>;
+ 	};
+ 
+diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
+index f1cd2147421d..a626e6dd8022 100644
+--- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
+@@ -35,6 +35,7 @@
+ 			regulator-max-microvolt = <5000000>;
+ 			gpio = <&gpio3 22 0>;
+ 			enable-active-high;
++			vin-supply = <&swbst_reg>;
+ 		};
+ 
+ 		reg_usb_h1_vbus: regulator@1 {
+@@ -45,6 +46,7 @@
+ 			regulator-max-microvolt = <5000000>;
+ 			gpio = <&gpio1 29 0>;
+ 			enable-active-high;
++			vin-supply = <&swbst_reg>;
+ 		};
+ 
+ 		reg_audio: regulator@2 {
+diff --git a/arch/arm/boot/dts/imx6sl-evk.dts b/arch/arm/boot/dts/imx6sl-evk.dts
+index fda4932faefd..945887d3fdb3 100644
+--- a/arch/arm/boot/dts/imx6sl-evk.dts
++++ b/arch/arm/boot/dts/imx6sl-evk.dts
+@@ -52,6 +52,7 @@
+ 			regulator-max-microvolt = <5000000>;
+ 			gpio = <&gpio4 0 0>;
+ 			enable-active-high;
++			vin-supply = <&swbst_reg>;
+ 		};
+ 
+ 		reg_usb_otg2_vbus: regulator@1 {
+@@ -62,6 +63,7 @@
+ 			regulator-max-microvolt = <5000000>;
+ 			gpio = <&gpio4 2 0>;
+ 			enable-active-high;
++			vin-supply = <&swbst_reg>;
+ 		};
+ 
+ 		reg_aud3v: regulator@2 {
+diff --git a/arch/arm/crypto/aesbs-core.S_shipped b/arch/arm/crypto/aesbs-core.S_shipped
+index 71e5fc7cfb18..1d1800f71c5b 100644
+--- a/arch/arm/crypto/aesbs-core.S_shipped
++++ b/arch/arm/crypto/aesbs-core.S_shipped
+@@ -58,14 +58,18 @@
+ # define VFP_ABI_FRAME	0
+ # define BSAES_ASM_EXTENDED_KEY
+ # define XTS_CHAIN_TWEAK
+-# define __ARM_ARCH__	7
++# define __ARM_ARCH__ __LINUX_ARM_ARCH__
++# define __ARM_MAX_ARCH__ 7
+ #endif
+ 
+ #ifdef __thumb__
+ # define adrl adr
+ #endif
+ 
+-#if __ARM_ARCH__>=7
++#if __ARM_MAX_ARCH__>=7
++.arch	armv7-a
++.fpu	neon
++
+ .text
+ .syntax	unified 	@ ARMv7-capable assembler is expected to handle this
+ #ifdef __thumb2__
+@@ -74,8 +78,6 @@
+ .code   32
+ #endif
+ 
+-.fpu	neon
+-
+ .type	_bsaes_decrypt8,%function
+ .align	4
+ _bsaes_decrypt8:
+@@ -2095,9 +2097,11 @@ bsaes_xts_decrypt:
+ 	vld1.8	{q8}, [r0]			@ initial tweak
+ 	adr	r2, .Lxts_magic
+ 
++#ifndef	XTS_CHAIN_TWEAK
+ 	tst	r9, #0xf			@ if not multiple of 16
+ 	it	ne				@ Thumb2 thing, sanity check in ARM
+ 	subne	r9, #0x10			@ subtract another 16 bytes
++#endif
+ 	subs	r9, #0x80
+ 
+ 	blo	.Lxts_dec_short
+diff --git a/arch/arm/crypto/bsaes-armv7.pl b/arch/arm/crypto/bsaes-armv7.pl
+index be068db960ee..a4d3856e7d24 100644
+--- a/arch/arm/crypto/bsaes-armv7.pl
++++ b/arch/arm/crypto/bsaes-armv7.pl
+@@ -701,14 +701,18 @@ $code.=<<___;
+ # define VFP_ABI_FRAME	0
+ # define BSAES_ASM_EXTENDED_KEY
+ # define XTS_CHAIN_TWEAK
+-# define __ARM_ARCH__	7
++# define __ARM_ARCH__ __LINUX_ARM_ARCH__
++# define __ARM_MAX_ARCH__ 7
+ #endif
+ 
+ #ifdef __thumb__
+ # define adrl adr
+ #endif
+ 
+-#if __ARM_ARCH__>=7
++#if __ARM_MAX_ARCH__>=7
++.arch	armv7-a
++.fpu	neon
++
+ .text
+ .syntax	unified 	@ ARMv7-capable assembler is expected to handle this
+ #ifdef __thumb2__
+@@ -717,8 +721,6 @@ $code.=<<___;
+ .code   32
+ #endif
+ 
+-.fpu	neon
+-
+ .type	_bsaes_decrypt8,%function
+ .align	4
+ _bsaes_decrypt8:
+@@ -2076,9 +2078,11 @@ bsaes_xts_decrypt:
+ 	vld1.8	{@XMM[8]}, [r0]			@ initial tweak
+ 	adr	$magic, .Lxts_magic
+ 
++#ifndef	XTS_CHAIN_TWEAK
+ 	tst	$len, #0xf			@ if not multiple of 16
+ 	it	ne				@ Thumb2 thing, sanity check in ARM
+ 	subne	$len, #0x10			@ subtract another 16 bytes
++#endif
+ 	subs	$len, #0x80
+ 
+ 	blo	.Lxts_dec_short
+diff --git a/arch/arm/mach-at91/pm.h b/arch/arm/mach-at91/pm.h
+index d2c89963af2d..86c0aa819d25 100644
+--- a/arch/arm/mach-at91/pm.h
++++ b/arch/arm/mach-at91/pm.h
+@@ -44,7 +44,7 @@ static inline void at91rm9200_standby(void)
+ 		"    mcr    p15, 0, %0, c7, c0, 4\n\t"
+ 		"    str    %5, [%1, %2]"
+ 		:
+-		: "r" (0), "r" (AT91_BASE_SYS), "r" (AT91RM9200_SDRAMC_LPR),
++		: "r" (0), "r" (at91_ramc_base[0]), "r" (AT91RM9200_SDRAMC_LPR),
+ 		  "r" (1), "r" (AT91RM9200_SDRAMC_SRR),
+ 		  "r" (lpr));
+ }
+diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
+index 7a1ebfeeeeb8..0abb7dff73ab 100644
+--- a/arch/arm/mach-exynos/platsmp.c
++++ b/arch/arm/mach-exynos/platsmp.c
+@@ -126,8 +126,7 @@ static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
+  */
+ void exynos_cpu_power_down(int cpu)
+ {
+-	if (cpu == 0 && (of_machine_is_compatible("samsung,exynos5420") ||
+-		of_machine_is_compatible("samsung,exynos5800"))) {
++	if (cpu == 0 && (soc_is_exynos5420() || soc_is_exynos5800())) {
+ 		/*
+ 		 * Bypass power down for CPU0 during suspend. Check for
+ 		 * the SYS_PWR_REG value to decide if we are suspending
+diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
+index c028fe37456f..53d9c354219f 100644
+--- a/arch/arm64/include/asm/tlb.h
++++ b/arch/arm64/include/asm/tlb.h
+@@ -48,6 +48,7 @@ static inline void tlb_flush(struct mmu_gather *tlb)
+ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
+ 				  unsigned long addr)
+ {
++	__flush_tlb_pgtable(tlb->mm, addr);
+ 	pgtable_page_dtor(pte);
+ 	tlb_remove_entry(tlb, pte);
+ }
+@@ -56,6 +57,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
+ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
+ 				  unsigned long addr)
+ {
++	__flush_tlb_pgtable(tlb->mm, addr);
+ 	tlb_remove_entry(tlb, virt_to_page(pmdp));
+ }
+ #endif
+@@ -64,6 +66,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
+ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
+ 				  unsigned long addr)
+ {
++	__flush_tlb_pgtable(tlb->mm, addr);
+ 	tlb_remove_entry(tlb, virt_to_page(pudp));
+ }
+ #endif
+diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
+index 73f0ce570fb3..8b8d8cb46e01 100644
+--- a/arch/arm64/include/asm/tlbflush.h
++++ b/arch/arm64/include/asm/tlbflush.h
+@@ -149,6 +149,19 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
+ }
+ 
+ /*
++ * Used to invalidate the TLB (walk caches) corresponding to intermediate page
++ * table levels (pgd/pud/pmd).
++ */
++static inline void __flush_tlb_pgtable(struct mm_struct *mm,
++				       unsigned long uaddr)
++{
++	unsigned long addr = uaddr >> 12 | ((unsigned long)ASID(mm) << 48);
++
++	dsb(ishst);
++	asm("tlbi	vae1is, %0" : : "r" (addr));
++	dsb(ish);
++}
++/*
+  * On AArch64, the cache coherency is handled via the set_pte_at() function.
+  */
+ static inline void update_mmu_cache(struct vm_area_struct *vma,
+diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
+index d92094203913..df34a70caca1 100644
+--- a/arch/arm64/mm/dma-mapping.c
++++ b/arch/arm64/mm/dma-mapping.c
+@@ -51,7 +51,7 @@ static int __init early_coherent_pool(char *p)
+ }
+ early_param("coherent_pool", early_coherent_pool);
+ 
+-static void *__alloc_from_pool(size_t size, struct page **ret_page)
++static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
+ {
+ 	unsigned long val;
+ 	void *ptr = NULL;
+@@ -67,6 +67,8 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
+ 
+ 		*ret_page = phys_to_page(phys);
+ 		ptr = (void *)val;
++		if (flags & __GFP_ZERO)
++			memset(ptr, 0, size);
+ 	}
+ 
+ 	return ptr;
+@@ -101,6 +103,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
+ 		flags |= GFP_DMA;
+ 	if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
+ 		struct page *page;
++		void *addr;
+ 
+ 		size = PAGE_ALIGN(size);
+ 		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
+@@ -109,7 +112,10 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
+ 			return NULL;
+ 
+ 		*dma_handle = phys_to_dma(dev, page_to_phys(page));
+-		return page_address(page);
++		addr = page_address(page);
++		if (flags & __GFP_ZERO)
++			memset(addr, 0, size);
++		return addr;
+ 	} else {
+ 		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
+ 	}
+@@ -145,7 +151,7 @@ static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
+ 
+ 	if (!(flags & __GFP_WAIT)) {
+ 		struct page *page = NULL;
+-		void *addr = __alloc_from_pool(size, &page);
++		void *addr = __alloc_from_pool(size, &page, flags);
+ 
+ 		if (addr)
+ 			*dma_handle = phys_to_dma(dev, page_to_phys(page));
+diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
+index 9cfa3706a1b8..f1ea5972f6ec 100644
+--- a/arch/powerpc/include/asm/iommu.h
++++ b/arch/powerpc/include/asm/iommu.h
+@@ -113,6 +113,7 @@ extern void iommu_register_group(struct iommu_table *tbl,
+ 				 int pci_domain_number, unsigned long pe_num);
+ extern int iommu_add_device(struct device *dev);
+ extern void iommu_del_device(struct device *dev);
++extern int __init tce_iommu_bus_notifier_init(void);
+ #else
+ static inline void iommu_register_group(struct iommu_table *tbl,
+ 					int pci_domain_number,
+@@ -128,6 +129,11 @@ static inline int iommu_add_device(struct device *dev)
+ static inline void iommu_del_device(struct device *dev)
+ {
+ }
++
++static inline int __init tce_iommu_bus_notifier_init(void)
++{
++        return 0;
++}
+ #endif /* !CONFIG_IOMMU_API */
+ 
+ static inline void set_iommu_table_base_and_group(struct device *dev,
+diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
+index 5d3968c4d799..b054f33ab1fb 100644
+--- a/arch/powerpc/kernel/iommu.c
++++ b/arch/powerpc/kernel/iommu.c
+@@ -1175,4 +1175,30 @@ void iommu_del_device(struct device *dev)
+ }
+ EXPORT_SYMBOL_GPL(iommu_del_device);
+ 
++static int tce_iommu_bus_notifier(struct notifier_block *nb,
++                unsigned long action, void *data)
++{
++        struct device *dev = data;
++
++        switch (action) {
++        case BUS_NOTIFY_ADD_DEVICE:
++                return iommu_add_device(dev);
++        case BUS_NOTIFY_DEL_DEVICE:
++                if (dev->iommu_group)
++                        iommu_del_device(dev);
++                return 0;
++        default:
++                return 0;
++        }
++}
++
++static struct notifier_block tce_iommu_bus_nb = {
++        .notifier_call = tce_iommu_bus_notifier,
++};
++
++int __init tce_iommu_bus_notifier_init(void)
++{
++        bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
++        return 0;
++}
+ #endif /* CONFIG_IOMMU_API */
+diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
+index 8b2d2dc8ef10..c68cc9dd28fb 100644
+--- a/arch/powerpc/kernel/smp.c
++++ b/arch/powerpc/kernel/smp.c
+@@ -555,8 +555,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
+ 	if (smp_ops->give_timebase)
+ 		smp_ops->give_timebase();
+ 
+-	/* Wait until cpu puts itself in the online map */
+-	while (!cpu_online(cpu))
++	/* Wait until cpu puts itself in the online & active maps */
++	while (!cpu_online(cpu) || !cpu_active(cpu))
+ 		cpu_relax();
+ 
+ 	return 0;
+diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
+index 4945e87f12dc..3948b8a868f2 100644
+--- a/arch/powerpc/platforms/powernv/pci.c
++++ b/arch/powerpc/platforms/powernv/pci.c
+@@ -866,30 +866,4 @@ void __init pnv_pci_init(void)
+ #endif
+ }
+ 
+-static int tce_iommu_bus_notifier(struct notifier_block *nb,
+-		unsigned long action, void *data)
+-{
+-	struct device *dev = data;
+-
+-	switch (action) {
+-	case BUS_NOTIFY_ADD_DEVICE:
+-		return iommu_add_device(dev);
+-	case BUS_NOTIFY_DEL_DEVICE:
+-		if (dev->iommu_group)
+-			iommu_del_device(dev);
+-		return 0;
+-	default:
+-		return 0;
+-	}
+-}
+-
+-static struct notifier_block tce_iommu_bus_nb = {
+-	.notifier_call = tce_iommu_bus_notifier,
+-};
+-
+-static int __init tce_iommu_bus_notifier_init(void)
+-{
+-	bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
+-	return 0;
+-}
+ machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init);
+diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
+index 1d3d52dc3ff3..7803a19adb31 100644
+--- a/arch/powerpc/platforms/pseries/iommu.c
++++ b/arch/powerpc/platforms/pseries/iommu.c
+@@ -1340,3 +1340,5 @@ static int __init disable_multitce(char *str)
+ }
+ 
+ __setup("multitce=", disable_multitce);
++
++machine_subsys_initcall_sync(pseries, tce_iommu_bus_notifier_init);
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 9af01dc966d0..e3bab3a0da98 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -159,7 +159,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
+ 	case KVM_CAP_ONE_REG:
+ 	case KVM_CAP_ENABLE_CAP:
+ 	case KVM_CAP_S390_CSS_SUPPORT:
+-	case KVM_CAP_IRQFD:
+ 	case KVM_CAP_IOEVENTFD:
+ 	case KVM_CAP_DEVICE_CTRL:
+ 	case KVM_CAP_ENABLE_CAP_VM:
+diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
+index 62c5ea6d8682..ec86d4f22efd 100644
+--- a/arch/s390/pci/pci_mmio.c
++++ b/arch/s390/pci/pci_mmio.c
+@@ -64,8 +64,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
+ 	if (copy_from_user(buf, user_buffer, length))
+ 		goto out;
+ 
+-	memcpy_toio(io_addr, buf, length);
+-	ret = 0;
++	ret = zpci_memcpy_toio(io_addr, buf, length);
+ out:
+ 	if (buf != local_buf)
+ 		kfree(buf);
+@@ -98,16 +97,16 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
+ 		goto out;
+ 	io_addr = (void *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
+ 
+-	ret = -EFAULT;
+-	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
++	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
++		ret = -EFAULT;
+ 		goto out;
+-
+-	memcpy_fromio(buf, io_addr, length);
+-
+-	if (copy_to_user(user_buffer, buf, length))
++	}
++	ret = zpci_memcpy_fromio(buf, io_addr, length);
++	if (ret)
+ 		goto out;
++	if (copy_to_user(user_buffer, buf, length))
++		ret = -EFAULT;
+ 
+-	ret = 0;
+ out:
+ 	if (buf != local_buf)
+ 		kfree(buf);
+diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
+index 46a5e4508752..af53c25da2e7 100644
+--- a/arch/sparc/kernel/perf_event.c
++++ b/arch/sparc/kernel/perf_event.c
+@@ -960,6 +960,8 @@ out:
+ 	cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
+ }
+ 
++static void sparc_pmu_start(struct perf_event *event, int flags);
++
+ /* On this PMU each PIC has it's own PCR control register.  */
+ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
+ {
+@@ -972,20 +974,13 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
+ 		struct perf_event *cp = cpuc->event[i];
+ 		struct hw_perf_event *hwc = &cp->hw;
+ 		int idx = hwc->idx;
+-		u64 enc;
+ 
+ 		if (cpuc->current_idx[i] != PIC_NO_INDEX)
+ 			continue;
+ 
+-		sparc_perf_event_set_period(cp, hwc, idx);
+ 		cpuc->current_idx[i] = idx;
+ 
+-		enc = perf_event_get_enc(cpuc->events[i]);
+-		cpuc->pcr[idx] &= ~mask_for_index(idx);
+-		if (hwc->state & PERF_HES_STOPPED)
+-			cpuc->pcr[idx] |= nop_for_index(idx);
+-		else
+-			cpuc->pcr[idx] |= event_encoding(enc, idx);
++		sparc_pmu_start(cp, PERF_EF_RELOAD);
+ 	}
+ out:
+ 	for (i = 0; i < cpuc->n_events; i++) {
+@@ -1101,7 +1096,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
+ 	int i;
+ 
+ 	local_irq_save(flags);
+-	perf_pmu_disable(event->pmu);
+ 
+ 	for (i = 0; i < cpuc->n_events; i++) {
+ 		if (event == cpuc->event[i]) {
+@@ -1127,7 +1121,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
+ 		}
+ 	}
+ 
+-	perf_pmu_enable(event->pmu);
+ 	local_irq_restore(flags);
+ }
+ 
+@@ -1361,7 +1354,6 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
+ 	unsigned long flags;
+ 
+ 	local_irq_save(flags);
+-	perf_pmu_disable(event->pmu);
+ 
+ 	n0 = cpuc->n_events;
+ 	if (n0 >= sparc_pmu->max_hw_events)
+@@ -1394,7 +1386,6 @@ nocheck:
+ 
+ 	ret = 0;
+ out:
+-	perf_pmu_enable(event->pmu);
+ 	local_irq_restore(flags);
+ 	return ret;
+ }
+diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
+index 0be7bf978cb1..46a59643bb1c 100644
+--- a/arch/sparc/kernel/process_64.c
++++ b/arch/sparc/kernel/process_64.c
+@@ -287,6 +287,8 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
+ 			printk("             TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
+ 			       gp->tpc, gp->o7, gp->i7, gp->rpc);
+ 		}
++
++		touch_nmi_watchdog();
+ 	}
+ 
+ 	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
+@@ -362,6 +364,8 @@ static void pmu_snapshot_all_cpus(void)
+ 		       (cpu == this_cpu ? '*' : ' '), cpu,
+ 		       pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
+ 		       pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);
++
++		touch_nmi_watchdog();
+ 	}
+ 
+ 	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
+diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
+index c85403d0496c..30e7ddb27a3a 100644
+--- a/arch/sparc/kernel/sys_sparc_64.c
++++ b/arch/sparc/kernel/sys_sparc_64.c
+@@ -333,7 +333,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
+ 	long err;
+ 
+ 	/* No need for backward compatibility. We can start fresh... */
+-	if (call <= SEMCTL) {
++	if (call <= SEMTIMEDOP) {
+ 		switch (call) {
+ 		case SEMOP:
+ 			err = sys_semtimedop(first, ptr,
+diff --git a/arch/sparc/lib/memmove.S b/arch/sparc/lib/memmove.S
+index b7f6334e159f..857ad4f8905f 100644
+--- a/arch/sparc/lib/memmove.S
++++ b/arch/sparc/lib/memmove.S
+@@ -8,9 +8,11 @@
+ 
+ 	.text
+ ENTRY(memmove) /* o0=dst o1=src o2=len */
+-	mov		%o0, %g1
++	brz,pn		%o2, 99f
++	 mov		%o0, %g1
++
+ 	cmp		%o0, %o1
+-	bleu,pt		%xcc, memcpy
++	bleu,pt		%xcc, 2f
+ 	 add		%o1, %o2, %g7
+ 	cmp		%g7, %o0
+ 	bleu,pt		%xcc, memcpy
+@@ -24,7 +26,34 @@ ENTRY(memmove) /* o0=dst o1=src o2=len */
+ 	stb		%g7, [%o0]
+ 	bne,pt		%icc, 1b
+ 	 sub		%o0, 1, %o0
+-
++99:
+ 	retl
+ 	 mov		%g1, %o0
++
++	/* We can't just call memcpy for these memmove cases.  On some
++	 * chips the memcpy uses cache initializing stores and when dst
++	 * and src are close enough, those can clobber the source data
++	 * before we've loaded it in.
++	 */
++2:	or		%o0, %o1, %g7
++	or		%o2, %g7, %g7
++	andcc		%g7, 0x7, %g0
++	bne,pn		%xcc, 4f
++	 nop
++
++3:	ldx		[%o1], %g7
++	add		%o1, 8, %o1
++	subcc		%o2, 8, %o2
++	add		%o0, 8, %o0
++	bne,pt		%icc, 3b
++	 stx		%g7, [%o0 - 0x8]
++	ba,a,pt		%xcc, 99b
++
++4:	ldub		[%o1], %g7
++	add		%o1, 1, %o1
++	subcc		%o2, 1, %o2
++	add		%o0, 1, %o0
++	bne,pt		%icc, 4b
++	 stb		%g7, [%o0 - 0x1]
++	ba,a,pt		%xcc, 99b
+ ENDPROC(memmove)
+diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
+index ae855f4f64b7..95ad7adeb5fb 100644
+--- a/arch/x86/crypto/aesni-intel_glue.c
++++ b/arch/x86/crypto/aesni-intel_glue.c
+@@ -1133,7 +1133,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
+ 		src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
+ 		if (!src)
+ 			return -ENOMEM;
+-		assoc = (src + req->cryptlen + auth_tag_len);
++		assoc = (src + req->cryptlen);
+ 		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
+ 		scatterwalk_map_and_copy(assoc, req->assoc, 0,
+ 			req->assoclen, 0);
+@@ -1158,7 +1158,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
+ 		scatterwalk_done(&src_sg_walk, 0, 0);
+ 		scatterwalk_done(&assoc_sg_walk, 0, 0);
+ 	} else {
+-		scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
++		scatterwalk_map_and_copy(dst, req->dst, 0, tempCipherLen, 1);
+ 		kfree(src);
+ 	}
+ 	return retval;
+diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
+index e97622f57722..f895358db0ab 100644
+--- a/arch/x86/include/asm/fpu-internal.h
++++ b/arch/x86/include/asm/fpu-internal.h
+@@ -368,7 +368,7 @@ static inline void drop_fpu(struct task_struct *tsk)
+ 	preempt_disable();
+ 	tsk->thread.fpu_counter = 0;
+ 	__drop_fpu(tsk);
+-	clear_used_math();
++	clear_stopped_child_used_math(tsk);
+ 	preempt_enable();
+ }
+ 
+diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
+index c2fd21fed002..017149cded07 100644
+--- a/arch/x86/kernel/apic/apic_numachip.c
++++ b/arch/x86/kernel/apic/apic_numachip.c
+@@ -37,10 +37,12 @@ static const struct apic apic_numachip;
+ static unsigned int get_apic_id(unsigned long x)
+ {
+ 	unsigned long value;
+-	unsigned int id;
++	unsigned int id = (x >> 24) & 0xff;
+ 
+-	rdmsrl(MSR_FAM10H_NODE_ID, value);
+-	id = ((x >> 24) & 0xffU) | ((value << 2) & 0xff00U);
++	if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) {
++		rdmsrl(MSR_FAM10H_NODE_ID, value);
++		id |= (value << 2) & 0xff00;
++	}
+ 
+ 	return id;
+ }
+@@ -155,10 +157,18 @@ static int __init numachip_probe(void)
+ 
+ static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
+ {
+-	if (c->phys_proc_id != node) {
+-		c->phys_proc_id = node;
+-		per_cpu(cpu_llc_id, smp_processor_id()) = node;
++	u64 val;
++	u32 nodes = 1;
++
++	this_cpu_write(cpu_llc_id, node);
++
++	/* Account for nodes per socket in multi-core-module processors */
++	if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) {
++		rdmsrl(MSR_FAM10H_NODE_ID, val);
++		nodes = ((val >> 3) & 7) + 1;
+ 	}
++
++	c->phys_proc_id = node / nodes;
+ }
+ 
+ static int __init numachip_system_init(void)
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index 88900e288021..89f4e64c2cea 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -300,7 +300,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
+ 		goto exit;
+ 	conditional_sti(regs);
+ 
+-	if (!user_mode(regs))
++	if (!user_mode_vm(regs))
+ 		die("bounds", regs, error_code);
+ 
+ 	if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
+@@ -566,7 +566,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
+ 	 * then it's very likely the result of an icebp/int01 trap.
+ 	 * User wants a sigtrap for that.
+ 	 */
+-	if (!dr6 && user_mode(regs))
++	if (!dr6 && user_mode_vm(regs))
+ 		user_icebp = 1;
+ 
+ 	/* Catch kmemcheck conditions first of all! */
+diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
+index 0de1fae2bdf0..8be1e1711203 100644
+--- a/arch/x86/kernel/xsave.c
++++ b/arch/x86/kernel/xsave.c
+@@ -378,7 +378,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
+ 		 * thread's fpu state, reconstruct fxstate from the fsave
+ 		 * header. Sanitize the copied state etc.
+ 		 */
+-		struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
++		struct fpu *fpu = &tsk->thread.fpu;
+ 		struct user_i387_ia32_struct env;
+ 		int err = 0;
+ 
+@@ -392,14 +392,15 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
+ 		 */
+ 		drop_fpu(tsk);
+ 
+-		if (__copy_from_user(xsave, buf_fx, state_size) ||
++		if (__copy_from_user(&fpu->state->xsave, buf_fx, state_size) ||
+ 		    __copy_from_user(&env, buf, sizeof(env))) {
++			fpu_finit(fpu);
+ 			err = -1;
+ 		} else {
+ 			sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only);
+-			set_used_math();
+ 		}
+ 
++		set_used_math();
+ 		if (use_eager_fpu()) {
+ 			preempt_disable();
+ 			math_state_restore();
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index c259814200bd..64d76c102230 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -2716,7 +2716,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
+ 	case KVM_CAP_USER_NMI:
+ 	case KVM_CAP_REINJECT_CONTROL:
+ 	case KVM_CAP_IRQ_INJECT_STATUS:
+-	case KVM_CAP_IRQFD:
+ 	case KVM_CAP_IOEVENTFD:
+ 	case KVM_CAP_IOEVENTFD_NO_LENGTH:
+ 	case KVM_CAP_PIT2:
+diff --git a/arch/x86/vdso/vdso32/sigreturn.S b/arch/x86/vdso/vdso32/sigreturn.S
+index 31776d0efc8c..d7ec4e251c0a 100644
+--- a/arch/x86/vdso/vdso32/sigreturn.S
++++ b/arch/x86/vdso/vdso32/sigreturn.S
+@@ -17,6 +17,7 @@
+ 	.text
+ 	.globl __kernel_sigreturn
+ 	.type __kernel_sigreturn,@function
++	nop /* this guy is needed for .LSTARTFDEDLSI1 below (watch for HACK) */
+ 	ALIGN
+ __kernel_sigreturn:
+ .LSTART_sigreturn:
+diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
+index 376a0a9dc670..cc45a9f1a6e6 100644
+--- a/arch/x86/xen/p2m.c
++++ b/arch/x86/xen/p2m.c
+@@ -567,7 +567,7 @@ static bool alloc_p2m(unsigned long pfn)
+ 		if (p2m_pfn == PFN_DOWN(__pa(p2m_missing)))
+ 			p2m_init(p2m);
+ 		else
+-			p2m_init_identity(p2m, pfn);
++			p2m_init_identity(p2m, pfn & ~(P2M_PER_PAGE - 1));
+ 
+ 		spin_lock_irqsave(&p2m_update_lock, flags);
+ 
+diff --git a/drivers/char/tpm/tpm_i2c_stm_st33.c b/drivers/char/tpm/tpm_i2c_stm_st33.c
+index 7d1c540fa26a..3f187a529e92 100644
+--- a/drivers/char/tpm/tpm_i2c_stm_st33.c
++++ b/drivers/char/tpm/tpm_i2c_stm_st33.c
+@@ -397,7 +397,7 @@ static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
+  */
+ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
+ {
+-	int size = 0, burstcnt, len;
++	int size = 0, burstcnt, len, ret;
+ 	struct i2c_client *client;
+ 
+ 	client = (struct i2c_client *)TPM_VPRIV(chip);
+@@ -406,13 +406,15 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
+ 	       wait_for_stat(chip,
+ 			     TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ 			     chip->vendor.timeout_c,
+-			     &chip->vendor.read_queue)
+-	       == 0) {
++			     &chip->vendor.read_queue) == 0) {
+ 		burstcnt = get_burstcount(chip);
+ 		if (burstcnt < 0)
+ 			return burstcnt;
+ 		len = min_t(int, burstcnt, count - size);
+-		I2C_READ_DATA(client, TPM_DATA_FIFO, buf + size, len);
++		ret = I2C_READ_DATA(client, TPM_DATA_FIFO, buf + size, len);
++		if (ret < 0)
++			return ret;
++
+ 		size += len;
+ 	}
+ 	return size;
+diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
+index eff9d5870034..102463ba745d 100644
+--- a/drivers/char/tpm/tpm_ibmvtpm.c
++++ b/drivers/char/tpm/tpm_ibmvtpm.c
+@@ -124,7 +124,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
+ {
+ 	struct ibmvtpm_dev *ibmvtpm;
+ 	struct ibmvtpm_crq crq;
+-	u64 *word = (u64 *) &crq;
++	__be64 *word = (__be64 *)&crq;
+ 	int rc;
+ 
+ 	ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip);
+@@ -145,11 +145,11 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
+ 	memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
+ 	crq.valid = (u8)IBMVTPM_VALID_CMD;
+ 	crq.msg = (u8)VTPM_TPM_COMMAND;
+-	crq.len = (u16)count;
+-	crq.data = ibmvtpm->rtce_dma_handle;
++	crq.len = cpu_to_be16(count);
++	crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle);
+ 
+-	rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(word[0]),
+-			      cpu_to_be64(word[1]));
++	rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]),
++			      be64_to_cpu(word[1]));
+ 	if (rc != H_SUCCESS) {
+ 		dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
+ 		rc = 0;
+diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h
+index bd82a791f995..b2c231b1beec 100644
+--- a/drivers/char/tpm/tpm_ibmvtpm.h
++++ b/drivers/char/tpm/tpm_ibmvtpm.h
+@@ -22,9 +22,9 @@
+ struct ibmvtpm_crq {
+ 	u8 valid;
+ 	u8 msg;
+-	u16 len;
+-	u32 data;
+-	u64 reserved;
++	__be16 len;
++	__be32 data;
++	__be64 reserved;
+ } __attribute__((packed, aligned(8)));
+ 
+ struct ibmvtpm_crq_queue {
+diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
+index de03df9dd7c9..c3aac4c1a554 100644
+--- a/drivers/char/virtio_console.c
++++ b/drivers/char/virtio_console.c
+@@ -142,6 +142,7 @@ struct ports_device {
+ 	 * notification
+ 	 */
+ 	struct work_struct control_work;
++	struct work_struct config_work;
+ 
+ 	struct list_head ports;
+ 
+@@ -1837,10 +1838,21 @@ static void config_intr(struct virtio_device *vdev)
+ 
+ 	portdev = vdev->priv;
+ 
++	if (!use_multiport(portdev))
++		schedule_work(&portdev->config_work);
++}
++
++static void config_work_handler(struct work_struct *work)
++{
++	struct ports_device *portdev;
++
++	portdev = container_of(work, struct ports_device, control_work);
+ 	if (!use_multiport(portdev)) {
++		struct virtio_device *vdev;
+ 		struct port *port;
+ 		u16 rows, cols;
+ 
++		vdev = portdev->vdev;
+ 		virtio_cread(vdev, struct virtio_console_config, cols, &cols);
+ 		virtio_cread(vdev, struct virtio_console_config, rows, &rows);
+ 
+@@ -2031,12 +2043,14 @@ static int virtcons_probe(struct virtio_device *vdev)
+ 
+ 	virtio_device_ready(portdev->vdev);
+ 
++	INIT_WORK(&portdev->config_work, &config_work_handler);
++	INIT_WORK(&portdev->control_work, &control_work_handler);
++
+ 	if (multiport) {
+ 		unsigned int nr_added_bufs;
+ 
+ 		spin_lock_init(&portdev->c_ivq_lock);
+ 		spin_lock_init(&portdev->c_ovq_lock);
+-		INIT_WORK(&portdev->control_work, &control_work_handler);
+ 
+ 		nr_added_bufs = fill_queue(portdev->c_ivq,
+ 					   &portdev->c_ivq_lock);
+@@ -2104,6 +2118,8 @@ static void virtcons_remove(struct virtio_device *vdev)
+ 	/* Finish up work that's lined up */
+ 	if (use_multiport(portdev))
+ 		cancel_work_sync(&portdev->control_work);
++	else
++		cancel_work_sync(&portdev->config_work);
+ 
+ 	list_for_each_entry_safe(port, port2, &portdev->ports, list)
+ 		unplug_port(port);
+@@ -2155,6 +2171,7 @@ static int virtcons_freeze(struct virtio_device *vdev)
+ 
+ 	virtqueue_disable_cb(portdev->c_ivq);
+ 	cancel_work_sync(&portdev->control_work);
++	cancel_work_sync(&portdev->config_work);
+ 	/*
+ 	 * Once more: if control_work_handler() was running, it would
+ 	 * enable the cb as the last step.
+diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
+index 5213da499d39..29168fae3dcb 100644
+--- a/drivers/gpu/drm/drm_crtc.c
++++ b/drivers/gpu/drm/drm_crtc.c
+@@ -42,9 +42,10 @@
+ #include "drm_crtc_internal.h"
+ #include "drm_internal.h"
+ 
+-static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
+-							struct drm_mode_fb_cmd2 *r,
+-							struct drm_file *file_priv);
++static struct drm_framebuffer *
++internal_framebuffer_create(struct drm_device *dev,
++			    struct drm_mode_fb_cmd2 *r,
++			    struct drm_file *file_priv);
+ 
+ /* Avoid boilerplate.  I'm tired of typing. */
+ #define DRM_ENUM_NAME_FN(fnname, list)				\
+@@ -2817,13 +2818,11 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
+ 	 */
+ 	if (req->flags & DRM_MODE_CURSOR_BO) {
+ 		if (req->handle) {
+-			fb = add_framebuffer_internal(dev, &fbreq, file_priv);
++			fb = internal_framebuffer_create(dev, &fbreq, file_priv);
+ 			if (IS_ERR(fb)) {
+ 				DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
+ 				return PTR_ERR(fb);
+ 			}
+-
+-			drm_framebuffer_reference(fb);
+ 		} else {
+ 			fb = NULL;
+ 		}
+@@ -3175,9 +3174,10 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
+ 	return 0;
+ }
+ 
+-static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
+-							struct drm_mode_fb_cmd2 *r,
+-							struct drm_file *file_priv)
++static struct drm_framebuffer *
++internal_framebuffer_create(struct drm_device *dev,
++			    struct drm_mode_fb_cmd2 *r,
++			    struct drm_file *file_priv)
+ {
+ 	struct drm_mode_config *config = &dev->mode_config;
+ 	struct drm_framebuffer *fb;
+@@ -3209,12 +3209,6 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
+ 		return fb;
+ 	}
+ 
+-	mutex_lock(&file_priv->fbs_lock);
+-	r->fb_id = fb->base.id;
+-	list_add(&fb->filp_head, &file_priv->fbs);
+-	DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+-	mutex_unlock(&file_priv->fbs_lock);
+-
+ 	return fb;
+ }
+ 
+@@ -3236,15 +3230,24 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
+ int drm_mode_addfb2(struct drm_device *dev,
+ 		    void *data, struct drm_file *file_priv)
+ {
++	struct drm_mode_fb_cmd2 *r = data;
+ 	struct drm_framebuffer *fb;
+ 
+ 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ 		return -EINVAL;
+ 
+-	fb = add_framebuffer_internal(dev, data, file_priv);
++	fb = internal_framebuffer_create(dev, r, file_priv);
+ 	if (IS_ERR(fb))
+ 		return PTR_ERR(fb);
+ 
++	/* Transfer ownership to the filp for reaping on close */
++
++	DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
++	mutex_lock(&file_priv->fbs_lock);
++	r->fb_id = fb->base.id;
++	list_add(&fb->filp_head, &file_priv->fbs);
++	mutex_unlock(&file_priv->fbs_lock);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index 7643300828c3..4e6405e7226f 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -622,7 +622,7 @@ static int i915_drm_suspend(struct drm_device *dev)
+ 	return 0;
+ }
+ 
+-static int i915_drm_suspend_late(struct drm_device *drm_dev)
++static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
+ {
+ 	struct drm_i915_private *dev_priv = drm_dev->dev_private;
+ 	int ret;
+@@ -636,7 +636,17 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev)
+ 	}
+ 
+ 	pci_disable_device(drm_dev->pdev);
+-	pci_set_power_state(drm_dev->pdev, PCI_D3hot);
++	/*
++	 * During hibernation on some GEN4 platforms the BIOS may try to access
++	 * the device even though it's already in D3 and hang the machine. So
++	 * leave the device in D0 on those platforms and hope the BIOS will
++	 * power down the device properly. Platforms where this was seen:
++	 * Lenovo Thinkpad X301, X61s
++	 */
++	if (!(hibernation &&
++	      drm_dev->pdev->subsystem_vendor == PCI_VENDOR_ID_LENOVO &&
++	      INTEL_INFO(dev_priv)->gen == 4))
++		pci_set_power_state(drm_dev->pdev, PCI_D3hot);
+ 
+ 	return 0;
+ }
+@@ -662,7 +672,7 @@ int i915_suspend_legacy(struct drm_device *dev, pm_message_t state)
+ 	if (error)
+ 		return error;
+ 
+-	return i915_drm_suspend_late(dev);
++	return i915_drm_suspend_late(dev, false);
+ }
+ 
+ static int i915_drm_resume(struct drm_device *dev)
+@@ -934,8 +944,7 @@ static int i915_pm_suspend(struct device *dev)
+ 
+ static int i915_pm_suspend_late(struct device *dev)
+ {
+-	struct pci_dev *pdev = to_pci_dev(dev);
+-	struct drm_device *drm_dev = pci_get_drvdata(pdev);
++	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+ 
+ 	/*
+ 	 * We have a suspedn ordering issue with the snd-hda driver also
+@@ -949,13 +958,22 @@ static int i915_pm_suspend_late(struct device *dev)
+ 	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ 		return 0;
+ 
+-	return i915_drm_suspend_late(drm_dev);
++	return i915_drm_suspend_late(drm_dev, false);
++}
++
++static int i915_pm_poweroff_late(struct device *dev)
++{
++	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
++
++	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
++		return 0;
++
++	return i915_drm_suspend_late(drm_dev, true);
+ }
+ 
+ static int i915_pm_resume_early(struct device *dev)
+ {
+-	struct pci_dev *pdev = to_pci_dev(dev);
+-	struct drm_device *drm_dev = pci_get_drvdata(pdev);
++	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+ 
+ 	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ 		return 0;
+@@ -965,8 +983,7 @@ static int i915_pm_resume_early(struct device *dev)
+ 
+ static int i915_pm_resume(struct device *dev)
+ {
+-	struct pci_dev *pdev = to_pci_dev(dev);
+-	struct drm_device *drm_dev = pci_get_drvdata(pdev);
++	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+ 
+ 	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ 		return 0;
+@@ -1517,7 +1534,7 @@ static const struct dev_pm_ops i915_pm_ops = {
+ 	.thaw_early = i915_pm_resume_early,
+ 	.thaw = i915_pm_resume,
+ 	.poweroff = i915_pm_suspend,
+-	.poweroff_late = i915_pm_suspend_late,
++	.poweroff_late = i915_pm_poweroff_late,
+ 	.restore_early = i915_pm_resume_early,
+ 	.restore = i915_pm_resume,
+ 
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 0936b0f94826..ddd005ce3a94 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -1781,6 +1781,11 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
+ 	return dev->dev_private;
+ }
+ 
++static inline struct drm_i915_private *dev_to_i915(struct device *dev)
++{
++	return to_i915(dev_get_drvdata(dev));
++}
++
+ /* Iterate over initialised rings */
+ #define for_each_ring(ring__, dev_priv__, i__) \
+ 	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 30d4eb300be0..c10b52ef116d 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -9702,7 +9702,7 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
+ 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ 
+-	WARN_ON(!in_irq());
++	WARN_ON(!in_interrupt());
+ 
+ 	if (crtc == NULL)
+ 		return;
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index ed644a4f6f57..86807ee91bd1 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -1405,6 +1405,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
+ 	       (x << 16) | y);
+ 	viewport_w = crtc->mode.hdisplay;
+ 	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
++	if ((rdev->family >= CHIP_BONAIRE) &&
++	    (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE))
++		viewport_h *= 2;
+ 	WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
+ 	       (viewport_w << 16) | viewport_h);
+ 
+diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
+index de9a56205f0a..53b9ac3f8121 100644
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -7526,6 +7526,9 @@ int cik_irq_set(struct radeon_device *rdev)
+ 	WREG32(DC_HPD5_INT_CONTROL, hpd5);
+ 	WREG32(DC_HPD6_INT_CONTROL, hpd6);
+ 
++	/* posting read */
++	RREG32(SRBM_STATUS);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 85995b4e3338..c674f63d7f14 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -4589,6 +4589,9 @@ int evergreen_irq_set(struct radeon_device *rdev)
+ 	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5);
+ 	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6);
+ 
++	/* posting read */
++	RREG32(SRBM_STATUS);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
+index 279801ca5110..04f2514f7564 100644
+--- a/drivers/gpu/drm/radeon/r100.c
++++ b/drivers/gpu/drm/radeon/r100.c
+@@ -728,6 +728,10 @@ int r100_irq_set(struct radeon_device *rdev)
+ 		tmp |= RADEON_FP2_DETECT_MASK;
+ 	}
+ 	WREG32(RADEON_GEN_INT_CNTL, tmp);
++
++	/* read back to post the write */
++	RREG32(RADEON_GEN_INT_CNTL);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
+index ef5d6066fa5b..0e2cf2a3795b 100644
+--- a/drivers/gpu/drm/radeon/r600.c
++++ b/drivers/gpu/drm/radeon/r600.c
+@@ -3783,6 +3783,9 @@ int r600_irq_set(struct radeon_device *rdev)
+ 		WREG32(RV770_CG_THERMAL_INT, thermal_int);
+ 	}
+ 
++	/* posting read */
++	RREG32(R_000E50_SRBM_STATUS);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
+index c830863bc98a..26e7d2837b41 100644
+--- a/drivers/gpu/drm/radeon/radeon_cs.c
++++ b/drivers/gpu/drm/radeon/radeon_cs.c
+@@ -256,11 +256,13 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
+ 	u32 ring = RADEON_CS_RING_GFX;
+ 	s32 priority = 0;
+ 
++	INIT_LIST_HEAD(&p->validated);
++
+ 	if (!cs->num_chunks) {
+ 		return 0;
+ 	}
++
+ 	/* get chunks */
+-	INIT_LIST_HEAD(&p->validated);
+ 	p->idx = 0;
+ 	p->ib.sa_bo = NULL;
+ 	p->const_ib.sa_bo = NULL;
+diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
+index d13d1b5a859f..df09ca7c4889 100644
+--- a/drivers/gpu/drm/radeon/radeon_fence.c
++++ b/drivers/gpu/drm/radeon/radeon_fence.c
+@@ -1030,37 +1030,59 @@ static inline bool radeon_test_signaled(struct radeon_fence *fence)
+ 	return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
+ }
+ 
++struct radeon_wait_cb {
++	struct fence_cb base;
++	struct task_struct *task;
++};
++
++static void
++radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
++{
++	struct radeon_wait_cb *wait =
++		container_of(cb, struct radeon_wait_cb, base);
++
++	wake_up_process(wait->task);
++}
++
+ static signed long radeon_fence_default_wait(struct fence *f, bool intr,
+ 					     signed long t)
+ {
+ 	struct radeon_fence *fence = to_radeon_fence(f);
+ 	struct radeon_device *rdev = fence->rdev;
+-	bool signaled;
++	struct radeon_wait_cb cb;
+ 
+-	fence_enable_sw_signaling(&fence->base);
++	cb.task = current;
+ 
+-	/*
+-	 * This function has to return -EDEADLK, but cannot hold
+-	 * exclusive_lock during the wait because some callers
+-	 * may already hold it. This means checking needs_reset without
+-	 * lock, and not fiddling with any gpu internals.
+-	 *
+-	 * The callback installed with fence_enable_sw_signaling will
+-	 * run before our wait_event_*timeout call, so we will see
+-	 * both the signaled fence and the changes to needs_reset.
+-	 */
++	if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
++		return t;
++
++	while (t > 0) {
++		if (intr)
++			set_current_state(TASK_INTERRUPTIBLE);
++		else
++			set_current_state(TASK_UNINTERRUPTIBLE);
++
++		/*
++		 * radeon_test_signaled must be called after
++		 * set_current_state to prevent a race with wake_up_process
++		 */
++		if (radeon_test_signaled(fence))
++			break;
++
++		if (rdev->needs_reset) {
++			t = -EDEADLK;
++			break;
++		}
++
++		t = schedule_timeout(t);
++
++		if (t > 0 && intr && signal_pending(current))
++			t = -ERESTARTSYS;
++	}
++
++	__set_current_state(TASK_RUNNING);
++	fence_remove_callback(f, &cb.base);
+ 
+-	if (intr)
+-		t = wait_event_interruptible_timeout(rdev->fence_queue,
+-			((signaled = radeon_test_signaled(fence)) ||
+-			 rdev->needs_reset), t);
+-	else
+-		t = wait_event_timeout(rdev->fence_queue,
+-			((signaled = radeon_test_signaled(fence)) ||
+-			 rdev->needs_reset), t);
+-
+-	if (t > 0 && !signaled)
+-		return -EDEADLK;
+ 	return t;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
+index bef9a0953284..2151cec4e2c8 100644
+--- a/drivers/gpu/drm/radeon/radeon_kfd.c
++++ b/drivers/gpu/drm/radeon/radeon_kfd.c
+@@ -152,7 +152,7 @@ void radeon_kfd_device_init(struct radeon_device *rdev)
+ 			.compute_vmid_bitmap = 0xFF00,
+ 
+ 			.first_compute_pipe = 1,
+-			.compute_pipe_count = 8 - 1,
++			.compute_pipe_count = 4 - 1,
+ 		};
+ 
+ 		radeon_doorbell_get_kfd_info(rdev,
+diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
+index 040d2847f8e8..24f7d30b239d 100644
+--- a/drivers/gpu/drm/radeon/radeon_object.c
++++ b/drivers/gpu/drm/radeon/radeon_object.c
+@@ -173,17 +173,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
+ 		else
+ 			rbo->placements[i].lpfn = 0;
+ 	}
+-
+-	/*
+-	 * Use two-ended allocation depending on the buffer size to
+-	 * improve fragmentation quality.
+-	 * 512kb was measured as the most optimal number.
+-	 */
+-	if (rbo->tbo.mem.size > 512 * 1024) {
+-		for (i = 0; i < c; i++) {
+-			rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
+-		}
+-	}
+ }
+ 
+ int radeon_bo_create(struct radeon_device *rdev,
+diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
+index 74bce91aecc1..039660662ee8 100644
+--- a/drivers/gpu/drm/radeon/rs600.c
++++ b/drivers/gpu/drm/radeon/rs600.c
+@@ -693,6 +693,10 @@ int rs600_irq_set(struct radeon_device *rdev)
+ 	WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
+ 	if (ASIC_IS_DCE2(rdev))
+ 		WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
++
++	/* posting read */
++	RREG32(R_000040_GEN_INT_CNTL);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
+index 5d89b874a1a2..eed21db73e59 100644
+--- a/drivers/gpu/drm/radeon/si.c
++++ b/drivers/gpu/drm/radeon/si.c
+@@ -6198,6 +6198,9 @@ int si_irq_set(struct radeon_device *rdev)
+ 
+ 	WREG32(CG_THERMAL_INT, thermal_int);
+ 
++	/* posting read */
++	RREG32(SRBM_STATUS);
++
+ 	return 0;
+ }
+ 
+@@ -7118,8 +7121,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+ 	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
+ 
+ 	if (!vclk || !dclk) {
+-		/* keep the Bypass mode, put PLL to sleep */
+-		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
++		/* keep the Bypass mode */
+ 		return 0;
+ 	}
+ 
+@@ -7135,8 +7137,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+ 	/* set VCO_MODE to 1 */
+ 	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
+ 
+-	/* toggle UPLL_SLEEP to 1 then back to 0 */
+-	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
++	/* disable sleep mode */
+ 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
+ 
+ 	/* deassert UPLL_RESET */
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+index 6c6b655defcf..74a2e2318693 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -725,32 +725,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
+ 		goto out_err1;
+ 	}
+ 
+-	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
+-			     (dev_priv->vram_size >> PAGE_SHIFT));
+-	if (unlikely(ret != 0)) {
+-		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
+-		goto out_err2;
+-	}
+-
+-	dev_priv->has_gmr = true;
+-	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
+-	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
+-					 VMW_PL_GMR) != 0) {
+-		DRM_INFO("No GMR memory available. "
+-			 "Graphics memory resources are very limited.\n");
+-		dev_priv->has_gmr = false;
+-	}
+-
+-	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+-		dev_priv->has_mob = true;
+-		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
+-				   VMW_PL_MOB) != 0) {
+-			DRM_INFO("No MOB memory available. "
+-				 "3D will be disabled.\n");
+-			dev_priv->has_mob = false;
+-		}
+-	}
+-
+ 	dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
+ 					       dev_priv->mmio_size);
+ 
+@@ -813,6 +787,33 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
+ 		goto out_no_fman;
+ 	}
+ 
++
++	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
++			     (dev_priv->vram_size >> PAGE_SHIFT));
++	if (unlikely(ret != 0)) {
++		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
++		goto out_no_vram;
++	}
++
++	dev_priv->has_gmr = true;
++	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
++	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
++					 VMW_PL_GMR) != 0) {
++		DRM_INFO("No GMR memory available. "
++			 "Graphics memory resources are very limited.\n");
++		dev_priv->has_gmr = false;
++	}
++
++	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
++		dev_priv->has_mob = true;
++		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
++				   VMW_PL_MOB) != 0) {
++			DRM_INFO("No MOB memory available. "
++				 "3D will be disabled.\n");
++			dev_priv->has_mob = false;
++		}
++	}
++
+ 	vmw_kms_save_vga(dev_priv);
+ 
+ 	/* Start kms and overlay systems, needs fifo. */
+@@ -838,6 +839,12 @@ out_no_fifo:
+ 	vmw_kms_close(dev_priv);
+ out_no_kms:
+ 	vmw_kms_restore_vga(dev_priv);
++	if (dev_priv->has_mob)
++		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
++	if (dev_priv->has_gmr)
++		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
++	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
++out_no_vram:
+ 	vmw_fence_manager_takedown(dev_priv->fman);
+ out_no_fman:
+ 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+@@ -853,12 +860,6 @@ out_err4:
+ 	iounmap(dev_priv->mmio_virt);
+ out_err3:
+ 	arch_phys_wc_del(dev_priv->mmio_mtrr);
+-	if (dev_priv->has_mob)
+-		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+-	if (dev_priv->has_gmr)
+-		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
+-	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+-out_err2:
+ 	(void)ttm_bo_device_release(&dev_priv->bdev);
+ out_err1:
+ 	vmw_ttm_global_release(dev_priv);
+@@ -887,6 +888,13 @@ static int vmw_driver_unload(struct drm_device *dev)
+ 	}
+ 	vmw_kms_close(dev_priv);
+ 	vmw_overlay_close(dev_priv);
++
++	if (dev_priv->has_mob)
++		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
++	if (dev_priv->has_gmr)
++		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
++	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
++
+ 	vmw_fence_manager_takedown(dev_priv->fman);
+ 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+ 		drm_irq_uninstall(dev_priv->dev);
+@@ -898,11 +906,6 @@ static int vmw_driver_unload(struct drm_device *dev)
+ 	ttm_object_device_release(&dev_priv->tdev);
+ 	iounmap(dev_priv->mmio_virt);
+ 	arch_phys_wc_del(dev_priv->mmio_mtrr);
+-	if (dev_priv->has_mob)
+-		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+-	if (dev_priv->has_gmr)
+-		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
+-	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+ 	(void)ttm_bo_device_release(&dev_priv->bdev);
+ 	vmw_ttm_global_release(dev_priv);
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index 33176d05db35..1e114893a001 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -2780,13 +2780,11 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
+ 				  NULL, arg->command_size, arg->throttle_us,
+ 				  (void __user *)(unsigned long)arg->fence_rep,
+ 				  NULL);
+-
++	ttm_read_unlock(&dev_priv->reservation_sem);
+ 	if (unlikely(ret != 0))
+-		goto out_unlock;
++		return ret;
+ 
+ 	vmw_kms_cursor_post_execbuf(dev_priv);
+ 
+-out_unlock:
+-	ttm_read_unlock(&dev_priv->reservation_sem);
+-	return ret;
++	return 0;
+ }
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index 8725b79e7847..07cda8cbbddb 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -2033,23 +2033,17 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
+ 	int i;
+ 	struct drm_mode_config *mode_config = &dev->mode_config;
+ 
+-	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+-	if (unlikely(ret != 0))
+-		return ret;
+-
+ 	if (!arg->num_outputs) {
+ 		struct drm_vmw_rect def_rect = {0, 0, 800, 600};
+ 		vmw_du_update_layout(dev_priv, 1, &def_rect);
+-		goto out_unlock;
++		return 0;
+ 	}
+ 
+ 	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
+ 	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
+ 			GFP_KERNEL);
+-	if (unlikely(!rects)) {
+-		ret = -ENOMEM;
+-		goto out_unlock;
+-	}
++	if (unlikely(!rects))
++		return -ENOMEM;
+ 
+ 	user_rects = (void __user *)(unsigned long)arg->rects;
+ 	ret = copy_from_user(rects, user_rects, rects_size);
+@@ -2074,7 +2068,5 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
+ 
+ out_free:
+ 	kfree(rects);
+-out_unlock:
+-	ttm_read_unlock(&dev_priv->reservation_sem);
+ 	return ret;
+ }
+diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
+index e9eae57a2b50..63663332391d 100644
+--- a/drivers/i2c/i2c-core.c
++++ b/drivers/i2c/i2c-core.c
+@@ -679,9 +679,6 @@ static int i2c_device_remove(struct device *dev)
+ 		status = driver->remove(client);
+ 	}
+ 
+-	if (dev->of_node)
+-		irq_dispose_mapping(client->irq);
+-
+ 	dev_pm_domain_detach(&client->dev, true);
+ 	return status;
+ }
+diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
+index 463c235acbdc..4387dae14e45 100644
+--- a/drivers/irqchip/irq-armada-370-xp.c
++++ b/drivers/irqchip/irq-armada-370-xp.c
+@@ -69,6 +69,7 @@ static void __iomem *per_cpu_int_base;
+ static void __iomem *main_int_base;
+ static struct irq_domain *armada_370_xp_mpic_domain;
+ static u32 doorbell_mask_reg;
++static int parent_irq;
+ #ifdef CONFIG_PCI_MSI
+ static struct irq_domain *armada_370_xp_msi_domain;
+ static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR);
+@@ -356,6 +357,7 @@ static int armada_xp_mpic_secondary_init(struct notifier_block *nfb,
+ {
+ 	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
+ 		armada_xp_mpic_smp_cpu_init();
++
+ 	return NOTIFY_OK;
+ }
+ 
+@@ -364,6 +366,20 @@ static struct notifier_block armada_370_xp_mpic_cpu_notifier = {
+ 	.priority = 100,
+ };
+ 
++static int mpic_cascaded_secondary_init(struct notifier_block *nfb,
++					unsigned long action, void *hcpu)
++{
++	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
++		enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);
++
++	return NOTIFY_OK;
++}
++
++static struct notifier_block mpic_cascaded_cpu_notifier = {
++	.notifier_call = mpic_cascaded_secondary_init,
++	.priority = 100,
++};
++
+ #endif /* CONFIG_SMP */
+ 
+ static struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
+@@ -539,7 +555,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
+ 					     struct device_node *parent)
+ {
+ 	struct resource main_int_res, per_cpu_int_res;
+-	int parent_irq, nr_irqs, i;
++	int nr_irqs, i;
+ 	u32 control;
+ 
+ 	BUG_ON(of_address_to_resource(node, 0, &main_int_res));
+@@ -587,6 +603,9 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
+ 		register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier);
+ #endif
+ 	} else {
++#ifdef CONFIG_SMP
++		register_cpu_notifier(&mpic_cascaded_cpu_notifier);
++#endif
+ 		irq_set_chained_handler(parent_irq,
+ 					armada_370_xp_mpic_handle_cascade_irq);
+ 	}
+diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
+index 96b0b1d27df1..bc677362bc73 100644
+--- a/drivers/mtd/nand/pxa3xx_nand.c
++++ b/drivers/mtd/nand/pxa3xx_nand.c
+@@ -480,6 +480,42 @@ static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
+ 	nand_writel(info, NDCR, ndcr | int_mask);
+ }
+ 
++static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
++{
++	if (info->ecc_bch) {
++		int timeout;
++
++		/*
++		 * According to the datasheet, when reading from NDDB
++		 * with BCH enabled, after each 32-byte read we
++		 * have to make sure that the NDSR.RDDREQ bit is set.
++		 *
++		 * Drain the FIFO eight 32-bit reads at a time, and skip
++		 * the polling on the last read.
++		 */
++		while (len > 8) {
++			__raw_readsl(info->mmio_base + NDDB, data, 8);
++
++			for (timeout = 0;
++			     !(nand_readl(info, NDSR) & NDSR_RDDREQ);
++			     timeout++) {
++				if (timeout >= 5) {
++					dev_err(&info->pdev->dev,
++						"Timeout on RDDREQ while draining the FIFO\n");
++					return;
++				}
++
++				mdelay(1);
++			}
++
++			data += 32;
++			len -= 8;
++		}
++	}
++
++	__raw_readsl(info->mmio_base + NDDB, data, len);
++}
++
+ static void handle_data_pio(struct pxa3xx_nand_info *info)
+ {
+ 	unsigned int do_bytes = min(info->data_size, info->chunk_size);
+@@ -496,14 +532,14 @@ static void handle_data_pio(struct pxa3xx_nand_info *info)
+ 				      DIV_ROUND_UP(info->oob_size, 4));
+ 		break;
+ 	case STATE_PIO_READING:
+-		__raw_readsl(info->mmio_base + NDDB,
+-			     info->data_buff + info->data_buff_pos,
+-			     DIV_ROUND_UP(do_bytes, 4));
++		drain_fifo(info,
++			   info->data_buff + info->data_buff_pos,
++			   DIV_ROUND_UP(do_bytes, 4));
+ 
+ 		if (info->oob_size > 0)
+-			__raw_readsl(info->mmio_base + NDDB,
+-				     info->oob_buff + info->oob_buff_pos,
+-				     DIV_ROUND_UP(info->oob_size, 4));
++			drain_fifo(info,
++				   info->oob_buff + info->oob_buff_pos,
++				   DIV_ROUND_UP(info->oob_size, 4));
+ 		break;
+ 	default:
+ 		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
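
A standalone sketch of the chunked-drain pattern that drain_fifo() above
implements. fifo_read_words(), fifo_ready(), CHUNK_WORDS and MAX_POLLS are
hypothetical stand-ins for the NDDB/NDSR register accesses; only the loop
shape mirrors the driver: read a fixed chunk, poll a "data ready" flag with
a bounded retry budget, and skip the poll after the final read.

#include <stdio.h>
#include <stdint.h>

#define CHUNK_WORDS 8	/* 8 x 32-bit reads per chunk, as in the patch */
#define MAX_POLLS   5

static int fifo_ready(void) { return 1; }	/* stand-in for NDSR.RDDREQ */

static void fifo_read_words(uint32_t *dst, int n)	/* stand-in for __raw_readsl */
{
	for (int i = 0; i < n; i++)
		dst[i] = 0;
}

static int drain(uint32_t *data, int len)	/* len in 32-bit words */
{
	while (len > CHUNK_WORDS) {
		fifo_read_words(data, CHUNK_WORDS);

		for (int polls = 0; !fifo_ready(); polls++) {
			if (polls >= MAX_POLLS) {
				fprintf(stderr, "timeout draining FIFO\n");
				return -1;
			}
		}
		data += CHUNK_WORDS;
		len  -= CHUNK_WORDS;
	}
	fifo_read_words(data, len);	/* last chunk, no polling needed */
	return 0;
}

int main(void)
{
	uint32_t buf[20];
	return drain(buf, 20) ? 1 : 0;
}
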
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index 847c1f813261..62ca0e8e1fbc 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -578,6 +578,10 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
+ 	skb->pkt_type = PACKET_BROADCAST;
+ 	skb->ip_summed = CHECKSUM_UNNECESSARY;
+ 
++	skb_reset_mac_header(skb);
++	skb_reset_network_header(skb);
++	skb_reset_transport_header(skb);
++
+ 	can_skb_reserve(skb);
+ 	can_skb_prv(skb)->ifindex = dev->ifindex;
+ 
+@@ -602,6 +606,10 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
+ 	skb->pkt_type = PACKET_BROADCAST;
+ 	skb->ip_summed = CHECKSUM_UNNECESSARY;
+ 
++	skb_reset_mac_header(skb);
++	skb_reset_network_header(skb);
++	skb_reset_transport_header(skb);
++
+ 	can_skb_reserve(skb);
+ 	can_skb_prv(skb)->ifindex = dev->ifindex;
+ 
+diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
+index 7af379ca861b..913a2bc8598d 100644
+--- a/drivers/net/can/usb/kvaser_usb.c
++++ b/drivers/net/can/usb/kvaser_usb.c
+@@ -12,6 +12,7 @@
+  * Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be>
+  */
+ 
++#include <linux/kernel.h>
+ #include <linux/completion.h>
+ #include <linux/module.h>
+ #include <linux/netdevice.h>
+@@ -403,8 +404,15 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
+ 		while (pos <= actual_len - MSG_HEADER_LEN) {
+ 			tmp = buf + pos;
+ 
+-			if (!tmp->len)
+-				break;
++			/* Handle messages crossing the USB endpoint max packet
++			 * size boundary. Check kvaser_usb_read_bulk_callback()
++			 * for further details.
++			 */
++			if (tmp->len == 0) {
++				pos = round_up(pos,
++					       dev->bulk_in->wMaxPacketSize);
++				continue;
++			}
+ 
+ 			if (pos + tmp->len > actual_len) {
+ 				dev_err(dev->udev->dev.parent,
+@@ -980,8 +988,19 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
+ 	while (pos <= urb->actual_length - MSG_HEADER_LEN) {
+ 		msg = urb->transfer_buffer + pos;
+ 
+-		if (!msg->len)
+-			break;
++		/* The Kvaser firmware can only read and write messages that
++		 * do not cross the USB's endpoint wMaxPacketSize boundary.
++		 * If a follow-up command crosses such boundary, firmware puts
++		 * a placeholder zero-length command in its place then aligns
++		 * the real command to the next max packet size.
++		 *
++		 * Handle such cases or we're going to miss a significant
++		 * number of events in case of a heavy rx load on the bus.
++		 */
++		if (msg->len == 0) {
++			pos = round_up(pos, dev->bulk_in->wMaxPacketSize);
++			continue;
++		}
+ 
+ 		if (pos + msg->len > urb->actual_length) {
+ 			dev_err(dev->udev->dev.parent, "Format error\n");
+@@ -989,7 +1008,6 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
+ 		}
+ 
+ 		kvaser_usb_handle_message(dev, msg);
+-
+ 		pos += msg->len;
+ 	}
+ 
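
The two kvaser_usb hunks above share one parsing idea: a zero length byte is
a padding placeholder, and the read cursor must be realigned to the next
wMaxPacketSize boundary instead of aborting the loop. A minimal sketch,
assuming a hypothetical message layout (one length byte covering the whole
message). The driver uses round_up(pos, wMaxPacketSize) and relies on a
placeholder never starting exactly on a boundary; the sketch rounds up from
pos + 1 so it always makes progress.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define ROUND_UP(x, y) ((((x) + (y) - 1) / (y)) * (y))

struct msg { uint8_t len; };	/* total length, header included */

static void parse(const uint8_t *buf, size_t actual_len, size_t max_packet)
{
	size_t pos = 0;

	while (pos + sizeof(struct msg) <= actual_len) {
		const struct msg *m = (const void *)(buf + pos);

		if (m->len == 0) {
			/* placeholder: skip to the next packet boundary */
			pos = ROUND_UP(pos + 1, max_packet);
			continue;
		}
		if (pos + m->len > actual_len) {
			fprintf(stderr, "format error\n");
			break;
		}
		printf("message of %u bytes at offset %zu\n",
		       (unsigned)m->len, pos);
		pos += m->len;
	}
}

int main(void)
{
	/* a 3-byte message, a zero placeholder, then a message that the
	 * firmware aligned to the next 8-byte packet boundary */
	uint8_t buf[16] = { 3, 0x11, 0x22, 0, 0, 0, 0, 0, 2, 0x33 };

	parse(buf, sizeof(buf), 8);
	return 0;
}
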
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+index 72eef9fc883e..ac6a0ef44e13 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+@@ -12722,6 +12722,9 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
+ 	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
+ 			       PCICFG_VENDOR_ID_OFFSET);
+ 
++	/* Set PCIe reset type to fundamental for EEH recovery */
++	pdev->needs_freset = 1;
++
+ 	/* AER (Advanced Error reporting) configuration */
+ 	rc = pci_enable_pcie_error_reporting(pdev);
+ 	if (!rc)
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index bba87775419d..fba3c98acd71 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -1448,8 +1448,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
+ 
+ 			vlan_packet_rcvd = true;
+ 
+-			skb_copy_to_linear_data_offset(skb, VLAN_HLEN,
+-						       data, (2 * ETH_ALEN));
++			memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
+ 			skb_pull(skb, VLAN_HLEN);
+ 		}
+ 
+@@ -1566,7 +1565,7 @@ fec_enet_interrupt(int irq, void *dev_id)
+ 	writel(int_events, fep->hwp + FEC_IEVENT);
+ 	fec_enet_collect_events(fep, int_events);
+ 
+-	if (fep->work_tx || fep->work_rx) {
++	if ((fep->work_tx || fep->work_rx) && fep->link) {
+ 		ret = IRQ_HANDLED;
+ 
+ 		if (napi_schedule_prep(&fep->napi)) {
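
The first fec_main.c hunk swaps skb_copy_to_linear_data_offset() (a
memcpy-style copy) for memmove() because, when the VLAN tag is stripped,
the 12 bytes of MAC addresses are shifted 4 bytes within the same buffer:
source and destination overlap, which memcpy() does not allow. A
self-contained illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char frame[16];

	memcpy(frame, "AABBCCDDEEFF", 12);		/* dst MAC + src MAC */
	memcpy(frame + 12, "\x81\x00\x00\x2a", 4);	/* 802.1Q tag        */

	/* untagging: shift the addresses over the tag. Regions
	 * [0..11] and [4..15] overlap, so memmove() is required;
	 * memcpy() here would be undefined behaviour. */
	memmove(frame + 4, frame, 12);

	fwrite(frame + 4, 1, 12, stdout);
	putchar('\n');
	return 0;
}
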
+diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+index 944a112dff37..8805ef1a4c75 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+@@ -451,7 +451,7 @@ struct mlx4_en_port_stats {
+ 	unsigned long rx_chksum_none;
+ 	unsigned long rx_chksum_complete;
+ 	unsigned long tx_chksum_offload;
+-#define NUM_PORT_STATS		9
++#define NUM_PORT_STATS		10
+ };
+ 
+ struct mlx4_en_perf_stats {
+diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
+index 3eed708a6182..fe48f4c51373 100644
+--- a/drivers/net/usb/cx82310_eth.c
++++ b/drivers/net/usb/cx82310_eth.c
+@@ -300,9 +300,18 @@ static const struct driver_info	cx82310_info = {
+ 	.tx_fixup	= cx82310_tx_fixup,
+ };
+ 
++#define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \
++	.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
++		       USB_DEVICE_ID_MATCH_DEV_INFO, \
++	.idVendor = (vend), \
++	.idProduct = (prod), \
++	.bDeviceClass = (cl), \
++	.bDeviceSubClass = (sc), \
++	.bDeviceProtocol = (pr)
++
+ static const struct usb_device_id products[] = {
+ 	{
+-		USB_DEVICE_AND_INTERFACE_INFO(0x0572, 0xcb01, 0xff, 0, 0),
++		USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0),
+ 		.driver_info = (unsigned long) &cx82310_info
+ 	},
+ 	{ },
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 059fdf1bf5ee..0ad6c0c1b00d 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -1444,8 +1444,10 @@ static void virtnet_free_queues(struct virtnet_info *vi)
+ {
+ 	int i;
+ 
+-	for (i = 0; i < vi->max_queue_pairs; i++)
++	for (i = 0; i < vi->max_queue_pairs; i++) {
++		napi_hash_del(&vi->rq[i].napi);
+ 		netif_napi_del(&vi->rq[i].napi);
++	}
+ 
+ 	kfree(vi->rq);
+ 	kfree(vi->sq);
+@@ -1936,11 +1938,8 @@ static int virtnet_freeze(struct virtio_device *vdev)
+ 	cancel_delayed_work_sync(&vi->refill);
+ 
+ 	if (netif_running(vi->dev)) {
+-		for (i = 0; i < vi->max_queue_pairs; i++) {
++		for (i = 0; i < vi->max_queue_pairs; i++)
+ 			napi_disable(&vi->rq[i].napi);
+-			napi_hash_del(&vi->rq[i].napi);
+-			netif_napi_del(&vi->rq[i].napi);
+-		}
+ 	}
+ 
+ 	remove_vq_common(vi);
+diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
+index 47731cb0d815..2fa0dbb41dde 100644
+--- a/drivers/net/wireless/b43/main.c
++++ b/drivers/net/wireless/b43/main.c
+@@ -5322,6 +5322,7 @@ static void b43_supported_bands(struct b43_wldev *dev, bool *have_2ghz_phy,
+ 	case 0x432a: /* BCM4321 */
+ 	case 0x432d: /* BCM4322 */
+ 	case 0x4352: /* BCM43222 */
++	case 0x435a: /* BCM43228 */
+ 	case 0x4333: /* BCM4331 */
+ 	case 0x43a2: /* BCM4360 */
+ 	case 0x43b3: /* BCM4352 */
+diff --git a/drivers/of/base.c b/drivers/of/base.c
+index 36536b6a8834..65a47f4dcac6 100644
+--- a/drivers/of/base.c
++++ b/drivers/of/base.c
+@@ -714,16 +714,12 @@ static struct device_node *__of_find_node_by_path(struct device_node *parent,
+ 						const char *path)
+ {
+ 	struct device_node *child;
+-	int len = strchrnul(path, '/') - path;
+-	int term;
++	int len;
+ 
++	len = strcspn(path, "/:");
+ 	if (!len)
+ 		return NULL;
+ 
+-	term = strchrnul(path, ':') - path;
+-	if (term < len)
+-		len = term;
+-
+ 	__for_each_child_of_node(parent, child) {
+ 		const char *name = strrchr(child->full_name, '/');
+ 		if (WARN(!name, "malformed device_node %s\n", child->full_name))
+@@ -768,8 +764,12 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt
+ 
+ 	/* The path could begin with an alias */
+ 	if (*path != '/') {
+-		char *p = strchrnul(path, '/');
+-		int len = separator ? separator - path : p - path;
++		int len;
++		const char *p = separator;
++
++		if (!p)
++			p = strchrnul(path, '/');
++		len = p - path;
+ 
+ 		/* of_aliases must not be NULL */
+ 		if (!of_aliases)
+@@ -794,6 +794,8 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt
+ 		path++; /* Increment past '/' delimiter */
+ 		np = __of_find_node_by_path(np, path);
+ 		path = strchrnul(path, '/');
++		if (separator && separator < path)
++			break;
+ 	}
+ 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ 	return np;
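
The first of/base.c hunk collapses two strchrnul() computations into
strcspn(path, "/:"), which directly returns the length of the leading span
containing neither a path separator nor an options separator. A quick
demonstration with made-up device-tree path strings:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *paths[] = {
		"serial0/chosen:opt",	/* stops at '/' */
		"serial0:115200n8",	/* stops at ':' */
		"serial0",		/* whole string */
	};

	for (int i = 0; i < 3; i++) {
		size_t len = strcspn(paths[i], "/:");
		printf("%-20s -> component '%.*s' (%zu bytes)\n",
		       paths[i], (int)len, paths[i], len);
	}
	return 0;
}
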
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index aa012fb3834b..312f23a8429c 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -521,7 +521,8 @@ static ssize_t driver_override_store(struct device *dev,
+ 	struct pci_dev *pdev = to_pci_dev(dev);
+ 	char *driver_override, *old = pdev->driver_override, *cp;
+ 
+-	if (count > PATH_MAX)
++	/* We need to keep extra room for a newline */
++	if (count >= (PAGE_SIZE - 1))
+ 		return -EINVAL;
+ 
+ 	driver_override = kstrndup(buf, count, GFP_KERNEL);
+@@ -549,7 +550,7 @@ static ssize_t driver_override_show(struct device *dev,
+ {
+ 	struct pci_dev *pdev = to_pci_dev(dev);
+ 
+-	return sprintf(buf, "%s\n", pdev->driver_override);
++	return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
+ }
+ static DEVICE_ATTR_RW(driver_override);
+ 
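
Both pci-sysfs.c hunks enforce the same invariant: a sysfs attribute works
in one page, so the store side must reject input that could not be echoed
back with its trailing newline, and the show side must bound its output. A
sketch of the two checks, with PAGE_SIZE fixed at 4096 for illustration:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

/* show side: snprintf() never writes more than the size it is given,
 * so an override string close to PAGE_SIZE long is truncated instead
 * of overrunning the sysfs buffer. */
static int show(char *buf, const char *driver_override)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", driver_override);
}

/* store side: mirror the count >= PAGE_SIZE - 1 rejection, keeping
 * room for the newline the show side appends. */
static int store_ok(size_t count)
{
	return count < PAGE_SIZE - 1;
}

int main(void)
{
	char page[PAGE_SIZE];

	printf("store_ok(4094) = %d\n", store_ok(4094));	/* 1 */
	printf("store_ok(4095) = %d\n", store_ok(4095));	/* 0 */
	printf("wrote %d bytes\n", show(page, "vfio-pci"));
	return 0;
}
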
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 9c48fb32f660..a5761d0953d8 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -1843,10 +1843,12 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
+ 	}
+ 
+ 	if (rdev->ena_pin) {
+-		ret = regulator_ena_gpio_ctrl(rdev, true);
+-		if (ret < 0)
+-			return ret;
+-		rdev->ena_gpio_state = 1;
++		if (!rdev->ena_gpio_state) {
++			ret = regulator_ena_gpio_ctrl(rdev, true);
++			if (ret < 0)
++				return ret;
++			rdev->ena_gpio_state = 1;
++		}
+ 	} else if (rdev->desc->ops->enable) {
+ 		ret = rdev->desc->ops->enable(rdev);
+ 		if (ret < 0)
+@@ -1943,10 +1945,12 @@ static int _regulator_do_disable(struct regulator_dev *rdev)
+ 	trace_regulator_disable(rdev_get_name(rdev));
+ 
+ 	if (rdev->ena_pin) {
+-		ret = regulator_ena_gpio_ctrl(rdev, false);
+-		if (ret < 0)
+-			return ret;
+-		rdev->ena_gpio_state = 0;
++		if (rdev->ena_gpio_state) {
++			ret = regulator_ena_gpio_ctrl(rdev, false);
++			if (ret < 0)
++				return ret;
++			rdev->ena_gpio_state = 0;
++		}
+ 
+ 	} else if (rdev->desc->ops->disable) {
+ 		ret = rdev->desc->ops->disable(rdev);
+@@ -3678,12 +3682,6 @@ regulator_register(const struct regulator_desc *regulator_desc,
+ 				 config->ena_gpio, ret);
+ 			goto wash;
+ 		}
+-
+-		if (config->ena_gpio_flags & GPIOF_OUT_INIT_HIGH)
+-			rdev->ena_gpio_state = 1;
+-
+-		if (config->ena_gpio_invert)
+-			rdev->ena_gpio_state = !rdev->ena_gpio_state;
+ 	}
+ 
+ 	/* set regulator constraints */
+@@ -3856,9 +3854,11 @@ int regulator_suspend_finish(void)
+ 	list_for_each_entry(rdev, &regulator_list, list) {
+ 		mutex_lock(&rdev->mutex);
+ 		if (rdev->use_count > 0  || rdev->constraints->always_on) {
+-			error = _regulator_do_enable(rdev);
+-			if (error)
+-				ret = error;
++			if (!_regulator_is_enabled(rdev)) {
++				error = _regulator_do_enable(rdev);
++				if (error)
++					ret = error;
++			}
+ 		} else {
+ 			if (!have_full_constraints())
+ 				goto unlock;
+diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
+index c94a3e0f3b91..3f6722863bd2 100644
+--- a/drivers/regulator/rk808-regulator.c
++++ b/drivers/regulator/rk808-regulator.c
+@@ -235,6 +235,7 @@ static const struct regulator_desc rk808_reg[] = {
+ 		.vsel_mask = RK808_LDO_VSEL_MASK,
+ 		.enable_reg = RK808_LDO_EN_REG,
+ 		.enable_mask = BIT(0),
++		.enable_time = 400,
+ 		.owner = THIS_MODULE,
+ 	}, {
+ 		.name = "LDO_REG2",
+@@ -249,6 +250,7 @@ static const struct regulator_desc rk808_reg[] = {
+ 		.vsel_mask = RK808_LDO_VSEL_MASK,
+ 		.enable_reg = RK808_LDO_EN_REG,
+ 		.enable_mask = BIT(1),
++		.enable_time = 400,
+ 		.owner = THIS_MODULE,
+ 	}, {
+ 		.name = "LDO_REG3",
+@@ -263,6 +265,7 @@ static const struct regulator_desc rk808_reg[] = {
+ 		.vsel_mask = RK808_BUCK4_VSEL_MASK,
+ 		.enable_reg = RK808_LDO_EN_REG,
+ 		.enable_mask = BIT(2),
++		.enable_time = 400,
+ 		.owner = THIS_MODULE,
+ 	}, {
+ 		.name = "LDO_REG4",
+@@ -277,6 +280,7 @@ static const struct regulator_desc rk808_reg[] = {
+ 		.vsel_mask = RK808_LDO_VSEL_MASK,
+ 		.enable_reg = RK808_LDO_EN_REG,
+ 		.enable_mask = BIT(3),
++		.enable_time = 400,
+ 		.owner = THIS_MODULE,
+ 	}, {
+ 		.name = "LDO_REG5",
+@@ -291,6 +295,7 @@ static const struct regulator_desc rk808_reg[] = {
+ 		.vsel_mask = RK808_LDO_VSEL_MASK,
+ 		.enable_reg = RK808_LDO_EN_REG,
+ 		.enable_mask = BIT(4),
++		.enable_time = 400,
+ 		.owner = THIS_MODULE,
+ 	}, {
+ 		.name = "LDO_REG6",
+@@ -305,6 +310,7 @@ static const struct regulator_desc rk808_reg[] = {
+ 		.vsel_mask = RK808_LDO_VSEL_MASK,
+ 		.enable_reg = RK808_LDO_EN_REG,
+ 		.enable_mask = BIT(5),
++		.enable_time = 400,
+ 		.owner = THIS_MODULE,
+ 	}, {
+ 		.name = "LDO_REG7",
+@@ -319,6 +325,7 @@ static const struct regulator_desc rk808_reg[] = {
+ 		.vsel_mask = RK808_LDO_VSEL_MASK,
+ 		.enable_reg = RK808_LDO_EN_REG,
+ 		.enable_mask = BIT(6),
++		.enable_time = 400,
+ 		.owner = THIS_MODULE,
+ 	}, {
+ 		.name = "LDO_REG8",
+@@ -333,6 +340,7 @@ static const struct regulator_desc rk808_reg[] = {
+ 		.vsel_mask = RK808_LDO_VSEL_MASK,
+ 		.enable_reg = RK808_LDO_EN_REG,
+ 		.enable_mask = BIT(7),
++		.enable_time = 400,
+ 		.owner = THIS_MODULE,
+ 	}, {
+ 		.name = "SWITCH_REG1",
+diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
+index 4241eeab3386..f4cf6851fae9 100644
+--- a/drivers/rtc/rtc-s3c.c
++++ b/drivers/rtc/rtc-s3c.c
+@@ -849,6 +849,7 @@ static struct s3c_rtc_data const s3c2443_rtc_data = {
+ 
+ static struct s3c_rtc_data const s3c6410_rtc_data = {
+ 	.max_user_freq		= 32768,
++	.needs_src_clk		= true,
+ 	.irq_handler		= s3c6410_rtc_irq,
+ 	.set_freq		= s3c6410_rtc_setfreq,
+ 	.enable_tick		= s3c6410_rtc_enable_tick,
+diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
+index 62b58d38ce2e..60de66252fa2 100644
+--- a/drivers/scsi/libsas/sas_discover.c
++++ b/drivers/scsi/libsas/sas_discover.c
+@@ -500,6 +500,7 @@ static void sas_revalidate_domain(struct work_struct *work)
+ 	struct sas_discovery_event *ev = to_sas_discovery_event(work);
+ 	struct asd_sas_port *port = ev->port;
+ 	struct sas_ha_struct *ha = port->ha;
++	struct domain_device *ddev = port->port_dev;
+ 
+ 	/* prevent revalidation from finding sata links in recovery */
+ 	mutex_lock(&ha->disco_mutex);
+@@ -514,8 +515,9 @@ static void sas_revalidate_domain(struct work_struct *work)
+ 	SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id,
+ 		    task_pid_nr(current));
+ 
+-	if (port->port_dev)
+-		res = sas_ex_revalidate_domain(port->port_dev);
++	if (ddev && (ddev->dev_type == SAS_FANOUT_EXPANDER_DEVICE ||
++		     ddev->dev_type == SAS_EDGE_EXPANDER_DEVICE))
++		res = sas_ex_revalidate_domain(ddev);
+ 
+ 	SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n",
+ 		    port->id, task_pid_nr(current), res);
+diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
+index 23d8f5f56579..df93c9700c2e 100644
+--- a/drivers/spi/spi-atmel.c
++++ b/drivers/spi/spi-atmel.c
+@@ -764,17 +764,17 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master,
+ 			(unsigned long long)xfer->rx_dma);
+ 	}
+ 
+-	/* REVISIT: We're waiting for ENDRX before we start the next
++	/* REVISIT: We're waiting for RXBUFF before we start the next
+ 	 * transfer because we need to handle some difficult timing
+-	 * issues otherwise. If we wait for ENDTX in one transfer and
+-	 * then starts waiting for ENDRX in the next, it's difficult
+-	 * to tell the difference between the ENDRX interrupt we're
+-	 * actually waiting for and the ENDRX interrupt of the
++	 * issues otherwise. If we wait for TXBUFE in one transfer and
++	 * then start waiting for RXBUFF in the next, it's difficult
++	 * to tell the difference between the RXBUFF interrupt we're
++	 * actually waiting for and the RXBUFF interrupt of the
+ 	 * previous transfer.
+ 	 *
+ 	 * It should be doable, though. Just not now...
+ 	 */
+-	spi_writel(as, IER, SPI_BIT(ENDRX) | SPI_BIT(OVRES));
++	spi_writel(as, IER, SPI_BIT(RXBUFF) | SPI_BIT(OVRES));
+ 	spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
+ }
+ 
+diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
+index a67d37c7e3c0..22ca08a18b9e 100644
+--- a/drivers/spi/spi-dw-mid.c
++++ b/drivers/spi/spi-dw-mid.c
+@@ -139,6 +139,9 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws)
+ 				1,
+ 				DMA_MEM_TO_DEV,
+ 				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
++	if (!txdesc)
++		return NULL;
++
+ 	txdesc->callback = dw_spi_dma_tx_done;
+ 	txdesc->callback_param = dws;
+ 
+@@ -184,6 +187,9 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws)
+ 				1,
+ 				DMA_DEV_TO_MEM,
+ 				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
++	if (!rxdesc)
++		return NULL;
++
+ 	rxdesc->callback = dw_spi_dma_rx_done;
+ 	rxdesc->callback_param = dws;
+ 
+diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
+index 89ca162801da..ee513a85296b 100644
+--- a/drivers/spi/spi-pl022.c
++++ b/drivers/spi/spi-pl022.c
+@@ -534,12 +534,12 @@ static void giveback(struct pl022 *pl022)
+ 	pl022->cur_msg = NULL;
+ 	pl022->cur_transfer = NULL;
+ 	pl022->cur_chip = NULL;
+-	spi_finalize_current_message(pl022->master);
+ 
+ 	/* disable the SPI/SSP operation */
+ 	writew((readw(SSP_CR1(pl022->virtbase)) &
+ 		(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
+ 
++	spi_finalize_current_message(pl022->master);
+ }
+ 
+ /**
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index aebde3289c50..8d27db47560c 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -4221,11 +4221,17 @@ int iscsit_close_connection(
+ 	pr_debug("Closing iSCSI connection CID %hu on SID:"
+ 		" %u\n", conn->cid, sess->sid);
+ 	/*
+-	 * Always up conn_logout_comp just in case the RX Thread is sleeping
+-	 * and the logout response never got sent because the connection
+-	 * failed.
++	 * Always up conn_logout_comp for the traditional TCP case just in case
++	 * the RX Thread in iscsi_target_rx_opcode() is sleeping and the logout
++	 * response never got sent because the connection failed.
++	 *
++	 * However for iser-target, isert_wait4logout() is using conn_logout_comp
++	 * to signal logout response TX interrupt completion.  Go ahead and skip
++	 * this for iser since isert_rx_opcode() does not wait on logout failure,
++	 * and to avoid iscsi_conn pointer dereference in iser-target code.
+ 	 */
+-	complete(&conn->conn_logout_comp);
++	if (conn->conn_transport->transport_type == ISCSI_TCP)
++		complete(&conn->conn_logout_comp);
+ 
+ 	iscsi_release_thread_set(conn);
+ 
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index 58f49ff69b14..54da2a42049a 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -1534,8 +1534,6 @@ int target_configure_device(struct se_device *dev)
+ 	ret = dev->transport->configure_device(dev);
+ 	if (ret)
+ 		goto out;
+-	dev->dev_flags |= DF_CONFIGURED;
+-
+ 	/*
+ 	 * XXX: there is not much point to have two different values here..
+ 	 */
+@@ -1597,6 +1595,8 @@ int target_configure_device(struct se_device *dev)
+ 	list_add_tail(&dev->g_dev_node, &g_device_list);
+ 	mutex_unlock(&g_device_mutex);
+ 
++	dev->dev_flags |= DF_CONFIGURED;
++
+ 	return 0;
+ 
+ out_free_alua:
+diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
+index 1045dcd7bf65..f6c954c4635f 100644
+--- a/drivers/target/target_core_pscsi.c
++++ b/drivers/target/target_core_pscsi.c
+@@ -1121,7 +1121,7 @@ static u32 pscsi_get_device_type(struct se_device *dev)
+ 	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
+ 	struct scsi_device *sd = pdv->pdv_sd;
+ 
+-	return sd->type;
++	return (sd) ? sd->type : TYPE_NO_LUN;
+ }
+ 
+ static sector_t pscsi_get_blocks(struct se_device *dev)
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 0adc0f650213..ac3cbabdbdf0 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -2389,6 +2389,10 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
+ 	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
+ out:
+ 	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
++
++	if (ret && ack_kref)
++		target_put_sess_cmd(se_sess, se_cmd);
++
+ 	return ret;
+ }
+ EXPORT_SYMBOL(target_get_sess_cmd);
+diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
+index 555de07db593..7d89da349c3b 100644
+--- a/drivers/tty/serial/8250/8250_dw.c
++++ b/drivers/tty/serial/8250/8250_dw.c
+@@ -111,7 +111,10 @@ static void dw8250_serial_out(struct uart_port *p, int offset, int value)
+ 			dw8250_force_idle(p);
+ 			writeb(value, p->membase + (UART_LCR << p->regshift));
+ 		}
+-		dev_err(p->dev, "Couldn't set LCR to %d\n", value);
++		/*
++		 * FIXME: this deadlocks if port->lock is already held
++		 * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
++		 */
+ 	}
+ }
+ 
+@@ -155,7 +158,10 @@ static void dw8250_serial_outq(struct uart_port *p, int offset, int value)
+ 			__raw_writeq(value & 0xff,
+ 				     p->membase + (UART_LCR << p->regshift));
+ 		}
+-		dev_err(p->dev, "Couldn't set LCR to %d\n", value);
++		/*
++		 * FIXME: this deadlocks if port->lock is already held
++		 * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
++		 */
+ 	}
+ }
+ #endif /* CONFIG_64BIT */
+@@ -179,7 +185,10 @@ static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
+ 			dw8250_force_idle(p);
+ 			writel(value, p->membase + (UART_LCR << p->regshift));
+ 		}
+-		dev_err(p->dev, "Couldn't set LCR to %d\n", value);
++		/*
++		 * FIXME: this deadlocks if port->lock is already held
++		 * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
++		 */
+ 	}
+ }
+ 
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index d1f8dc6aabcb..1442956d5e9c 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -69,7 +69,7 @@ static void moan_device(const char *str, struct pci_dev *dev)
+ 	       "Please send the output of lspci -vv, this\n"
+ 	       "message (0x%04x,0x%04x,0x%04x,0x%04x), the\n"
+ 	       "manufacturer and name of serial board or\n"
+-	       "modem board to rmk+serial@arm.linux.org.uk.\n",
++	       "modem board to <linux-serial@vger.kernel.org>.\n",
+ 	       pci_name(dev), str, dev->vendor, dev->device,
+ 	       dev->subsystem_vendor, dev->subsystem_device);
+ }
+diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
+index db49ec4c748e..9fbbaa041a31 100644
+--- a/drivers/usb/gadget/legacy/inode.c
++++ b/drivers/usb/gadget/legacy/inode.c
+@@ -566,7 +566,6 @@ static ssize_t ep_copy_to_user(struct kiocb_priv *priv)
+ 		if (total == 0)
+ 			break;
+ 	}
+-
+ 	return len;
+ }
+ 
+@@ -585,6 +584,7 @@ static void ep_user_copy_worker(struct work_struct *work)
+ 	aio_complete(iocb, ret, ret);
+ 
+ 	kfree(priv->buf);
++	kfree(priv->iv);
+ 	kfree(priv);
+ }
+ 
+@@ -605,6 +605,7 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
+ 	 */
+ 	if (priv->iv == NULL || unlikely(req->actual == 0)) {
+ 		kfree(req->buf);
++		kfree(priv->iv);
+ 		kfree(priv);
+ 		iocb->private = NULL;
+ 		/* aio_complete() reports bytes-transferred _and_ faults */
+@@ -640,7 +641,7 @@ ep_aio_rwtail(
+ 	struct usb_request	*req;
+ 	ssize_t			value;
+ 
+-	priv = kmalloc(sizeof *priv, GFP_KERNEL);
++	priv = kzalloc(sizeof *priv, GFP_KERNEL);
+ 	if (!priv) {
+ 		value = -ENOMEM;
+ fail:
+@@ -649,7 +650,14 @@ fail:
+ 	}
+ 	iocb->private = priv;
+ 	priv->iocb = iocb;
+-	priv->iv = iv;
++	if (iv) {
++		priv->iv = kmemdup(iv, nr_segs * sizeof(struct iovec),
++				   GFP_KERNEL);
++		if (!priv->iv) {
++			kfree(priv);
++			goto fail;
++		}
++	}
+ 	priv->nr_segs = nr_segs;
+ 	INIT_WORK(&priv->work, ep_user_copy_worker);
+ 
+@@ -689,6 +697,7 @@ fail:
+ 	mutex_unlock(&epdata->lock);
+ 
+ 	if (unlikely(value)) {
++		kfree(priv->iv);
+ 		kfree(priv);
+ 		put_ep(epdata);
+ 	} else
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
+index b4bca2d4a7e5..70fba973a107 100644
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -526,20 +526,26 @@ static unsigned int __startup_pirq(unsigned int irq)
+ 	pirq_query_unmask(irq);
+ 
+ 	rc = set_evtchn_to_irq(evtchn, irq);
+-	if (rc != 0) {
+-		pr_err("irq%d: Failed to set port to irq mapping (%d)\n",
+-		       irq, rc);
+-		xen_evtchn_close(evtchn);
+-		return 0;
+-	}
++	if (rc)
++		goto err;
++
+ 	bind_evtchn_to_cpu(evtchn, 0);
+ 	info->evtchn = evtchn;
+ 
++	rc = xen_evtchn_port_setup(info);
++	if (rc)
++		goto err;
++
+ out:
+ 	unmask_evtchn(evtchn);
+ 	eoi_pirq(irq_get_irq_data(irq));
+ 
+ 	return 0;
++
++err:
++	pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc);
++	xen_evtchn_close(evtchn);
++	return 0;
+ }
+ 
+ static unsigned int startup_pirq(struct irq_data *data)
+diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
+index 46ae0f9f02ad..75fe3d466515 100644
+--- a/drivers/xen/xen-pciback/conf_space.c
++++ b/drivers/xen/xen-pciback/conf_space.c
+@@ -16,7 +16,7 @@
+ #include "conf_space.h"
+ #include "conf_space_quirks.h"
+ 
+-static bool permissive;
++bool permissive;
+ module_param(permissive, bool, 0644);
+ 
+ /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word,
+diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
+index e56c934ad137..2e1d73d1d5d0 100644
+--- a/drivers/xen/xen-pciback/conf_space.h
++++ b/drivers/xen/xen-pciback/conf_space.h
+@@ -64,6 +64,8 @@ struct config_field_entry {
+ 	void *data;
+ };
+ 
++extern bool permissive;
++
+ #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
+ 
+ /* Add fields to a device - the add_fields macro expects to get a pointer to
+diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
+index c5ee82587e8c..2d7369391472 100644
+--- a/drivers/xen/xen-pciback/conf_space_header.c
++++ b/drivers/xen/xen-pciback/conf_space_header.c
+@@ -11,6 +11,10 @@
+ #include "pciback.h"
+ #include "conf_space.h"
+ 
++struct pci_cmd_info {
++	u16 val;
++};
++
+ struct pci_bar_info {
+ 	u32 val;
+ 	u32 len_val;
+@@ -20,22 +24,36 @@ struct pci_bar_info {
+ #define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO))
+ #define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER)
+ 
+-static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data)
++/* Bits guests are allowed to control in permissive mode. */
++#define PCI_COMMAND_GUEST (PCI_COMMAND_MASTER|PCI_COMMAND_SPECIAL| \
++			   PCI_COMMAND_INVALIDATE|PCI_COMMAND_VGA_PALETTE| \
++			   PCI_COMMAND_WAIT|PCI_COMMAND_FAST_BACK)
++
++static void *command_init(struct pci_dev *dev, int offset)
+ {
+-	int i;
+-	int ret;
+-
+-	ret = xen_pcibk_read_config_word(dev, offset, value, data);
+-	if (!pci_is_enabled(dev))
+-		return ret;
+-
+-	for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+-		if (dev->resource[i].flags & IORESOURCE_IO)
+-			*value |= PCI_COMMAND_IO;
+-		if (dev->resource[i].flags & IORESOURCE_MEM)
+-			*value |= PCI_COMMAND_MEMORY;
++	struct pci_cmd_info *cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
++	int err;
++
++	if (!cmd)
++		return ERR_PTR(-ENOMEM);
++
++	err = pci_read_config_word(dev, PCI_COMMAND, &cmd->val);
++	if (err) {
++		kfree(cmd);
++		return ERR_PTR(err);
+ 	}
+ 
++	return cmd;
++}
++
++static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data)
++{
++	int ret = pci_read_config_word(dev, offset, value);
++	const struct pci_cmd_info *cmd = data;
++
++	*value &= PCI_COMMAND_GUEST;
++	*value |= cmd->val & ~PCI_COMMAND_GUEST;
++
+ 	return ret;
+ }
+ 
+@@ -43,6 +61,8 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
+ {
+ 	struct xen_pcibk_dev_data *dev_data;
+ 	int err;
++	u16 val;
++	struct pci_cmd_info *cmd = data;
+ 
+ 	dev_data = pci_get_drvdata(dev);
+ 	if (!pci_is_enabled(dev) && is_enable_cmd(value)) {
+@@ -83,6 +103,19 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
+ 		}
+ 	}
+ 
++	cmd->val = value;
++
++	if (!permissive && (!dev_data || !dev_data->permissive))
++		return 0;
++
++	/* Only allow the guest to control certain bits. */
++	err = pci_read_config_word(dev, offset, &val);
++	if (err || val == value)
++		return err;
++
++	value &= PCI_COMMAND_GUEST;
++	value |= val & ~PCI_COMMAND_GUEST;
++
+ 	return pci_write_config_word(dev, offset, value);
+ }
+ 
+@@ -282,6 +315,8 @@ static const struct config_field header_common[] = {
+ 	{
+ 	 .offset    = PCI_COMMAND,
+ 	 .size      = 2,
++	 .init      = command_init,
++	 .release   = bar_release,
+ 	 .u.w.read  = command_read,
+ 	 .u.w.write = command_write,
+ 	},
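
The conf_space_header.c rework boils down to one masking identity used in
both command_read() and command_write(): keep the guest-controllable bits
from one side and everything else from the other. A minimal sketch;
GUEST_BITS is an arbitrary stand-in mask, not the real PCI_COMMAND_GUEST
value:

#include <stdio.h>
#include <stdint.h>

#define GUEST_BITS 0x03f0u	/* hypothetical stand-in for PCI_COMMAND_GUEST */

/* merge: take only GUEST_BITS from 'guest', everything else from
 * 'host' - the same masking done on the read and write paths above */
static uint16_t merge(uint16_t guest, uint16_t host)
{
	return (guest & GUEST_BITS) | (host & ~GUEST_BITS);
}

int main(void)
{
	uint16_t hw = 0x0147;		/* what the device really has   */
	uint16_t guest_write = 0xffff;	/* guest tries to set every bit */

	printf("written to hw: 0x%04x\n", (unsigned)merge(guest_write, hw));
	return 0;
}
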
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index ed19a7d622fa..71c4619af333 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -890,8 +890,8 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
+ 
+ 	newpage = buf->page;
+ 
+-	if (WARN_ON(!PageUptodate(newpage)))
+-		return -EIO;
++	if (!PageUptodate(newpage))
++		SetPageUptodate(newpage);
+ 
+ 	ClearPageMappedToDisk(newpage);
+ 
+@@ -1797,6 +1797,9 @@ copy_finish:
+ static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
+ 		       unsigned int size, struct fuse_copy_state *cs)
+ {
++	/* Don't try to move pages (yet) */
++	cs->move_pages = 0;
++
+ 	switch (code) {
+ 	case FUSE_NOTIFY_POLL:
+ 		return fuse_notify_poll(fc, size, cs);
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index 469086b9f99b..0c3f303baf32 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -1907,6 +1907,7 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
+ 					     struct the_nilfs *nilfs)
+ {
+ 	struct nilfs_inode_info *ii, *n;
++	int during_mount = !(sci->sc_super->s_flags & MS_ACTIVE);
+ 	int defer_iput = false;
+ 
+ 	spin_lock(&nilfs->ns_inode_lock);
+@@ -1919,10 +1920,10 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
+ 		brelse(ii->i_bh);
+ 		ii->i_bh = NULL;
+ 		list_del_init(&ii->i_dirty);
+-		if (!ii->vfs_inode.i_nlink) {
++		if (!ii->vfs_inode.i_nlink || during_mount) {
+ 			/*
+-			 * Defer calling iput() to avoid a deadlock
+-			 * over I_SYNC flag for inodes with i_nlink == 0
++			 * Defer calling iput() to avoid deadlocks if
++			 * i_nlink == 0 or mount is not yet finished.
+ 			 */
+ 			list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
+ 			defer_iput = true;
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 88f9b8352742..f86e5499caec 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -1326,6 +1326,9 @@ out:
+ 
+ static int pagemap_open(struct inode *inode, struct file *file)
+ {
++	/* do not disclose physical addresses: attack vector */
++	if (!capable(CAP_SYS_ADMIN))
++		return -EPERM;
+ 	pr_warn_once("Bits 55-60 of /proc/PID/pagemap entries are about "
+ 			"to stop being page-shift some time soon. See the "
+ 			"linux/Documentation/vm/pagemap.txt for details.\n");
+diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
+index 057038cf2788..b405a62d1d4b 100644
+--- a/include/linux/serial_core.h
++++ b/include/linux/serial_core.h
+@@ -146,9 +146,9 @@ struct uart_port {
+ #define UPIO_HUB6		(1)			/* Hub6 ISA card */
+ #define UPIO_MEM		(2)			/* 8b MMIO access */
+ #define UPIO_MEM32		(3)			/* 32b little endian */
+-#define UPIO_MEM32BE		(4)			/* 32b big endian */
+-#define UPIO_AU			(5)			/* Au1x00 and RT288x type IO */
+-#define UPIO_TSI		(6)			/* Tsi108/109 type IO */
++#define UPIO_AU			(4)			/* Au1x00 and RT288x type IO */
++#define UPIO_TSI		(5)			/* Tsi108/109 type IO */
++#define UPIO_MEM32BE		(6)			/* 32b big endian */
+ 
+ 	unsigned int		read_status_mask;	/* driver specific */
+ 	unsigned int		ignore_status_mask;	/* driver specific */
+diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
+index b996e6cde6bb..9eb54f41623e 100644
+--- a/include/linux/workqueue.h
++++ b/include/linux/workqueue.h
+@@ -70,7 +70,8 @@ enum {
+ 	/* data contains off-queue information when !WORK_STRUCT_PWQ */
+ 	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,
+ 
+-	WORK_OFFQ_CANCELING	= (1 << WORK_OFFQ_FLAG_BASE),
++	__WORK_OFFQ_CANCELING	= WORK_OFFQ_FLAG_BASE,
++	WORK_OFFQ_CANCELING	= (1 << __WORK_OFFQ_CANCELING),
+ 
+ 	/*
+ 	 * When a work item is off queue, its high bits point to the last
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index 64b257f6bca2..9e255991410c 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -548,9 +548,6 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr,
+ 
+ 	rcu_read_lock();
+ 	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
+-		if (cp == root_cs)
+-			continue;
+-
+ 		/* skip the whole subtree if @cp doesn't have any CPU */
+ 		if (cpumask_empty(cp->cpus_allowed)) {
+ 			pos_css = css_rightmost_descendant(pos_css);
+@@ -873,7 +870,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
+ 		 * If it becomes empty, inherit the effective mask of the
+ 		 * parent, which is guaranteed to have some CPUs.
+ 		 */
+-		if (cpumask_empty(new_cpus))
++		if (cgroup_on_dfl(cp->css.cgroup) && cpumask_empty(new_cpus))
+ 			cpumask_copy(new_cpus, parent->effective_cpus);
+ 
+ 		/* Skip the whole subtree if the cpumask remains the same. */
+@@ -1129,7 +1126,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
+ 		 * If it becomes empty, inherit the effective mask of the
+ 		 * parent, which is guaranteed to have some MEMs.
+ 		 */
+-		if (nodes_empty(*new_mems))
++		if (cgroup_on_dfl(cp->css.cgroup) && nodes_empty(*new_mems))
+ 			*new_mems = parent->effective_mems;
+ 
+ 		/* Skip the whole subtree if the nodemask remains the same. */
+@@ -1992,7 +1989,9 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
+ 
+ 	spin_lock_irq(&callback_lock);
+ 	cs->mems_allowed = parent->mems_allowed;
++	cs->effective_mems = parent->mems_allowed;
+ 	cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
++	cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
+ 	spin_unlock_irq(&callback_lock);
+ out_unlock:
+ 	mutex_unlock(&cpuset_mutex);
+diff --git a/kernel/printk/console_cmdline.h b/kernel/printk/console_cmdline.h
+index cbd69d842341..2ca4a8b5fe57 100644
+--- a/kernel/printk/console_cmdline.h
++++ b/kernel/printk/console_cmdline.h
+@@ -3,7 +3,7 @@
+ 
+ struct console_cmdline
+ {
+-	char	name[8];			/* Name of the driver	    */
++	char	name[16];			/* Name of the driver	    */
+ 	int	index;				/* Minor dev. to use	    */
+ 	char	*options;			/* Options for the driver   */
+ #ifdef CONFIG_A11Y_BRAILLE_CONSOLE
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index fae29e3ffbf0..2cdd35302af8 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -2464,6 +2464,7 @@ void register_console(struct console *newcon)
+ 	for (i = 0, c = console_cmdline;
+ 	     i < MAX_CMDLINECONSOLES && c->name[0];
+ 	     i++, c++) {
++		BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
+ 		if (strcmp(c->name, newcon->name) != 0)
+ 			continue;
+ 		if (newcon->index >= 0 &&
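
The printk change depends on the BUILD_BUG_ON() added above to keep
console_cmdline's name buffer in lockstep with struct console's 16-byte
name. The same compile-time guard can be written in standalone C11 with
_Static_assert; the two structs here are reduced stand-ins:

#include <stdio.h>

struct console_cmdline { char name[16]; };
struct console         { char name[16]; };

/* if the two name buffers ever drift apart in size again, the build
 * fails instead of strcmp() silently comparing a truncated copy */
_Static_assert(sizeof(((struct console_cmdline *)0)->name) ==
	       sizeof(((struct console *)0)->name),
	       "console name buffers must match");

int main(void)
{
	printf("sizes match: %zu bytes\n",
	       sizeof(((struct console *)0)->name));
	return 0;
}
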
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 224e768bdc73..af5bffd1053c 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -1059,6 +1059,12 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
+ 
+ static struct pid * const ftrace_swapper_pid = &init_struct_pid;
+ 
++#ifdef CONFIG_FUNCTION_GRAPH_TRACER
++static int ftrace_graph_active;
++#else
++# define ftrace_graph_active 0
++#endif
++
+ #ifdef CONFIG_DYNAMIC_FTRACE
+ 
+ static struct ftrace_ops *removed_ops;
+@@ -2041,8 +2047,12 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
+ 		if (!ftrace_rec_count(rec))
+ 			rec->flags = 0;
+ 		else
+-			/* Just disable the record (keep REGS state) */
+-			rec->flags &= ~FTRACE_FL_ENABLED;
++			/*
++			 * Just disable the record, but keep the ops TRAMP
++			 * and REGS states. The _EN flags must be disabled though.
++			 */
++			rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
++					FTRACE_FL_REGS_EN);
+ 	}
+ 
+ 	return FTRACE_UPDATE_MAKE_NOP;
+@@ -2688,24 +2698,36 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
+ 
+ static void ftrace_startup_sysctl(void)
+ {
++	int command;
++
+ 	if (unlikely(ftrace_disabled))
+ 		return;
+ 
+ 	/* Force update next time */
+ 	saved_ftrace_func = NULL;
+ 	/* ftrace_start_up is true if we want ftrace running */
+-	if (ftrace_start_up)
+-		ftrace_run_update_code(FTRACE_UPDATE_CALLS);
++	if (ftrace_start_up) {
++		command = FTRACE_UPDATE_CALLS;
++		if (ftrace_graph_active)
++			command |= FTRACE_START_FUNC_RET;
++		ftrace_startup_enable(command);
++	}
+ }
+ 
+ static void ftrace_shutdown_sysctl(void)
+ {
++	int command;
++
+ 	if (unlikely(ftrace_disabled))
+ 		return;
+ 
+ 	/* ftrace_start_up is true if ftrace is running */
+-	if (ftrace_start_up)
+-		ftrace_run_update_code(FTRACE_DISABLE_CALLS);
++	if (ftrace_start_up) {
++		command = FTRACE_DISABLE_CALLS;
++		if (ftrace_graph_active)
++			command |= FTRACE_STOP_FUNC_RET;
++		ftrace_run_update_code(command);
++	}
+ }
+ 
+ static cycle_t		ftrace_update_time;
+@@ -5558,12 +5580,12 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
+ 
+ 	if (ftrace_enabled) {
+ 
+-		ftrace_startup_sysctl();
+-
+ 		/* we are starting ftrace again */
+ 		if (ftrace_ops_list != &ftrace_list_end)
+ 			update_ftrace_function();
+ 
++		ftrace_startup_sysctl();
++
+ 	} else {
+ 		/* stopping ftrace calls (just send to ftrace_stub) */
+ 		ftrace_trace_function = ftrace_stub;
+@@ -5590,8 +5612,6 @@ static struct ftrace_ops graph_ops = {
+ 	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
+ };
+ 
+-static int ftrace_graph_active;
+-
+ int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
+ {
+ 	return 0;
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index beeeac9e0e3e..82d0c8d4fe48 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -2728,19 +2728,57 @@ bool flush_work(struct work_struct *work)
+ }
+ EXPORT_SYMBOL_GPL(flush_work);
+ 
++struct cwt_wait {
++	wait_queue_t		wait;
++	struct work_struct	*work;
++};
++
++static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key)
++{
++	struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
++
++	if (cwait->work != key)
++		return 0;
++	return autoremove_wake_function(wait, mode, sync, key);
++}
++
+ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
+ {
++	static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
+ 	unsigned long flags;
+ 	int ret;
+ 
+ 	do {
+ 		ret = try_to_grab_pending(work, is_dwork, &flags);
+ 		/*
+-		 * If someone else is canceling, wait for the same event it
+-		 * would be waiting for before retrying.
++		 * If someone else is already canceling, wait for it to
++		 * finish.  flush_work() doesn't work for PREEMPT_NONE
++		 * because we may get scheduled between @work's completion
++		 * and the other canceling task resuming and clearing
++		 * CANCELING - flush_work() will return false immediately
++		 * as @work is no longer busy, try_to_grab_pending() will
++		 * return -ENOENT as @work is still being canceled and the
++		 * other canceling task won't be able to clear CANCELING as
++		 * we're hogging the CPU.
++		 *
++		 * Let's wait for completion using a waitqueue.  As this
++		 * may lead to the thundering herd problem, use a custom
++		 * wake function which matches @work along with exclusive
++		 * wait and wakeup.
+ 		 */
+-		if (unlikely(ret == -ENOENT))
+-			flush_work(work);
++		if (unlikely(ret == -ENOENT)) {
++			struct cwt_wait cwait;
++
++			init_wait(&cwait.wait);
++			cwait.wait.func = cwt_wakefn;
++			cwait.work = work;
++
++			prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
++						  TASK_UNINTERRUPTIBLE);
++			if (work_is_canceling(work))
++				schedule();
++			finish_wait(&cancel_waitq, &cwait.wait);
++		}
+ 	} while (unlikely(ret < 0));
+ 
+ 	/* tell other tasks trying to grab @work to back off */
+@@ -2749,6 +2787,16 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
+ 
+ 	flush_work(work);
+ 	clear_work_data(work);
++
++	/*
++	 * Paired with prepare_to_wait() above so that either
++	 * waitqueue_active() is visible here or !work_is_canceling() is
++	 * visible there.
++	 */
++	smp_mb();
++	if (waitqueue_active(&cancel_waitq))
++		__wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
++
+ 	return ret;
+ }
+ 
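
A loose userspace analogue of the cancel path above, assuming pthreads:
instead of busy-retrying while another task owns the cancel, late arrivals
sleep and are woken when the flag clears. The kernel version is more
refined - cwt_wakefn() filters wakeups by work item and the wait is
exclusive, which a plain flag-and-broadcast cannot express - so this only
sketches the wait-instead-of-spin idea.

/* build with: cc -pthread demo.c */
#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static bool canceling;

static void cancel_work(int id)
{
	pthread_mutex_lock(&lock);
	while (canceling)		/* someone else is canceling */
		pthread_cond_wait(&done, &lock);
	canceling = true;
	pthread_mutex_unlock(&lock);

	printf("task %d cancels the work\n", id);

	pthread_mutex_lock(&lock);
	canceling = false;
	pthread_cond_broadcast(&done);	/* wake the waiters */
	pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
	cancel_work((int)(long)arg);
	return NULL;
}

int main(void)
{
	pthread_t t[3];

	for (long i = 0; i < 3; i++)
		pthread_create(&t[i], NULL, worker, (void *)i);
	for (int i = 0; i < 3; i++)
		pthread_join(t[i], NULL);
	return 0;
}
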
+diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
+index 7a85967060a5..f0f5c5c3de12 100644
+--- a/lib/lz4/lz4_decompress.c
++++ b/lib/lz4/lz4_decompress.c
+@@ -139,6 +139,9 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
+ 			/* Error: request to write beyond destination buffer */
+ 			if (cpy > oend)
+ 				goto _output_error;
++			if ((ref + COPYLENGTH) > oend ||
++					(op + COPYLENGTH) > oend)
++				goto _output_error;
+ 			LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
+ 			while (op < cpy)
+ 				*op++ = *ref++;
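
The lz4_decompress.c hunk adds a headroom test before the wild copy:
LZ4_SECURECOPY() advances in COPYLENGTH-byte strides and may overrun, so
both the match source and the output cursor must sit at least COPYLENGTH
bytes before the end of the destination. The check in isolation:

#include <stdio.h>

#define COPYLENGTH 8

/* both cursors need COPYLENGTH bytes of headroom before oend,
 * the negation of the (ref|op) + COPYLENGTH > oend error test above */
static int copy_is_safe(const char *ref, const char *op, const char *oend)
{
	return ref + COPYLENGTH <= oend && op + COPYLENGTH <= oend;
}

int main(void)
{
	char out[64];
	const char *oend = out + sizeof(out);

	printf("%d\n", copy_is_safe(out, out + 10, oend));	/* 1 */
	printf("%d\n", copy_is_safe(out, out + 60, oend));	/* 0 */
	return 0;
}
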
+diff --git a/lib/seq_buf.c b/lib/seq_buf.c
+index 4eedfedb9e31..f25c33b279f2 100644
+--- a/lib/seq_buf.c
++++ b/lib/seq_buf.c
+@@ -61,7 +61,7 @@ int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args)
+ 
+ 	if (s->len < s->size) {
+ 		len = vsnprintf(s->buffer + s->len, s->size - s->len, fmt, args);
+-		if (seq_buf_can_fit(s, len)) {
++		if (s->len + len < s->size) {
+ 			s->len += len;
+ 			return 0;
+ 		}
+@@ -154,7 +154,7 @@ int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary)
+ 
+ 	if (s->len < s->size) {
+ 		ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
+-		if (seq_buf_can_fit(s, ret)) {
++		if (s->len + ret < s->size) {
+ 			s->len += ret;
+ 			return 0;
+ 		}
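
Both seq_buf.c hunks open-code the same fit test: vsnprintf() and
bstr_printf() return the length the full string would need, so that return
value has to be checked against the remaining room before the write is
committed. A standalone sketch of the pattern, with a simplified seq_buf:

#include <stdarg.h>
#include <stdio.h>

struct seq_buf { char *buffer; size_t size, len; };

/* vsnprintf() reports how many bytes the full string would take, so
 * re-check it against the remaining room - the test the patch open-codes */
static int seq_buf_printf(struct seq_buf *s, const char *fmt, ...)
{
	va_list args;
	int len;

	if (s->len >= s->size)
		return -1;

	va_start(args, fmt);
	len = vsnprintf(s->buffer + s->len, s->size - s->len, fmt, args);
	va_end(args);

	if (len >= 0 && s->len + (size_t)len < s->size) {
		s->len += (size_t)len;
		return 0;
	}
	s->len = s->size;	/* mark the buffer as overflowed */
	return -1;
}

int main(void)
{
	char backing[16];
	struct seq_buf s = { backing, sizeof(backing), 0 };

	printf("%d\n", seq_buf_printf(&s, "%s", "hello"));		/*  0 */
	printf("%d\n", seq_buf_printf(&s, "%s", "a long tail"));	/* -1 */
	return 0;
}
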
+diff --git a/mm/cma.c b/mm/cma.c
+index a85ae28709a3..f1bbcb6b7368 100644
+--- a/mm/cma.c
++++ b/mm/cma.c
+@@ -64,15 +64,17 @@ static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
+ 	return (1UL << (align_order - cma->order_per_bit)) - 1;
+ }
+ 
++/*
++ * Find a PFN aligned to the specified order and return an offset represented in
++ * order_per_bits.
++ */
+ static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order)
+ {
+-	unsigned int alignment;
+-
+ 	if (align_order <= cma->order_per_bit)
+ 		return 0;
+-	alignment = 1UL << (align_order - cma->order_per_bit);
+-	return ALIGN(cma->base_pfn, alignment) -
+-		(cma->base_pfn >> cma->order_per_bit);
++
++	return (ALIGN(cma->base_pfn, (1UL << align_order))
++		- cma->base_pfn) >> cma->order_per_bit;
+ }
+ 
+ static unsigned long cma_bitmap_maxno(struct cma *cma)
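
The cma.c fix is pure unit arithmetic: align base_pfn up to the requested
order first, then convert the PFN distance into bitmap bits (order_per_bit
pages per bit). The replaced code subtracted a bitmap-unit quantity from a
PFN-unit quantity. The corrected formula, stand-alone:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

static unsigned long aligned_offset(unsigned long base_pfn,
				    int order_per_bit, int align_order)
{
	if (align_order <= order_per_bit)
		return 0;
	/* distance in pages to the next aligned PFN, then pages -> bits */
	return (ALIGN(base_pfn, 1UL << align_order) - base_pfn)
		>> order_per_bit;
}

int main(void)
{
	/* base_pfn = 0x2f00, one bit per page, order-9 alignment:
	 * the next 512-aligned PFN is 0x3000, 0x100 pages away */
	printf("offset = %lu bits\n", aligned_offset(0x2f00, 0, 9));
	return 0;
}
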
+diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
+index 769b185fefbd..a6e2da0bc718 100644
+--- a/net/caif/caif_socket.c
++++ b/net/caif/caif_socket.c
+@@ -281,7 +281,7 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
+ 	int copylen;
+ 
+ 	ret = -EOPNOTSUPP;
+-	if (m->msg_flags&MSG_OOB)
++	if (flags & MSG_OOB)
+ 		goto read_error;
+ 
+ 	skb = skb_recv_datagram(sk, flags, 0 , &ret);
+diff --git a/net/can/af_can.c b/net/can/af_can.c
+index 66e08040ced7..32d710eaf1fc 100644
+--- a/net/can/af_can.c
++++ b/net/can/af_can.c
+@@ -259,6 +259,9 @@ int can_send(struct sk_buff *skb, int loop)
+ 		goto inval_skb;
+ 	}
+ 
++	skb->ip_summed = CHECKSUM_UNNECESSARY;
++
++	skb_reset_mac_header(skb);
+ 	skb_reset_network_header(skb);
+ 	skb_reset_transport_header(skb);
+ 
+diff --git a/net/compat.c b/net/compat.c
+index 94d3d5e97883..f7bd286a8280 100644
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -49,6 +49,13 @@ ssize_t get_compat_msghdr(struct msghdr *kmsg,
+ 	    __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
+ 	    __get_user(kmsg->msg_flags, &umsg->msg_flags))
+ 		return -EFAULT;
++
++	if (!uaddr)
++		kmsg->msg_namelen = 0;
++
++	if (kmsg->msg_namelen < 0)
++		return -EINVAL;
++
+ 	if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
+ 		kmsg->msg_namelen = sizeof(struct sockaddr_storage);
+ 	kmsg->msg_control = compat_ptr(tmp3);
+diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
+index 31baba2a71ce..bbb1d5ac4a72 100644
+--- a/net/core/sysctl_net_core.c
++++ b/net/core/sysctl_net_core.c
+@@ -25,6 +25,8 @@
+ static int zero = 0;
+ static int one = 1;
+ static int ushort_max = USHRT_MAX;
++static int min_sndbuf = SOCK_MIN_SNDBUF;
++static int min_rcvbuf = SOCK_MIN_RCVBUF;
+ 
+ static int net_msg_warn;	/* Unused, but still a sysctl */
+ 
+@@ -237,7 +239,7 @@ static struct ctl_table net_core_table[] = {
+ 		.maxlen		= sizeof(int),
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_dointvec_minmax,
+-		.extra1		= &one,
++		.extra1		= &min_sndbuf,
+ 	},
+ 	{
+ 		.procname	= "rmem_max",
+@@ -245,7 +247,7 @@ static struct ctl_table net_core_table[] = {
+ 		.maxlen		= sizeof(int),
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_dointvec_minmax,
+-		.extra1		= &one,
++		.extra1		= &min_rcvbuf,
+ 	},
+ 	{
+ 		.procname	= "wmem_default",
+@@ -253,7 +255,7 @@ static struct ctl_table net_core_table[] = {
+ 		.maxlen		= sizeof(int),
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_dointvec_minmax,
+-		.extra1		= &one,
++		.extra1		= &min_sndbuf,
+ 	},
+ 	{
+ 		.procname	= "rmem_default",
+@@ -261,7 +263,7 @@ static struct ctl_table net_core_table[] = {
+ 		.maxlen		= sizeof(int),
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_dointvec_minmax,
+-		.extra1		= &one,
++		.extra1		= &min_rcvbuf,
+ 	},
+ 	{
+ 		.procname	= "dev_weight",
+diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
+index e34dccbc4d70..4eeba4e497a0 100644
+--- a/net/ipv4/inet_diag.c
++++ b/net/ipv4/inet_diag.c
+@@ -71,6 +71,20 @@ static inline void inet_diag_unlock_handler(
+ 	mutex_unlock(&inet_diag_table_mutex);
+ }
+ 
++static size_t inet_sk_attr_size(void)
++{
++	return	  nla_total_size(sizeof(struct tcp_info))
++		+ nla_total_size(1) /* INET_DIAG_SHUTDOWN */
++		+ nla_total_size(1) /* INET_DIAG_TOS */
++		+ nla_total_size(1) /* INET_DIAG_TCLASS */
++		+ nla_total_size(sizeof(struct inet_diag_meminfo))
++		+ nla_total_size(sizeof(struct inet_diag_msg))
++		+ nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
++		+ nla_total_size(TCP_CA_NAME_MAX)
++		+ nla_total_size(sizeof(struct tcpvegas_info))
++		+ 64;
++}
++
+ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
+ 			      struct sk_buff *skb, struct inet_diag_req_v2 *req,
+ 			      struct user_namespace *user_ns,		      	
+@@ -324,9 +338,7 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s
+ 	if (err)
+ 		goto out;
+ 
+-	rep = nlmsg_new(sizeof(struct inet_diag_msg) +
+-			sizeof(struct inet_diag_meminfo) +
+-			sizeof(struct tcp_info) + 64, GFP_KERNEL);
++	rep = nlmsg_new(inet_sk_attr_size(), GFP_KERNEL);
+ 	if (!rep) {
+ 		err = -ENOMEM;
+ 		goto out;
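
inet_sk_attr_size() above sizes the reply by summing nla_total_size() over
every attribute it may emit, rather than the hand-tuned constant it
replaces. Each netlink attribute costs its 4-byte header plus the payload,
rounded to 4-byte alignment; a small sketch with illustrative payload
sizes:

#include <stdio.h>

#define NLA_ALIGNTO	4
#define NLA_ALIGN(n)	(((n) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN	NLA_ALIGN(4)	/* struct nlattr is 4 bytes */

/* header plus padded payload - summing these per attribute gives an
 * upper bound on the space the reply skb needs */
static int nla_total_size(int payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload);
}

int main(void)
{
	int size = nla_total_size(1)	/* a u8 flag, e.g. TOS     */
		 + nla_total_size(16)	/* a short name attribute  */
		 + nla_total_size(104);	/* a struct payload        */

	printf("worst-case attribute space: %d bytes\n", size);
	return 0;
}
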
+diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
+index 8670e68e2ce6..f2d40971c712 100644
+--- a/net/ipv4/tcp_cong.c
++++ b/net/ipv4/tcp_cong.c
+@@ -309,6 +309,12 @@ EXPORT_SYMBOL_GPL(tcp_slow_start);
+  */
+ void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
+ {
++	/* If credits accumulated at a higher w, apply them gently now. */
++	if (tp->snd_cwnd_cnt >= w) {
++		tp->snd_cwnd_cnt = 0;
++		tp->snd_cwnd++;
++	}
++
+ 	tp->snd_cwnd_cnt += acked;
+ 	if (tp->snd_cwnd_cnt >= w) {
+ 		u32 delta = tp->snd_cwnd_cnt / w;
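
The tcp_cong_avoid_ai() hunk spends credit accumulated against an older,
larger window gently (one segment) before counting new ACKs, instead of
letting the stale counter produce a multi-segment burst once w shrinks. A
small simulation of the counter logic (the trailing delta handling follows
the function as it stood in this release):

#include <stdio.h>

struct tp { unsigned snd_cwnd, snd_cwnd_cnt; };

static void cong_avoid_ai(struct tp *tp, unsigned w, unsigned acked)
{
	/* flush old credit gently: at most one segment */
	if (tp->snd_cwnd_cnt >= w) {
		tp->snd_cwnd_cnt = 0;
		tp->snd_cwnd++;
	}
	tp->snd_cwnd_cnt += acked;
	if (tp->snd_cwnd_cnt >= w) {
		unsigned delta = tp->snd_cwnd_cnt / w;

		tp->snd_cwnd_cnt -= delta * w;
		tp->snd_cwnd += delta;
	}
}

int main(void)
{
	/* credit built up at w = 20, then w drops to 10: without the
	 * leading check the stale count would grow cwnd by 2 at once */
	struct tp tp = { .snd_cwnd = 10, .snd_cwnd_cnt = 19 };

	cong_avoid_ai(&tp, 10, 1);
	printf("cwnd = %u, cnt = %u\n", tp.snd_cwnd, tp.snd_cwnd_cnt);
	return 0;
}
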
+diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
+index 4b276d1ed980..06d3d665a9fd 100644
+--- a/net/ipv4/tcp_cubic.c
++++ b/net/ipv4/tcp_cubic.c
+@@ -306,8 +306,10 @@ tcp_friendliness:
+ 		}
+ 	}
+ 
+-	if (ca->cnt == 0)			/* cannot be zero */
+-		ca->cnt = 1;
++	/* The maximum rate of cwnd increase CUBIC allows is 1 packet per
++	 * 2 packets ACKed, meaning cwnd grows at 1.5x per RTT.
++	 */
++	ca->cnt = max(ca->cnt, 2U);
+ }
+ 
+ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 65caf8b95e17..9790f396ce5e 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2775,15 +2775,11 @@ void tcp_send_fin(struct sock *sk)
+ 	} else {
+ 		/* Socket is locked, keep trying until memory is available. */
+ 		for (;;) {
+-			skb = alloc_skb_fclone(MAX_TCP_HEADER,
+-					       sk->sk_allocation);
++			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
+ 			if (skb)
+ 				break;
+ 			yield();
+ 		}
+-
+-		/* Reserve space for headers and prepare control bits. */
+-		skb_reserve(skb, MAX_TCP_HEADER);
+ 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
+ 		tcp_init_nondata_skb(skb, tp->write_seq,
+ 				     TCPHDR_ACK | TCPHDR_FIN);
+diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
+index b4d5e1d97c1b..27ca79682efb 100644
+--- a/net/ipv6/fib6_rules.c
++++ b/net/ipv6/fib6_rules.c
+@@ -104,6 +104,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
+ 				goto again;
+ 			flp6->saddr = saddr;
+ 		}
++		err = rt->dst.error;
+ 		goto out;
+ 	}
+ again:
+diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
+index a56276996b72..4b869d324010 100644
+--- a/net/ipv6/udp_offload.c
++++ b/net/ipv6/udp_offload.c
+@@ -112,11 +112,9 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
+ 		fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
+ 		fptr->nexthdr = nexthdr;
+ 		fptr->reserved = 0;
+-		if (skb_shinfo(skb)->ip6_frag_id)
+-			fptr->identification = skb_shinfo(skb)->ip6_frag_id;
+-		else
+-			ipv6_select_ident(fptr,
+-					  (struct rt6_info *)skb_dst(skb));
++		if (!skb_shinfo(skb)->ip6_frag_id)
++			ipv6_proxy_select_ident(skb);
++		fptr->identification = skb_shinfo(skb)->ip6_frag_id;
+ 
+ 		/* Fragment the skb. ipv6 header and the remaining fields of the
+ 		 * fragment header are updated in ipv6_gso_segment()
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index b8295a430a56..fdcda8be1f0f 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -3399,7 +3399,7 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
+ 		if (udest.af == 0)
+ 			udest.af = svc->af;
+ 
+-		if (udest.af != svc->af) {
++		if (udest.af != svc->af && cmd != IPVS_CMD_DEL_DEST) {
+ 			/* The synchronization protocol is incompatible
+ 			 * with mixed family services
+ 			 */
+diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
+index c47ffd7a0a70..d93ceeb3ef04 100644
+--- a/net/netfilter/ipvs/ip_vs_sync.c
++++ b/net/netfilter/ipvs/ip_vs_sync.c
+@@ -896,6 +896,8 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
+ 			IP_VS_DBG(2, "BACKUP, add new conn. failed\n");
+ 			return;
+ 		}
++		if (!(flags & IP_VS_CONN_F_TEMPLATE))
++			kfree(param->pe_data);
+ 	}
+ 
+ 	if (opt)
+@@ -1169,6 +1171,7 @@ static inline int ip_vs_proc_sync_conn(struct net *net, __u8 *p, __u8 *msg_end)
+ 				(opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL)
+ 				);
+ #endif
++	ip_vs_pe_put(param.pe);
+ 	return 0;
+ 	/* Error exit */
+ out:
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 1ff04bcd4871..f77f4cc8e3e9 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -227,7 +227,7 @@ nft_rule_deactivate_next(struct net *net, struct nft_rule *rule)
+ 
+ static inline void nft_rule_clear(struct net *net, struct nft_rule *rule)
+ {
+-	rule->genmask = 0;
++	rule->genmask &= ~(1 << gencursor_next(net));
+ }
+ 
+ static int
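
The one-line change above matters because genmask is a two-generation visibility mask. A toy model follows; it assumes the usual nf_tables convention that a set bit marks the rule inactive in that generation, and gencursor/NEXT_GEN below are invented stand-ins for net->nft.gencursor and gencursor_next():

#include <stdio.h>

static unsigned int gencursor;		/* current generation: 0 or 1 */
#define NEXT_GEN	(gencursor ^ 1)

static int is_active(unsigned int genmask, unsigned int gen)
{
	return (genmask & (1u << gen)) == 0;
}

int main(void)
{
	/* Rule hidden in both generations. */
	unsigned int genmask = (1u << gencursor) | (1u << NEXT_GEN);

	/* The buggy clear wiped the whole mask, unhiding the rule
	 * immediately; the fix unhides it for the next generation only. */
	genmask &= ~(1u << NEXT_GEN);

	printf("active now: %d, active next gen: %d\n",
	       is_active(genmask, gencursor), is_active(genmask, NEXT_GEN));
	return 0;
}
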
+@@ -3606,12 +3606,11 @@ static int nf_tables_commit(struct sk_buff *skb)
+ 						 &te->elem,
+ 						 NFT_MSG_DELSETELEM, 0);
+ 			te->set->ops->get(te->set, &te->elem);
+-			te->set->ops->remove(te->set, &te->elem);
+ 			nft_data_uninit(&te->elem.key, NFT_DATA_VALUE);
+-			if (te->elem.flags & NFT_SET_MAP) {
+-				nft_data_uninit(&te->elem.data,
+-						te->set->dtype);
+-			}
++			if (te->set->flags & NFT_SET_MAP &&
++			    !(te->elem.flags & NFT_SET_ELEM_INTERVAL_END))
++				nft_data_uninit(&te->elem.data, te->set->dtype);
++			te->set->ops->remove(te->set, &te->elem);
+ 			nft_trans_destroy(trans);
+ 			break;
+ 		}
+@@ -3652,7 +3651,7 @@ static int nf_tables_abort(struct sk_buff *skb)
+ {
+ 	struct net *net = sock_net(skb->sk);
+ 	struct nft_trans *trans, *next;
+-	struct nft_set *set;
++	struct nft_trans_elem *te;
+ 
+ 	list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
+ 		switch (trans->msg_type) {
+@@ -3713,9 +3712,13 @@ static int nf_tables_abort(struct sk_buff *skb)
+ 			break;
+ 		case NFT_MSG_NEWSETELEM:
+ 			nft_trans_elem_set(trans)->nelems--;
+-			set = nft_trans_elem_set(trans);
+-			set->ops->get(set, &nft_trans_elem(trans));
+-			set->ops->remove(set, &nft_trans_elem(trans));
++			te = (struct nft_trans_elem *)trans->data;
++			te->set->ops->get(te->set, &te->elem);
++			nft_data_uninit(&te->elem.key, NFT_DATA_VALUE);
++			if (te->set->flags & NFT_SET_MAP &&
++			    !(te->elem.flags & NFT_SET_ELEM_INTERVAL_END))
++				nft_data_uninit(&te->elem.data, te->set->dtype);
++			te->set->ops->remove(te->set, &te->elem);
+ 			nft_trans_destroy(trans);
+ 			break;
+ 		case NFT_MSG_DELSETELEM:
+diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
+index 265e190f2218..b6364869c2e0 100644
+--- a/net/netfilter/nft_compat.c
++++ b/net/netfilter/nft_compat.c
+@@ -578,8 +578,12 @@ nft_match_select_ops(const struct nft_ctx *ctx,
+ 		struct xt_match *match = nft_match->ops.data;
+ 
+ 		if (strcmp(match->name, mt_name) == 0 &&
+-		    match->revision == rev && match->family == family)
++		    match->revision == rev && match->family == family) {
++			if (!try_module_get(match->me))
++				return ERR_PTR(-ENOENT);
++
+ 			return &nft_match->ops;
++		}
+ 	}
+ 
+ 	match = xt_request_find_match(family, mt_name, rev);
+@@ -648,8 +652,12 @@ nft_target_select_ops(const struct nft_ctx *ctx,
+ 		struct xt_target *target = nft_target->ops.data;
+ 
+ 		if (strcmp(target->name, tg_name) == 0 &&
+-		    target->revision == rev && target->family == family)
++		    target->revision == rev && target->family == family) {
++			if (!try_module_get(target->me))
++				return ERR_PTR(-ENOENT);
++
+ 			return &nft_target->ops;
++		}
+ 	}
+ 
+ 	target = xt_request_find_target(family, tg_name, rev);
+diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
+index 1ba67931eb1b..13332dbf291d 100644
+--- a/net/netfilter/xt_socket.c
++++ b/net/netfilter/xt_socket.c
+@@ -243,12 +243,13 @@ static int
+ extract_icmp6_fields(const struct sk_buff *skb,
+ 		     unsigned int outside_hdrlen,
+ 		     int *protocol,
+-		     struct in6_addr **raddr,
+-		     struct in6_addr **laddr,
++		     const struct in6_addr **raddr,
++		     const struct in6_addr **laddr,
+ 		     __be16 *rport,
+-		     __be16 *lport)
++		     __be16 *lport,
++		     struct ipv6hdr *ipv6_var)
+ {
+-	struct ipv6hdr *inside_iph, _inside_iph;
++	const struct ipv6hdr *inside_iph;
+ 	struct icmp6hdr *icmph, _icmph;
+ 	__be16 *ports, _ports[2];
+ 	u8 inside_nexthdr;
+@@ -263,12 +264,14 @@ extract_icmp6_fields(const struct sk_buff *skb,
+ 	if (icmph->icmp6_type & ICMPV6_INFOMSG_MASK)
+ 		return 1;
+ 
+-	inside_iph = skb_header_pointer(skb, outside_hdrlen + sizeof(_icmph), sizeof(_inside_iph), &_inside_iph);
++	inside_iph = skb_header_pointer(skb, outside_hdrlen + sizeof(_icmph),
++					sizeof(*ipv6_var), ipv6_var);
+ 	if (inside_iph == NULL)
+ 		return 1;
+ 	inside_nexthdr = inside_iph->nexthdr;
+ 
+-	inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) + sizeof(_inside_iph),
++	inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) +
++					      sizeof(*ipv6_var),
+ 					 &inside_nexthdr, &inside_fragoff);
+ 	if (inside_hdrlen < 0)
+ 		return 1; /* hjm: Packet has no/incomplete transport layer headers. */
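
The new ipv6_var parameter exists so the scratch buffer lives in the caller's frame: skb_header_pointer() may return a pointer into the buffer it is given, and here addresses derived from the parsed header escape back to socket_mt6_v1_v2(). A small userspace analogue of the pattern (header_pointer(), struct hdr, and parse() are invented for illustration, not real kernel APIs):

#include <stdio.h>
#include <string.h>

/* Stand-in for skb_header_pointer(): copy 'len' bytes into caller
 * scratch and return it. */
static const void *header_pointer(const char *pkt, size_t off,
				  size_t len, void *scratch)
{
	memcpy(scratch, pkt + off, len);
	return scratch;
}

struct hdr { int field; };

/* Returns a pointer derived from the parsed header. Because the scratch
 * buffer belongs to the caller, the pointer stays valid after return;
 * with a function-local buffer it would dangle. */
static const int *parse(const char *pkt, struct hdr *scratch)
{
	const struct hdr *h = header_pointer(pkt, 0, sizeof(*h), scratch);
	return &h->field;
}

int main(void)
{
	struct hdr init = { 42 }, scratch;
	char pkt[sizeof(struct hdr)];

	memcpy(pkt, &init, sizeof(init));
	printf("field = %d\n", *parse(pkt, &scratch));
	return 0;
}
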
+@@ -315,10 +318,10 @@ xt_socket_get_sock_v6(struct net *net, const u8 protocol,
+ static bool
+ socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
+ {
+-	struct ipv6hdr *iph = ipv6_hdr(skb);
++	struct ipv6hdr ipv6_var, *iph = ipv6_hdr(skb);
+ 	struct udphdr _hdr, *hp = NULL;
+ 	struct sock *sk = skb->sk;
+-	struct in6_addr *daddr = NULL, *saddr = NULL;
++	const struct in6_addr *daddr = NULL, *saddr = NULL;
+ 	__be16 uninitialized_var(dport), uninitialized_var(sport);
+ 	int thoff = 0, uninitialized_var(tproto);
+ 	const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
+@@ -342,7 +345,7 @@ socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
+ 
+ 	} else if (tproto == IPPROTO_ICMPV6) {
+ 		if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr,
+-					 &sport, &dport))
++					 &sport, &dport, &ipv6_var))
+ 			return false;
+ 	} else {
+ 		return false;
+diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
+index a817705ce2d0..dba8d0864f18 100644
+--- a/net/rds/iw_rdma.c
++++ b/net/rds/iw_rdma.c
+@@ -88,7 +88,9 @@ static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
+ 			int *unpinned);
+ static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
+ 
+-static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id)
++static int rds_iw_get_device(struct sockaddr_in *src, struct sockaddr_in *dst,
++			     struct rds_iw_device **rds_iwdev,
++			     struct rdma_cm_id **cm_id)
+ {
+ 	struct rds_iw_device *iwdev;
+ 	struct rds_iw_cm_id *i_cm_id;
+@@ -112,15 +114,15 @@ static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwd
+ 				src_addr->sin_port,
+ 				dst_addr->sin_addr.s_addr,
+ 				dst_addr->sin_port,
+-				rs->rs_bound_addr,
+-				rs->rs_bound_port,
+-				rs->rs_conn_addr,
+-				rs->rs_conn_port);
++				src->sin_addr.s_addr,
++				src->sin_port,
++				dst->sin_addr.s_addr,
++				dst->sin_port);
+ #ifdef WORKING_TUPLE_DETECTION
+-			if (src_addr->sin_addr.s_addr == rs->rs_bound_addr &&
+-			    src_addr->sin_port == rs->rs_bound_port &&
+-			    dst_addr->sin_addr.s_addr == rs->rs_conn_addr &&
+-			    dst_addr->sin_port == rs->rs_conn_port) {
++			if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr &&
++			    src_addr->sin_port == src->sin_port &&
++			    dst_addr->sin_addr.s_addr == dst->sin_addr.s_addr &&
++			    dst_addr->sin_port == dst->sin_port) {
+ #else
+ 			/* FIXME - needs to compare the local and remote
+ 			 * ipaddr/port tuple, but the ipaddr is the only
+@@ -128,7 +130,7 @@ static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwd
+ 			 * zero'ed.  It doesn't appear to be properly populated
+ 			 * during connection setup...
+ 			 */
+-			if (src_addr->sin_addr.s_addr == rs->rs_bound_addr) {
++			if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr) {
+ #endif
+ 				spin_unlock_irq(&iwdev->spinlock);
+ 				*rds_iwdev = iwdev;
+@@ -180,19 +182,13 @@ int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_i
+ {
+ 	struct sockaddr_in *src_addr, *dst_addr;
+ 	struct rds_iw_device *rds_iwdev_old;
+-	struct rds_sock rs;
+ 	struct rdma_cm_id *pcm_id;
+ 	int rc;
+ 
+ 	src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
+ 	dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
+ 
+-	rs.rs_bound_addr = src_addr->sin_addr.s_addr;
+-	rs.rs_bound_port = src_addr->sin_port;
+-	rs.rs_conn_addr = dst_addr->sin_addr.s_addr;
+-	rs.rs_conn_port = dst_addr->sin_port;
+-
+-	rc = rds_iw_get_device(&rs, &rds_iwdev_old, &pcm_id);
++	rc = rds_iw_get_device(src_addr, dst_addr, &rds_iwdev_old, &pcm_id);
+ 	if (rc)
+ 		rds_iw_remove_cm_id(rds_iwdev, cm_id);
+ 
+@@ -598,9 +594,17 @@ void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
+ 	struct rds_iw_device *rds_iwdev;
+ 	struct rds_iw_mr *ibmr = NULL;
+ 	struct rdma_cm_id *cm_id;
++	struct sockaddr_in src = {
++		.sin_addr.s_addr = rs->rs_bound_addr,
++		.sin_port = rs->rs_bound_port,
++	};
++	struct sockaddr_in dst = {
++		.sin_addr.s_addr = rs->rs_conn_addr,
++		.sin_port = rs->rs_conn_port,
++	};
+ 	int ret;
+ 
+-	ret = rds_iw_get_device(rs, &rds_iwdev, &cm_id);
++	ret = rds_iw_get_device(&src, &dst, &rds_iwdev, &cm_id);
+ 	if (ret || !cm_id) {
+ 		ret = -ENODEV;
+ 		goto out;
+diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
+index 4575485ad1b4..19a560626dc4 100644
+--- a/net/rxrpc/ar-recvmsg.c
++++ b/net/rxrpc/ar-recvmsg.c
+@@ -87,7 +87,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
+ 		if (!skb) {
+ 			/* nothing remains on the queue */
+ 			if (copied &&
+-			    (msg->msg_flags & MSG_PEEK || timeo == 0))
++			    (flags & MSG_PEEK || timeo == 0))
+ 				goto out;
+ 
+ 			/* wait for a message to turn up */
+diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
+index 09487afbfd51..95fdf4e40051 100644
+--- a/net/sched/cls_u32.c
++++ b/net/sched/cls_u32.c
+@@ -78,8 +78,11 @@ struct tc_u_hnode {
+ 	struct tc_u_common	*tp_c;
+ 	int			refcnt;
+ 	unsigned int		divisor;
+-	struct tc_u_knode __rcu	*ht[1];
+ 	struct rcu_head		rcu;
++	/* The 'ht' field MUST be the last field in structure to allow for
++	 * more entries allocated at end of structure.
++	 */
++	struct tc_u_knode __rcu	*ht[1];
+ };
+ 
+ struct tc_u_common {
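
The comment above describes the classic oversized-allocation idiom: ht[1] is declared with one slot, but the object is allocated with room for divisor more, so any member placed after it would be overwritten by the extra entries. A minimal userspace sketch (struct hnode is an invented analogue of tc_u_hnode):

#include <stdio.h>
#include <stdlib.h>

struct hnode {
	unsigned int divisor;
	void *ht[1];		/* really 'divisor + 1' entries */
};

int main(void)
{
	unsigned int divisor = 16;
	struct hnode *n = calloc(1, sizeof(*n) + divisor * sizeof(void *));

	if (!n)
		return 1;
	n->divisor = divisor;
	n->ht[divisor] = (void *)0x1;	/* valid: inside the allocation */
	printf("allocated %zu bytes for %u+1 slots\n",
	       sizeof(*n) + divisor * sizeof(void *), divisor);
	free(n);
	return 0;
}
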
+diff --git a/net/socket.c b/net/socket.c
+index 418795caa897..d50e7ca6aeea 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -1765,6 +1765,8 @@ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
+ 
+ 	if (len > INT_MAX)
+ 		len = INT_MAX;
++	if (unlikely(!access_ok(VERIFY_READ, buff, len)))
++		return -EFAULT;
+ 	sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ 	if (!sock)
+ 		goto out;
+@@ -1823,6 +1825,8 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
+ 
+ 	if (size > INT_MAX)
+ 		size = INT_MAX;
++	if (unlikely(!access_ok(VERIFY_WRITE, ubuf, size)))
++		return -EFAULT;
+ 	sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ 	if (!sock)
+ 		goto out;
+diff --git a/sound/core/control.c b/sound/core/control.c
+index bb96a467e88d..23b018b717b0 100644
+--- a/sound/core/control.c
++++ b/sound/core/control.c
+@@ -1168,6 +1168,10 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
+ 
+ 	if (info->count < 1)
+ 		return -EINVAL;
++	if (!*info->id.name)
++		return -EINVAL;
++	if (strnlen(info->id.name, sizeof(info->id.name)) >= sizeof(info->id.name))
++		return -EINVAL;
+ 	access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
+ 		(info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE|
+ 				 SNDRV_CTL_ELEM_ACCESS_INACTIVE|
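
The two checks added above implement a common fixed-buffer name validation: an empty string is rejected outright, and strnlen() returning the full buffer size means no terminating NUL was found inside it. A userspace sketch of the same pattern (check_name() and NAME_LEN are invented for illustration):

#include <stdio.h>
#include <string.h>

#define NAME_LEN 8

static int check_name(const char name[NAME_LEN])
{
	if (!*name)
		return -1;			/* empty name */
	if (strnlen(name, NAME_LEN) >= NAME_LEN)
		return -1;			/* no NUL terminator */
	return 0;
}

int main(void)
{
	char ok[NAME_LEN] = "Mixer";
	char bad[NAME_LEN] = { 'A','B','C','D','E','F','G','H' }; /* no NUL */

	printf("ok:  %d\n", check_name(ok));	/* 0  */
	printf("bad: %d\n", check_name(bad));	/* -1 */
	return 0;
}
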
+diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
+index c2aa3cd844e5..a9536bb0dd73 100644
+--- a/sound/pci/hda/hda_controller.c
++++ b/sound/pci/hda/hda_controller.c
+@@ -1160,7 +1160,7 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
+ 		}
+ 	}
+ 
+-	if (!bus->no_response_fallback)
++	if (bus->no_response_fallback)
+ 		return -1;
+ 
+ 	if (!chip->polling_mode && chip->poll_count < 2) {
+diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
+index b680b4ec6331..8ec5289f8e05 100644
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -687,12 +687,45 @@ static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid,
+ 	return val;
+ }
+ 
++/* is this a stereo widget or a stereo-to-mono mix? */
++static bool is_stereo_amps(struct hda_codec *codec, hda_nid_t nid, int dir)
++{
++	unsigned int wcaps = get_wcaps(codec, nid);
++	hda_nid_t conn;
++
++	if (wcaps & AC_WCAP_STEREO)
++		return true;
++	if (dir != HDA_INPUT || get_wcaps_type(wcaps) != AC_WID_AUD_MIX)
++		return false;
++	if (snd_hda_get_num_conns(codec, nid) != 1)
++		return false;
++	if (snd_hda_get_connections(codec, nid, &conn, 1) < 0)
++		return false;
++	return !!(get_wcaps(codec, conn) & AC_WCAP_STEREO);
++}
++
+ /* initialize the amp value (only at the first time) */
+ static void init_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx)
+ {
+ 	unsigned int caps = query_amp_caps(codec, nid, dir);
+ 	int val = get_amp_val_to_activate(codec, nid, dir, caps, false);
+-	snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val);
++
++	if (is_stereo_amps(codec, nid, dir))
++		snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val);
++	else
++		snd_hda_codec_amp_init(codec, nid, 0, dir, idx, 0xff, val);
++}
++
++/* update the amp, doing in stereo or mono depending on NID */
++static int update_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx,
++		      unsigned int mask, unsigned int val)
++{
++	if (is_stereo_amps(codec, nid, dir))
++		return snd_hda_codec_amp_stereo(codec, nid, dir, idx,
++						mask, val);
++	else
++		return snd_hda_codec_amp_update(codec, nid, 0, dir, idx,
++						mask, val);
+ }
+ 
+ /* calculate amp value mask we can modify;
+@@ -732,7 +765,7 @@ static void activate_amp(struct hda_codec *codec, hda_nid_t nid, int dir,
+ 		return;
+ 
+ 	val &= mask;
+-	snd_hda_codec_amp_stereo(codec, nid, dir, idx, mask, val);
++	update_amp(codec, nid, dir, idx, mask, val);
+ }
+ 
+ static void activate_amp_out(struct hda_codec *codec, struct nid_path *path,
+@@ -4424,13 +4457,11 @@ static void mute_all_mixer_nid(struct hda_codec *codec, hda_nid_t mix)
+ 	has_amp = nid_has_mute(codec, mix, HDA_INPUT);
+ 	for (i = 0; i < nums; i++) {
+ 		if (has_amp)
+-			snd_hda_codec_amp_stereo(codec, mix,
+-						 HDA_INPUT, i,
+-						 0xff, HDA_AMP_MUTE);
++			update_amp(codec, mix, HDA_INPUT, i,
++				   0xff, HDA_AMP_MUTE);
+ 		else if (nid_has_volume(codec, conn[i], HDA_OUTPUT))
+-			snd_hda_codec_amp_stereo(codec, conn[i],
+-						 HDA_OUTPUT, 0,
+-						 0xff, HDA_AMP_MUTE);
++			update_amp(codec, conn[i], HDA_OUTPUT, 0,
++				   0xff, HDA_AMP_MUTE);
+ 	}
+ }
+ 
+diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
+index ce5a6da83419..05e19f78b4cb 100644
+--- a/sound/pci/hda/hda_proc.c
++++ b/sound/pci/hda/hda_proc.c
+@@ -134,13 +134,38 @@ static void print_amp_caps(struct snd_info_buffer *buffer,
+ 		    (caps & AC_AMPCAP_MUTE) >> AC_AMPCAP_MUTE_SHIFT);
+ }
+ 
++/* is this a stereo widget or a stereo-to-mono mix? */
++static bool is_stereo_amps(struct hda_codec *codec, hda_nid_t nid,
++			   int dir, unsigned int wcaps, int indices)
++{
++	hda_nid_t conn;
++
++	if (wcaps & AC_WCAP_STEREO)
++		return true;
++	/* check for a stereo-to-mono mix; it must be:
++	 * only a single connection, only for input, and only a mixer widget
++	 */
++	if (indices != 1 || dir != HDA_INPUT ||
++	    get_wcaps_type(wcaps) != AC_WID_AUD_MIX)
++		return false;
++
++	if (snd_hda_get_raw_connections(codec, nid, &conn, 1) < 0)
++		return false;
++	/* the connection source is a stereo? */
++	wcaps = snd_hda_param_read(codec, conn, AC_PAR_AUDIO_WIDGET_CAP);
++	return !!(wcaps & AC_WCAP_STEREO);
++}
++
+ static void print_amp_vals(struct snd_info_buffer *buffer,
+ 			   struct hda_codec *codec, hda_nid_t nid,
+-			   int dir, int stereo, int indices)
++			   int dir, unsigned int wcaps, int indices)
+ {
+ 	unsigned int val;
++	bool stereo;
+ 	int i;
+ 
++	stereo = is_stereo_amps(codec, nid, dir, wcaps, indices);
++
+ 	dir = dir == HDA_OUTPUT ? AC_AMP_GET_OUTPUT : AC_AMP_GET_INPUT;
+ 	for (i = 0; i < indices; i++) {
+ 		snd_iprintf(buffer, " [");
+@@ -757,12 +782,10 @@ static void print_codec_info(struct snd_info_entry *entry,
+ 			    (codec->single_adc_amp &&
+ 			     wid_type == AC_WID_AUD_IN))
+ 				print_amp_vals(buffer, codec, nid, HDA_INPUT,
+-					       wid_caps & AC_WCAP_STEREO,
+-					       1);
++					       wid_caps, 1);
+ 			else
+ 				print_amp_vals(buffer, codec, nid, HDA_INPUT,
+-					       wid_caps & AC_WCAP_STEREO,
+-					       conn_len);
++					       wid_caps, conn_len);
+ 		}
+ 		if (wid_caps & AC_WCAP_OUT_AMP) {
+ 			snd_iprintf(buffer, "  Amp-Out caps: ");
+@@ -771,11 +794,10 @@ static void print_codec_info(struct snd_info_entry *entry,
+ 			if (wid_type == AC_WID_PIN &&
+ 			    codec->pin_amp_workaround)
+ 				print_amp_vals(buffer, codec, nid, HDA_OUTPUT,
+-					       wid_caps & AC_WCAP_STEREO,
+-					       conn_len);
++					       wid_caps, conn_len);
+ 			else
+ 				print_amp_vals(buffer, codec, nid, HDA_OUTPUT,
+-					       wid_caps & AC_WCAP_STEREO, 1);
++					       wid_caps, 1);
+ 		}
+ 
+ 		switch (wid_type) {
+diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
+index 1589c9bcce3e..dd2b3d92071f 100644
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -393,6 +393,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81),
+ 	SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122),
+ 	SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101),
++	SND_PCI_QUIRK(0x106b, 0x5600, "MacBookAir 5,2", CS420X_MBP81),
+ 	SND_PCI_QUIRK(0x106b, 0x5b00, "MacBookAir 4,2", CS420X_MBA42),
+ 	SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE),
+ 	{} /* terminator */
+@@ -584,6 +585,7 @@ static int patch_cs420x(struct hda_codec *codec)
+ 		return -ENOMEM;
+ 
+ 	spec->gen.automute_hook = cs_automute;
++	codec->single_adc_amp = 1;
+ 
+ 	snd_hda_pick_fixup(codec, cs420x_models, cs420x_fixup_tbl,
+ 			   cs420x_fixups);
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index fd3ed18670e9..da67ea8645a6 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -223,6 +223,7 @@ enum {
+ 	CXT_PINCFG_LENOVO_TP410,
+ 	CXT_PINCFG_LEMOTE_A1004,
+ 	CXT_PINCFG_LEMOTE_A1205,
++	CXT_PINCFG_COMPAQ_CQ60,
+ 	CXT_FIXUP_STEREO_DMIC,
+ 	CXT_FIXUP_INC_MIC_BOOST,
+ 	CXT_FIXUP_HEADPHONE_MIC_PIN,
+@@ -660,6 +661,15 @@ static const struct hda_fixup cxt_fixups[] = {
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = cxt_pincfg_lemote,
+ 	},
++	[CXT_PINCFG_COMPAQ_CQ60] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			/* 0x17 was falsely set up as a mic, it should be 0x1d */
++			{ 0x17, 0x400001f0 },
++			{ 0x1d, 0x97a70120 },
++			{ }
++		}
++	},
+ 	[CXT_FIXUP_STEREO_DMIC] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = cxt_fixup_stereo_dmic,
+@@ -769,6 +779,7 @@ static const struct hda_model_fixup cxt5047_fixup_models[] = {
+ };
+ 
+ static const struct snd_pci_quirk cxt5051_fixups[] = {
++	SND_PCI_QUIRK(0x103c, 0x360b, "Compaq CQ60", CXT_PINCFG_COMPAQ_CQ60),
+ 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo X200", CXT_PINCFG_LENOVO_X200),
+ 	{}
+ };
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 0a598af9b38b..e61c167da72c 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -1773,6 +1773,36 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ 		}
+ 	}
+ },
++{
++	USB_DEVICE(0x0582, 0x0159),
++	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++		/* .vendor_name = "Roland", */
++		/* .product_name = "UA-22", */
++		.ifnum = QUIRK_ANY_INTERFACE,
++		.type = QUIRK_COMPOSITE,
++		.data = (const struct snd_usb_audio_quirk[]) {
++			{
++				.ifnum = 0,
++				.type = QUIRK_AUDIO_STANDARD_INTERFACE
++			},
++			{
++				.ifnum = 1,
++				.type = QUIRK_AUDIO_STANDARD_INTERFACE
++			},
++			{
++				.ifnum = 2,
++				.type = QUIRK_MIDI_FIXED_ENDPOINT,
++				.data = & (const struct snd_usb_midi_endpoint_info) {
++					.out_cables = 0x0001,
++					.in_cables = 0x0001
++				}
++			},
++			{
++				.ifnum = -1
++			}
++		}
++	}
++},
+ /* this catches most recent vendor-specific Roland devices */
+ {
+ 	.match_flags = USB_DEVICE_ID_MATCH_VENDOR |
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 1cc6e2e19982..ec83b11c5978 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -2416,6 +2416,7 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
+ 	case KVM_CAP_SIGNAL_MSI:
+ #endif
+ #ifdef CONFIG_HAVE_KVM_IRQFD
++	case KVM_CAP_IRQFD:
+ 	case KVM_CAP_IRQFD_RESAMPLE:
+ #endif
+ 	case KVM_CAP_CHECK_EXTENSION_VM:



* [gentoo-commits] proj/linux-patches:3.19 commit in: /
@ 2015-03-28 19:54 Mike Pagano
  0 siblings, 0 replies; 19+ messages in thread
From: Mike Pagano @ 2015-03-28 19:54 UTC (permalink / raw
  To: gentoo-commits

commit:     bb36852c647b9905eaff367fe8792a6a00d54838
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Mar 28 19:54:57 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Mar 28 19:54:57 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=bb36852c

Add a check on saved_root_name so the /dev/root fix is only applied when the root name is given as a filesystem path.

 2900_dev-root-proc-mount-fix.patch | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/2900_dev-root-proc-mount-fix.patch b/2900_dev-root-proc-mount-fix.patch
index 6ea86e2..aa6fa19 100644
--- a/2900_dev-root-proc-mount-fix.patch
+++ b/2900_dev-root-proc-mount-fix.patch
@@ -1,6 +1,6 @@
---- a/init/do_mounts.c	2014-08-26 08:03:30.000013100 -0400
-+++ b/init/do_mounts.c	2014-08-26 08:11:19.720014712 -0400
-@@ -484,7 +484,10 @@ void __init change_floppy(char *fmt, ...
+--- a/init/do_mounts.c	2015-03-28 15:38:01.750855358 -0400
++++ b/init/do_mounts.c	2015-03-28 15:41:47.873853202 -0400
+@@ -485,7 +485,10 @@ void __init change_floppy(char *fmt, ...
  	va_start(args, fmt);
  	vsprintf(buf, fmt, args);
  	va_end(args);
@@ -12,13 +12,13 @@
  	if (fd >= 0) {
  		sys_ioctl(fd, FDEJECT, 0);
  		sys_close(fd);
-@@ -527,8 +530,13 @@ void __init mount_root(void)
+@@ -528,8 +531,13 @@ void __init mount_root(void)
  	}
  #endif
  #ifdef CONFIG_BLOCK
 -	create_dev("/dev/root", ROOT_DEV);
 -	mount_block_root("/dev/root", root_mountflags);
-+	if (saved_root_name[0]) {
++	if (saved_root_name[0] == '/') {
 +		create_dev(saved_root_name, ROOT_DEV);
 +		mount_block_root(saved_root_name, root_mountflags);
 +	} else {



* [gentoo-commits] proj/linux-patches:3.19 commit in: /
@ 2015-04-14  0:27 Mike Pagano
  0 siblings, 0 replies; 19+ messages in thread
From: Mike Pagano @ 2015-04-14  0:27 UTC (permalink / raw
  To: gentoo-commits

commit:     35b1a0bb1b39da74831209977922d6d07ce12213
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Apr 14 00:27:15 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Apr 14 00:27:15 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=35b1a0bb

Linux patch 3.19.4

 0000_README             |    4 +
 1003_linux-3.19.4.patch | 3185 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3189 insertions(+)

diff --git a/0000_README b/0000_README
index 513fccd..5c9a4bb 100644
--- a/0000_README
+++ b/0000_README
@@ -55,6 +55,10 @@ Patch:  1002_linux-3.19.3.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.19.3
 
+Patch:  1003_linux-3.19.4.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.19.4
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1003_linux-3.19.4.patch b/1003_linux-3.19.4.patch
new file mode 100644
index 0000000..288af92
--- /dev/null
+++ b/1003_linux-3.19.4.patch
@@ -0,0 +1,3185 @@
+diff --git a/Makefile b/Makefile
+index 713bf263952f..2ef20781ad25 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 19
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = Diseased Newt
+ 
+diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
+index cb9593079f29..d8c25b7b18fb 100644
+--- a/arch/arm64/include/asm/cmpxchg.h
++++ b/arch/arm64/include/asm/cmpxchg.h
+@@ -246,14 +246,30 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
+ 	__ret; \
+ })
+ 
+-#define this_cpu_cmpxchg_1(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
+-#define this_cpu_cmpxchg_2(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
+-#define this_cpu_cmpxchg_4(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
+-#define this_cpu_cmpxchg_8(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
+-
+-#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
+-	cmpxchg_double_local(raw_cpu_ptr(&(ptr1)), raw_cpu_ptr(&(ptr2)), \
+-				o1, o2, n1, n2)
++#define _protect_cmpxchg_local(pcp, o, n)			\
++({								\
++	typeof(*raw_cpu_ptr(&(pcp))) __ret;			\
++	preempt_disable();					\
++	__ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n);	\
++	preempt_enable();					\
++	__ret;							\
++})
++
++#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
++#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
++#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
++#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
++
++#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2)		\
++({									\
++	int __ret;							\
++	preempt_disable();						\
++	__ret = cmpxchg_double_local(	raw_cpu_ptr(&(ptr1)),		\
++					raw_cpu_ptr(&(ptr2)),		\
++					o1, o2, n1, n2);		\
++	preempt_enable();						\
++	__ret;								\
++})
+ 
+ #define cmpxchg64(ptr,o,n)		cmpxchg((ptr),(o),(n))
+ #define cmpxchg64_local(ptr,o,n)	cmpxchg_local((ptr),(o),(n))
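
The preempt_disable()/preempt_enable() pairs above close a two-step race: the per-CPU address is computed first, the "local" cmpxchg runs second, and a migration in between lands the operation on a CPU the task has left. A toy userspace model of that window (slot[] and cur_cpu are invented stand-ins for the per-CPU area and smp_processor_id(); GCC's __atomic builtin stands in for cmpxchg_local):

#include <stdio.h>

static int slot[2];
static int cur_cpu;			/* "which CPU am I on" */

int main(void)
{
	int *p = &slot[cur_cpu];	/* step 1: raw_cpu_ptr() */
	cur_cpu = 1;			/* preemption + migration here */
	__atomic_compare_exchange_n(p, &(int){0}, 7, 0,
				    __ATOMIC_RELAXED, __ATOMIC_RELAXED);
	/* Step 2 landed on slot[0], which now belongs to another task. */
	printf("slot[0]=%d slot[1]=%d (CAS hit the stale slot)\n",
	       slot[0], slot[1]);
	return 0;
}

Disabling preemption across both steps pins the task to one CPU, which is all the wrapper macros add.
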
+diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
+index a9eee33dfa62..101a42bde728 100644
+--- a/arch/arm64/include/asm/mmu_context.h
++++ b/arch/arm64/include/asm/mmu_context.h
+@@ -151,6 +151,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ {
+ 	unsigned int cpu = smp_processor_id();
+ 
++	/*
++	 * init_mm.pgd does not contain any user mappings and it is always
++	 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
++	 */
++	if (next == &init_mm) {
++		cpu_set_reserved_ttbr0();
++		return;
++	}
++
+ 	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
+ 		check_and_switch_context(next, tsk);
+ }
+diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
+index 09da25bc596f..4fde8c1df97f 100644
+--- a/arch/arm64/include/asm/percpu.h
++++ b/arch/arm64/include/asm/percpu.h
+@@ -204,25 +204,47 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
+ 	return ret;
+ }
+ 
++#define _percpu_read(pcp)						\
++({									\
++	typeof(pcp) __retval;						\
++	preempt_disable();						\
++	__retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)), 	\
++					      sizeof(pcp));		\
++	preempt_enable();						\
++	__retval;							\
++})
++
++#define _percpu_write(pcp, val)						\
++do {									\
++	preempt_disable();						\
++	__percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), 	\
++				sizeof(pcp));				\
++	preempt_enable();						\
++} while(0)								\
++
++#define _pcp_protect(operation, pcp, val)			\
++({								\
++	typeof(pcp) __retval;					\
++	preempt_disable();					\
++	__retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)),	\
++					  (val), sizeof(pcp));	\
++	preempt_enable();					\
++	__retval;						\
++})
++
+ #define _percpu_add(pcp, val) \
+-	__percpu_add(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
++	_pcp_protect(__percpu_add, pcp, val)
+ 
+-#define _percpu_add_return(pcp, val) (typeof(pcp)) (_percpu_add(pcp, val))
++#define _percpu_add_return(pcp, val) _percpu_add(pcp, val)
+ 
+ #define _percpu_and(pcp, val) \
+-	__percpu_and(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
++	_pcp_protect(__percpu_and, pcp, val)
+ 
+ #define _percpu_or(pcp, val) \
+-	__percpu_or(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
+-
+-#define _percpu_read(pcp) (typeof(pcp))	\
+-	(__percpu_read(raw_cpu_ptr(&(pcp)), sizeof(pcp)))
+-
+-#define _percpu_write(pcp, val) \
+-	__percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp))
++	_pcp_protect(__percpu_or, pcp, val)
+ 
+ #define _percpu_xchg(pcp, val) (typeof(pcp)) \
+-	(__percpu_xchg(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp)))
++	_pcp_protect(__percpu_xchg, pcp, (unsigned long)(val))
+ 
+ #define this_cpu_add_1(pcp, val) _percpu_add(pcp, val)
+ #define this_cpu_add_2(pcp, val) _percpu_add(pcp, val)
+diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec2-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec2-0.dtsi
+index 1382fec9e8c5..7fcb1ac0f232 100644
+--- a/arch/powerpc/boot/dts/fsl/pq3-etsec2-0.dtsi
++++ b/arch/powerpc/boot/dts/fsl/pq3-etsec2-0.dtsi
+@@ -50,6 +50,7 @@ ethernet@b0000 {
+ 	fsl,num_tx_queues = <0x8>;
+ 	fsl,magic-packet;
+ 	local-mac-address = [ 00 00 00 00 00 00 ];
++	ranges;
+ 
+ 	queue-group@b0000 {
+ 		#address-cells = <1>;
+diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec2-1.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec2-1.dtsi
+index 221cd2ea5b31..9f25427c1527 100644
+--- a/arch/powerpc/boot/dts/fsl/pq3-etsec2-1.dtsi
++++ b/arch/powerpc/boot/dts/fsl/pq3-etsec2-1.dtsi
+@@ -50,6 +50,7 @@ ethernet@b1000 {
+ 	fsl,num_tx_queues = <0x8>;
+ 	fsl,magic-packet;
+ 	local-mac-address = [ 00 00 00 00 00 00 ];
++	ranges;
+ 
+ 	queue-group@b1000 {
+ 		#address-cells = <1>;
+diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec2-2.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec2-2.dtsi
+index 61456c317609..cd7c318ab131 100644
+--- a/arch/powerpc/boot/dts/fsl/pq3-etsec2-2.dtsi
++++ b/arch/powerpc/boot/dts/fsl/pq3-etsec2-2.dtsi
+@@ -49,6 +49,7 @@ ethernet@b2000 {
+ 	fsl,num_tx_queues = <0x8>;
+ 	fsl,magic-packet;
+ 	local-mac-address = [ 00 00 00 00 00 00 ];
++	ranges;
+ 
+ 	queue-group@b2000 {
+ 		#address-cells = <1>;
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index c2df8150bd7a..9519e6bdc6d7 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -1408,7 +1408,7 @@ machine_check_handle_early:
+ 	bne	9f			/* continue in V mode if we are. */
+ 
+ 5:
+-#ifdef CONFIG_KVM_BOOK3S_64_HV
++#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+ 	/*
+ 	 * We are coming from kernel context. Check if we are coming from
+ 	 * guest. if yes, then we can continue. We will fall through
+diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
+index e7cb6d4a871a..f8c9ff7886e1 100644
+--- a/arch/powerpc/platforms/pseries/mobility.c
++++ b/arch/powerpc/platforms/pseries/mobility.c
+@@ -25,10 +25,10 @@
+ static struct kobject *mobility_kobj;
+ 
+ struct update_props_workarea {
+-	u32 phandle;
+-	u32 state;
+-	u64 reserved;
+-	u32 nprops;
++	__be32 phandle;
++	__be32 state;
++	__be64 reserved;
++	__be32 nprops;
+ } __packed;
+ 
+ #define NODE_ACTION_MASK	0xff000000
+@@ -54,11 +54,11 @@ static int mobility_rtas_call(int token, char *buf, s32 scope)
+ 	return rc;
+ }
+ 
+-static int delete_dt_node(u32 phandle)
++static int delete_dt_node(__be32 phandle)
+ {
+ 	struct device_node *dn;
+ 
+-	dn = of_find_node_by_phandle(phandle);
++	dn = of_find_node_by_phandle(be32_to_cpu(phandle));
+ 	if (!dn)
+ 		return -ENOENT;
+ 
+@@ -127,7 +127,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop,
+ 	return 0;
+ }
+ 
+-static int update_dt_node(u32 phandle, s32 scope)
++static int update_dt_node(__be32 phandle, s32 scope)
+ {
+ 	struct update_props_workarea *upwa;
+ 	struct device_node *dn;
+@@ -136,6 +136,7 @@ static int update_dt_node(u32 phandle, s32 scope)
+ 	char *prop_data;
+ 	char *rtas_buf;
+ 	int update_properties_token;
++	u32 nprops;
+ 	u32 vd;
+ 
+ 	update_properties_token = rtas_token("ibm,update-properties");
+@@ -146,7 +147,7 @@ static int update_dt_node(u32 phandle, s32 scope)
+ 	if (!rtas_buf)
+ 		return -ENOMEM;
+ 
+-	dn = of_find_node_by_phandle(phandle);
++	dn = of_find_node_by_phandle(be32_to_cpu(phandle));
+ 	if (!dn) {
+ 		kfree(rtas_buf);
+ 		return -ENOENT;
+@@ -162,6 +163,7 @@ static int update_dt_node(u32 phandle, s32 scope)
+ 			break;
+ 
+ 		prop_data = rtas_buf + sizeof(*upwa);
++		nprops = be32_to_cpu(upwa->nprops);
+ 
+ 		/* On the first call to ibm,update-properties for a node
+ 		 * the first property value descriptor contains an empty
+@@ -170,17 +172,17 @@ static int update_dt_node(u32 phandle, s32 scope)
+ 		 */
+ 		if (*prop_data == 0) {
+ 			prop_data++;
+-			vd = *(u32 *)prop_data;
++			vd = be32_to_cpu(*(__be32 *)prop_data);
+ 			prop_data += vd + sizeof(vd);
+-			upwa->nprops--;
++			nprops--;
+ 		}
+ 
+-		for (i = 0; i < upwa->nprops; i++) {
++		for (i = 0; i < nprops; i++) {
+ 			char *prop_name;
+ 
+ 			prop_name = prop_data;
+ 			prop_data += strlen(prop_name) + 1;
+-			vd = *(u32 *)prop_data;
++			vd = be32_to_cpu(*(__be32 *)prop_data);
+ 			prop_data += sizeof(vd);
+ 
+ 			switch (vd) {
+@@ -212,13 +214,13 @@ static int update_dt_node(u32 phandle, s32 scope)
+ 	return 0;
+ }
+ 
+-static int add_dt_node(u32 parent_phandle, u32 drc_index)
++static int add_dt_node(__be32 parent_phandle, __be32 drc_index)
+ {
+ 	struct device_node *dn;
+ 	struct device_node *parent_dn;
+ 	int rc;
+ 
+-	parent_dn = of_find_node_by_phandle(parent_phandle);
++	parent_dn = of_find_node_by_phandle(be32_to_cpu(parent_phandle));
+ 	if (!parent_dn)
+ 		return -ENOENT;
+ 
+@@ -237,7 +239,7 @@ static int add_dt_node(u32 parent_phandle, u32 drc_index)
+ int pseries_devicetree_update(s32 scope)
+ {
+ 	char *rtas_buf;
+-	u32 *data;
++	__be32 *data;
+ 	int update_nodes_token;
+ 	int rc;
+ 
+@@ -254,17 +256,17 @@ int pseries_devicetree_update(s32 scope)
+ 		if (rc && rc != 1)
+ 			break;
+ 
+-		data = (u32 *)rtas_buf + 4;
+-		while (*data & NODE_ACTION_MASK) {
++		data = (__be32 *)rtas_buf + 4;
++		while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
+ 			int i;
+-			u32 action = *data & NODE_ACTION_MASK;
+-			int node_count = *data & NODE_COUNT_MASK;
++			u32 action = be32_to_cpu(*data) & NODE_ACTION_MASK;
++			u32 node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;
+ 
+ 			data++;
+ 
+ 			for (i = 0; i < node_count; i++) {
+-				u32 phandle = *data++;
+-				u32 drc_index;
++				__be32 phandle = *data++;
++				__be32 drc_index;
+ 
+ 				switch (action) {
+ 				case DELETE_DT_NODE:
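
The __be32 annotations above encode the rule the patch enforces: ibm,update-nodes hands back big-endian words, so every field must pass through be32_to_cpu() before it is masked or counted. A userspace demonstration of what goes wrong on a little-endian host (ntohl() plays the role of be32_to_cpu(); the NODE_COUNT_MASK value is assumed to be the complement of the action mask shown earlier):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define NODE_ACTION_MASK	0xff000000
#define NODE_COUNT_MASK		0x00ffffff	/* assumed complement */

int main(void)
{
	uint32_t wire = htonl(0x01000003);	/* big-endian, as from RTAS */

	/* Wrong on little-endian: masking the raw word reads garbage. */
	printf("raw:   action=0x%08x\n", wire & NODE_ACTION_MASK);

	/* Right: convert first, exactly what the patch adds. */
	printf("fixed: action=0x%08x count=%u\n",
	       ntohl(wire) & NODE_ACTION_MASK,
	       ntohl(wire) & NODE_COUNT_MASK);
	return 0;
}
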
+diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
+index 0da5865df5b1..8e1f5f67e25f 100644
+--- a/drivers/base/regmap/internal.h
++++ b/drivers/base/regmap/internal.h
+@@ -237,4 +237,12 @@ extern struct regcache_ops regcache_rbtree_ops;
+ extern struct regcache_ops regcache_lzo_ops;
+ extern struct regcache_ops regcache_flat_ops;
+ 
++static inline const char *regmap_name(const struct regmap *map)
++{
++	if (map->dev)
++		return dev_name(map->dev);
++
++	return map->name;
++}
++
+ #endif
+diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
+index d453a2c98ad0..81751a49d8bf 100644
+--- a/drivers/base/regmap/regcache-rbtree.c
++++ b/drivers/base/regmap/regcache-rbtree.c
+@@ -307,7 +307,7 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
+ 	if (pos == 0) {
+ 		memmove(blk + offset * map->cache_word_size,
+ 			blk, rbnode->blklen * map->cache_word_size);
+-		bitmap_shift_right(present, present, offset, blklen);
++		bitmap_shift_left(present, present, offset, blklen);
+ 	}
+ 
+ 	/* update the rbnode block, its size and the base register */
+diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
+index f373c35f9e1d..f5db662e951e 100644
+--- a/drivers/base/regmap/regcache.c
++++ b/drivers/base/regmap/regcache.c
+@@ -218,7 +218,7 @@ int regcache_read(struct regmap *map,
+ 		ret = map->cache_ops->read(map, reg, value);
+ 
+ 		if (ret == 0)
+-			trace_regmap_reg_read_cache(map->dev, reg, *value);
++			trace_regmap_reg_read_cache(map, reg, *value);
+ 
+ 		return ret;
+ 	}
+@@ -311,7 +311,7 @@ int regcache_sync(struct regmap *map)
+ 	dev_dbg(map->dev, "Syncing %s cache\n",
+ 		map->cache_ops->name);
+ 	name = map->cache_ops->name;
+-	trace_regcache_sync(map->dev, name, "start");
++	trace_regcache_sync(map, name, "start");
+ 
+ 	if (!map->cache_dirty)
+ 		goto out;
+@@ -346,7 +346,7 @@ out:
+ 
+ 	regmap_async_complete(map);
+ 
+-	trace_regcache_sync(map->dev, name, "stop");
++	trace_regcache_sync(map, name, "stop");
+ 
+ 	return ret;
+ }
+@@ -381,7 +381,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
+ 	name = map->cache_ops->name;
+ 	dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);
+ 
+-	trace_regcache_sync(map->dev, name, "start region");
++	trace_regcache_sync(map, name, "start region");
+ 
+ 	if (!map->cache_dirty)
+ 		goto out;
+@@ -401,7 +401,7 @@ out:
+ 
+ 	regmap_async_complete(map);
+ 
+-	trace_regcache_sync(map->dev, name, "stop region");
++	trace_regcache_sync(map, name, "stop region");
+ 
+ 	return ret;
+ }
+@@ -428,7 +428,7 @@ int regcache_drop_region(struct regmap *map, unsigned int min,
+ 
+ 	map->lock(map->lock_arg);
+ 
+-	trace_regcache_drop_region(map->dev, min, max);
++	trace_regcache_drop_region(map, min, max);
+ 
+ 	ret = map->cache_ops->drop(map, min, max);
+ 
+@@ -455,7 +455,7 @@ void regcache_cache_only(struct regmap *map, bool enable)
+ 	map->lock(map->lock_arg);
+ 	WARN_ON(map->cache_bypass && enable);
+ 	map->cache_only = enable;
+-	trace_regmap_cache_only(map->dev, enable);
++	trace_regmap_cache_only(map, enable);
+ 	map->unlock(map->lock_arg);
+ }
+ EXPORT_SYMBOL_GPL(regcache_cache_only);
+@@ -493,7 +493,7 @@ void regcache_cache_bypass(struct regmap *map, bool enable)
+ 	map->lock(map->lock_arg);
+ 	WARN_ON(map->cache_only && enable);
+ 	map->cache_bypass = enable;
+-	trace_regmap_cache_bypass(map->dev, enable);
++	trace_regmap_cache_bypass(map, enable);
+ 	map->unlock(map->lock_arg);
+ }
+ EXPORT_SYMBOL_GPL(regcache_cache_bypass);
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index d2f8a818d200..ee731bb7d957 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -1280,7 +1280,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
+ 	if (map->async && map->bus->async_write) {
+ 		struct regmap_async *async;
+ 
+-		trace_regmap_async_write_start(map->dev, reg, val_len);
++		trace_regmap_async_write_start(map, reg, val_len);
+ 
+ 		spin_lock_irqsave(&map->async_lock, flags);
+ 		async = list_first_entry_or_null(&map->async_free,
+@@ -1338,8 +1338,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
+ 		return ret;
+ 	}
+ 
+-	trace_regmap_hw_write_start(map->dev, reg,
+-				    val_len / map->format.val_bytes);
++	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);
+ 
+ 	/* If we're doing a single register write we can probably just
+ 	 * send the work_buf directly, otherwise try to do a gather
+@@ -1371,8 +1370,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
+ 		kfree(buf);
+ 	}
+ 
+-	trace_regmap_hw_write_done(map->dev, reg,
+-				   val_len / map->format.val_bytes);
++	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
+ 
+ 	return ret;
+ }
+@@ -1406,12 +1404,12 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
+ 
+ 	map->format.format_write(map, reg, val);
+ 
+-	trace_regmap_hw_write_start(map->dev, reg, 1);
++	trace_regmap_hw_write_start(map, reg, 1);
+ 
+ 	ret = map->bus->write(map->bus_context, map->work_buf,
+ 			      map->format.buf_size);
+ 
+-	trace_regmap_hw_write_done(map->dev, reg, 1);
++	trace_regmap_hw_write_done(map, reg, 1);
+ 
+ 	return ret;
+ }
+@@ -1469,7 +1467,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
+ 		dev_info(map->dev, "%x <= %x\n", reg, val);
+ #endif
+ 
+-	trace_regmap_reg_write(map->dev, reg, val);
++	trace_regmap_reg_write(map, reg, val);
+ 
+ 	return map->reg_write(context, reg, val);
+ }
+@@ -1772,7 +1770,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
+ 	for (i = 0; i < num_regs; i++) {
+ 		int reg = regs[i].reg;
+ 		int val = regs[i].def;
+-		trace_regmap_hw_write_start(map->dev, reg, 1);
++		trace_regmap_hw_write_start(map, reg, 1);
+ 		map->format.format_reg(u8, reg, map->reg_shift);
+ 		u8 += reg_bytes + pad_bytes;
+ 		map->format.format_val(u8, val, 0);
+@@ -1787,7 +1785,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
+ 
+ 	for (i = 0; i < num_regs; i++) {
+ 		int reg = regs[i].reg;
+-		trace_regmap_hw_write_done(map->dev, reg, 1);
++		trace_regmap_hw_write_done(map, reg, 1);
+ 	}
+ 	return ret;
+ }
+@@ -2058,15 +2056,13 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
+ 	 */
+ 	u8[0] |= map->read_flag_mask;
+ 
+-	trace_regmap_hw_read_start(map->dev, reg,
+-				   val_len / map->format.val_bytes);
++	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);
+ 
+ 	ret = map->bus->read(map->bus_context, map->work_buf,
+ 			     map->format.reg_bytes + map->format.pad_bytes,
+ 			     val, val_len);
+ 
+-	trace_regmap_hw_read_done(map->dev, reg,
+-				  val_len / map->format.val_bytes);
++	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);
+ 
+ 	return ret;
+ }
+@@ -2122,7 +2118,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
+ 			dev_info(map->dev, "%x => %x\n", reg, *val);
+ #endif
+ 
+-		trace_regmap_reg_read(map->dev, reg, *val);
++		trace_regmap_reg_read(map, reg, *val);
+ 
+ 		if (!map->cache_bypass)
+ 			regcache_write(map, reg, *val);
+@@ -2479,7 +2475,7 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
+ 	struct regmap *map = async->map;
+ 	bool wake;
+ 
+-	trace_regmap_async_io_complete(map->dev);
++	trace_regmap_async_io_complete(map);
+ 
+ 	spin_lock(&map->async_lock);
+ 	list_move(&async->list, &map->async_free);
+@@ -2524,7 +2520,7 @@ int regmap_async_complete(struct regmap *map)
+ 	if (!map->bus || !map->bus->async_write)
+ 		return 0;
+ 
+-	trace_regmap_async_complete_start(map->dev);
++	trace_regmap_async_complete_start(map);
+ 
+ 	wait_event(map->async_waitq, regmap_async_is_done(map));
+ 
+@@ -2533,7 +2529,7 @@ int regmap_async_complete(struct regmap *map)
+ 	map->async_ret = 0;
+ 	spin_unlock_irqrestore(&map->async_lock, flags);
+ 
+-	trace_regmap_async_complete_done(map->dev);
++	trace_regmap_async_complete_done(map);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/clocksource/time-efm32.c b/drivers/clocksource/time-efm32.c
+index bba62f9deefb..ec57ba2bbd87 100644
+--- a/drivers/clocksource/time-efm32.c
++++ b/drivers/clocksource/time-efm32.c
+@@ -225,12 +225,12 @@ static int __init efm32_clockevent_init(struct device_node *np)
+ 	clock_event_ddata.base = base;
+ 	clock_event_ddata.periodic_top = DIV_ROUND_CLOSEST(rate, 1024 * HZ);
+ 
+-	setup_irq(irq, &efm32_clock_event_irq);
+-
+ 	clockevents_config_and_register(&clock_event_ddata.evtdev,
+ 					DIV_ROUND_CLOSEST(rate, 1024),
+ 					0xf, 0xffff);
+ 
++	setup_irq(irq, &efm32_clock_event_irq);
++
+ 	return 0;
+ 
+ err_get_irq:
+diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
+index 02268448dc85..5dcbf90b8015 100644
+--- a/drivers/clocksource/timer-sun5i.c
++++ b/drivers/clocksource/timer-sun5i.c
+@@ -178,10 +178,6 @@ static void __init sun5i_timer_init(struct device_node *node)
+ 
+ 	ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
+ 
+-	ret = setup_irq(irq, &sun5i_timer_irq);
+-	if (ret)
+-		pr_warn("failed to setup irq %d\n", irq);
+-
+ 	/* Enable timer0 interrupt */
+ 	val = readl(timer_base + TIMER_IRQ_EN_REG);
+ 	writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG);
+@@ -191,6 +187,10 @@ static void __init sun5i_timer_init(struct device_node *node)
+ 
+ 	clockevents_config_and_register(&sun5i_clockevent, rate,
+ 					TIMER_SYNC_TICKS, 0xffffffff);
++
++	ret = setup_irq(irq, &sun5i_timer_irq);
++	if (ret)
++		pr_warn("failed to setup irq %d\n", irq);
+ }
+ CLOCKSOURCE_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer",
+ 		       sun5i_timer_init);
+diff --git a/drivers/cpuidle/cpuidle-mvebu-v7.c b/drivers/cpuidle/cpuidle-mvebu-v7.c
+index 38e68618513a..cefa07438ae1 100644
+--- a/drivers/cpuidle/cpuidle-mvebu-v7.c
++++ b/drivers/cpuidle/cpuidle-mvebu-v7.c
+@@ -37,11 +37,11 @@ static int mvebu_v7_enter_idle(struct cpuidle_device *dev,
+ 		deepidle = true;
+ 
+ 	ret = mvebu_v7_cpu_suspend(deepidle);
++	cpu_pm_exit();
++
+ 	if (ret)
+ 		return ret;
+ 
+-	cpu_pm_exit();
+-
+ 	return index;
+ }
+ 
+diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
+index 32ea1aca7a0e..272f01fe667b 100644
+--- a/drivers/dma/dw/platform.c
++++ b/drivers/dma/dw/platform.c
+@@ -26,6 +26,8 @@
+ 
+ #include "internal.h"
+ 
++#define DRV_NAME	"dw_dmac"
++
+ static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
+ 					struct of_dma *ofdma)
+ {
+@@ -284,7 +286,7 @@ static struct platform_driver dw_driver = {
+ 	.remove		= dw_remove,
+ 	.shutdown       = dw_shutdown,
+ 	.driver = {
+-		.name	= "dw_dmac",
++		.name	= DRV_NAME,
+ 		.pm	= &dw_dev_pm_ops,
+ 		.of_match_table = of_match_ptr(dw_dma_of_id_table),
+ 		.acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
+@@ -305,3 +307,4 @@ module_exit(dw_exit);
+ 
+ MODULE_LICENSE("GPL v2");
+ MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver");
++MODULE_ALIAS("platform:" DRV_NAME);
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 23e26e0768b5..2176874a41b1 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -120,32 +120,41 @@ void synaptics_reset(struct psmouse *psmouse)
+ 
+ static bool cr48_profile_sensor;
+ 
++#define ANY_BOARD_ID 0
+ struct min_max_quirk {
+ 	const char * const *pnp_ids;
++	struct {
++		unsigned long int min, max;
++	} board_id;
+ 	int x_min, x_max, y_min, y_max;
+ };
+ 
+ static const struct min_max_quirk min_max_pnpid_table[] = {
+ 	{
+ 		(const char * const []){"LEN0033", NULL},
++		{ANY_BOARD_ID, ANY_BOARD_ID},
+ 		1024, 5052, 2258, 4832
+ 	},
+ 	{
+-		(const char * const []){"LEN0035", "LEN0042", NULL},
++		(const char * const []){"LEN0042", NULL},
++		{ANY_BOARD_ID, ANY_BOARD_ID},
+ 		1232, 5710, 1156, 4696
+ 	},
+ 	{
+ 		(const char * const []){"LEN0034", "LEN0036", "LEN0037",
+ 					"LEN0039", "LEN2002", "LEN2004",
+ 					NULL},
++		{ANY_BOARD_ID, 2961},
+ 		1024, 5112, 2024, 4832
+ 	},
+ 	{
+ 		(const char * const []){"LEN2001", NULL},
++		{ANY_BOARD_ID, ANY_BOARD_ID},
+ 		1024, 5022, 2508, 4832
+ 	},
+ 	{
+ 		(const char * const []){"LEN2006", NULL},
++		{ANY_BOARD_ID, ANY_BOARD_ID},
+ 		1264, 5675, 1171, 4688
+ 	},
+ 	{ }
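
The table gains a board-id window because later firmware reports correct coordinates on its own; ANY_BOARD_ID (0) leaves a bound unchecked. A compact userspace rendering of the matching rule (struct quirk and match() are invented; the LEN0034 row reuses the bounds from the table above):

#include <stdio.h>
#include <string.h>

#define ANY_BOARD_ID 0

struct quirk {
	const char *pnp_id;
	unsigned long min, max;		/* board-id window */
	int x_min, x_max;
};

static const struct quirk quirks[] = {
	{ "LEN0034", ANY_BOARD_ID, 2961, 1024, 5112 },
	{ NULL }
};

static const struct quirk *match(const char *pnp, unsigned long board)
{
	for (const struct quirk *q = quirks; q->pnp_id; q++) {
		if (strcmp(q->pnp_id, pnp))
			continue;
		if (q->min != ANY_BOARD_ID && board < q->min)
			continue;
		if (q->max != ANY_BOARD_ID && board > q->max)
			continue;
		return q;
	}
	return NULL;
}

int main(void)
{
	printf("board 2961: %s\n", match("LEN0034", 2961) ? "quirked" : "-");
	printf("board 2962: %s\n", match("LEN0034", 2962) ? "quirked" : "-");
	return 0;
}
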
+@@ -241,6 +250,10 @@ static int synaptics_board_id(struct psmouse *psmouse)
+ 	struct synaptics_data *priv = psmouse->private;
+ 	unsigned char bid[3];
+ 
++	/* firmwares prior to 7.5 have no board_id encoded */
++	if (SYN_ID_FULL(priv->identity) < 0x705)
++		return 0;
++
+ 	if (synaptics_send_cmd(psmouse, SYN_QUE_MODES, bid))
+ 		return -1;
+ 	priv->board_id = ((bid[0] & 0xfc) << 6) | bid[1];
+@@ -343,7 +356,6 @@ static int synaptics_resolution(struct psmouse *psmouse)
+ {
+ 	struct synaptics_data *priv = psmouse->private;
+ 	unsigned char resp[3];
+-	int i;
+ 
+ 	if (SYN_ID_MAJOR(priv->identity) < 4)
+ 		return 0;
+@@ -355,17 +367,6 @@ static int synaptics_resolution(struct psmouse *psmouse)
+ 		}
+ 	}
+ 
+-	for (i = 0; min_max_pnpid_table[i].pnp_ids; i++) {
+-		if (psmouse_matches_pnp_id(psmouse,
+-					   min_max_pnpid_table[i].pnp_ids)) {
+-			priv->x_min = min_max_pnpid_table[i].x_min;
+-			priv->x_max = min_max_pnpid_table[i].x_max;
+-			priv->y_min = min_max_pnpid_table[i].y_min;
+-			priv->y_max = min_max_pnpid_table[i].y_max;
+-			return 0;
+-		}
+-	}
+-
+ 	if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 5 &&
+ 	    SYN_CAP_MAX_DIMENSIONS(priv->ext_cap_0c)) {
+ 		if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MAX_COORDS, resp)) {
+@@ -374,23 +375,69 @@ static int synaptics_resolution(struct psmouse *psmouse)
+ 		} else {
+ 			priv->x_max = (resp[0] << 5) | ((resp[1] & 0x0f) << 1);
+ 			priv->y_max = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3);
++			psmouse_info(psmouse,
++				     "queried max coordinates: x [..%d], y [..%d]\n",
++				     priv->x_max, priv->y_max);
+ 		}
+ 	}
+ 
+-	if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 7 &&
+-	    SYN_CAP_MIN_DIMENSIONS(priv->ext_cap_0c)) {
++	if (SYN_CAP_MIN_DIMENSIONS(priv->ext_cap_0c) &&
++	    (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 7 ||
++	     /*
++	      * Firmware v8.1 does not report proper number of extended
++	      * capabilities, but has been proven to report correct min
++	      * coordinates.
++	      */
++	     SYN_ID_FULL(priv->identity) == 0x801)) {
+ 		if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MIN_COORDS, resp)) {
+ 			psmouse_warn(psmouse,
+ 				     "device claims to have min coordinates query, but I'm not able to read it.\n");
+ 		} else {
+ 			priv->x_min = (resp[0] << 5) | ((resp[1] & 0x0f) << 1);
+ 			priv->y_min = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3);
++			psmouse_info(psmouse,
++				     "queried min coordinates: x [%d..], y [%d..]\n",
++				     priv->x_min, priv->y_min);
+ 		}
+ 	}
+ 
+ 	return 0;
+ }
+ 
++/*
++ * Apply quirk(s) if the hardware matches
++ */
++
++static void synaptics_apply_quirks(struct psmouse *psmouse)
++{
++	struct synaptics_data *priv = psmouse->private;
++	int i;
++
++	for (i = 0; min_max_pnpid_table[i].pnp_ids; i++) {
++		if (!psmouse_matches_pnp_id(psmouse,
++					    min_max_pnpid_table[i].pnp_ids))
++			continue;
++
++		if (min_max_pnpid_table[i].board_id.min != ANY_BOARD_ID &&
++		    priv->board_id < min_max_pnpid_table[i].board_id.min)
++			continue;
++
++		if (min_max_pnpid_table[i].board_id.max != ANY_BOARD_ID &&
++		    priv->board_id > min_max_pnpid_table[i].board_id.max)
++			continue;
++
++		priv->x_min = min_max_pnpid_table[i].x_min;
++		priv->x_max = min_max_pnpid_table[i].x_max;
++		priv->y_min = min_max_pnpid_table[i].y_min;
++		priv->y_max = min_max_pnpid_table[i].y_max;
++		psmouse_info(psmouse,
++			     "quirked min/max coordinates: x [%d..%d], y [%d..%d]\n",
++			     priv->x_min, priv->x_max,
++			     priv->y_min, priv->y_max);
++		break;
++	}
++}
++
+ static int synaptics_query_hardware(struct psmouse *psmouse)
+ {
+ 	if (synaptics_identify(psmouse))
+@@ -406,6 +453,8 @@ static int synaptics_query_hardware(struct psmouse *psmouse)
+ 	if (synaptics_resolution(psmouse))
+ 		return -1;
+ 
++	synaptics_apply_quirks(psmouse);
++
+ 	return 0;
+ }
+ 
+@@ -613,6 +662,18 @@ static void synaptics_parse_agm(const unsigned char buf[],
+ 	priv->agm_pending = true;
+ }
+ 
++static void synaptics_parse_ext_buttons(const unsigned char buf[],
++					struct synaptics_data *priv,
++					struct synaptics_hw_state *hw)
++{
++	unsigned int ext_bits =
++		(SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) + 1) >> 1;
++	unsigned int ext_mask = GENMASK(ext_bits - 1, 0);
++
++	hw->ext_buttons = buf[4] & ext_mask;
++	hw->ext_buttons |= (buf[5] & ext_mask) << ext_bits;
++}
++
+ static bool is_forcepad;
+ 
+ static int synaptics_parse_hw_state(const unsigned char buf[],
+@@ -699,28 +760,9 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
+ 			hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0;
+ 		}
+ 
+-		if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) &&
++		if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) > 0 &&
+ 		    ((buf[0] ^ buf[3]) & 0x02)) {
+-			switch (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) & ~0x01) {
+-			default:
+-				/*
+-				 * if nExtBtn is greater than 8 it should be
+-				 * considered invalid and treated as 0
+-				 */
+-				break;
+-			case 8:
+-				hw->ext_buttons |= ((buf[5] & 0x08)) ? 0x80 : 0;
+-				hw->ext_buttons |= ((buf[4] & 0x08)) ? 0x40 : 0;
+-			case 6:
+-				hw->ext_buttons |= ((buf[5] & 0x04)) ? 0x20 : 0;
+-				hw->ext_buttons |= ((buf[4] & 0x04)) ? 0x10 : 0;
+-			case 4:
+-				hw->ext_buttons |= ((buf[5] & 0x02)) ? 0x08 : 0;
+-				hw->ext_buttons |= ((buf[4] & 0x02)) ? 0x04 : 0;
+-			case 2:
+-				hw->ext_buttons |= ((buf[5] & 0x01)) ? 0x02 : 0;
+-				hw->ext_buttons |= ((buf[4] & 0x01)) ? 0x01 : 0;
+-			}
++			synaptics_parse_ext_buttons(buf, priv, hw);
+ 		}
+ 	} else {
+ 		hw->x = (((buf[1] & 0x1f) << 8) | buf[2]);
+@@ -782,12 +824,35 @@ static void synaptics_report_semi_mt_data(struct input_dev *dev,
+ 	}
+ }
+ 
++static void synaptics_report_ext_buttons(struct psmouse *psmouse,
++					 const struct synaptics_hw_state *hw)
++{
++	struct input_dev *dev = psmouse->dev;
++	struct synaptics_data *priv = psmouse->private;
++	int ext_bits = (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) + 1) >> 1;
++	int i;
++
++	if (!SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap))
++		return;
++
++	/* Bug in FW 8.1, buttons are reported only when ExtBit is 1 */
++	if (SYN_ID_FULL(priv->identity) == 0x801 &&
++	    !((psmouse->packet[0] ^ psmouse->packet[3]) & 0x02))
++		return;
++
++	for (i = 0; i < ext_bits; i++) {
++		input_report_key(dev, BTN_0 + 2 * i,
++			hw->ext_buttons & (1 << i));
++		input_report_key(dev, BTN_1 + 2 * i,
++			hw->ext_buttons & (1 << (i + ext_bits)));
++	}
++}
++
+ static void synaptics_report_buttons(struct psmouse *psmouse,
+ 				     const struct synaptics_hw_state *hw)
+ {
+ 	struct input_dev *dev = psmouse->dev;
+ 	struct synaptics_data *priv = psmouse->private;
+-	int i;
+ 
+ 	input_report_key(dev, BTN_LEFT, hw->left);
+ 	input_report_key(dev, BTN_RIGHT, hw->right);
+@@ -800,8 +865,7 @@ static void synaptics_report_buttons(struct psmouse *psmouse,
+ 		input_report_key(dev, BTN_BACK, hw->down);
+ 	}
+ 
+-	for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++)
+-		input_report_key(dev, BTN_0 + i, hw->ext_buttons & (1 << i));
++	synaptics_report_ext_buttons(psmouse, hw);
+ }
+ 
+ static void synaptics_report_slot(struct input_dev *dev, int slot,
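
The ext-button rework replaces the unrolled switch with mask arithmetic: with N extra buttons, each of buf[4] and buf[5] carries ceil(N/2) of them in its low bits. A standalone sketch (the GENMASK() definition mirrors the kernel's for 32-bit use; the packet bytes are invented):

#include <stdio.h>

#define GENMASK(h, l) (((~0u) >> (31 - (h))) & ((~0u) << (l)))

int main(void)
{
	unsigned int n_ext = 6;				/* extra buttons */
	unsigned int ext_bits = (n_ext + 1) >> 1;	/* bits per byte */
	unsigned int ext_mask = GENMASK(ext_bits - 1, 0);
	unsigned char buf4 = 0x05, buf5 = 0x03;		/* sample bytes */

	/* Low half from buf[4], high half from buf[5], as in the patch. */
	unsigned int buttons = (buf4 & ext_mask) |
			       ((buf5 & ext_mask) << ext_bits);

	printf("mask=0x%x buttons=0x%02x\n", ext_mask, buttons);
	return 0;
}
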
+diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
+index 37de0173b6d2..74adcd2c967e 100644
+--- a/drivers/md/dm-io.c
++++ b/drivers/md/dm-io.c
+@@ -289,9 +289,16 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
+ 	struct request_queue *q = bdev_get_queue(where->bdev);
+ 	unsigned short logical_block_size = queue_logical_block_size(q);
+ 	sector_t num_sectors;
++	unsigned int uninitialized_var(special_cmd_max_sectors);
+ 
+-	/* Reject unsupported discard requests */
+-	if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) {
++	/*
++	 * Reject unsupported discard and write same requests.
++	 */
++	if (rw & REQ_DISCARD)
++		special_cmd_max_sectors = q->limits.max_discard_sectors;
++	else if (rw & REQ_WRITE_SAME)
++		special_cmd_max_sectors = q->limits.max_write_same_sectors;
++	if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) {
+ 		dec_count(io, region, -EOPNOTSUPP);
+ 		return;
+ 	}
+@@ -317,7 +324,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
+ 		store_io_and_region_in_bio(bio, io, region);
+ 
+ 		if (rw & REQ_DISCARD) {
+-			num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
++			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
+ 			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
+ 			remaining -= num_sectors;
+ 		} else if (rw & REQ_WRITE_SAME) {
+@@ -326,7 +333,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
+ 			 */
+ 			dp->get_page(dp, &page, &len, &offset);
+ 			bio_add_page(bio, page, logical_block_size, offset);
+-			num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
++			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
+ 			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
+ 
+ 			offset = 0;
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index 8b204ae216ab..f83a0f3fc365 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -20,6 +20,8 @@
+ #include <linux/log2.h>
+ #include <linux/dm-kcopyd.h>
+ 
++#include "dm.h"
++
+ #include "dm-exception-store.h"
+ 
+ #define DM_MSG_PREFIX "snapshots"
+@@ -291,12 +293,23 @@ struct origin {
+ };
+ 
+ /*
++ * This structure is allocated for each origin target
++ */
++struct dm_origin {
++	struct dm_dev *dev;
++	struct dm_target *ti;
++	unsigned split_boundary;
++	struct list_head hash_list;
++};
++
++/*
+  * Size of the hash table for origin volumes. If we make this
+  * the size of the minors list then it should be nearly perfect
+  */
+ #define ORIGIN_HASH_SIZE 256
+ #define ORIGIN_MASK      0xFF
+ static struct list_head *_origins;
++static struct list_head *_dm_origins;
+ static struct rw_semaphore _origins_lock;
+ 
+ static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
+@@ -310,12 +323,22 @@ static int init_origin_hash(void)
+ 	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
+ 			   GFP_KERNEL);
+ 	if (!_origins) {
+-		DMERR("unable to allocate memory");
++		DMERR("unable to allocate memory for _origins");
+ 		return -ENOMEM;
+ 	}
+-
+ 	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
+ 		INIT_LIST_HEAD(_origins + i);
++
++	_dm_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
++			      GFP_KERNEL);
++	if (!_dm_origins) {
++		DMERR("unable to allocate memory for _dm_origins");
++		kfree(_origins);
++		return -ENOMEM;
++	}
++	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
++		INIT_LIST_HEAD(_dm_origins + i);
++
+ 	init_rwsem(&_origins_lock);
+ 
+ 	return 0;
+@@ -324,6 +347,7 @@ static int init_origin_hash(void)
+ static void exit_origin_hash(void)
+ {
+ 	kfree(_origins);
++	kfree(_dm_origins);
+ }
+ 
+ static unsigned origin_hash(struct block_device *bdev)
+@@ -350,6 +374,30 @@ static void __insert_origin(struct origin *o)
+ 	list_add_tail(&o->hash_list, sl);
+ }
+ 
++static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
++{
++	struct list_head *ol;
++	struct dm_origin *o;
++
++	ol = &_dm_origins[origin_hash(origin)];
++	list_for_each_entry (o, ol, hash_list)
++		if (bdev_equal(o->dev->bdev, origin))
++			return o;
++
++	return NULL;
++}
++
++static void __insert_dm_origin(struct dm_origin *o)
++{
++	struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)];
++	list_add_tail(&o->hash_list, sl);
++}
++
++static void __remove_dm_origin(struct dm_origin *o)
++{
++	list_del(&o->hash_list);
++}
++
+ /*
+  * _origins_lock must be held when calling this function.
+  * Returns number of snapshots registered using the supplied cow device, plus:
+@@ -1840,9 +1888,40 @@ static int snapshot_preresume(struct dm_target *ti)
+ static void snapshot_resume(struct dm_target *ti)
+ {
+ 	struct dm_snapshot *s = ti->private;
+-	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
++	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL;
++	struct dm_origin *o;
++	struct mapped_device *origin_md = NULL;
++	bool must_restart_merging = false;
+ 
+ 	down_read(&_origins_lock);
++
++	o = __lookup_dm_origin(s->origin->bdev);
++	if (o)
++		origin_md = dm_table_get_md(o->ti->table);
++	if (!origin_md) {
++		(void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging);
++		if (snap_merging)
++			origin_md = dm_table_get_md(snap_merging->ti->table);
++	}
++	if (origin_md == dm_table_get_md(ti->table))
++		origin_md = NULL;
++	if (origin_md) {
++		if (dm_hold(origin_md))
++			origin_md = NULL;
++	}
++
++	up_read(&_origins_lock);
++
++	if (origin_md) {
++		dm_internal_suspend_fast(origin_md);
++		if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) {
++			must_restart_merging = true;
++			stop_merge(snap_merging);
++		}
++	}
++
++	down_read(&_origins_lock);
++
+ 	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
+ 	if (snap_src && snap_dest) {
+ 		down_write(&snap_src->lock);
+@@ -1851,8 +1930,16 @@ static void snapshot_resume(struct dm_target *ti)
+ 		up_write(&snap_dest->lock);
+ 		up_write(&snap_src->lock);
+ 	}
++
+ 	up_read(&_origins_lock);
+ 
++	if (origin_md) {
++		if (must_restart_merging)
++			start_merge(snap_merging);
++		dm_internal_resume_fast(origin_md);
++		dm_put(origin_md);
++	}
++
+ 	/* Now we have correct chunk size, reregister */
+ 	reregister_snapshot(s);
+ 
+@@ -2133,11 +2220,6 @@ static int origin_write_extent(struct dm_snapshot *merging_snap,
+  * Origin: maps a linear range of a device, with hooks for snapshotting.
+  */
+ 
+-struct dm_origin {
+-	struct dm_dev *dev;
+-	unsigned split_boundary;
+-};
+-
+ /*
+  * Construct an origin mapping: <dev_path>
+  * The context for an origin is merely a 'struct dm_dev *'
+@@ -2166,6 +2248,7 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ 		goto bad_open;
+ 	}
+ 
++	o->ti = ti;
+ 	ti->private = o;
+ 	ti->num_flush_bios = 1;
+ 
+@@ -2180,6 +2263,7 @@ bad_alloc:
+ static void origin_dtr(struct dm_target *ti)
+ {
+ 	struct dm_origin *o = ti->private;
++
+ 	dm_put_device(ti, o->dev);
+ 	kfree(o);
+ }
+@@ -2216,6 +2300,19 @@ static void origin_resume(struct dm_target *ti)
+ 	struct dm_origin *o = ti->private;
+ 
+ 	o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);
++
++	down_write(&_origins_lock);
++	__insert_dm_origin(o);
++	up_write(&_origins_lock);
++}
++
++static void origin_postsuspend(struct dm_target *ti)
++{
++	struct dm_origin *o = ti->private;
++
++	down_write(&_origins_lock);
++	__remove_dm_origin(o);
++	up_write(&_origins_lock);
+ }
+ 
+ static void origin_status(struct dm_target *ti, status_type_t type,
+@@ -2258,12 +2355,13 @@ static int origin_iterate_devices(struct dm_target *ti,
+ 
+ static struct target_type origin_target = {
+ 	.name    = "snapshot-origin",
+-	.version = {1, 8, 1},
++	.version = {1, 9, 0},
+ 	.module  = THIS_MODULE,
+ 	.ctr     = origin_ctr,
+ 	.dtr     = origin_dtr,
+ 	.map     = origin_map,
+ 	.resume  = origin_resume,
++	.postsuspend = origin_postsuspend,
+ 	.status  = origin_status,
+ 	.merge	 = origin_merge,
+ 	.iterate_devices = origin_iterate_devices,
+@@ -2271,7 +2369,7 @@ static struct target_type origin_target = {
+ 
+ static struct target_type snapshot_target = {
+ 	.name    = "snapshot",
+-	.version = {1, 12, 0},
++	.version = {1, 13, 0},
+ 	.module  = THIS_MODULE,
+ 	.ctr     = snapshot_ctr,
+ 	.dtr     = snapshot_dtr,
+@@ -2285,7 +2383,7 @@ static struct target_type snapshot_target = {
+ 
+ static struct target_type merge_target = {
+ 	.name    = dm_snapshot_merge_target_name,
+-	.version = {1, 2, 0},
++	.version = {1, 3, 0},
+ 	.module  = THIS_MODULE,
+ 	.ctr     = snapshot_ctr,
+ 	.dtr     = snapshot_dtr,
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index 07705ee181e3..159a113c3ad8 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -2357,17 +2357,6 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
+ 		return DM_MAPIO_REMAPPED;
+ 
+ 	case -ENODATA:
+-		if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
+-			/*
+-			 * This block isn't provisioned, and we have no way
+-			 * of doing so.
+-			 */
+-			handle_unserviceable_bio(tc->pool, bio);
+-			cell_defer_no_holder(tc, virt_cell);
+-			return DM_MAPIO_SUBMITTED;
+-		}
+-		/* fall through */
+-
+ 	case -EWOULDBLOCK:
+ 		thin_defer_cell(tc, virt_cell);
+ 		return DM_MAPIO_SUBMITTED;
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 64b10e006f9c..b71c600128a3 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -2507,6 +2507,19 @@ void dm_get(struct mapped_device *md)
+ 	BUG_ON(test_bit(DMF_FREEING, &md->flags));
+ }
+ 
++int dm_hold(struct mapped_device *md)
++{
++	spin_lock(&_minor_lock);
++	if (test_bit(DMF_FREEING, &md->flags)) {
++		spin_unlock(&_minor_lock);
++		return -EBUSY;
++	}
++	dm_get(md);
++	spin_unlock(&_minor_lock);
++	return 0;
++}
++EXPORT_SYMBOL_GPL(dm_hold);
++
+ const char *dm_device_name(struct mapped_device *md)
+ {
+ 	return md->name;
+@@ -2526,10 +2539,16 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
+ 	set_bit(DMF_FREEING, &md->flags);
+ 	spin_unlock(&_minor_lock);
+ 
++	/*
++	 * Take suspend_lock so that presuspend and postsuspend methods
++	 * do not race with internal suspend.
++	 */
++	mutex_lock(&md->suspend_lock);
+ 	if (!dm_suspended_md(md)) {
+ 		dm_table_presuspend_targets(map);
+ 		dm_table_postsuspend_targets(map);
+ 	}
++	mutex_unlock(&md->suspend_lock);
+ 
+ 	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
+ 	dm_put_live_table(md, srcu_idx);
+@@ -3001,6 +3020,7 @@ void dm_internal_suspend_fast(struct mapped_device *md)
+ 	flush_workqueue(md->wq);
+ 	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
+ }
++EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
+ 
+ void dm_internal_resume_fast(struct mapped_device *md)
+ {
+@@ -3012,6 +3032,7 @@ void dm_internal_resume_fast(struct mapped_device *md)
+ done:
+ 	mutex_unlock(&md->suspend_lock);
+ }
++EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
+ 
+ /*-----------------------------------------------------------------
+  * Event notification.
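
The new dm_hold() above differs from dm_get() in that it checks DMF_FREEING under _minor_lock and returns -EBUSY instead of pinning a device that is already being torn down. A minimal userspace sketch of that conditional-get pattern, assuming a pthread mutex stands in for _minor_lock:

#include <pthread.h>
#include <errno.h>

struct mapped_device_model {
	pthread_mutex_t lock;	/* stands in for _minor_lock */
	int freeing;		/* stands in for DMF_FREEING */
	int refcount;
};

/* Like dm_hold(): take a reference only if teardown has not begun. */
static int model_hold(struct mapped_device_model *md)
{
	pthread_mutex_lock(&md->lock);
	if (md->freeing) {
		pthread_mutex_unlock(&md->lock);
		return -EBUSY;		/* caller must not use md */
	}
	md->refcount++;			/* safe: flag checked under lock */
	pthread_mutex_unlock(&md->lock);
	return 0;
}

int main(void)
{
	struct mapped_device_model md = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.freeing = 1,		/* teardown already in progress */
	};

	return model_hold(&md) == -EBUSY ? 0 : 1;
}
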
+diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
+index f38ec424872e..5615522f8d62 100644
+--- a/drivers/mfd/kempld-core.c
++++ b/drivers/mfd/kempld-core.c
+@@ -739,7 +739,7 @@ static int __init kempld_init(void)
+ 		for (id = kempld_dmi_table;
+ 		     id->matches[0].slot != DMI_NONE; id++)
+ 			if (strstr(id->ident, force_device_id))
+-				if (id->callback && id->callback(id))
++				if (id->callback && !id->callback(id))
+ 					break;
+ 		if (id->matches[0].slot == DMI_NONE)
+ 			return -ENODEV;
+diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
+index e2e3aaf501a2..30f088be6a1a 100644
+--- a/drivers/net/ethernet/amd/pcnet32.c
++++ b/drivers/net/ethernet/amd/pcnet32.c
+@@ -1543,7 +1543,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
+ {
+ 	struct pcnet32_private *lp;
+ 	int i, media;
+-	int fdx, mii, fset, dxsuflo;
++	int fdx, mii, fset, dxsuflo, sram;
+ 	int chip_version;
+ 	char *chipname;
+ 	struct net_device *dev;
+@@ -1580,7 +1580,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
+ 	}
+ 
+ 	/* initialize variables */
+-	fdx = mii = fset = dxsuflo = 0;
++	fdx = mii = fset = dxsuflo = sram = 0;
+ 	chip_version = (chip_version >> 12) & 0xffff;
+ 
+ 	switch (chip_version) {
+@@ -1613,6 +1613,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
+ 		chipname = "PCnet/FAST III 79C973";	/* PCI */
+ 		fdx = 1;
+ 		mii = 1;
++		sram = 1;
+ 		break;
+ 	case 0x2626:
+ 		chipname = "PCnet/Home 79C978";	/* PCI */
+@@ -1636,6 +1637,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
+ 		chipname = "PCnet/FAST III 79C975";	/* PCI */
+ 		fdx = 1;
+ 		mii = 1;
++		sram = 1;
+ 		break;
+ 	case 0x2628:
+ 		chipname = "PCnet/PRO 79C976";
+@@ -1664,6 +1666,31 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
+ 		dxsuflo = 1;
+ 	}
+ 
++	/*
++	 * The Am79C973/Am79C975 controllers come with 12K of SRAM
++	 * which we can use for the Tx/Rx buffers, but most importantly,
++	 * the use of SRAM allows us to use the BCR18:NOUFLO bit to avoid
++	 * Tx FIFO underflows.
++	 */
++	if (sram) {
++		/*
++		 * The SRAM is configured in two steps. First we set
++		 * the SRAM size in the BCR25:SRAM_SIZE bits. According
++		 * to the datasheet, each bit corresponds to a 512-byte
++		 * page, so we can have at most 24 pages. SRAM_SIZE
++		 * holds the upper 8 bits of the 16-bit SRAM size.
++		 * The low 8 bits start at 0x00 and end at 0xff. So the
++		 * address range is from 0x0000 up to 0x17ff. Therefore,
++		 * the SRAM_SIZE is set to 0x17. The next step is to set
++		 * the BCR26:SRAM_BND midway through so the Tx and Rx
++		 * buffers can share the SRAM equally.
++		 */
++		a->write_bcr(ioaddr, 25, 0x17);
++		a->write_bcr(ioaddr, 26, 0xc);
++		/* And finally enable the NOUFLO bit */
++		a->write_bcr(ioaddr, 18, a->read_bcr(ioaddr, 18) | (1 << 11));
++	}
++
+ 	dev = alloc_etherdev(sizeof(*lp));
+ 	if (!dev) {
+ 		ret = -ENOMEM;
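
The SRAM setup above is three plain BCR pokes: BCR25 sets the size, BCR26 splits it between Tx and Rx, and BCR18 bit 11 enables NOUFLO via read-modify-write so the other BCR18 bits survive. A self-contained sketch of the same sequence (read_bcr/write_bcr here are stubs over a fake register file, not the driver's a->read_bcr/a->write_bcr ops):

#include <stdint.h>
#include <stdio.h>

#define NOUFLO_BIT	(1u << 11)	/* BCR18:NOUFLO per the hunk above */

static uint16_t bcr[32];		/* fake register file for the demo */

static uint16_t read_bcr(unsigned long io, int r)
{
	(void)io;
	return bcr[r];
}

static void write_bcr(unsigned long io, int r, uint16_t v)
{
	(void)io;
	bcr[r] = v;
}

static void enable_sram_noufl(unsigned long ioaddr)
{
	write_bcr(ioaddr, 25, 0x17);	/* SRAM_SIZE: 0x0000..0x17ff */
	write_bcr(ioaddr, 26, 0x0c);	/* SRAM_BND: split Tx/Rx evenly */
	/* read-modify-write so the other BCR18 bits are preserved */
	write_bcr(ioaddr, 18, read_bcr(ioaddr, 18) | NOUFLO_BIT);
}

int main(void)
{
	bcr[18] = 0x0061;		/* arbitrary prior contents */
	enable_sram_noufl(0);
	printf("BCR18 = 0x%04x\n", bcr[18]);	/* 0x0061 | 0x0800 */
	return 0;
}
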
+diff --git a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c
+index 50cdf7090198..8eff2753abad 100644
+--- a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c
++++ b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c
+@@ -39,13 +39,22 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
+ 	void *dcmd_buf = NULL, *wr_pointer;
+ 	u16 msglen, maxmsglen = PAGE_SIZE - 0x100;
+ 
+-	brcmf_dbg(TRACE, "cmd %x set %d len %d\n", cmdhdr->cmd, cmdhdr->set,
+-		  cmdhdr->len);
++	if (len < sizeof(*cmdhdr)) {
++		brcmf_err("vendor command too short: %d\n", len);
++		return -EINVAL;
++	}
+ 
+ 	vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
+ 	ifp = vif->ifp;
+ 
+-	len -= sizeof(struct brcmf_vndr_dcmd_hdr);
++	brcmf_dbg(TRACE, "ifidx=%d, cmd=%d\n", ifp->ifidx, cmdhdr->cmd);
++
++	if (cmdhdr->offset > len) {
++		brcmf_err("bad buffer offset %d > %d\n", cmdhdr->offset, len);
++		return -EINVAL;
++	}
++
++	len -= cmdhdr->offset;
+ 	ret_len = cmdhdr->len;
+ 	if (ret_len > 0 || len > 0) {
+ 		if (len > BRCMF_DCMD_MAXLEN) {
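
The two new checks above follow the usual rule for untrusted buffers: verify the buffer covers the fixed header before reading header fields, and verify any offset taken from the header still lies inside the buffer before doing arithmetic with it. A standalone sketch of that validation (demo_hdr and parse_cmd are illustrative, not the brcmfmac types):

#include <stdint.h>
#include <string.h>
#include <errno.h>

struct demo_hdr {
	uint32_t cmd;
	uint32_t len;
	uint32_t offset;	/* start of payload within the buffer */
};

/* Validate before trusting: short buffers and out-of-range offsets
 * are rejected instead of feeding bogus values into later math. */
static int parse_cmd(const void *buf, size_t len)
{
	struct demo_hdr hdr;
	size_t payload_len;

	if (len < sizeof(hdr))
		return -EINVAL;		/* header would read OOB */
	memcpy(&hdr, buf, sizeof(hdr));

	if (hdr.offset > len)
		return -EINVAL;		/* payload would start OOB */

	payload_len = len - hdr.offset;	/* now provably in range */
	(void)payload_len;
	return 0;
}

int main(void)
{
	unsigned char buf[8] = { 0 };

	/* 8 bytes cannot hold the 12-byte header: rejected. */
	return parse_cmd(buf, sizeof(buf)) == -EINVAL ? 0 : 1;
}
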
+diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
+index c3817fae16c0..06f6cc08f451 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
++++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
+@@ -95,7 +95,8 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = {
+ 	.nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION,	\
+ 	.base_params = &iwl1000_base_params,			\
+ 	.eeprom_params = &iwl1000_eeprom_params,		\
+-	.led_mode = IWL_LED_BLINK
++	.led_mode = IWL_LED_BLINK,				\
++	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ 
+ const struct iwl_cfg iwl1000_bgn_cfg = {
+ 	.name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
+@@ -121,7 +122,8 @@ const struct iwl_cfg iwl1000_bg_cfg = {
+ 	.base_params = &iwl1000_base_params,			\
+ 	.eeprom_params = &iwl1000_eeprom_params,		\
+ 	.led_mode = IWL_LED_RF_STATE,				\
+-	.rx_with_siso_diversity = true
++	.rx_with_siso_diversity = true,				\
++	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ 
+ const struct iwl_cfg iwl100_bgn_cfg = {
+ 	.name = "Intel(R) Centrino(R) Wireless-N 100 BGN",
+diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
+index 21e5d0843a62..890b95f497d6 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
++++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
+@@ -123,7 +123,9 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
+ 	.nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION,		\
+ 	.base_params = &iwl2000_base_params,			\
+ 	.eeprom_params = &iwl20x0_eeprom_params,		\
+-	.led_mode = IWL_LED_RF_STATE
++	.led_mode = IWL_LED_RF_STATE,				\
++	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
++
+ 
+ const struct iwl_cfg iwl2000_2bgn_cfg = {
+ 	.name = "Intel(R) Centrino(R) Wireless-N 2200 BGN",
+@@ -149,7 +151,8 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
+ 	.nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION,	\
+ 	.base_params = &iwl2030_base_params,			\
+ 	.eeprom_params = &iwl20x0_eeprom_params,		\
+-	.led_mode = IWL_LED_RF_STATE
++	.led_mode = IWL_LED_RF_STATE,				\
++	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ 
+ const struct iwl_cfg iwl2030_2bgn_cfg = {
+ 	.name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
+@@ -170,7 +173,8 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
+ 	.base_params = &iwl2000_base_params,			\
+ 	.eeprom_params = &iwl20x0_eeprom_params,		\
+ 	.led_mode = IWL_LED_RF_STATE,				\
+-	.rx_with_siso_diversity = true
++	.rx_with_siso_diversity = true,				\
++	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ 
+ const struct iwl_cfg iwl105_bgn_cfg = {
+ 	.name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
+@@ -197,7 +201,8 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
+ 	.base_params = &iwl2030_base_params,			\
+ 	.eeprom_params = &iwl20x0_eeprom_params,		\
+ 	.led_mode = IWL_LED_RF_STATE,				\
+-	.rx_with_siso_diversity = true
++	.rx_with_siso_diversity = true,				\
++	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ 
+ const struct iwl_cfg iwl135_bgn_cfg = {
+ 	.name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
+diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
+index 332bbede39e5..724194e23414 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
++++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
+@@ -93,7 +93,8 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = {
+ 	.nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION,	\
+ 	.base_params = &iwl5000_base_params,			\
+ 	.eeprom_params = &iwl5000_eeprom_params,		\
+-	.led_mode = IWL_LED_BLINK
++	.led_mode = IWL_LED_BLINK,				\
++	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ 
+ const struct iwl_cfg iwl5300_agn_cfg = {
+ 	.name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
+@@ -158,7 +159,8 @@ const struct iwl_cfg iwl5350_agn_cfg = {
+ 	.base_params = &iwl5000_base_params,			\
+ 	.eeprom_params = &iwl5000_eeprom_params,		\
+ 	.led_mode = IWL_LED_BLINK,				\
+-	.internal_wimax_coex = true
++	.internal_wimax_coex = true,				\
++	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ 
+ const struct iwl_cfg iwl5150_agn_cfg = {
+ 	.name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
+diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
+index 8f2c3c8c6b84..21b2630763dc 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
++++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
+@@ -145,7 +145,8 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = {
+ 	.nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION,	\
+ 	.base_params = &iwl6000_g2_base_params,			\
+ 	.eeprom_params = &iwl6000_eeprom_params,		\
+-	.led_mode = IWL_LED_RF_STATE
++	.led_mode = IWL_LED_RF_STATE,				\
++	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ 
+ const struct iwl_cfg iwl6005_2agn_cfg = {
+ 	.name = "Intel(R) Centrino(R) Advanced-N 6205 AGN",
+@@ -199,7 +200,8 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
+ 	.nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION,	\
+ 	.base_params = &iwl6000_g2_base_params,			\
+ 	.eeprom_params = &iwl6000_eeprom_params,		\
+-	.led_mode = IWL_LED_RF_STATE
++	.led_mode = IWL_LED_RF_STATE,				\
++	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ 
+ const struct iwl_cfg iwl6030_2agn_cfg = {
+ 	.name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
+@@ -235,7 +237,8 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
+ 	.nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION,	\
+ 	.base_params = &iwl6000_g2_base_params,			\
+ 	.eeprom_params = &iwl6000_eeprom_params,		\
+-	.led_mode = IWL_LED_RF_STATE
++	.led_mode = IWL_LED_RF_STATE,				\
++	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ 
+ const struct iwl_cfg iwl6035_2agn_cfg = {
+ 	.name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
+@@ -290,7 +293,8 @@ const struct iwl_cfg iwl130_bg_cfg = {
+ 	.nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION,	\
+ 	.base_params = &iwl6000_base_params,			\
+ 	.eeprom_params = &iwl6000_eeprom_params,		\
+-	.led_mode = IWL_LED_BLINK
++	.led_mode = IWL_LED_BLINK,				\
++	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ 
+ const struct iwl_cfg iwl6000i_2agn_cfg = {
+ 	.name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
+@@ -322,7 +326,8 @@ const struct iwl_cfg iwl6000i_2bg_cfg = {
+ 	.base_params = &iwl6050_base_params,			\
+ 	.eeprom_params = &iwl6000_eeprom_params,		\
+ 	.led_mode = IWL_LED_BLINK,				\
+-	.internal_wimax_coex = true
++	.internal_wimax_coex = true,				\
++	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ 
+ const struct iwl_cfg iwl6050_2agn_cfg = {
+ 	.name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
+@@ -347,7 +352,8 @@ const struct iwl_cfg iwl6050_2abg_cfg = {
+ 	.base_params = &iwl6050_base_params,			\
+ 	.eeprom_params = &iwl6000_eeprom_params,		\
+ 	.led_mode = IWL_LED_BLINK,				\
+-	.internal_wimax_coex = true
++	.internal_wimax_coex = true,				\
++	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ 
+ const struct iwl_cfg iwl6150_bgn_cfg = {
+ 	.name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
+diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
+index a3bfda45d9e6..ae5a4ec7556a 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
++++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
+@@ -793,7 +793,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
+ 	if (!vif->bss_conf.assoc)
+ 		smps_mode = IEEE80211_SMPS_AUTOMATIC;
+ 
+-	if (IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status,
++	if (mvmvif->phy_ctxt &&
++	    IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status,
+ 			       mvmvif->phy_ctxt->id))
+ 		smps_mode = IEEE80211_SMPS_AUTOMATIC;
+ 
+diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
+index b3210cfbecc8..d8045856995d 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
++++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
+@@ -832,7 +832,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
+ 	if (!vif->bss_conf.assoc)
+ 		smps_mode = IEEE80211_SMPS_AUTOMATIC;
+ 
+-	if (data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id))
++	if (mvmvif->phy_ctxt &&
++	    data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id))
+ 		smps_mode = IEEE80211_SMPS_AUTOMATIC;
+ 
+ 	IWL_DEBUG_COEX(data->mvm,
+diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
+index 30ceb67ed7a7..1a0327075eb8 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
++++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
+@@ -146,9 +146,12 @@ enum rs_column_mode {
+ #define MAX_NEXT_COLUMNS 7
+ #define MAX_COLUMN_CHECKS 3
+ 
++struct rs_tx_column;
++
+ typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm,
+ 				     struct ieee80211_sta *sta,
+-				     struct iwl_scale_tbl_info *tbl);
++				     struct iwl_scale_tbl_info *tbl,
++				     const struct rs_tx_column *next_col);
+ 
+ struct rs_tx_column {
+ 	enum rs_column_mode mode;
+@@ -159,13 +162,15 @@ struct rs_tx_column {
+ };
+ 
+ static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+-			 struct iwl_scale_tbl_info *tbl)
++			 struct iwl_scale_tbl_info *tbl,
++			 const struct rs_tx_column *next_col)
+ {
+-	return iwl_mvm_bt_coex_is_ant_avail(mvm, tbl->rate.ant);
++	return iwl_mvm_bt_coex_is_ant_avail(mvm, next_col->ant);
+ }
+ 
+ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+-			  struct iwl_scale_tbl_info *tbl)
++			  struct iwl_scale_tbl_info *tbl,
++			  const struct rs_tx_column *next_col)
+ {
+ 	if (!sta->ht_cap.ht_supported)
+ 		return false;
+@@ -183,7 +188,8 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ }
+ 
+ static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+-			  struct iwl_scale_tbl_info *tbl)
++			  struct iwl_scale_tbl_info *tbl,
++			  const struct rs_tx_column *next_col)
+ {
+ 	if (!sta->ht_cap.ht_supported)
+ 		return false;
+@@ -192,7 +198,8 @@ static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ }
+ 
+ static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+-			 struct iwl_scale_tbl_info *tbl)
++			 struct iwl_scale_tbl_info *tbl,
++			 const struct rs_tx_column *next_col)
+ {
+ 	struct rs_rate *rate = &tbl->rate;
+ 	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+@@ -1594,7 +1601,7 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
+ 
+ 		for (j = 0; j < MAX_COLUMN_CHECKS; j++) {
+ 			allow_func = next_col->checks[j];
+-			if (allow_func && !allow_func(mvm, sta, tbl))
++			if (allow_func && !allow_func(mvm, sta, tbl, next_col))
+ 				break;
+ 		}
+ 
+diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
+index 54fafbf9a711..f8d6f306dd76 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
++++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
+@@ -750,8 +750,7 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
+ 	 * request
+ 	 */
+ 	list_for_each_entry(te_data, &mvm->time_event_list, list) {
+-		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE &&
+-		    te_data->running) {
++		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ 			mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
+ 			is_p2p = true;
+ 			goto remove_te;
+@@ -766,10 +765,8 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
+ 	 * request
+ 	 */
+ 	list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) {
+-		if (te_data->running) {
+-			mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
+-			goto remove_te;
+-		}
++		mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
++		goto remove_te;
+ 	}
+ 
+ remove_te:
+diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
+index 40b6d1d006d7..af2486965782 100644
+--- a/drivers/net/wireless/rtlwifi/base.c
++++ b/drivers/net/wireless/rtlwifi/base.c
+@@ -1314,8 +1314,11 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
+ 		}
+ 
+ 		return true;
+-	} else if (0x86DD == ether_type) {
+-		return true;
++	} else if (ETH_P_IPV6 == ether_type) {
++		/* TODO: Handle any IPv6 cases that need special handling.
++		 * For now, always return false
++		 */
++		goto end;
+ 	}
+ 
+ end:
+diff --git a/drivers/of/irq.c b/drivers/of/irq.c
+index 0d7765807f49..1a7980692f25 100644
+--- a/drivers/of/irq.c
++++ b/drivers/of/irq.c
+@@ -290,7 +290,7 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
+ 	struct device_node *p;
+ 	const __be32 *intspec, *tmp, *addr;
+ 	u32 intsize, intlen;
+-	int i, res = -EINVAL;
++	int i, res;
+ 
+ 	pr_debug("of_irq_parse_one: dev=%s, index=%d\n", of_node_full_name(device), index);
+ 
+@@ -323,15 +323,19 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
+ 
+ 	/* Get size of interrupt specifier */
+ 	tmp = of_get_property(p, "#interrupt-cells", NULL);
+-	if (tmp == NULL)
++	if (tmp == NULL) {
++		res = -EINVAL;
+ 		goto out;
++	}
+ 	intsize = be32_to_cpu(*tmp);
+ 
+ 	pr_debug(" intsize=%d intlen=%d\n", intsize, intlen);
+ 
+ 	/* Check index */
+-	if ((index + 1) * intsize > intlen)
++	if ((index + 1) * intsize > intlen) {
++		res = -EINVAL;
+ 		goto out;
++	}
+ 
+ 	/* Copy intspec into irq structure */
+ 	intspec += index * intsize;
+diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
+index a12d35338313..04fc84f2b289 100644
+--- a/drivers/phy/phy-core.c
++++ b/drivers/phy/phy-core.c
+@@ -52,7 +52,9 @@ static void devm_phy_consume(struct device *dev, void *res)
+ 
+ static int devm_phy_match(struct device *dev, void *res, void *match_data)
+ {
+-	return res == match_data;
++	struct phy **phy = res;
++
++	return *phy == match_data;
+ }
+ 
+ /**
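
The devm_phy_match() fix turns on what a devres match callback receives: res points at the slot devres allocated, which for a phy holds a struct phy *, so the stored pointer must be dereferenced before comparing with match_data. A compact illustration of the wrong and right comparisons (devres itself is modeled with plain allocations here):

#include <stdio.h>
#include <stdlib.h>

struct phy { int id; };

/* Buggy: compares the address of the slot, never the stored phy. */
static int match_wrong(void *res, void *match_data)
{
	return res == match_data;
}

/* Fixed: res is a 'struct phy **', dereference it first. */
static int match_right(void *res, void *match_data)
{
	struct phy **phy = res;

	return *phy == match_data;
}

int main(void)
{
	struct phy *p = malloc(sizeof(*p));
	struct phy **slot = malloc(sizeof(*slot));	/* devres-style slot */

	*slot = p;
	printf("wrong: %d, right: %d\n",
	       match_wrong(slot, p), match_right(slot, p));	/* 0, 1 */
	free(slot);
	free(p);
	return 0;
}
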
+diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
+index 97b5e4ee1ca4..63d4033eb683 100644
+--- a/drivers/powercap/intel_rapl.c
++++ b/drivers/powercap/intel_rapl.c
+@@ -73,7 +73,7 @@
+ 
+ #define TIME_WINDOW_MAX_MSEC 40000
+ #define TIME_WINDOW_MIN_MSEC 250
+-
++#define ENERGY_UNIT_SCALE    1000 /* scale from driver unit to powercap unit */
+ enum unit_type {
+ 	ARBITRARY_UNIT, /* no translation */
+ 	POWER_UNIT,
+@@ -158,6 +158,7 @@ struct rapl_domain {
+ 	struct rapl_power_limit rpl[NR_POWER_LIMITS];
+ 	u64 attr_map; /* track capabilities */
+ 	unsigned int state;
++	unsigned int domain_energy_unit;
+ 	int package_id;
+ };
+ #define power_zone_to_rapl_domain(_zone) \
+@@ -190,6 +191,7 @@ struct rapl_defaults {
+ 	void (*set_floor_freq)(struct rapl_domain *rd, bool mode);
+ 	u64 (*compute_time_window)(struct rapl_package *rp, u64 val,
+ 				bool to_raw);
++	unsigned int dram_domain_energy_unit;
+ };
+ static struct rapl_defaults *rapl_defaults;
+ 
+@@ -227,7 +229,8 @@ static int rapl_read_data_raw(struct rapl_domain *rd,
+ static int rapl_write_data_raw(struct rapl_domain *rd,
+ 			enum rapl_primitives prim,
+ 			unsigned long long value);
+-static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value,
++static u64 rapl_unit_xlate(struct rapl_domain *rd, int package,
++			enum unit_type type, u64 value,
+ 			int to_raw);
+ static void package_power_limit_irq_save(int package_id);
+ 
+@@ -305,7 +308,9 @@ static int get_energy_counter(struct powercap_zone *power_zone, u64 *energy_raw)
+ 
+ static int get_max_energy_counter(struct powercap_zone *pcd_dev, u64 *energy)
+ {
+-	*energy = rapl_unit_xlate(0, ENERGY_UNIT, ENERGY_STATUS_MASK, 0);
++	struct rapl_domain *rd = power_zone_to_rapl_domain(pcd_dev);
++
++	*energy = rapl_unit_xlate(rd, 0, ENERGY_UNIT, ENERGY_STATUS_MASK, 0);
+ 	return 0;
+ }
+ 
+@@ -639,6 +644,11 @@ static void rapl_init_domains(struct rapl_package *rp)
+ 			rd->msrs[4] = MSR_DRAM_POWER_INFO;
+ 			rd->rpl[0].prim_id = PL1_ENABLE;
+ 			rd->rpl[0].name = pl1_name;
++			rd->domain_energy_unit =
++				rapl_defaults->dram_domain_energy_unit;
++			if (rd->domain_energy_unit)
++				pr_info("DRAM domain energy unit %dpJ\n",
++					rd->domain_energy_unit);
+ 			break;
+ 		}
+ 		if (mask) {
+@@ -648,11 +658,13 @@ static void rapl_init_domains(struct rapl_package *rp)
+ 	}
+ }
+ 
+-static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value,
++static u64 rapl_unit_xlate(struct rapl_domain *rd, int package,
++			enum unit_type type, u64 value,
+ 			int to_raw)
+ {
+ 	u64 units = 1;
+ 	struct rapl_package *rp;
++	u64 scale = 1;
+ 
+ 	rp = find_package_by_id(package);
+ 	if (!rp)
+@@ -663,7 +675,12 @@ static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value,
+ 		units = rp->power_unit;
+ 		break;
+ 	case ENERGY_UNIT:
+-		units = rp->energy_unit;
++		scale = ENERGY_UNIT_SCALE;
++		/* per domain unit takes precedence */
++		if (rd && rd->domain_energy_unit)
++			units = rd->domain_energy_unit;
++		else
++			units = rp->energy_unit;
+ 		break;
+ 	case TIME_UNIT:
+ 		return rapl_defaults->compute_time_window(rp, value, to_raw);
+@@ -673,11 +690,11 @@ static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value,
+ 	};
+ 
+ 	if (to_raw)
+-		return div64_u64(value, units);
++		return div64_u64(value, units) * scale;
+ 
+ 	value *= units;
+ 
+-	return value;
++	return div64_u64(value, scale);
+ }
+ 
+ /* in the order of enum rapl_primitives */
+@@ -773,7 +790,7 @@ static int rapl_read_data_raw(struct rapl_domain *rd,
+ 	final = value & rp->mask;
+ 	final = final >> rp->shift;
+ 	if (xlate)
+-		*data = rapl_unit_xlate(rd->package_id, rp->unit, final, 0);
++		*data = rapl_unit_xlate(rd, rd->package_id, rp->unit, final, 0);
+ 	else
+ 		*data = final;
+ 
+@@ -799,7 +816,7 @@ static int rapl_write_data_raw(struct rapl_domain *rd,
+ 			"failed to read msr 0x%x on cpu %d\n", msr, cpu);
+ 		return -EIO;
+ 	}
+-	value = rapl_unit_xlate(rd->package_id, rp->unit, value, 1);
++	value = rapl_unit_xlate(rd, rd->package_id, rp->unit, value, 1);
+ 	msr_val &= ~rp->mask;
+ 	msr_val |= value << rp->shift;
+ 	if (wrmsrl_safe_on_cpu(cpu, msr, msr_val)) {
+@@ -818,7 +835,7 @@ static int rapl_write_data_raw(struct rapl_domain *rd,
+  * calculate units differ on different CPUs.
+  * We convert the units to below format based on CPUs.
+  * i.e.
+- * energy unit: microJoules : Represented in microJoules by default
++ * energy unit: picoJoules  : Represented in picoJoules by default
+  * power unit : microWatts  : Represented in milliWatts by default
+  * time unit  : microseconds: Represented in seconds by default
+  */
+@@ -834,7 +851,7 @@ static int rapl_check_unit_core(struct rapl_package *rp, int cpu)
+ 	}
+ 
+ 	value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
+-	rp->energy_unit = 1000000 / (1 << value);
++	rp->energy_unit = ENERGY_UNIT_SCALE * 1000000 / (1 << value);
+ 
+ 	value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
+ 	rp->power_unit = 1000000 / (1 << value);
+@@ -842,7 +859,7 @@ static int rapl_check_unit_core(struct rapl_package *rp, int cpu)
+ 	value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
+ 	rp->time_unit = 1000000 / (1 << value);
+ 
+-	pr_debug("Core CPU package %d energy=%duJ, time=%dus, power=%duW\n",
++	pr_debug("Core CPU package %d energy=%dpJ, time=%dus, power=%duW\n",
+ 		rp->id, rp->energy_unit, rp->time_unit, rp->power_unit);
+ 
+ 	return 0;
+@@ -859,7 +876,7 @@ static int rapl_check_unit_atom(struct rapl_package *rp, int cpu)
+ 		return -ENODEV;
+ 	}
+ 	value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
+-	rp->energy_unit = 1 << value;
++	rp->energy_unit = ENERGY_UNIT_SCALE * 1 << value;
+ 
+ 	value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
+ 	rp->power_unit = (1 << value) * 1000;
+@@ -867,7 +884,7 @@ static int rapl_check_unit_atom(struct rapl_package *rp, int cpu)
+ 	value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
+ 	rp->time_unit = 1000000 / (1 << value);
+ 
+-	pr_debug("Atom package %d energy=%duJ, time=%dus, power=%duW\n",
++	pr_debug("Atom package %d energy=%dpJ, time=%dus, power=%duW\n",
+ 		rp->id, rp->energy_unit, rp->time_unit, rp->power_unit);
+ 
+ 	return 0;
+@@ -1017,6 +1034,13 @@ static const struct rapl_defaults rapl_defaults_core = {
+ 	.compute_time_window = rapl_compute_time_window_core,
+ };
+ 
++static const struct rapl_defaults rapl_defaults_hsw_server = {
++	.check_unit = rapl_check_unit_core,
++	.set_floor_freq = set_floor_freq_default,
++	.compute_time_window = rapl_compute_time_window_core,
++	.dram_domain_energy_unit = 15300,
++};
++
+ static const struct rapl_defaults rapl_defaults_atom = {
+ 	.check_unit = rapl_check_unit_atom,
+ 	.set_floor_freq = set_floor_freq_atom,
+@@ -1037,7 +1061,7 @@ static const struct x86_cpu_id rapl_ids[] = {
+ 	RAPL_CPU(0x3a, rapl_defaults_core),/* Ivy Bridge */
+ 	RAPL_CPU(0x3c, rapl_defaults_core),/* Haswell */
+ 	RAPL_CPU(0x3d, rapl_defaults_core),/* Broadwell */
+-	RAPL_CPU(0x3f, rapl_defaults_core),/* Haswell */
++	RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */
+ 	RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
+ 	RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */
+ 	RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */
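
ENERGY_UNIT_SCALE above lets the driver keep energy units at 1000x resolution internally, so sub-microjoule-granularity units such as the Haswell server DRAM domain's 15.3 uJ (stored as 15300) are not truncated; converting back to the user-visible value divides the scale out again, and a per-domain unit, when set, overrides the package-wide one. A small model of that conversion (function names are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define ENERGY_UNIT_SCALE 1000	/* driver unit -> powercap unit */

/* Model of rapl_check_unit_core(): the MSR encodes the energy unit
 * as 1/(2^value) J; keeping it at 1000x resolution avoids rounding
 * it down to a whole number of microjoules. */
static uint64_t energy_unit_from_msr(unsigned value)
{
	return (uint64_t)ENERGY_UNIT_SCALE * 1000000 / (1u << value);
}

/* Model of rapl_unit_xlate(ENERGY_UNIT, ..., 0): a per-domain unit,
 * when non-zero, takes precedence over the package-wide one. */
static uint64_t raw_to_uj(uint64_t raw, uint64_t pkg_unit,
			  uint64_t domain_unit)
{
	uint64_t units = domain_unit ? domain_unit : pkg_unit;

	return raw * units / ENERGY_UNIT_SCALE;
}

int main(void)
{
	uint64_t pkg = energy_unit_from_msr(14);	/* 61035 */

	printf("pkg unit = %llu\n", (unsigned long long)pkg);
	printf("1000 counts, pkg  = %llu uJ\n",
	       (unsigned long long)raw_to_uj(1000, pkg, 0));
	printf("1000 counts, dram = %llu uJ\n",
	       (unsigned long long)raw_to_uj(1000, pkg, 15300));
	return 0;
}
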
+diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
+index 9205f433573c..18198316b6cf 100644
+--- a/drivers/regulator/palmas-regulator.c
++++ b/drivers/regulator/palmas-regulator.c
+@@ -1572,6 +1572,10 @@ static int palmas_regulators_probe(struct platform_device *pdev)
+ 	if (!pmic)
+ 		return -ENOMEM;
+ 
++	if (of_device_is_compatible(node, "ti,tps659038-pmic"))
++		palmas_generic_regs_info[PALMAS_REG_REGEN2].ctrl_addr =
++							TPS659038_REGEN2_CTRL;
++
+ 	pmic->dev = &pdev->dev;
+ 	pmic->palmas = palmas;
+ 	palmas->pmic = pmic;
+diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+index 73f9feecda72..272a2646a759 100644
+--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
++++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+@@ -1598,7 +1598,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
+ 	/*
+ 	 * Finally register the new FC Nexus with TCM
+ 	 */
+-	__transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
++	transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
+index 22ca08a18b9e..8076e89f1ea1 100644
+--- a/drivers/spi/spi-dw-mid.c
++++ b/drivers/spi/spi-dw-mid.c
+@@ -108,7 +108,8 @@ static void dw_spi_dma_tx_done(void *arg)
+ {
+ 	struct dw_spi *dws = arg;
+ 
+-	if (test_and_clear_bit(TX_BUSY, &dws->dma_chan_busy) & BIT(RX_BUSY))
++	clear_bit(TX_BUSY, &dws->dma_chan_busy);
++	if (test_bit(RX_BUSY, &dws->dma_chan_busy))
+ 		return;
+ 	dw_spi_xfer_done(dws);
+ }
+@@ -156,7 +157,8 @@ static void dw_spi_dma_rx_done(void *arg)
+ {
+ 	struct dw_spi *dws = arg;
+ 
+-	if (test_and_clear_bit(RX_BUSY, &dws->dma_chan_busy) & BIT(TX_BUSY))
++	clear_bit(RX_BUSY, &dws->dma_chan_busy);
++	if (test_bit(TX_BUSY, &dws->dma_chan_busy))
+ 		return;
+ 	dw_spi_xfer_done(dws);
+ }
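
The spi-dw-mid change fixes a subtle bitops misuse: test_and_clear_bit() returns the previous value of that single bit (0 or 1), not the whole word, so ANDing its return value with BIT(RX_BUSY) can never be non-zero and the early return never fired. The fix clears this channel's flag and then tests the other one. A standalone demonstration (plain word operations stand in for the kernel's atomic bitops):

#include <stdio.h>

#define TX_BUSY 0
#define RX_BUSY 1
#define BIT(n)  (1ul << (n))

static unsigned long dma_chan_busy;

/* Mimics the kernel's return contract: previous value of bit nr. */
static int test_and_clear_bit_model(int nr, unsigned long *w)
{
	int old = (*w >> nr) & 1;

	*w &= ~BIT(nr);
	return old;
}

int main(void)
{
	int buggy, rx_still_busy;

	dma_chan_busy = BIT(TX_BUSY) | BIT(RX_BUSY);

	/* Buggy check from the old code: the old value is 0 or 1, so
	 * ANDing with BIT(RX_BUSY) (== 2) is always 0. */
	buggy = test_and_clear_bit_model(TX_BUSY, &dma_chan_busy)
			& BIT(RX_BUSY);

	/* Fixed sequence: clear our flag, then test the other one. */
	dma_chan_busy = BIT(TX_BUSY) | BIT(RX_BUSY);
	dma_chan_busy &= ~BIT(TX_BUSY);			/* clear_bit */
	rx_still_busy = (dma_chan_busy >> RX_BUSY) & 1;	/* test_bit */

	printf("buggy=%d (never 1), fixed sees RX busy=%d\n",
	       buggy, rx_still_busy);
	return 0;
}
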
+diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
+index e7fb5a0d2e8d..e27c12a6df96 100644
+--- a/drivers/spi/spi-qup.c
++++ b/drivers/spi/spi-qup.c
+@@ -489,7 +489,7 @@ static int spi_qup_probe(struct platform_device *pdev)
+ 	struct resource *res;
+ 	struct device *dev;
+ 	void __iomem *base;
+-	u32 max_freq, iomode;
++	u32 max_freq, iomode, num_cs;
+ 	int ret, irq, size;
+ 
+ 	dev = &pdev->dev;
+@@ -541,10 +541,11 @@ static int spi_qup_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	/* use num-cs unless not present or out of range */
+-	if (of_property_read_u16(dev->of_node, "num-cs",
+-			&master->num_chipselect) ||
+-			(master->num_chipselect > SPI_NUM_CHIPSELECTS))
++	if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
++	    num_cs > SPI_NUM_CHIPSELECTS)
+ 		master->num_chipselect = SPI_NUM_CHIPSELECTS;
++	else
++		master->num_chipselect = num_cs;
+ 
+ 	master->bus_num = pdev->id;
+ 	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 66a70e9bc743..a17f53303339 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -1073,13 +1073,14 @@ void spi_finalize_current_message(struct spi_master *master)
+ 				"failed to unprepare message: %d\n", ret);
+ 		}
+ 	}
++
++	trace_spi_message_done(mesg);
++
+ 	master->cur_msg_prepared = false;
+ 
+ 	mesg->state = NULL;
+ 	if (mesg->complete)
+ 		mesg->complete(mesg->context);
+-
+-	trace_spi_message_done(mesg);
+ }
+ EXPORT_SYMBOL_GPL(spi_finalize_current_message);
+ 
+diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
+index cd1a277d853b..ace0521f1151 100644
+--- a/drivers/staging/vt6655/device_main.c
++++ b/drivers/staging/vt6655/device_main.c
+@@ -357,16 +357,6 @@ static void device_init_registers(struct vnt_private *pDevice)
+ 	/* zonetype initial */
+ 	pDevice->byOriginalZonetype = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
+ 
+-	/* Get RFType */
+-	pDevice->byRFType = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_RFTYPE);
+-
+-	/* force change RevID for VT3253 emu */
+-	if ((pDevice->byRFType & RF_EMU) != 0)
+-			pDevice->byRevId = 0x80;
+-
+-	pDevice->byRFType &= RF_MASK;
+-	pr_debug("pDevice->byRFType = %x\n", pDevice->byRFType);
+-
+ 	if (!pDevice->bZoneRegExist)
+ 		pDevice->byZoneType = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
+ 
+@@ -1806,6 +1796,12 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
+ 	MACvInitialize(priv->PortOffset);
+ 	MACvReadEtherAddress(priv->PortOffset, priv->abyCurrentNetAddr);
+ 
++	/* Get RFType */
++	priv->byRFType = SROMbyReadEmbedded(priv->PortOffset, EEP_OFS_RFTYPE);
++	priv->byRFType &= RF_MASK;
++
++	dev_dbg(&pcid->dev, "RF Type = %x\n", priv->byRFType);
++
+ 	device_get_options(priv);
+ 	device_set_options(priv);
+ 	/* Mask out the options cannot be set to the chip */
+diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
+index 32ef99341e20..5d65ad09bcd1 100644
+--- a/drivers/staging/vt6655/rf.c
++++ b/drivers/staging/vt6655/rf.c
+@@ -791,6 +791,7 @@ bool RFbSetPower(
+ 		break;
+ 	case RATE_6M:
+ 	case RATE_9M:
++	case RATE_12M:
+ 	case RATE_18M:
+ 		byPwr = priv->abyOFDMPwrTbl[uCH];
+ 		if (priv->byRFType == RF_UW2452)
+diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c
+index c42cde59f598..c4286ccac320 100644
+--- a/drivers/staging/vt6656/rf.c
++++ b/drivers/staging/vt6656/rf.c
+@@ -640,6 +640,7 @@ int vnt_rf_setpower(struct vnt_private *priv, u32 rate, u32 channel)
+ 		break;
+ 	case RATE_6M:
+ 	case RATE_9M:
++	case RATE_12M:
+ 	case RATE_18M:
+ 	case RATE_24M:
+ 	case RATE_36M:
+diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
+index 97b486c3dda1..583e755d8091 100644
+--- a/drivers/target/tcm_fc/tfc_io.c
++++ b/drivers/target/tcm_fc/tfc_io.c
+@@ -359,7 +359,7 @@ void ft_invl_hw_context(struct ft_cmd *cmd)
+ 		ep = fc_seq_exch(seq);
+ 		if (ep) {
+ 			lport = ep->lp;
+-			if (lport && (ep->xid <= lport->lro_xid))
++			if (lport && (ep->xid <= lport->lro_xid)) {
+ 				/*
+ 				 * "ddp_done" trigger invalidation of HW
+ 				 * specific DDP context
+@@ -374,6 +374,7 @@ void ft_invl_hw_context(struct ft_cmd *cmd)
+ 				 * identified using ep->xid)
+ 				 */
+ 				cmd->was_ddp_setup = 0;
++			}
+ 		}
+ 	}
+ }
+diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
+index 4fe18ce3bd5a..7c145655cd4b 100644
+--- a/drivers/usb/chipidea/udc.c
++++ b/drivers/usb/chipidea/udc.c
+@@ -929,6 +929,13 @@ __acquires(hwep->lock)
+ 	return retval;
+ }
+ 
++static int otg_a_alt_hnp_support(struct ci_hdrc *ci)
++{
++	dev_warn(&ci->gadget.dev,
++		"connect the device to an alternate port if you want HNP\n");
++	return isr_setup_status_phase(ci);
++}
++
+ /**
+  * isr_setup_packet_handler: setup packet handler
+  * @ci: UDC descriptor
+@@ -1061,6 +1068,10 @@ __acquires(ci->lock)
+ 							ci);
+ 				}
+ 				break;
++			case USB_DEVICE_A_ALT_HNP_SUPPORT:
++				if (ci_otg_is_fsm_mode(ci))
++					err = otg_a_alt_hnp_support(ci);
++				break;
+ 			default:
+ 				goto delegate;
+ 			}
+diff --git a/drivers/usb/common/usb-otg-fsm.c b/drivers/usb/common/usb-otg-fsm.c
+index c6b35b77dab7..61d538aa2346 100644
+--- a/drivers/usb/common/usb-otg-fsm.c
++++ b/drivers/usb/common/usb-otg-fsm.c
+@@ -150,9 +150,9 @@ static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
+ 		break;
+ 	case OTG_STATE_B_PERIPHERAL:
+ 		otg_chrg_vbus(fsm, 0);
+-		otg_loc_conn(fsm, 1);
+ 		otg_loc_sof(fsm, 0);
+ 		otg_set_protocol(fsm, PROTO_GADGET);
++		otg_loc_conn(fsm, 1);
+ 		break;
+ 	case OTG_STATE_B_WAIT_ACON:
+ 		otg_chrg_vbus(fsm, 0);
+@@ -213,10 +213,10 @@ static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
+ 
+ 		break;
+ 	case OTG_STATE_A_PERIPHERAL:
+-		otg_loc_conn(fsm, 1);
+ 		otg_loc_sof(fsm, 0);
+ 		otg_set_protocol(fsm, PROTO_GADGET);
+ 		otg_drv_vbus(fsm, 1);
++		otg_loc_conn(fsm, 1);
+ 		otg_add_timer(fsm, A_BIDL_ADIS);
+ 		break;
+ 	case OTG_STATE_A_WAIT_VFALL:
+diff --git a/drivers/usb/phy/phy-am335x-control.c b/drivers/usb/phy/phy-am335x-control.c
+index 403fab772724..7b3035ff9434 100644
+--- a/drivers/usb/phy/phy-am335x-control.c
++++ b/drivers/usb/phy/phy-am335x-control.c
+@@ -126,6 +126,9 @@ struct phy_control *am335x_get_phy_control(struct device *dev)
+ 		return NULL;
+ 
+ 	dev = bus_find_device(&platform_bus_type, NULL, node, match);
++	if (!dev)
++		return NULL;
++
+ 	ctrl_usb = dev_get_drvdata(dev);
+ 	if (!ctrl_usb)
+ 		return NULL;
+diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
+index 82570425fdfe..c85ea530085f 100644
+--- a/drivers/usb/storage/unusual_uas.h
++++ b/drivers/usb/storage/unusual_uas.h
+@@ -113,6 +113,13 @@ UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_NO_ATA_1X),
+ 
++/* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */
++UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999,
++		"Initio Corporation",
++		"",
++		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++		US_FL_NO_ATA_1X),
++
+ /* Reported-by: Tom Arild Naess <tanaess@gmail.com> */
+ UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999,
+ 		"JMicron",
+diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
+index 50c5f42d7a9f..b8b7a6c712ae 100644
+--- a/drivers/virtio/virtio_balloon.c
++++ b/drivers/virtio/virtio_balloon.c
+@@ -29,6 +29,7 @@
+ #include <linux/module.h>
+ #include <linux/balloon_compaction.h>
+ #include <linux/oom.h>
++#include <linux/wait.h>
+ 
+ /*
+  * Balloon device works in 4K page units.  So each page is pointed to by
+@@ -335,17 +336,25 @@ static int virtballoon_oom_notify(struct notifier_block *self,
+ static int balloon(void *_vballoon)
+ {
+ 	struct virtio_balloon *vb = _vballoon;
++	DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ 
+ 	set_freezable();
+ 	while (!kthread_should_stop()) {
+ 		s64 diff;
+ 
+ 		try_to_freeze();
+-		wait_event_interruptible(vb->config_change,
+-					 (diff = towards_target(vb)) != 0
+-					 || vb->need_stats_update
+-					 || kthread_should_stop()
+-					 || freezing(current));
++
++		add_wait_queue(&vb->config_change, &wait);
++		for (;;) {
++			if ((diff = towards_target(vb)) != 0 ||
++			    vb->need_stats_update ||
++			    kthread_should_stop() ||
++			    freezing(current))
++				break;
++			wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
++		}
++		remove_wait_queue(&vb->config_change, &wait);
++
+ 		if (vb->need_stats_update)
+ 			stats_handle_request(vb);
+ 		if (diff > 0)
+@@ -494,6 +503,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
+ 	if (err < 0)
+ 		goto out_oom_notify;
+ 
++	virtio_device_ready(vdev);
++
+ 	vb->thread = kthread_run(balloon, vb, "vballoon");
+ 	if (IS_ERR(vb->thread)) {
+ 		err = PTR_ERR(vb->thread);
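
The balloon thread above switches to the add_wait_queue()/wait_woken() form, which keeps the waiter registered while the wake-up conditions are re-checked, so a wake-up arriving between the check and the sleep cannot be lost. A loose userspace analogue of that shape, using a pthread condition variable (the kernel primitives differ in detail):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static bool need_work;

/* Analogue of the wait_woken() loop: register first (here, take
 * the mutex), then re-check the condition before every sleep, so
 * a wake-up can never slip through the gap. */
static void wait_for_work(void)
{
	pthread_mutex_lock(&lock);
	while (!need_work)		/* re-check, never assume */
		pthread_cond_wait(&cond, &lock);
	need_work = false;
	pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
	(void)arg;
	wait_for_work();
	puts("woken, doing work");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	pthread_mutex_lock(&lock);
	need_work = true;		/* like towards_target() != 0 */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}
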
+diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c
+index 6e560d56094b..754fdf8c6356 100644
+--- a/fs/hfsplus/brec.c
++++ b/fs/hfsplus/brec.c
+@@ -131,13 +131,16 @@ skip:
+ 	hfs_bnode_write(node, entry, data_off + key_len, entry_len);
+ 	hfs_bnode_dump(node);
+ 
+-	if (new_node) {
+-		/* update parent key if we inserted a key
+-		 * at the start of the first node
+-		 */
+-		if (!rec && new_node != node)
+-			hfs_brec_update_parent(fd);
++	/*
++	 * update parent key if we inserted a key
++	 * at the start of the node and it is not the new node
++	 */
++	if (!rec && new_node != node) {
++		hfs_bnode_read_key(node, fd->search_key, data_off + size);
++		hfs_brec_update_parent(fd);
++	}
+ 
++	if (new_node) {
+ 		hfs_bnode_put(fd->bnode);
+ 		if (!new_node->parent) {
+ 			hfs_btree_inc_height(tree);
+@@ -168,9 +171,6 @@ skip:
+ 		goto again;
+ 	}
+ 
+-	if (!rec)
+-		hfs_brec_update_parent(fd);
+-
+ 	return 0;
+ }
+ 
+@@ -370,6 +370,8 @@ again:
+ 	if (IS_ERR(parent))
+ 		return PTR_ERR(parent);
+ 	__hfs_brec_find(parent, fd, hfs_find_rec_by_key);
++	if (fd->record < 0)
++		return -ENOENT;
+ 	hfs_bnode_dump(parent);
+ 	rec = fd->record;
+ 
+diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
+index ca6d2acc5eb7..f39722b2836c 100644
+--- a/include/linux/device-mapper.h
++++ b/include/linux/device-mapper.h
+@@ -368,6 +368,7 @@ int dm_create(int minor, struct mapped_device **md);
+  */
+ struct mapped_device *dm_get_md(dev_t dev);
+ void dm_get(struct mapped_device *md);
++int dm_hold(struct mapped_device *md);
+ void dm_put(struct mapped_device *md);
+ 
+ /*
+diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
+index fb0390a1a498..ee7b1ce7a6f8 100644
+--- a/include/linux/mfd/palmas.h
++++ b/include/linux/mfd/palmas.h
+@@ -2999,6 +2999,9 @@ enum usb_irq_events {
+ #define PALMAS_GPADC_TRIM15					0x0E
+ #define PALMAS_GPADC_TRIM16					0x0F
+ 
++/* TPS659038 regen2_ctrl offset is different from palmas */
++#define TPS659038_REGEN2_CTRL					0x12
++
+ /* TPS65917 Interrupt registers */
+ 
+ /* Registers for function INTERRUPT */
+diff --git a/include/trace/events/regmap.h b/include/trace/events/regmap.h
+index 23d561512f64..22317d2b52ab 100644
+--- a/include/trace/events/regmap.h
++++ b/include/trace/events/regmap.h
+@@ -7,27 +7,26 @@
+ #include <linux/ktime.h>
+ #include <linux/tracepoint.h>
+ 
+-struct device;
+-struct regmap;
++#include "../../../drivers/base/regmap/internal.h"
+ 
+ /*
+  * Log register events
+  */
+ DECLARE_EVENT_CLASS(regmap_reg,
+ 
+-	TP_PROTO(struct device *dev, unsigned int reg,
++	TP_PROTO(struct regmap *map, unsigned int reg,
+ 		 unsigned int val),
+ 
+-	TP_ARGS(dev, reg, val),
++	TP_ARGS(map, reg, val),
+ 
+ 	TP_STRUCT__entry(
+-		__string(	name,		dev_name(dev)	)
+-		__field(	unsigned int,	reg		)
+-		__field(	unsigned int,	val		)
++		__string(	name,		regmap_name(map)	)
++		__field(	unsigned int,	reg			)
++		__field(	unsigned int,	val			)
+ 	),
+ 
+ 	TP_fast_assign(
+-		__assign_str(name, dev_name(dev));
++		__assign_str(name, regmap_name(map));
+ 		__entry->reg = reg;
+ 		__entry->val = val;
+ 	),
+@@ -39,45 +38,45 @@ DECLARE_EVENT_CLASS(regmap_reg,
+ 
+ DEFINE_EVENT(regmap_reg, regmap_reg_write,
+ 
+-	TP_PROTO(struct device *dev, unsigned int reg,
++	TP_PROTO(struct regmap *map, unsigned int reg,
+ 		 unsigned int val),
+ 
+-	TP_ARGS(dev, reg, val)
++	TP_ARGS(map, reg, val)
+ 
+ );
+ 
+ DEFINE_EVENT(regmap_reg, regmap_reg_read,
+ 
+-	TP_PROTO(struct device *dev, unsigned int reg,
++	TP_PROTO(struct regmap *map, unsigned int reg,
+ 		 unsigned int val),
+ 
+-	TP_ARGS(dev, reg, val)
++	TP_ARGS(map, reg, val)
+ 
+ );
+ 
+ DEFINE_EVENT(regmap_reg, regmap_reg_read_cache,
+ 
+-	TP_PROTO(struct device *dev, unsigned int reg,
++	TP_PROTO(struct regmap *map, unsigned int reg,
+ 		 unsigned int val),
+ 
+-	TP_ARGS(dev, reg, val)
++	TP_ARGS(map, reg, val)
+ 
+ );
+ 
+ DECLARE_EVENT_CLASS(regmap_block,
+ 
+-	TP_PROTO(struct device *dev, unsigned int reg, int count),
++	TP_PROTO(struct regmap *map, unsigned int reg, int count),
+ 
+-	TP_ARGS(dev, reg, count),
++	TP_ARGS(map, reg, count),
+ 
+ 	TP_STRUCT__entry(
+-		__string(	name,		dev_name(dev)	)
+-		__field(	unsigned int,	reg		)
+-		__field(	int,		count		)
++		__string(	name,		regmap_name(map)	)
++		__field(	unsigned int,	reg			)
++		__field(	int,		count			)
+ 	),
+ 
+ 	TP_fast_assign(
+-		__assign_str(name, dev_name(dev));
++		__assign_str(name, regmap_name(map));
+ 		__entry->reg = reg;
+ 		__entry->count = count;
+ 	),
+@@ -89,48 +88,48 @@ DECLARE_EVENT_CLASS(regmap_block,
+ 
+ DEFINE_EVENT(regmap_block, regmap_hw_read_start,
+ 
+-	TP_PROTO(struct device *dev, unsigned int reg, int count),
++	TP_PROTO(struct regmap *map, unsigned int reg, int count),
+ 
+-	TP_ARGS(dev, reg, count)
++	TP_ARGS(map, reg, count)
+ );
+ 
+ DEFINE_EVENT(regmap_block, regmap_hw_read_done,
+ 
+-	TP_PROTO(struct device *dev, unsigned int reg, int count),
++	TP_PROTO(struct regmap *map, unsigned int reg, int count),
+ 
+-	TP_ARGS(dev, reg, count)
++	TP_ARGS(map, reg, count)
+ );
+ 
+ DEFINE_EVENT(regmap_block, regmap_hw_write_start,
+ 
+-	TP_PROTO(struct device *dev, unsigned int reg, int count),
++	TP_PROTO(struct regmap *map, unsigned int reg, int count),
+ 
+-	TP_ARGS(dev, reg, count)
++	TP_ARGS(map, reg, count)
+ );
+ 
+ DEFINE_EVENT(regmap_block, regmap_hw_write_done,
+ 
+-	TP_PROTO(struct device *dev, unsigned int reg, int count),
++	TP_PROTO(struct regmap *map, unsigned int reg, int count),
+ 
+-	TP_ARGS(dev, reg, count)
++	TP_ARGS(map, reg, count)
+ );
+ 
+ TRACE_EVENT(regcache_sync,
+ 
+-	TP_PROTO(struct device *dev, const char *type,
++	TP_PROTO(struct regmap *map, const char *type,
+ 		 const char *status),
+ 
+-	TP_ARGS(dev, type, status),
++	TP_ARGS(map, type, status),
+ 
+ 	TP_STRUCT__entry(
+-		__string(       name,           dev_name(dev)   )
+-		__string(	status,		status		)
+-		__string(	type,		type		)
+-		__field(	int,		type		)
++		__string(       name,           regmap_name(map)	)
++		__string(	status,		status			)
++		__string(	type,		type			)
++		__field(	int,		type			)
+ 	),
+ 
+ 	TP_fast_assign(
+-		__assign_str(name, dev_name(dev));
++		__assign_str(name, regmap_name(map));
+ 		__assign_str(status, status);
+ 		__assign_str(type, type);
+ 	),
+@@ -141,17 +140,17 @@ TRACE_EVENT(regcache_sync,
+ 
+ DECLARE_EVENT_CLASS(regmap_bool,
+ 
+-	TP_PROTO(struct device *dev, bool flag),
++	TP_PROTO(struct regmap *map, bool flag),
+ 
+-	TP_ARGS(dev, flag),
++	TP_ARGS(map, flag),
+ 
+ 	TP_STRUCT__entry(
+-		__string(	name,		dev_name(dev)	)
+-		__field(	int,		flag		)
++		__string(	name,		regmap_name(map)	)
++		__field(	int,		flag			)
+ 	),
+ 
+ 	TP_fast_assign(
+-		__assign_str(name, dev_name(dev));
++		__assign_str(name, regmap_name(map));
+ 		__entry->flag = flag;
+ 	),
+ 
+@@ -161,32 +160,32 @@ DECLARE_EVENT_CLASS(regmap_bool,
+ 
+ DEFINE_EVENT(regmap_bool, regmap_cache_only,
+ 
+-	TP_PROTO(struct device *dev, bool flag),
++	TP_PROTO(struct regmap *map, bool flag),
+ 
+-	TP_ARGS(dev, flag)
++	TP_ARGS(map, flag)
+ 
+ );
+ 
+ DEFINE_EVENT(regmap_bool, regmap_cache_bypass,
+ 
+-	TP_PROTO(struct device *dev, bool flag),
++	TP_PROTO(struct regmap *map, bool flag),
+ 
+-	TP_ARGS(dev, flag)
++	TP_ARGS(map, flag)
+ 
+ );
+ 
+ DECLARE_EVENT_CLASS(regmap_async,
+ 
+-	TP_PROTO(struct device *dev),
++	TP_PROTO(struct regmap *map),
+ 
+-	TP_ARGS(dev),
++	TP_ARGS(map),
+ 
+ 	TP_STRUCT__entry(
+-		__string(	name,		dev_name(dev)	)
++		__string(	name,		regmap_name(map)	)
+ 	),
+ 
+ 	TP_fast_assign(
+-		__assign_str(name, dev_name(dev));
++		__assign_str(name, regmap_name(map));
+ 	),
+ 
+ 	TP_printk("%s", __get_str(name))
+@@ -194,50 +193,50 @@ DECLARE_EVENT_CLASS(regmap_async,
+ 
+ DEFINE_EVENT(regmap_block, regmap_async_write_start,
+ 
+-	TP_PROTO(struct device *dev, unsigned int reg, int count),
++	TP_PROTO(struct regmap *map, unsigned int reg, int count),
+ 
+-	TP_ARGS(dev, reg, count)
++	TP_ARGS(map, reg, count)
+ );
+ 
+ DEFINE_EVENT(regmap_async, regmap_async_io_complete,
+ 
+-	TP_PROTO(struct device *dev),
++	TP_PROTO(struct regmap *map),
+ 
+-	TP_ARGS(dev)
++	TP_ARGS(map)
+ 
+ );
+ 
+ DEFINE_EVENT(regmap_async, regmap_async_complete_start,
+ 
+-	TP_PROTO(struct device *dev),
++	TP_PROTO(struct regmap *map),
+ 
+-	TP_ARGS(dev)
++	TP_ARGS(map)
+ 
+ );
+ 
+ DEFINE_EVENT(regmap_async, regmap_async_complete_done,
+ 
+-	TP_PROTO(struct device *dev),
++	TP_PROTO(struct regmap *map),
+ 
+-	TP_ARGS(dev)
++	TP_ARGS(map)
+ 
+ );
+ 
+ TRACE_EVENT(regcache_drop_region,
+ 
+-	TP_PROTO(struct device *dev, unsigned int from,
++	TP_PROTO(struct regmap *map, unsigned int from,
+ 		 unsigned int to),
+ 
+-	TP_ARGS(dev, from, to),
++	TP_ARGS(map, from, to),
+ 
+ 	TP_STRUCT__entry(
+-		__string(       name,           dev_name(dev)   )
+-		__field(	unsigned int,	from		)
+-		__field(	unsigned int,	to		)
++		__string(       name,           regmap_name(map)	)
++		__field(	unsigned int,	from			)
++		__field(	unsigned int,	to			)
+ 	),
+ 
+ 	TP_fast_assign(
+-		__assign_str(name, dev_name(dev));
++		__assign_str(name, regmap_name(map));
+ 		__entry->from = from;
+ 		__entry->to = to;
+ 	),
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 19efcf13375a..795962442f99 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -4412,6 +4412,13 @@ static void perf_pending_event(struct irq_work *entry)
+ {
+ 	struct perf_event *event = container_of(entry,
+ 			struct perf_event, pending);
++	int rctx;
++
++	rctx = perf_swevent_get_recursion_context();
++	/*
++	 * If we 'fail' here, that's OK, it means recursion is already disabled
++	 * and we won't recurse 'further'.
++	 */
+ 
+ 	if (event->pending_disable) {
+ 		event->pending_disable = 0;
+@@ -4422,6 +4429,9 @@ static void perf_pending_event(struct irq_work *entry)
+ 		event->pending_wakeup = 0;
+ 		perf_event_wakeup(event);
+ 	}
++
++	if (rctx >= 0)
++		perf_swevent_put_recursion_context(rctx);
+ }
+ 
+ /*
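
The perf hunk brackets the handler with a recursion-context get/put; the rctx >= 0 test matters because the get can legitimately fail when recursion is already disabled, and in that case there is nothing to put back. A minimal model with a thread-local depth counter (hypothetical names, not the perf internals; __thread is a GCC/Clang extension):

#include <stdio.h>

static __thread int recursion_depth;

/* Returns a context handle, or -1 if recursion is already blocked. */
static int get_recursion_context(void)
{
	if (recursion_depth)
		return -1;	/* caller must not "put" in this case */
	recursion_depth++;
	return 0;
}

static void put_recursion_context(int rctx)
{
	(void)rctx;
	recursion_depth--;
}

static void pending_event(void)
{
	int rctx = get_recursion_context();

	/* ... handle pending disable / wakeup here ... */

	if (rctx >= 0)		/* only undo what we actually took */
		put_recursion_context(rctx);
}

int main(void)
{
	pending_event();
	printf("depth after = %d\n", recursion_depth);	/* 0 */
	return 0;
}
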
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index cc6e964d9837..fa7568ce7ef3 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -58,13 +58,24 @@ struct ieee80211_local;
+ #define IEEE80211_UNSET_POWER_LEVEL	INT_MIN
+ 
+ /*
+- * Some APs experience problems when working with U-APSD. Decrease the
+- * probability of that happening by using legacy mode for all ACs but VO.
+- * The AP that caused us trouble was a Cisco 4410N. It ignores our
+- * setting, and always treats non-VO ACs as legacy.
++ * Some APs experience problems when working with U-APSD. Decreasing the
++ * probability of that happening by using legacy mode for all ACs but VO isn't
++ * enough.
++ *
++ * Cisco 4410N originally forced us to enable VO by default only because it
++ * treated non-VO ACs as legacy.
++ *
++ * However, some APs (notably the Netgear R7000) silently reclassify packets to
++ * different ACs. Since U-APSD ACs require trigger frames for frame retrieval,
++ * clients would never see some frames (e.g. ARP responses) or would fetch them
++ * accidentally after a long time.
++ *
++ * It makes little sense to enable U-APSD queues by default because userspace
++ * applications need to be aware of them to actually take advantage of the
++ * possible additional power savings. Implicitly depending on driver
++ * autotrigger frame support doesn't make much sense.
+  */
+-#define IEEE80211_DEFAULT_UAPSD_QUEUES \
+-	IEEE80211_WMM_IE_STA_QOSINFO_AC_VO
++#define IEEE80211_DEFAULT_UAPSD_QUEUES 0
+ 
+ #define IEEE80211_DEFAULT_MAX_SP_LEN		\
+ 	IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index d69ca513848e..b448e8ff3213 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -2191,6 +2191,9 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
+ 	hdr = (struct ieee80211_hdr *) skb->data;
+ 	mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
+ 
++	if (ieee80211_drop_unencrypted(rx, hdr->frame_control))
++		return RX_DROP_MONITOR;
++
+ 	/* frame is in RMC, don't forward */
+ 	if (ieee80211_is_data(hdr->frame_control) &&
+ 	    is_multicast_ether_addr(hdr->addr1) &&
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index 974ebe70f5b0..1ce38e71ecd8 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -3153,7 +3153,7 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
+ 		wdev_iter = &sdata_iter->wdev;
+ 
+ 		if (sdata_iter == sdata ||
+-		    rcu_access_pointer(sdata_iter->vif.chanctx_conf) == NULL ||
++		    !ieee80211_sdata_running(sdata_iter) ||
+ 		    local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype))
+ 			continue;
+ 
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 8887c6e5fca8..e13325f709f2 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -4360,6 +4360,16 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
+ 	if (parse_station_flags(info, dev->ieee80211_ptr->iftype, &params))
+ 		return -EINVAL;
+ 
++	/* HT/VHT requires QoS, but if we don't have that just ignore HT/VHT
++	 * as userspace might just pass through the capabilities from the IEs
++	 * directly, rather than enforcing this restriction and returning an
++	 * error in this case.
++	 */
++	if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_WME))) {
++		params.ht_capa = NULL;
++		params.vht_capa = NULL;
++	}
++
+ 	/* When you run into this, adjust the code below for the new flag */
+ 	BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7);
+ 
+diff --git a/sound/soc/codecs/adav80x.c b/sound/soc/codecs/adav80x.c
+index b67480f1b1aa..4373ada95648 100644
+--- a/sound/soc/codecs/adav80x.c
++++ b/sound/soc/codecs/adav80x.c
+@@ -317,7 +317,7 @@ static int adav80x_put_deemph(struct snd_kcontrol *kcontrol,
+ {
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
+-	unsigned int deemph = ucontrol->value.enumerated.item[0];
++	unsigned int deemph = ucontrol->value.integer.value[0];
+ 
+ 	if (deemph > 1)
+ 		return -EINVAL;
+@@ -333,7 +333,7 @@ static int adav80x_get_deemph(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
+ 
+-	ucontrol->value.enumerated.item[0] = adav80x->deemph;
++	ucontrol->value.integer.value[0] = adav80x->deemph;
+ 	return 0;
+ };
+ 
+diff --git a/sound/soc/codecs/ak4641.c b/sound/soc/codecs/ak4641.c
+index 70861c7b1631..81b54a270bd8 100644
+--- a/sound/soc/codecs/ak4641.c
++++ b/sound/soc/codecs/ak4641.c
+@@ -76,7 +76,7 @@ static int ak4641_put_deemph(struct snd_kcontrol *kcontrol,
+ {
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec);
+-	int deemph = ucontrol->value.enumerated.item[0];
++	int deemph = ucontrol->value.integer.value[0];
+ 
+ 	if (deemph > 1)
+ 		return -EINVAL;
+@@ -92,7 +92,7 @@ static int ak4641_get_deemph(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec);
+ 
+-	ucontrol->value.enumerated.item[0] = ak4641->deemph;
++	ucontrol->value.integer.value[0] = ak4641->deemph;
+ 	return 0;
+ };
+ 
+diff --git a/sound/soc/codecs/ak4671.c b/sound/soc/codecs/ak4671.c
+index 686cacb0e835..a981c1e8a2cf 100644
+--- a/sound/soc/codecs/ak4671.c
++++ b/sound/soc/codecs/ak4671.c
+@@ -343,25 +343,25 @@ static const struct snd_soc_dapm_widget ak4671_dapm_widgets[] = {
+ };
+ 
+ static const struct snd_soc_dapm_route ak4671_intercon[] = {
+-	{"DAC Left", "NULL", "PMPLL"},
+-	{"DAC Right", "NULL", "PMPLL"},
+-	{"ADC Left", "NULL", "PMPLL"},
+-	{"ADC Right", "NULL", "PMPLL"},
++	{"DAC Left", NULL, "PMPLL"},
++	{"DAC Right", NULL, "PMPLL"},
++	{"ADC Left", NULL, "PMPLL"},
++	{"ADC Right", NULL, "PMPLL"},
+ 
+ 	/* Outputs */
+-	{"LOUT1", "NULL", "LOUT1 Mixer"},
+-	{"ROUT1", "NULL", "ROUT1 Mixer"},
+-	{"LOUT2", "NULL", "LOUT2 Mix Amp"},
+-	{"ROUT2", "NULL", "ROUT2 Mix Amp"},
+-	{"LOUT3", "NULL", "LOUT3 Mixer"},
+-	{"ROUT3", "NULL", "ROUT3 Mixer"},
++	{"LOUT1", NULL, "LOUT1 Mixer"},
++	{"ROUT1", NULL, "ROUT1 Mixer"},
++	{"LOUT2", NULL, "LOUT2 Mix Amp"},
++	{"ROUT2", NULL, "ROUT2 Mix Amp"},
++	{"LOUT3", NULL, "LOUT3 Mixer"},
++	{"ROUT3", NULL, "ROUT3 Mixer"},
+ 
+ 	{"LOUT1 Mixer", "DACL", "DAC Left"},
+ 	{"ROUT1 Mixer", "DACR", "DAC Right"},
+ 	{"LOUT2 Mixer", "DACHL", "DAC Left"},
+ 	{"ROUT2 Mixer", "DACHR", "DAC Right"},
+-	{"LOUT2 Mix Amp", "NULL", "LOUT2 Mixer"},
+-	{"ROUT2 Mix Amp", "NULL", "ROUT2 Mixer"},
++	{"LOUT2 Mix Amp", NULL, "LOUT2 Mixer"},
++	{"ROUT2 Mix Amp", NULL, "ROUT2 Mixer"},
+ 	{"LOUT3 Mixer", "DACSL", "DAC Left"},
+ 	{"ROUT3 Mixer", "DACSR", "DAC Right"},
+ 
+@@ -381,18 +381,18 @@ static const struct snd_soc_dapm_route ak4671_intercon[] = {
+ 	{"LIN2", NULL, "Mic Bias"},
+ 	{"RIN2", NULL, "Mic Bias"},
+ 
+-	{"ADC Left", "NULL", "LIN MUX"},
+-	{"ADC Right", "NULL", "RIN MUX"},
++	{"ADC Left", NULL, "LIN MUX"},
++	{"ADC Right", NULL, "RIN MUX"},
+ 
+ 	/* Analog Loops */
+-	{"LIN1 Mixing Circuit", "NULL", "LIN1"},
+-	{"RIN1 Mixing Circuit", "NULL", "RIN1"},
+-	{"LIN2 Mixing Circuit", "NULL", "LIN2"},
+-	{"RIN2 Mixing Circuit", "NULL", "RIN2"},
+-	{"LIN3 Mixing Circuit", "NULL", "LIN3"},
+-	{"RIN3 Mixing Circuit", "NULL", "RIN3"},
+-	{"LIN4 Mixing Circuit", "NULL", "LIN4"},
+-	{"RIN4 Mixing Circuit", "NULL", "RIN4"},
++	{"LIN1 Mixing Circuit", NULL, "LIN1"},
++	{"RIN1 Mixing Circuit", NULL, "RIN1"},
++	{"LIN2 Mixing Circuit", NULL, "LIN2"},
++	{"RIN2 Mixing Circuit", NULL, "RIN2"},
++	{"LIN3 Mixing Circuit", NULL, "LIN3"},
++	{"RIN3 Mixing Circuit", NULL, "RIN3"},
++	{"LIN4 Mixing Circuit", NULL, "LIN4"},
++	{"RIN4 Mixing Circuit", NULL, "RIN4"},
+ 
+ 	{"LOUT1 Mixer", "LINL1", "LIN1 Mixing Circuit"},
+ 	{"ROUT1 Mixer", "RINR1", "RIN1 Mixing Circuit"},
+diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
+index 79a4efcb894c..7d3a6accaf9a 100644
+--- a/sound/soc/codecs/cs4271.c
++++ b/sound/soc/codecs/cs4271.c
+@@ -286,7 +286,7 @@ static int cs4271_get_deemph(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
+ 
+-	ucontrol->value.enumerated.item[0] = cs4271->deemph;
++	ucontrol->value.integer.value[0] = cs4271->deemph;
+ 	return 0;
+ }
+ 
+@@ -296,7 +296,7 @@ static int cs4271_put_deemph(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
+ 
+-	cs4271->deemph = ucontrol->value.enumerated.item[0];
++	cs4271->deemph = ucontrol->value.integer.value[0];
+ 	return cs4271_set_deemph(codec);
+ }
+ 
+diff --git a/sound/soc/codecs/da732x.c b/sound/soc/codecs/da732x.c
+index 61b2f9a2eef1..e3110c67f3b2 100644
+--- a/sound/soc/codecs/da732x.c
++++ b/sound/soc/codecs/da732x.c
+@@ -876,11 +876,11 @@ static const struct snd_soc_dapm_widget da732x_dapm_widgets[] = {
+ 
+ static const struct snd_soc_dapm_route da732x_dapm_routes[] = {
+ 	/* Inputs */
+-	{"AUX1L PGA", "NULL", "AUX1L"},
+-	{"AUX1R PGA", "NULL", "AUX1R"},
++	{"AUX1L PGA", NULL, "AUX1L"},
++	{"AUX1R PGA", NULL, "AUX1R"},
+ 	{"MIC1 PGA", NULL, "MIC1"},
+-	{"MIC2 PGA", "NULL", "MIC2"},
+-	{"MIC3 PGA", "NULL", "MIC3"},
++	{"MIC2 PGA", NULL, "MIC2"},
++	{"MIC3 PGA", NULL, "MIC3"},
+ 
+ 	/* Capture Path */
+ 	{"ADC1 Left MUX", "MIC1", "MIC1 PGA"},
+diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c
+index f27325155ace..c5f35a07e8e4 100644
+--- a/sound/soc/codecs/es8328.c
++++ b/sound/soc/codecs/es8328.c
+@@ -120,7 +120,7 @@ static int es8328_get_deemph(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec);
+ 
+-	ucontrol->value.enumerated.item[0] = es8328->deemph;
++	ucontrol->value.integer.value[0] = es8328->deemph;
+ 	return 0;
+ }
+ 
+@@ -129,7 +129,7 @@ static int es8328_put_deemph(struct snd_kcontrol *kcontrol,
+ {
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec);
+-	int deemph = ucontrol->value.enumerated.item[0];
++	int deemph = ucontrol->value.integer.value[0];
+ 	int ret;
+ 
+ 	if (deemph > 1)
+diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c
+index a722a023c262..477e13d30971 100644
+--- a/sound/soc/codecs/pcm1681.c
++++ b/sound/soc/codecs/pcm1681.c
+@@ -118,7 +118,7 @@ static int pcm1681_get_deemph(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec);
+ 
+-	ucontrol->value.enumerated.item[0] = priv->deemph;
++	ucontrol->value.integer.value[0] = priv->deemph;
+ 
+ 	return 0;
+ }
+@@ -129,7 +129,7 @@ static int pcm1681_put_deemph(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec);
+ 
+-	priv->deemph = ucontrol->value.enumerated.item[0];
++	priv->deemph = ucontrol->value.integer.value[0];
+ 
+ 	return pcm1681_set_deemph(codec);
+ }
+diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
+index aa98be32bb60..10d2415047ad 100644
+--- a/sound/soc/codecs/sgtl5000.c
++++ b/sound/soc/codecs/sgtl5000.c
+@@ -1149,13 +1149,7 @@ static int sgtl5000_set_power_regs(struct snd_soc_codec *codec)
+ 		/* Enable VDDC charge pump */
+ 		ana_pwr |= SGTL5000_VDDC_CHRGPMP_POWERUP;
+ 	} else if (vddio >= 3100 && vdda >= 3100) {
+-		/*
+-		 * if vddio and vddd > 3.1v,
+-		 * charge pump should be clean before set ana_pwr
+-		 */
+-		snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
+-				SGTL5000_VDDC_CHRGPMP_POWERUP, 0);
+-
++		ana_pwr &= ~SGTL5000_VDDC_CHRGPMP_POWERUP;
+ 		/* VDDC use VDDIO rail */
+ 		lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD;
+ 		lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO <<
+diff --git a/sound/soc/codecs/sn95031.c b/sound/soc/codecs/sn95031.c
+index 1f451a1946eb..4ff4aa73473b 100644
+--- a/sound/soc/codecs/sn95031.c
++++ b/sound/soc/codecs/sn95031.c
+@@ -531,8 +531,8 @@ static const struct snd_soc_dapm_route sn95031_audio_map[] = {
+ 	/* speaker map */
+ 	{ "IHFOUTL", NULL, "Speaker Rail"},
+ 	{ "IHFOUTR", NULL, "Speaker Rail"},
+-	{ "IHFOUTL", "NULL", "Speaker Left Playback"},
+-	{ "IHFOUTR", "NULL", "Speaker Right Playback"},
++	{ "IHFOUTL", NULL, "Speaker Left Playback"},
++	{ "IHFOUTR", NULL, "Speaker Right Playback"},
+ 	{ "Speaker Left Playback", NULL, "Speaker Left Filter"},
+ 	{ "Speaker Right Playback", NULL, "Speaker Right Filter"},
+ 	{ "Speaker Left Filter", NULL, "IHFDAC Left"},
+diff --git a/sound/soc/codecs/tas5086.c b/sound/soc/codecs/tas5086.c
+index 249ef5c4c762..32942bed34b1 100644
+--- a/sound/soc/codecs/tas5086.c
++++ b/sound/soc/codecs/tas5086.c
+@@ -281,7 +281,7 @@ static int tas5086_get_deemph(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec);
+ 
+-	ucontrol->value.enumerated.item[0] = priv->deemph;
++	ucontrol->value.integer.value[0] = priv->deemph;
+ 
+ 	return 0;
+ }
+@@ -292,7 +292,7 @@ static int tas5086_put_deemph(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec);
+ 
+-	priv->deemph = ucontrol->value.enumerated.item[0];
++	priv->deemph = ucontrol->value.integer.value[0];
+ 
+ 	return tas5086_set_deemph(codec);
+ }
+diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
+index 34ef65c52a7d..8eeab47a4235 100644
+--- a/sound/soc/codecs/wm2000.c
++++ b/sound/soc/codecs/wm2000.c
+@@ -610,7 +610,7 @@ static int wm2000_anc_mode_get(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev);
+ 
+-	ucontrol->value.enumerated.item[0] = wm2000->anc_active;
++	ucontrol->value.integer.value[0] = wm2000->anc_active;
+ 
+ 	return 0;
+ }
+@@ -620,7 +620,7 @@ static int wm2000_anc_mode_put(struct snd_kcontrol *kcontrol,
+ {
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev);
+-	int anc_active = ucontrol->value.enumerated.item[0];
++	int anc_active = ucontrol->value.integer.value[0];
+ 	int ret;
+ 
+ 	if (anc_active > 1)
+@@ -643,7 +643,7 @@ static int wm2000_speaker_get(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev);
+ 
+-	ucontrol->value.enumerated.item[0] = wm2000->spk_ena;
++	ucontrol->value.integer.value[0] = wm2000->spk_ena;
+ 
+ 	return 0;
+ }
+@@ -653,7 +653,7 @@ static int wm2000_speaker_put(struct snd_kcontrol *kcontrol,
+ {
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev);
+-	int val = ucontrol->value.enumerated.item[0];
++	int val = ucontrol->value.integer.value[0];
+ 	int ret;
+ 
+ 	if (val > 1)
+diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
+index b115ed815db9..391534f774b1 100644
+--- a/sound/soc/codecs/wm8731.c
++++ b/sound/soc/codecs/wm8731.c
+@@ -125,7 +125,7 @@ static int wm8731_get_deemph(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
+ 
+-	ucontrol->value.enumerated.item[0] = wm8731->deemph;
++	ucontrol->value.integer.value[0] = wm8731->deemph;
+ 
+ 	return 0;
+ }
+@@ -135,7 +135,7 @@ static int wm8731_put_deemph(struct snd_kcontrol *kcontrol,
+ {
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
+-	int deemph = ucontrol->value.enumerated.item[0];
++	int deemph = ucontrol->value.integer.value[0];
+ 	int ret = 0;
+ 
+ 	if (deemph > 1)
+diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
+index cc6b0ef98a34..15435c995fd0 100644
+--- a/sound/soc/codecs/wm8903.c
++++ b/sound/soc/codecs/wm8903.c
+@@ -442,7 +442,7 @@ static int wm8903_get_deemph(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
+ 
+-	ucontrol->value.enumerated.item[0] = wm8903->deemph;
++	ucontrol->value.integer.value[0] = wm8903->deemph;
+ 
+ 	return 0;
+ }
+@@ -452,7 +452,7 @@ static int wm8903_put_deemph(struct snd_kcontrol *kcontrol,
+ {
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
+-	int deemph = ucontrol->value.enumerated.item[0];
++	int deemph = ucontrol->value.integer.value[0];
+ 	int ret = 0;
+ 
+ 	if (deemph > 1)
+diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
+index 75b87c5c0f04..b945a3be067f 100644
+--- a/sound/soc/codecs/wm8904.c
++++ b/sound/soc/codecs/wm8904.c
+@@ -525,7 +525,7 @@ static int wm8904_get_deemph(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
+ 
+-	ucontrol->value.enumerated.item[0] = wm8904->deemph;
++	ucontrol->value.integer.value[0] = wm8904->deemph;
+ 	return 0;
+ }
+ 
+@@ -534,7 +534,7 @@ static int wm8904_put_deemph(struct snd_kcontrol *kcontrol,
+ {
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
+-	int deemph = ucontrol->value.enumerated.item[0];
++	int deemph = ucontrol->value.integer.value[0];
+ 
+ 	if (deemph > 1)
+ 		return -EINVAL;
+diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c
+index 1173f7fef5a7..035bdc41c81d 100644
+--- a/sound/soc/codecs/wm8955.c
++++ b/sound/soc/codecs/wm8955.c
+@@ -393,7 +393,7 @@ static int wm8955_get_deemph(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec);
+ 
+-	ucontrol->value.enumerated.item[0] = wm8955->deemph;
++	ucontrol->value.integer.value[0] = wm8955->deemph;
+ 	return 0;
+ }
+ 
+@@ -402,7 +402,7 @@ static int wm8955_put_deemph(struct snd_kcontrol *kcontrol,
+ {
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec);
+-	int deemph = ucontrol->value.enumerated.item[0];
++	int deemph = ucontrol->value.integer.value[0];
+ 
+ 	if (deemph > 1)
+ 		return -EINVAL;
+diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
+index a96eb497a379..0435aeba2110 100644
+--- a/sound/soc/codecs/wm8960.c
++++ b/sound/soc/codecs/wm8960.c
+@@ -182,7 +182,7 @@ static int wm8960_get_deemph(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
+ 
+-	ucontrol->value.enumerated.item[0] = wm8960->deemph;
++	ucontrol->value.integer.value[0] = wm8960->deemph;
+ 	return 0;
+ }
+ 
+@@ -191,7 +191,7 @@ static int wm8960_put_deemph(struct snd_kcontrol *kcontrol,
+ {
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
+-	int deemph = ucontrol->value.enumerated.item[0];
++	int deemph = ucontrol->value.integer.value[0];
+ 
+ 	if (deemph > 1)
+ 		return -EINVAL;
+diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
+index 9517571e820d..98c9525bd751 100644
+--- a/sound/soc/codecs/wm9712.c
++++ b/sound/soc/codecs/wm9712.c
+@@ -180,7 +180,7 @@ static int wm9712_hp_mixer_put(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol);
+ 	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm);
+ 	struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
+-	unsigned int val = ucontrol->value.enumerated.item[0];
++	unsigned int val = ucontrol->value.integer.value[0];
+ 	struct soc_mixer_control *mc =
+ 		(struct soc_mixer_control *)kcontrol->private_value;
+ 	unsigned int mixer, mask, shift, old;
+@@ -193,7 +193,7 @@ static int wm9712_hp_mixer_put(struct snd_kcontrol *kcontrol,
+ 
+ 	mutex_lock(&wm9712->lock);
+ 	old = wm9712->hp_mixer[mixer];
+-	if (ucontrol->value.enumerated.item[0])
++	if (ucontrol->value.integer.value[0])
+ 		wm9712->hp_mixer[mixer] |= mask;
+ 	else
+ 		wm9712->hp_mixer[mixer] &= ~mask;
+@@ -231,7 +231,7 @@ static int wm9712_hp_mixer_get(struct snd_kcontrol *kcontrol,
+ 	mixer = mc->shift >> 8;
+ 	shift = mc->shift & 0xff;
+ 
+-	ucontrol->value.enumerated.item[0] =
++	ucontrol->value.integer.value[0] =
+ 		(wm9712->hp_mixer[mixer] >> shift) & 1;
+ 
+ 	return 0;
+diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
+index 6ab1122a3872..db644c66809e 100644
+--- a/sound/soc/codecs/wm9713.c
++++ b/sound/soc/codecs/wm9713.c
+@@ -255,7 +255,7 @@ static int wm9713_hp_mixer_put(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol);
+ 	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm);
+ 	struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
+-	unsigned int val = ucontrol->value.enumerated.item[0];
++	unsigned int val = ucontrol->value.integer.value[0];
+ 	struct soc_mixer_control *mc =
+ 		(struct soc_mixer_control *)kcontrol->private_value;
+ 	unsigned int mixer, mask, shift, old;
+@@ -268,7 +268,7 @@ static int wm9713_hp_mixer_put(struct snd_kcontrol *kcontrol,
+ 
+ 	mutex_lock(&wm9713->lock);
+ 	old = wm9713->hp_mixer[mixer];
+-	if (ucontrol->value.enumerated.item[0])
++	if (ucontrol->value.integer.value[0])
+ 		wm9713->hp_mixer[mixer] |= mask;
+ 	else
+ 		wm9713->hp_mixer[mixer] &= ~mask;
+@@ -306,7 +306,7 @@ static int wm9713_hp_mixer_get(struct snd_kcontrol *kcontrol,
+ 	mixer = mc->shift >> 8;
+ 	shift = mc->shift & 0xff;
+ 
+-	ucontrol->value.enumerated.item[0] =
++	ucontrol->value.integer.value[0] =
+ 		(wm9713->hp_mixer[mixer] >> shift) & 1;
+ 
+ 	return 0;
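
All of the codec hunks above make the same substitution: boolean controls (the "deemph" switches, ANC mode, speaker enable, headphone-mixer bits) were read and written through ucontrol->value.enumerated.item[0], but ALSA marshals integer/boolean kcontrol values through the integer member of the snd_ctl_elem_value union, so the enumerated member aliases the wrong field. Because item[] is unsigned int while value[] is long, a 1 stored on a 64-bit big-endian machine reads back as 0 through the enumerated view. A minimal standalone sketch of the aliasing; the struct here is a simplified stand-in for the kernel's snd_ctl_elem_value, with illustrative array sizes rather than the real uapi layout:

#include <stdio.h>

/* Simplified stand-in for struct snd_ctl_elem_value; the real type
 * lives in include/uapi/sound/asound.h and has more members. */
struct elem_value {
	union {
		struct { long value[4]; } integer;             /* integer/boolean controls */
		struct { unsigned int item[4]; } enumerated;   /* enumerated controls */
	} value;
};

int main(void)
{
	struct elem_value v = {0};

	/* Userspace stores a boolean control value via the integer member... */
	v.value.integer.value[0] = 1;

	/* ...so a driver reading the enumerated member reinterprets the same
	 * bytes as unsigned int. On 64-bit little-endian this happens to print
	 * 1, but on 64-bit big-endian it prints 0: the bug these hunks fix. */
	printf("integer view:    %ld\n", v.value.integer.value[0]);
	printf("enumerated view: %u\n", v.value.enumerated.item[0]);
	return 0;
}

On 32-bit targets long and int have the same width, so the wrong member happens to work there; only 64-bit big-endian configurations saw the misread values.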



* [gentoo-commits] proj/linux-patches:3.19 commit in: /
@ 2015-04-20 12:24 Mike Pagano
  0 siblings, 0 replies; 19+ messages in thread
From: Mike Pagano @ 2015-04-20 12:24 UTC (permalink / raw
  To: gentoo-commits

commit:     9f9fd7b0f07a3c22b315dc3397fdc92ec25e1bd0
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Apr 20 12:24:22 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Apr 20 12:24:22 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9f9fd7b0

Linux patch 3.19.5

 0000_README             |    4 +
 1004_linux-3.19.5.patch | 3485 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3489 insertions(+)

diff --git a/0000_README b/0000_README
index 5c9a4bb..89efc26 100644
--- a/0000_README
+++ b/0000_README
@@ -59,6 +59,10 @@ Patch:  1003_linux-3.19.4.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.19.4
 
+Patch:  1004_linux-3.19.5.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.19.5
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1004_linux-3.19.5.patch b/1004_linux-3.19.5.patch
new file mode 100644
index 0000000..dc69211
--- /dev/null
+++ b/1004_linux-3.19.5.patch
@@ -0,0 +1,3485 @@
+diff --git a/Makefile b/Makefile
+index 2ef20781ad25..633b5f0f11a0 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 19
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = Diseased Newt
+ 
+diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
+index cb3142a2d40b..a86d567f6c70 100644
+--- a/arch/arc/kernel/signal.c
++++ b/arch/arc/kernel/signal.c
+@@ -67,7 +67,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
+ 	       sigset_t *set)
+ {
+ 	int err;
+-	err = __copy_to_user(&(sf->uc.uc_mcontext.regs), regs,
++	err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), regs,
+ 			     sizeof(sf->uc.uc_mcontext.regs.scratch));
+ 	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
+ 
+@@ -83,7 +83,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
+ 	if (!err)
+ 		set_current_blocked(&set);
+ 
+-	err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs),
++	err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch),
+ 				sizeof(sf->uc.uc_mcontext.regs.scratch));
+ 
+ 	return err;
+@@ -131,6 +131,15 @@ SYSCALL_DEFINE0(rt_sigreturn)
+ 	/* Don't restart from sigreturn */
+ 	syscall_wont_restart(regs);
+ 
++	/*
++	 * Ensure that sigreturn always returns to user mode (in case the
++	 * regs saved on user stack got fudged between save and sigreturn)
++	 * Otherwise it is easy to panic the kernel with a custom
++	 * signal handler and/or restorer which clobbers the status32/ret
++	 * to return to a bogus location in kernel mode.
++	 */
++	regs->status32 |= STATUS_U_MASK;
++
+ 	return regs->r0;
+ 
+ badframe:
+@@ -229,8 +238,11 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
+ 
+ 	/*
+ 	 * handler returns using sigreturn stub provided already by userspace
++	 * If not, nuke the process right away
+ 	 */
+-	BUG_ON(!(ksig->ka.sa.sa_flags & SA_RESTORER));
++	if(!(ksig->ka.sa.sa_flags & SA_RESTORER))
++		return 1;
++
+ 	regs->blink = (unsigned long)ksig->ka.sa.sa_restorer;
+ 
+ 	/* User Stack for signal handler will be above the frame just carved */
+@@ -296,12 +308,12 @@ static void
+ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+ {
+ 	sigset_t *oldset = sigmask_to_save();
+-	int ret;
++	int failed;
+ 
+ 	/* Set up the stack frame */
+-	ret = setup_rt_frame(ksig, oldset, regs);
++	failed = setup_rt_frame(ksig, oldset, regs);
+ 
+-	signal_setup_done(ret, ksig, 0);
++	signal_setup_done(failed, ksig, 0);
+ }
+ 
+ void do_signal(struct pt_regs *regs)
+diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig
+index a77604fbaf25..81502b90dd91 100644
+--- a/arch/arm/mach-sunxi/Kconfig
++++ b/arch/arm/mach-sunxi/Kconfig
+@@ -1,10 +1,12 @@
+ menuconfig ARCH_SUNXI
+ 	bool "Allwinner SoCs" if ARCH_MULTI_V7
+ 	select ARCH_REQUIRE_GPIOLIB
++	select ARCH_HAS_RESET_CONTROLLER
+ 	select CLKSRC_MMIO
+ 	select GENERIC_IRQ_CHIP
+ 	select PINCTRL
+ 	select SUN4I_TIMER
++	select RESET_CONTROLLER
+ 
+ if ARCH_SUNXI
+ 
+@@ -20,10 +22,8 @@ config MACH_SUN5I
+ config MACH_SUN6I
+ 	bool "Allwinner A31 (sun6i) SoCs support"
+ 	default ARCH_SUNXI
+-	select ARCH_HAS_RESET_CONTROLLER
+ 	select ARM_GIC
+ 	select MFD_SUN6I_PRCM
+-	select RESET_CONTROLLER
+ 	select SUN5I_HSTIMER
+ 
+ config MACH_SUN7I
+@@ -37,16 +37,12 @@ config MACH_SUN7I
+ config MACH_SUN8I
+ 	bool "Allwinner A23 (sun8i) SoCs support"
+ 	default ARCH_SUNXI
+-	select ARCH_HAS_RESET_CONTROLLER
+ 	select ARM_GIC
+ 	select MFD_SUN6I_PRCM
+-	select RESET_CONTROLLER
+ 
+ config MACH_SUN9I
+ 	bool "Allwinner (sun9i) SoCs support"
+ 	default ARCH_SUNXI
+-	select ARCH_HAS_RESET_CONTROLLER
+ 	select ARM_GIC
+-	select RESET_CONTROLLER
+ 
+ endif
+diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
+index 2bf8e9307be9..4c8ad592ae33 100644
+--- a/arch/powerpc/include/asm/cputhreads.h
++++ b/arch/powerpc/include/asm/cputhreads.h
+@@ -55,7 +55,7 @@ static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
+ 
+ static inline int cpu_nr_cores(void)
+ {
+-	return NR_CPUS >> threads_shift;
++	return nr_cpu_ids >> threads_shift;
+ }
+ 
+ static inline cpumask_t cpu_online_cores_map(void)
+diff --git a/arch/powerpc/include/asm/irq_work.h b/arch/powerpc/include/asm/irq_work.h
+new file mode 100644
+index 000000000000..744fd54de374
+--- /dev/null
++++ b/arch/powerpc/include/asm/irq_work.h
+@@ -0,0 +1,9 @@
++#ifndef _ASM_POWERPC_IRQ_WORK_H
++#define _ASM_POWERPC_IRQ_WORK_H
++
++static inline bool arch_irq_work_has_interrupt(void)
++{
++	return true;
++}
++
++#endif /* _ASM_POWERPC_IRQ_WORK_H */
+diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
+index 9ce5afe167ff..b36365f49478 100644
+--- a/arch/sparc/kernel/pci.c
++++ b/arch/sparc/kernel/pci.c
+@@ -639,10 +639,7 @@ static void pci_claim_bus_resources(struct pci_bus *bus)
+ 				       (unsigned long long)r->end,
+ 				       (unsigned int)r->flags);
+ 
+-			if (pci_claim_resource(dev, i) == 0)
+-				continue;
+-
+-			pci_claim_bridge_resource(dev, i);
++			pci_claim_resource(dev, i);
+ 		}
+ 	}
+ 
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index bae6c609888e..86db4bcd7ce5 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -183,6 +183,16 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
+ 		},
+ 	},
+ 
++	/* ASRock */
++	{	/* Handle problems with rebooting on ASRock Q1900DC-ITX */
++		.callback = set_pci_reboot,
++		.ident = "ASRock Q1900DC-ITX",
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "ASRock"),
++			DMI_MATCH(DMI_BOARD_NAME, "Q1900DC-ITX"),
++		},
++	},
++
+ 	/* ASUS */
+ 	{	/* Handle problems with rebooting on ASUS P4S800 */
+ 		.callback = set_bios_reboot,
+diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
+index cc45a9f1a6e6..0537a0911c96 100644
+--- a/arch/x86/xen/p2m.c
++++ b/arch/x86/xen/p2m.c
+@@ -93,6 +93,12 @@ EXPORT_SYMBOL_GPL(xen_p2m_size);
+ unsigned long xen_max_p2m_pfn __read_mostly;
+ EXPORT_SYMBOL_GPL(xen_max_p2m_pfn);
+ 
++#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
++#define P2M_LIMIT CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
++#else
++#define P2M_LIMIT 0
++#endif
++
+ static DEFINE_SPINLOCK(p2m_update_lock);
+ 
+ static unsigned long *p2m_mid_missing_mfn;
+@@ -387,9 +393,11 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m)
+ void __init xen_vmalloc_p2m_tree(void)
+ {
+ 	static struct vm_struct vm;
++	unsigned long p2m_limit;
+ 
++	p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
+ 	vm.flags = VM_ALLOC;
+-	vm.size = ALIGN(sizeof(unsigned long) * xen_max_p2m_pfn,
++	vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
+ 			PMD_SIZE * PMDS_PER_MID_PAGE);
+ 	vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE);
+ 	pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size);
+diff --git a/block/blk-merge.c b/block/blk-merge.c
+index 89b97b5e0881..2be75ff7f171 100644
+--- a/block/blk-merge.c
++++ b/block/blk-merge.c
+@@ -609,7 +609,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
+ 	if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
+ 		struct bio_vec *bprev;
+ 
+-		bprev = &rq->biotail->bi_io_vec[bio->bi_vcnt - 1];
++		bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
+ 		if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
+ 			return false;
+ 	}
+diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
+index 3a415ecfe3d4..e6b6283cb5e8 100644
+--- a/block/blk-mq-tag.c
++++ b/block/blk-mq-tag.c
+@@ -265,9 +265,11 @@ static int bt_get(struct blk_mq_alloc_data *data,
+ 		/*
+ 		 * We're out of tags on this hardware queue, kick any
+ 		 * pending IO submits before going to sleep waiting for
+-		 * some to complete.
++		 * some to complete. Note that hctx can be NULL here for
++		 * reserved tag allocation.
+ 		 */
+-		blk_mq_run_hw_queue(hctx, false);
++		if (hctx)
++			blk_mq_run_hw_queue(hctx, false);
+ 
+ 		/*
+ 		 * Retry tag allocation after running the hardware queue,
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 2390c5541e71..447f5336960a 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1937,7 +1937,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
+ 	 */
+ 	if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
+ 			    PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
+-		goto err_map;
++		goto err_mq_usage;
+ 
+ 	setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
+ 	blk_queue_rq_timeout(q, 30000);
+@@ -1980,7 +1980,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
+ 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
+ 
+ 	if (blk_mq_init_hw_queues(q, set))
+-		goto err_hw;
++		goto err_mq_usage;
+ 
+ 	mutex_lock(&all_q_mutex);
+ 	list_add_tail(&q->all_q_node, &all_q_list);
+@@ -1992,7 +1992,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
+ 
+ 	return q;
+ 
+-err_hw:
++err_mq_usage:
+ 	blk_cleanup_queue(q);
+ err_hctxs:
+ 	kfree(map);
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index 6ed2cbe5e8c9..12600bfffca9 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -585,7 +585,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+ 				     b->physical_block_size);
+ 
+ 	t->io_min = max(t->io_min, b->io_min);
+-	t->io_opt = lcm(t->io_opt, b->io_opt);
++	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
+ 
+ 	t->cluster &= b->cluster;
+ 	t->discard_zeroes_data &= b->discard_zeroes_data;
+@@ -616,7 +616,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+ 		    b->raid_partial_stripes_expensive);
+ 
+ 	/* Find lowest common alignment_offset */
+-	t->alignment_offset = lcm(t->alignment_offset, alignment)
++	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
+ 		% max(t->physical_block_size, t->io_min);
+ 
+ 	/* Verify that new alignment_offset is on a logical block boundary */
+@@ -643,7 +643,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+ 						      b->max_discard_sectors);
+ 		t->discard_granularity = max(t->discard_granularity,
+ 					     b->discard_granularity);
+-		t->discard_alignment = lcm(t->discard_alignment, alignment) %
++		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
+ 			t->discard_granularity;
+ 	}
+ 
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index 87b704e41877..b27ab7a0741d 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -962,7 +962,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
+ 		return -EINVAL;
+ 
+ 	drv->safe_state_index = -1;
+-	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
++	for (i = CPUIDLE_DRIVER_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
+ 		drv->states[i].name[0] = '\0';
+ 		drv->states[i].desc[0] = '\0';
+ 	}
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index d1a05f9bb91f..00f2f740b425 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4233,9 +4233,18 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },
+ 
+ 	/* devices that don't properly handle queued TRIM commands */
+-	{ "Micron_M[56]*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
++	{ "Micron_M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
++						ATA_HORKAGE_ZERO_AFTER_TRIM, },
++	{ "Crucial_CT*M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
++						ATA_HORKAGE_ZERO_AFTER_TRIM, },
++	{ "Micron_M5[15]0*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
++						ATA_HORKAGE_ZERO_AFTER_TRIM, },
++	{ "Crucial_CT*M550*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
++						ATA_HORKAGE_ZERO_AFTER_TRIM, },
++	{ "Crucial_CT*MX100*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
++						ATA_HORKAGE_ZERO_AFTER_TRIM, },
++	{ "Samsung SSD 850 PRO*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+ 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+-	{ "Crucial_CT*SSD*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
+ 
+ 	/*
+ 	 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
+@@ -4255,6 +4264,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 	 */
+ 	{ "INTEL*SSDSC2MH*",		NULL,	0, },
+ 
++	{ "Micron*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
++	{ "Crucial*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ 	{ "INTEL*SSD*", 		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ 	{ "SSD*INTEL*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ 	{ "Samsung*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 4bc2a5cb9935..a98c41f72c63 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -803,10 +803,6 @@ static int __init nbd_init(void)
+ 		return -EINVAL;
+ 	}
+ 
+-	nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
+-	if (!nbd_dev)
+-		return -ENOMEM;
+-
+ 	part_shift = 0;
+ 	if (max_part > 0) {
+ 		part_shift = fls(max_part);
+@@ -828,6 +824,10 @@ static int __init nbd_init(void)
+ 	if (nbds_max > 1UL << (MINORBITS - part_shift))
+ 		return -EINVAL;
+ 
++	nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
++	if (!nbd_dev)
++		return -ENOMEM;
++
+ 	for (i = 0; i < nbds_max; i++) {
+ 		struct gendisk *disk = alloc_disk(1 << part_shift);
+ 		if (!disk)
+diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
+index c0a842b335c5..a52154caf526 100644
+--- a/drivers/clk/clk-divider.c
++++ b/drivers/clk/clk-divider.c
+@@ -129,12 +129,6 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
+ 	return DIV_ROUND_UP(parent_rate, div);
+ }
+ 
+-/*
+- * The reverse of DIV_ROUND_UP: The maximum number which
+- * divided by m is r
+- */
+-#define MULT_ROUND_UP(r, m) ((r) * (m) + (m) - 1)
+-
+ static bool _is_valid_table_div(const struct clk_div_table *table,
+ 							 unsigned int div)
+ {
+@@ -208,6 +202,7 @@ static int _div_round_closest(struct clk_divider *divider,
+ 		unsigned long parent_rate, unsigned long rate)
+ {
+ 	int up, down, div;
++	unsigned long up_rate, down_rate;
+ 
+ 	up = down = div = DIV_ROUND_CLOSEST(parent_rate, rate);
+ 
+@@ -219,7 +214,10 @@ static int _div_round_closest(struct clk_divider *divider,
+ 		down = _round_down_table(divider->table, div);
+ 	}
+ 
+-	return (up - div) <= (div - down) ? up : down;
++	up_rate = DIV_ROUND_UP(parent_rate, up);
++	down_rate = DIV_ROUND_UP(parent_rate, down);
++
++	return (rate - up_rate) <= (down_rate - rate) ? up : down;
+ }
+ 
+ static int _div_round(struct clk_divider *divider, unsigned long parent_rate,
+@@ -300,7 +298,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
+ 			return i;
+ 		}
+ 		parent_rate = __clk_round_rate(__clk_get_parent(hw->clk),
+-				MULT_ROUND_UP(rate, i));
++					       rate * i);
+ 		now = DIV_ROUND_UP(parent_rate, i);
+ 		if (_is_best_div(divider, rate, now, best)) {
+ 			bestdiv = i;
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 7030c409be24..24736bb88cb2 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -1724,15 +1724,18 @@ void cpufreq_resume(void)
+ 		    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
+ 			pr_err("%s: Failed to start governor for policy: %p\n",
+ 				__func__, policy);
+-
+-		/*
+-		 * schedule call cpufreq_update_policy() for boot CPU, i.e. last
+-		 * policy in list. It will verify that the current freq is in
+-		 * sync with what we believe it to be.
+-		 */
+-		if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
+-			schedule_work(&policy->update);
+ 	}
++
++	/*
++	 * schedule a call to cpufreq_update_policy() for the first-online CPU, as that
++	 * wouldn't be hotplugged-out on suspend. It will verify that the
++	 * current freq is in sync with what we believe it to be.
++	 */
++	policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
++	if (WARN_ON(!policy))
++		return;
++
++	schedule_work(&policy->update);
+ }
+ 
+ /**
+diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
+index 125150dc6e81..9ab99642ca7a 100644
+--- a/drivers/cpuidle/cpuidle.c
++++ b/drivers/cpuidle/cpuidle.c
+@@ -297,9 +297,6 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
+ 	if (!dev->registered)
+ 		return -EINVAL;
+ 
+-	if (!dev->state_count)
+-		dev->state_count = drv->state_count;
+-
+ 	ret = cpuidle_add_device_sysfs(dev);
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
+index 97c5903b4606..832a2c3f01ff 100644
+--- a/drivers/cpuidle/sysfs.c
++++ b/drivers/cpuidle/sysfs.c
+@@ -401,7 +401,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
+ 	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device);
+ 
+ 	/* state statistics */
+-	for (i = 0; i < device->state_count; i++) {
++	for (i = 0; i < drv->state_count; i++) {
+ 		kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL);
+ 		if (!kobj)
+ 			goto error_state;
+@@ -433,9 +433,10 @@ error_state:
+  */
+ static void cpuidle_remove_state_sysfs(struct cpuidle_device *device)
+ {
++	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device);
+ 	int i;
+ 
+-	for (i = 0; i < device->state_count; i++)
++	for (i = 0; i < drv->state_count; i++)
+ 		cpuidle_free_state_kobj(device, i);
+ }
+ 
+diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
+index b969206439b7..b2ce6e10970f 100644
+--- a/drivers/dma/edma.c
++++ b/drivers/dma/edma.c
+@@ -258,6 +258,13 @@ static int edma_terminate_all(struct edma_chan *echan)
+ 	 */
+ 	if (echan->edesc) {
+ 		int cyclic = echan->edesc->cyclic;
++
++		/*
++		 * free the running request descriptor
++		 * since it is not in any of the vdesc lists
++		 */
++		edma_desc_free(&echan->edesc->vdesc);
++
+ 		echan->edesc = NULL;
+ 		edma_stop(echan->ch_num);
+ 		/* Move the cyclic channel back to default queue */
+diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
+index c0016a68b446..d35275dbed7e 100644
+--- a/drivers/dma/omap-dma.c
++++ b/drivers/dma/omap-dma.c
+@@ -978,6 +978,7 @@ static int omap_dma_terminate_all(struct omap_chan *c)
+ 	 * c->desc is NULL and exit.)
+ 	 */
+ 	if (c->desc) {
++		omap_dma_desc_free(&c->desc->vd);
+ 		c->desc = NULL;
+ 		/* Avoid stopping the dma twice */
+ 		if (!c->paused)
+diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
+index 69fac068669f..2eebd28b4c40 100644
+--- a/drivers/firmware/dmi_scan.c
++++ b/drivers/firmware/dmi_scan.c
+@@ -86,10 +86,13 @@ static void dmi_table(u8 *buf, u32 len, int num,
+ 	int i = 0;
+ 
+ 	/*
+-	 *	Stop when we see all the items the table claimed to have
+-	 *	OR we run off the end of the table (also happens)
++	 * Stop when we have seen all the items the table claimed to have
++	 * (SMBIOS < 3.0 only) OR we reach an end-of-table marker OR we run
++	 * off the end of the table (should never happen but sometimes does
++	 * on bogus implementations.)
+ 	 */
+-	while ((i < num) && (data - buf + sizeof(struct dmi_header)) <= len) {
++	while ((!num || i < num) &&
++	       (data - buf + sizeof(struct dmi_header)) <= len) {
+ 		const struct dmi_header *dm = (const struct dmi_header *)data;
+ 
+ 		/*
+@@ -529,21 +532,10 @@ static int __init dmi_smbios3_present(const u8 *buf)
+ 	if (memcmp(buf, "_SM3_", 5) == 0 &&
+ 	    buf[6] < 32 && dmi_checksum(buf, buf[6])) {
+ 		dmi_ver = get_unaligned_be16(buf + 7);
++		dmi_num = 0;			/* No longer specified */
+ 		dmi_len = get_unaligned_le32(buf + 12);
+ 		dmi_base = get_unaligned_le64(buf + 16);
+ 
+-		/*
+-		 * The 64-bit SMBIOS 3.0 entry point no longer has a field
+-		 * containing the number of structures present in the table.
+-		 * Instead, it defines the table size as a maximum size, and
+-		 * relies on the end-of-table structure type (#127) to be used
+-		 * to signal the end of the table.
+-		 * So let's define dmi_num as an upper bound as well: each
+-		 * structure has a 4 byte header, so dmi_len / 4 is an upper
+-		 * bound for the number of structures in the table.
+-		 */
+-		dmi_num = dmi_len / 4;
+-
+ 		if (dmi_walk_early(dmi_decode) == 0) {
+ 			pr_info("SMBIOS %d.%d present.\n",
+ 				dmi_ver >> 8, dmi_ver & 0xFF);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 0fd592799d58..c80a8a31890d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -56,9 +56,9 @@ static inline unsigned int get_pipes_num(struct device_queue_manager *dqm)
+ 	return dqm->dev->shared_resources.compute_pipe_count;
+ }
+ 
+-static inline unsigned int get_first_pipe(struct device_queue_manager *dqm)
++unsigned int get_first_pipe(struct device_queue_manager *dqm)
+ {
+-	BUG_ON(!dqm);
++	BUG_ON(!dqm || !dqm->dev);
+ 	return dqm->dev->shared_resources.first_compute_pipe;
+ }
+ 
+@@ -693,7 +693,7 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
+ 	INIT_LIST_HEAD(&dqm->queues);
+ 	dqm->queue_count = dqm->processes_count = 0;
+ 	dqm->active_runlist = false;
+-	retval = init_pipelines(dqm, get_pipes_num(dqm), 0);
++	retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
+ 	if (retval != 0)
+ 		goto fail_init_pipelines;
+ 
+diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
+index 29168fae3dcb..27a37e5f274d 100644
+--- a/drivers/gpu/drm/drm_crtc.c
++++ b/drivers/gpu/drm/drm_crtc.c
+@@ -531,17 +531,6 @@ void drm_framebuffer_reference(struct drm_framebuffer *fb)
+ }
+ EXPORT_SYMBOL(drm_framebuffer_reference);
+ 
+-static void drm_framebuffer_free_bug(struct kref *kref)
+-{
+-	BUG();
+-}
+-
+-static void __drm_framebuffer_unreference(struct drm_framebuffer *fb)
+-{
+-	DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
+-	kref_put(&fb->refcount, drm_framebuffer_free_bug);
+-}
+-
+ /**
+  * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
+  * @fb: fb to unregister
+@@ -1322,7 +1311,7 @@ void drm_plane_force_disable(struct drm_plane *plane)
+ 		return;
+ 	}
+ 	/* disconnect the plane from the fb and crtc: */
+-	__drm_framebuffer_unreference(plane->old_fb);
++	drm_framebuffer_unreference(plane->old_fb);
+ 	plane->old_fb = NULL;
+ 	plane->fb = NULL;
+ 	plane->crtc = NULL;
+diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
+index 732cb6f8e653..4c0aa97aaf03 100644
+--- a/drivers/gpu/drm/drm_edid_load.c
++++ b/drivers/gpu/drm/drm_edid_load.c
+@@ -287,6 +287,7 @@ int drm_load_edid_firmware(struct drm_connector *connector)
+ 
+ 	drm_mode_connector_update_edid_property(connector, edid);
+ 	ret = drm_add_edid_modes(connector, edid);
++	drm_edid_to_eld(connector, edid);
+ 	kfree(edid);
+ 
+ 	return ret;
+diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
+index 7483a47de8e4..9bca854737f0 100644
+--- a/drivers/gpu/drm/drm_probe_helper.c
++++ b/drivers/gpu/drm/drm_probe_helper.c
+@@ -152,6 +152,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
+ 			struct edid *edid = (struct edid *) connector->edid_blob_ptr->data;
+ 
+ 			count = drm_add_edid_modes(connector, edid);
++			drm_edid_to_eld(connector, edid);
+ 		} else
+ 			count = (*connector_funcs->get_modes)(connector);
+ 	}
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index 4e6405e7226f..391d47e1f131 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -1093,6 +1093,7 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
+ 	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
+ 	s->gu_ctl0		= I915_READ(VLV_GU_CTL0);
+ 	s->gu_ctl1		= I915_READ(VLV_GU_CTL1);
++	s->pcbr			= I915_READ(VLV_PCBR);
+ 	s->clock_gate_dis2	= I915_READ(VLV_GUNIT_CLOCK_GATE2);
+ 
+ 	/*
+@@ -1187,6 +1188,7 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
+ 	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
+ 	I915_WRITE(VLV_GU_CTL0,			s->gu_ctl0);
+ 	I915_WRITE(VLV_GU_CTL1,			s->gu_ctl1);
++	I915_WRITE(VLV_PCBR,			s->pcbr);
+ 	I915_WRITE(VLV_GUNIT_CLOCK_GATE2,	s->clock_gate_dis2);
+ }
+ 
+@@ -1195,19 +1197,7 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
+ 	u32 val;
+ 	int err;
+ 
+-	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
+-	WARN_ON(!!(val & VLV_GFX_CLK_FORCE_ON_BIT) == force_on);
+-
+ #define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
+-	/* Wait for a previous force-off to settle */
+-	if (force_on) {
+-		err = wait_for(!COND, 20);
+-		if (err) {
+-			DRM_ERROR("timeout waiting for GFX clock force-off (%08x)\n",
+-				  I915_READ(VLV_GTLC_SURVIVABILITY_REG));
+-			return err;
+-		}
+-	}
+ 
+ 	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
+ 	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index ddd005ce3a94..44a3c385eea2 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -985,6 +985,7 @@ struct vlv_s0ix_state {
+ 	/* Display 2 CZ domain */
+ 	u32 gu_ctl0;
+ 	u32 gu_ctl1;
++	u32 pcbr;
+ 	u32 clock_gate_dis2;
+ };
+ 
+diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
+index a2045848bd1a..f75bf292285d 100644
+--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
++++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
+@@ -485,10 +485,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
+ 			stolen_offset, gtt_offset, size);
+ 
+ 	/* KISS and expect everything to be page-aligned */
+-	BUG_ON(stolen_offset & 4095);
+-	BUG_ON(size & 4095);
+-
+-	if (WARN_ON(size == 0))
++	if (WARN_ON(size == 0 || stolen_offset & 4095 || size & 4095))
+ 		return NULL;
+ 
+ 	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index c10b52ef116d..791b00e612d8 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -2339,13 +2339,19 @@ static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
+ 	struct drm_device *dev = crtc->base.dev;
+ 	struct drm_i915_gem_object *obj = NULL;
+ 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+-	u32 base = plane_config->base;
++	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
++	u32 size_aligned = round_up(plane_config->base + plane_config->size,
++				    PAGE_SIZE);
++
++	size_aligned -= base_aligned;
+ 
+ 	if (plane_config->size == 0)
+ 		return false;
+ 
+-	obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
+-							     plane_config->size);
++	obj = i915_gem_object_create_stolen_for_preallocated(dev,
++							     base_aligned,
++							     base_aligned,
++							     size_aligned);
+ 	if (!obj)
+ 		return false;
+ 
+@@ -4366,15 +4372,15 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
+ 	if (intel_crtc->config.has_pch_encoder)
+ 		ironlake_pch_enable(crtc);
+ 
++	assert_vblank_disabled(crtc);
++	drm_crtc_vblank_on(crtc);
++
+ 	for_each_encoder_on_crtc(dev, crtc, encoder)
+ 		encoder->enable(encoder);
+ 
+ 	if (HAS_PCH_CPT(dev))
+ 		cpt_verify_modeset(dev, intel_crtc->pipe);
+ 
+-	assert_vblank_disabled(crtc);
+-	drm_crtc_vblank_on(crtc);
+-
+ 	intel_crtc_enable_planes(crtc);
+ }
+ 
+@@ -4486,14 +4492,14 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
+ 	if (intel_crtc->config.dp_encoder_is_mst)
+ 		intel_ddi_set_vc_payload_alloc(crtc, true);
+ 
++	assert_vblank_disabled(crtc);
++	drm_crtc_vblank_on(crtc);
++
+ 	for_each_encoder_on_crtc(dev, crtc, encoder) {
+ 		encoder->enable(encoder);
+ 		intel_opregion_notify_encoder(encoder, true);
+ 	}
+ 
+-	assert_vblank_disabled(crtc);
+-	drm_crtc_vblank_on(crtc);
+-
+ 	/* If we change the relative order between pipe/planes enabling, we need
+ 	 * to change the workaround. */
+ 	haswell_mode_set_planes_workaround(intel_crtc);
+@@ -4544,12 +4550,12 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
+ 
+ 	intel_crtc_disable_planes(crtc);
+ 
+-	drm_crtc_vblank_off(crtc);
+-	assert_vblank_disabled(crtc);
+-
+ 	for_each_encoder_on_crtc(dev, crtc, encoder)
+ 		encoder->disable(encoder);
+ 
++	drm_crtc_vblank_off(crtc);
++	assert_vblank_disabled(crtc);
++
+ 	if (intel_crtc->config.has_pch_encoder)
+ 		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
+ 
+@@ -4608,14 +4614,14 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
+ 
+ 	intel_crtc_disable_planes(crtc);
+ 
+-	drm_crtc_vblank_off(crtc);
+-	assert_vblank_disabled(crtc);
+-
+ 	for_each_encoder_on_crtc(dev, crtc, encoder) {
+ 		intel_opregion_notify_encoder(encoder, false);
+ 		encoder->disable(encoder);
+ 	}
+ 
++	drm_crtc_vblank_off(crtc);
++	assert_vblank_disabled(crtc);
++
+ 	if (intel_crtc->config.has_pch_encoder)
+ 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
+ 						      false);
+@@ -5083,12 +5089,12 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
+ 	intel_update_watermarks(crtc);
+ 	intel_enable_pipe(intel_crtc);
+ 
+-	for_each_encoder_on_crtc(dev, crtc, encoder)
+-		encoder->enable(encoder);
+-
+ 	assert_vblank_disabled(crtc);
+ 	drm_crtc_vblank_on(crtc);
+ 
++	for_each_encoder_on_crtc(dev, crtc, encoder)
++		encoder->enable(encoder);
++
+ 	intel_crtc_enable_planes(crtc);
+ 
+ 	/* Underruns don't raise interrupts, so check manually. */
+@@ -5144,12 +5150,12 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
+ 	intel_update_watermarks(crtc);
+ 	intel_enable_pipe(intel_crtc);
+ 
+-	for_each_encoder_on_crtc(dev, crtc, encoder)
+-		encoder->enable(encoder);
+-
+ 	assert_vblank_disabled(crtc);
+ 	drm_crtc_vblank_on(crtc);
+ 
++	for_each_encoder_on_crtc(dev, crtc, encoder)
++		encoder->enable(encoder);
++
+ 	intel_crtc_enable_planes(crtc);
+ 
+ 	/*
+@@ -5221,12 +5227,12 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
+ 	 */
+ 	intel_wait_for_vblank(dev, pipe);
+ 
+-	drm_crtc_vblank_off(crtc);
+-	assert_vblank_disabled(crtc);
+-
+ 	for_each_encoder_on_crtc(dev, crtc, encoder)
+ 		encoder->disable(encoder);
+ 
++	drm_crtc_vblank_off(crtc);
++	assert_vblank_disabled(crtc);
++
+ 	intel_disable_pipe(intel_crtc);
+ 
+ 	i9xx_pfit_disable(intel_crtc);
+@@ -6660,8 +6666,7 @@ static void i9xx_get_plane_config(struct intel_crtc *crtc,
+ 	aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
+ 					    plane_config->tiled);
+ 
+-	plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
+-					aligned_height);
++	plane_config->size = crtc->base.primary->fb->pitches[0] * aligned_height;
+ 
+ 	DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ 		      pipe, plane, crtc->base.primary->fb->width,
+@@ -7711,8 +7716,7 @@ static void ironlake_get_plane_config(struct intel_crtc *crtc,
+ 	aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
+ 					    plane_config->tiled);
+ 
+-	plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
+-					aligned_height);
++	plane_config->size = crtc->base.primary->fb->pitches[0] * aligned_height;
+ 
+ 	DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ 		      pipe, plane, crtc->base.primary->fb->width,
+diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
+index 7d9c340f7693..2d28c3377ff8 100644
+--- a/drivers/gpu/drm/i915/intel_sprite.c
++++ b/drivers/gpu/drm/i915/intel_sprite.c
+@@ -1495,7 +1495,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
+ 	drm_modeset_lock_all(dev);
+ 
+ 	plane = drm_plane_find(dev, set->plane_id);
+-	if (!plane) {
++	if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) {
+ 		ret = -ENOENT;
+ 		goto out_unlock;
+ 	}
+@@ -1522,7 +1522,7 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
+ 	drm_modeset_lock_all(dev);
+ 
+ 	plane = drm_plane_find(dev, get->plane_id);
+-	if (!plane) {
++	if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) {
+ 		ret = -ENOENT;
+ 		goto out_unlock;
+ 	}
+diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
+index 3f2a8d3febca..6cb4dc1ec1d0 100644
+--- a/drivers/gpu/drm/radeon/radeon.h
++++ b/drivers/gpu/drm/radeon/radeon.h
+@@ -1565,6 +1565,7 @@ struct radeon_dpm {
+ 	int			new_active_crtc_count;
+ 	u32			current_active_crtcs;
+ 	int			current_active_crtc_count;
++	bool single_display;
+ 	struct radeon_dpm_dynamic_state dyn_state;
+ 	struct radeon_dpm_fan fan;
+ 	u32 tdp_limit;
+diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
+index 63ccb8fa799c..d27e4ccb848c 100644
+--- a/drivers/gpu/drm/radeon/radeon_bios.c
++++ b/drivers/gpu/drm/radeon/radeon_bios.c
+@@ -76,7 +76,7 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev)
+ 
+ static bool radeon_read_bios(struct radeon_device *rdev)
+ {
+-	uint8_t __iomem *bios;
++	uint8_t __iomem *bios, val1, val2;
+ 	size_t size;
+ 
+ 	rdev->bios = NULL;
+@@ -86,15 +86,19 @@ static bool radeon_read_bios(struct radeon_device *rdev)
+ 		return false;
+ 	}
+ 
+-	if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
++	val1 = readb(&bios[0]);
++	val2 = readb(&bios[1]);
++
++	if (size == 0 || val1 != 0x55 || val2 != 0xaa) {
+ 		pci_unmap_rom(rdev->pdev, bios);
+ 		return false;
+ 	}
+-	rdev->bios = kmemdup(bios, size, GFP_KERNEL);
++	rdev->bios = kzalloc(size, GFP_KERNEL);
+ 	if (rdev->bios == NULL) {
+ 		pci_unmap_rom(rdev->pdev, bios);
+ 		return false;
+ 	}
++	memcpy_fromio(rdev->bios, bios, size);
+ 	pci_unmap_rom(rdev->pdev, bios);
+ 	return true;
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
+index a69bd441dd2d..572b4dbec186 100644
+--- a/drivers/gpu/drm/radeon/radeon_mn.c
++++ b/drivers/gpu/drm/radeon/radeon_mn.c
+@@ -122,7 +122,6 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
+ 	it = interval_tree_iter_first(&rmn->objects, start, end);
+ 	while (it) {
+ 		struct radeon_bo *bo;
+-		struct fence *fence;
+ 		int r;
+ 
+ 		bo = container_of(it, struct radeon_bo, mn_it);
+@@ -134,12 +133,10 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
+ 			continue;
+ 		}
+ 
+-		fence = reservation_object_get_excl(bo->tbo.resv);
+-		if (fence) {
+-			r = radeon_fence_wait((struct radeon_fence *)fence, false);
+-			if (r)
+-				DRM_ERROR("(%d) failed to wait for user bo\n", r);
+-		}
++		r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true,
++			false, MAX_SCHEDULE_TIMEOUT);
++		if (r)
++			DRM_ERROR("(%d) failed to wait for user bo\n", r);
+ 
+ 		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
+ 		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
+index f7da8fe96a66..1d94b542cd82 100644
+--- a/drivers/gpu/drm/radeon/radeon_pm.c
++++ b/drivers/gpu/drm/radeon/radeon_pm.c
+@@ -704,12 +704,8 @@ static void radeon_dpm_thermal_work_handler(struct work_struct *work)
+ 	radeon_pm_compute_clocks(rdev);
+ }
+ 
+-static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
+-						     enum radeon_pm_state_type dpm_state)
++static bool radeon_dpm_single_display(struct radeon_device *rdev)
+ {
+-	int i;
+-	struct radeon_ps *ps;
+-	u32 ui_class;
+ 	bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ?
+ 		true : false;
+ 
+@@ -719,6 +715,17 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
+ 			single_display = false;
+ 	}
+ 
++	return single_display;
++}
++
++static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
++						     enum radeon_pm_state_type dpm_state)
++{
++	int i;
++	struct radeon_ps *ps;
++	u32 ui_class;
++	bool single_display = radeon_dpm_single_display(rdev);
++
+ 	/* certain older asics have a separate 3D performance state,
+ 	 * so try that first if the user selected performance
+ 	 */
+@@ -844,6 +851,7 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
+ 	struct radeon_ps *ps;
+ 	enum radeon_pm_state_type dpm_state;
+ 	int ret;
++	bool single_display = radeon_dpm_single_display(rdev);
+ 
+ 	/* if dpm init failed */
+ 	if (!rdev->pm.dpm_enabled)
+@@ -868,6 +876,9 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
+ 		/* vce just modifies an existing state so force a change */
+ 		if (ps->vce_active != rdev->pm.dpm.vce_active)
+ 			goto force;
++		/* user has made a display change (such as timing) */
++		if (rdev->pm.dpm.single_display != single_display)
++			goto force;
+ 		if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
+ 			/* for pre-BTC and APUs if the num crtcs changed but state is the same,
+ 			 * all we need to do is update the display configuration.
+@@ -930,6 +941,7 @@ force:
+ 
+ 	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
+ 	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
++	rdev->pm.dpm.single_display = single_display;
+ 
+ 	/* wait for the rings to drain */
+ 	for (i = 0; i < RADEON_NUM_RINGS; i++) {
+diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
+index d02aa1d0f588..b292aca0f342 100644
+--- a/drivers/gpu/drm/radeon/radeon_ttm.c
++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
+@@ -598,6 +598,10 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
+ 	enum dma_data_direction direction = write ?
+ 		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+ 
++	/* double check that we don't free the table twice */
++	if (!ttm->sg->sgl)
++		return;
++
+ 	/* free the sg table and pages again */
+ 	dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
+ 
+diff --git a/drivers/iio/accel/bmc150-accel.c b/drivers/iio/accel/bmc150-accel.c
+index 066d0c04072c..9f5b41bde7ba 100644
+--- a/drivers/iio/accel/bmc150-accel.c
++++ b/drivers/iio/accel/bmc150-accel.c
+@@ -168,14 +168,14 @@ static const struct {
+ 	int val;
+ 	int val2;
+ 	u8 bw_bits;
+-} bmc150_accel_samp_freq_table[] = { {7, 810000, 0x08},
+-				     {15, 630000, 0x09},
+-				     {31, 250000, 0x0A},
+-				     {62, 500000, 0x0B},
+-				     {125, 0, 0x0C},
+-				     {250, 0, 0x0D},
+-				     {500, 0, 0x0E},
+-				     {1000, 0, 0x0F} };
++} bmc150_accel_samp_freq_table[] = { {15, 620000, 0x08},
++				     {31, 260000, 0x09},
++				     {62, 500000, 0x0A},
++				     {125, 0, 0x0B},
++				     {250, 0, 0x0C},
++				     {500, 0, 0x0D},
++				     {1000, 0, 0x0E},
++				     {2000, 0, 0x0F} };
+ 
+ static const struct {
+ 	int bw_bits;
+@@ -840,7 +840,7 @@ static int bmc150_accel_validate_trigger(struct iio_dev *indio_dev,
+ }
+ 
+ static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
+-		"7.810000 15.630000 31.250000 62.500000 125 250 500 1000");
++		"15.620000 31.260000 62.50000 125 250 500 1000 2000");
+ 
+ static struct attribute *bmc150_accel_attributes[] = {
+ 	&iio_const_attr_sampling_frequency_available.dev_attr.attr,
+diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
+index 8ec353c01d98..e63b8e76d4c3 100644
+--- a/drivers/iio/adc/vf610_adc.c
++++ b/drivers/iio/adc/vf610_adc.c
+@@ -141,9 +141,13 @@ struct vf610_adc {
+ 	struct regulator *vref;
+ 	struct vf610_adc_feature adc_feature;
+ 
++	u32 sample_freq_avail[5];
++
+ 	struct completion completion;
+ };
+ 
++static const u32 vf610_hw_avgs[] = { 1, 4, 8, 16, 32 };
++
+ #define VF610_ADC_CHAN(_idx, _chan_type) {			\
+ 	.type = (_chan_type),					\
+ 	.indexed = 1,						\
+@@ -180,35 +184,47 @@ static const struct iio_chan_spec vf610_adc_iio_channels[] = {
+ 	/* sentinel */
+ };
+ 
+-/*
+- * ADC sample frequency, unit is ADCK cycles.
+- * ADC clk source is ipg clock, which is the same as bus clock.
+- *
+- * ADC conversion time = SFCAdder + AverageNum x (BCT + LSTAdder)
+- * SFCAdder: fixed to 6 ADCK cycles
+- * AverageNum: 1, 4, 8, 16, 32 samples for hardware average.
+- * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode
+- * LSTAdder(Long Sample Time): fixed to 3 ADCK cycles
+- *
+- * By default, enable 12 bit resolution mode, clock source
+- * set to ipg clock, So get below frequency group:
+- */
+-static const u32 vf610_sample_freq_avail[5] =
+-{1941176, 559332, 286957, 145374, 73171};
++static inline void vf610_adc_calculate_rates(struct vf610_adc *info)
++{
++	unsigned long adck_rate, ipg_rate = clk_get_rate(info->clk);
++	int i;
++
++	/*
++	 * Calculate ADC sample frequencies
++	 * Sample time unit is ADCK cycles. ADCK clk source is ipg clock,
++	 * which is the same as bus clock.
++	 *
++	 * ADC conversion time = SFCAdder + AverageNum x (BCT + LSTAdder)
++	 * SFCAdder: fixed to 6 ADCK cycles
++	 * AverageNum: 1, 4, 8, 16, 32 samples for hardware average.
++	 * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode
++	 * LSTAdder(Long Sample Time): fixed to 3 ADCK cycles
++	 */
++	adck_rate = ipg_rate / info->adc_feature.clk_div;
++	for (i = 0; i < ARRAY_SIZE(vf610_hw_avgs); i++)
++		info->sample_freq_avail[i] =
++			adck_rate / (6 + vf610_hw_avgs[i] * (25 + 3));
++}
+ 
+ static inline void vf610_adc_cfg_init(struct vf610_adc *info)
+ {
++	struct vf610_adc_feature *adc_feature = &info->adc_feature;
++
+ 	/* set default Configuration for ADC controller */
+-	info->adc_feature.clk_sel = VF610_ADCIOC_BUSCLK_SET;
+-	info->adc_feature.vol_ref = VF610_ADCIOC_VR_VREF_SET;
++	adc_feature->clk_sel = VF610_ADCIOC_BUSCLK_SET;
++	adc_feature->vol_ref = VF610_ADCIOC_VR_VREF_SET;
++
++	adc_feature->calibration = true;
++	adc_feature->ovwren = true;
++
++	adc_feature->res_mode = 12;
++	adc_feature->sample_rate = 1;
++	adc_feature->lpm = true;
+ 
+-	info->adc_feature.calibration = true;
+-	info->adc_feature.ovwren = true;
++	/* Use a safe ADCK which is below 20 MHz on all devices */
++	adc_feature->clk_div = 8;
+ 
+-	info->adc_feature.clk_div = 1;
+-	info->adc_feature.res_mode = 12;
+-	info->adc_feature.sample_rate = 1;
+-	info->adc_feature.lpm = true;
++	vf610_adc_calculate_rates(info);
+ }
+ 
+ static void vf610_adc_cfg_post_set(struct vf610_adc *info)
+@@ -290,12 +306,10 @@ static void vf610_adc_cfg_set(struct vf610_adc *info)
+ 
+ 	cfg_data = readl(info->regs + VF610_REG_ADC_CFG);
+ 
+-	/* low power configuration */
+ 	cfg_data &= ~VF610_ADC_ADLPC_EN;
+ 	if (adc_feature->lpm)
+ 		cfg_data |= VF610_ADC_ADLPC_EN;
+ 
+-	/* disable high speed */
+ 	cfg_data &= ~VF610_ADC_ADHSC_EN;
+ 
+ 	writel(cfg_data, info->regs + VF610_REG_ADC_CFG);
+@@ -435,10 +449,27 @@ static irqreturn_t vf610_adc_isr(int irq, void *dev_id)
+ 	return IRQ_HANDLED;
+ }
+ 
+-static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("1941176, 559332, 286957, 145374, 73171");
++static ssize_t vf610_show_samp_freq_avail(struct device *dev,
++				struct device_attribute *attr, char *buf)
++{
++	struct vf610_adc *info = iio_priv(dev_to_iio_dev(dev));
++	size_t len = 0;
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(info->sample_freq_avail); i++)
++		len += scnprintf(buf + len, PAGE_SIZE - len,
++			"%u ", info->sample_freq_avail[i]);
++
++	/* replace trailing space by newline */
++	buf[len - 1] = '\n';
++
++	return len;
++}
++
++static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(vf610_show_samp_freq_avail);
+ 
+ static struct attribute *vf610_attributes[] = {
+-	&iio_const_attr_sampling_frequency_available.dev_attr.attr,
++	&iio_dev_attr_sampling_frequency_available.dev_attr.attr,
+ 	NULL
+ };
+ 
+@@ -502,7 +533,7 @@ static int vf610_read_raw(struct iio_dev *indio_dev,
+ 		return IIO_VAL_FRACTIONAL_LOG2;
+ 
+ 	case IIO_CHAN_INFO_SAMP_FREQ:
+-		*val = vf610_sample_freq_avail[info->adc_feature.sample_rate];
++		*val = info->sample_freq_avail[info->adc_feature.sample_rate];
+ 		*val2 = 0;
+ 		return IIO_VAL_INT;
+ 
+@@ -525,9 +556,9 @@ static int vf610_write_raw(struct iio_dev *indio_dev,
+ 	switch (mask) {
+ 		case IIO_CHAN_INFO_SAMP_FREQ:
+ 			for (i = 0;
+-				i < ARRAY_SIZE(vf610_sample_freq_avail);
++				i < ARRAY_SIZE(info->sample_freq_avail);
+ 				i++)
+-				if (val == vf610_sample_freq_avail[i]) {
++				if (val == info->sample_freq_avail[i]) {
+ 					info->adc_feature.sample_rate = i;
+ 					vf610_adc_sample_set(info);
+ 					return 0;
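
The conversion-time formula quoted in vf610_adc_calculate_rates() above — conversion time = SFCAdder + AverageNum x (BCT + LSTAdder), in ADCK cycles — fully determines the advertised rates. A minimal standalone sketch of the same arithmetic; the 66 MHz ipg rate is an assumed value for illustration, where the driver reads the real rate via clk_get_rate():

#include <stdio.h>

#define SFC_ADDER  6	/* fixed ADCK cycles */
#define BCT       25	/* base conversion time, 12-bit mode */
#define LST_ADDER  3	/* long sample time */

static const unsigned int hw_avgs[] = { 1, 4, 8, 16, 32 };

int main(void)
{
	unsigned long ipg_rate = 66000000;	/* assumed ipg clock rate */
	unsigned long adck_rate = ipg_rate / 8;	/* clk_div of 8, per the patch */
	unsigned int i;

	for (i = 0; i < sizeof(hw_avgs) / sizeof(hw_avgs[0]); i++)
		printf("avg %2u: %lu Hz\n", hw_avgs[i],
		       adck_rate / (SFC_ADDER + hw_avgs[i] * (BCT + LST_ADDER)));
	return 0;
}
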
+diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c
+index e0017c22bb9c..f53e9a803a0e 100644
+--- a/drivers/iio/imu/adis_trigger.c
++++ b/drivers/iio/imu/adis_trigger.c
+@@ -60,7 +60,7 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
+ 	iio_trigger_set_drvdata(adis->trig, adis);
+ 	ret = iio_trigger_register(adis->trig);
+ 
+-	indio_dev->trig = adis->trig;
++	indio_dev->trig = iio_trigger_get(adis->trig);
+ 	if (ret)
+ 		goto error_free_irq;
+ 
+diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+index 0cd306a72a6e..ba27e277511f 100644
+--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
++++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+@@ -24,6 +24,16 @@
+ #include <linux/poll.h>
+ #include "inv_mpu_iio.h"
+ 
++static void inv_clear_kfifo(struct inv_mpu6050_state *st)
++{
++	unsigned long flags;
++
++	/* take the spinlock to keep the interrupt handler from kicking in */
++	spin_lock_irqsave(&st->time_stamp_lock, flags);
++	kfifo_reset(&st->timestamps);
++	spin_unlock_irqrestore(&st->time_stamp_lock, flags);
++}
++
+ int inv_reset_fifo(struct iio_dev *indio_dev)
+ {
+ 	int result;
+@@ -50,6 +60,10 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
+ 					INV_MPU6050_BIT_FIFO_RST);
+ 	if (result)
+ 		goto reset_fifo_fail;
++
++	/* clear timestamps fifo */
++	inv_clear_kfifo(st);
++
+ 	/* enable interrupt */
+ 	if (st->chip_config.accl_fifo_enable ||
+ 	    st->chip_config.gyro_fifo_enable) {
+@@ -83,16 +97,6 @@ reset_fifo_fail:
+ 	return result;
+ }
+ 
+-static void inv_clear_kfifo(struct inv_mpu6050_state *st)
+-{
+-	unsigned long flags;
+-
+-	/* take the spin lock sem to avoid interrupt kick in */
+-	spin_lock_irqsave(&st->time_stamp_lock, flags);
+-	kfifo_reset(&st->timestamps);
+-	spin_unlock_irqrestore(&st->time_stamp_lock, flags);
+-}
+-
+ /**
+  * inv_mpu6050_irq_handler() - Cache a timestamp at each data ready interrupt.
+  */
+@@ -184,7 +188,6 @@ end_session:
+ flush_fifo:
+ 	/* Flush HW and SW FIFOs. */
+ 	inv_reset_fifo(indio_dev);
+-	inv_clear_kfifo(st);
+ 	mutex_unlock(&indio_dev->mlock);
+ 	iio_trigger_notify_done(indio_dev->trig);
+ 
+diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
+index af3e76d652ba..f009d053384a 100644
+--- a/drivers/iio/industrialio-core.c
++++ b/drivers/iio/industrialio-core.c
+@@ -832,8 +832,7 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
+  * @attr_list: List of IIO device attributes
+  *
+  * This function frees the memory allocated for each of the IIO device
+- * attributes in the list. Note: if you want to reuse the list after calling
+- * this function you have to reinitialize it using INIT_LIST_HEAD().
++ * attributes in the list.
+  */
+ void iio_free_chan_devattr_list(struct list_head *attr_list)
+ {
+@@ -841,6 +840,7 @@ void iio_free_chan_devattr_list(struct list_head *attr_list)
+ 
+ 	list_for_each_entry_safe(p, n, attr_list, l) {
+ 		kfree(p->dev_attr.attr.name);
++		list_del(&p->l);
+ 		kfree(p);
+ 	}
+ }
+@@ -921,6 +921,7 @@ static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
+ 
+ 	iio_free_chan_devattr_list(&indio_dev->channel_attr_list);
+ 	kfree(indio_dev->chan_attr_group.attrs);
++	indio_dev->chan_attr_group.attrs = NULL;
+ }
+ 
+ static void iio_dev_release(struct device *device)
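
The list_del() added above is what lets iio_free_chan_devattr_list() leave the list empty and safely reusable, rather than full of pointers into freed memory. A userspace sketch of the same free loop; the hand-rolled list here is a stand-in for the kernel's <linux/list.h> helpers:

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *prev, *next; int v; };

static void list_del(struct node *n)	/* unlink before freeing */
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

int main(void)
{
	struct node head = { &head, &head, 0 }, *p, *n;
	int i;

	for (i = 0; i < 3; i++) {	/* build a short list */
		p = malloc(sizeof(*p));
		p->v = i;
		p->next = head.next;
		p->prev = &head;
		head.next->prev = p;
		head.next = p;
	}
	for (p = head.next; p != &head; p = n) {	/* _safe iteration */
		n = p->next;
		list_del(p);	/* without this, head keeps pointing at freed nodes */
		free(p);
	}
	printf("list empty and reusable: %d\n", head.next == &head);
	return 0;
}
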
+diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
+index 0c1e37e3120a..35c02aeec75e 100644
+--- a/drivers/iio/industrialio-event.c
++++ b/drivers/iio/industrialio-event.c
+@@ -493,6 +493,7 @@ int iio_device_register_eventset(struct iio_dev *indio_dev)
+ error_free_setup_event_lines:
+ 	iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list);
+ 	kfree(indio_dev->event_interface);
++	indio_dev->event_interface = NULL;
+ 	return ret;
+ }
+ 
+diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
+index aec7a6aa2951..8c014b5dab4c 100644
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -99,6 +99,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
+ 	if (dmasync)
+ 		dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
+ 
++	/*
++	 * If the combination of the addr and size requested for this memory
++	 * region causes an integer overflow, return error.
++	 */
++	if ((PAGE_ALIGN(addr + size) <= size) ||
++	    (PAGE_ALIGN(addr + size) <= addr))
++		return ERR_PTR(-EINVAL);
++
+ 	if (!can_do_mlock())
+ 		return ERR_PTR(-EPERM);
+ 
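
The ib_umem_get() check above catches addr + size wrapping past the top of the address space, in which case the page-aligned end comes out no larger than either input. A standalone sketch of the same test; the PAGE_SIZE definition and example values are assumptions for illustration:

#include <stdio.h>
#include <limits.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long addr = ULONG_MAX - 2 * PAGE_SIZE;	/* near the top */
	unsigned long size = 3 * PAGE_SIZE;	/* addr + size wraps past zero */
	unsigned long end = PAGE_ALIGN(addr + size);

	/* Same test as the patch: a wrapped end is <= one of the inputs. */
	if (end <= size || end <= addr)
		printf("rejected: aligned end 0x%lx wrapped around\n", end);
	return 0;
}
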
+diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
+index 82a7dd87089b..729382c06c5e 100644
+--- a/drivers/infiniband/hw/mlx4/mad.c
++++ b/drivers/infiniband/hw/mlx4/mad.c
+@@ -64,6 +64,14 @@ enum {
+ #define GUID_TBL_BLK_NUM_ENTRIES 8
+ #define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)
+ 
++/* Counters should saturate once they reach their maximum value */
++#define ASSIGN_32BIT_COUNTER(counter, value) do {\
++	if ((value) > U32_MAX)			 \
++		counter = cpu_to_be32(U32_MAX); \
++	else					 \
++		counter = cpu_to_be32(value);	 \
++} while (0)
++
+ struct mlx4_mad_rcv_buf {
+ 	struct ib_grh grh;
+ 	u8 payload[256];
+@@ -806,10 +814,14 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ static void edit_counter(struct mlx4_counter *cnt,
+ 					struct ib_pma_portcounters *pma_cnt)
+ {
+-	pma_cnt->port_xmit_data = cpu_to_be32((be64_to_cpu(cnt->tx_bytes)>>2));
+-	pma_cnt->port_rcv_data  = cpu_to_be32((be64_to_cpu(cnt->rx_bytes)>>2));
+-	pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames));
+-	pma_cnt->port_rcv_packets  = cpu_to_be32(be64_to_cpu(cnt->rx_frames));
++	ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
++			     (be64_to_cpu(cnt->tx_bytes) >> 2));
++	ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
++			     (be64_to_cpu(cnt->rx_bytes) >> 2));
++	ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
++			     be64_to_cpu(cnt->tx_frames));
++	ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
++			     be64_to_cpu(cnt->rx_frames));
+ }
+ 
+ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
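
The ASSIGN_32BIT_COUNTER() macro above clamps a 64-bit hardware count into a 32-bit PMA counter instead of letting it truncate and appear to run backwards. A userspace sketch of the same saturating assignment; the cpu_to_be32() byte swap from the patch is dropped to keep the focus on the clamp:

#include <stdio.h>
#include <stdint.h>

#define ASSIGN_32BIT_COUNTER(counter, value) do {	\
	if ((value) > UINT32_MAX)			\
		(counter) = UINT32_MAX;			\
	else						\
		(counter) = (uint32_t)(value);		\
} while (0)

int main(void)
{
	uint64_t tx_bytes = 5ULL << 32;	/* more than 4 GiB transferred */
	uint32_t pma;

	ASSIGN_32BIT_COUNTER(pma, tx_bytes >> 2);	/* bytes -> 32-bit words */
	printf("0x%08x\n", pma);	/* 0xffffffff: clamped, not truncated */
	return 0;
}
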
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 40dfbc0444c0..8dd078b83e3b 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -1749,8 +1749,8 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
+ static void domain_exit(struct dmar_domain *domain)
+ {
+ 	struct dmar_drhd_unit *drhd;
+-	struct intel_iommu *iommu;
+ 	struct page *freelist = NULL;
++	int i;
+ 
+ 	/* Domain 0 is reserved, so don't process it */
+ 	if (!domain)
+@@ -1770,8 +1770,8 @@ static void domain_exit(struct dmar_domain *domain)
+ 
+ 	/* clear attached or cached domains */
+ 	rcu_read_lock();
+-	for_each_active_iommu(iommu, drhd)
+-		iommu_detach_domain(domain, iommu);
++	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus)
++		iommu_detach_domain(domain, g_iommus[i]);
+ 	rcu_read_unlock();
+ 
+ 	dma_free_pagelist(freelist);
+diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c
+index e4901a503c73..63c0ee5d0bf5 100644
+--- a/drivers/media/pci/cx23885/cx23885-417.c
++++ b/drivers/media/pci/cx23885/cx23885-417.c
+@@ -1339,14 +1339,13 @@ static int vidioc_querycap(struct file *file, void  *priv,
+ 	strlcpy(cap->driver, dev->name, sizeof(cap->driver));
+ 	strlcpy(cap->card, cx23885_boards[tsport->dev->board].name,
+ 		sizeof(cap->card));
+-	sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
+-	cap->capabilities =
+-		V4L2_CAP_VIDEO_CAPTURE |
+-		V4L2_CAP_READWRITE     |
+-		V4L2_CAP_STREAMING     |
+-		0;
++	sprintf(cap->bus_info, "PCIe:%s", pci_name(dev->pci));
++	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
++			   V4L2_CAP_STREAMING;
+ 	if (dev->tuner_type != TUNER_ABSENT)
+-		cap->capabilities |= V4L2_CAP_TUNER;
++		cap->device_caps |= V4L2_CAP_TUNER;
++	cap->capabilities = cap->device_caps | V4L2_CAP_VBI_CAPTURE |
++		V4L2_CAP_AUDIO | V4L2_CAP_DEVICE_CAPS;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+index 15f7663dd9f5..24262bbb1a35 100644
+--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
++++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+@@ -29,7 +29,7 @@
+ 
+ /* Offset base used to differentiate between CAPTURE and OUTPUT
+ *  while mmaping */
+-#define DST_QUEUE_OFF_BASE      (TASK_SIZE / 2)
++#define DST_QUEUE_OFF_BASE	(1 << 30)
+ 
+ #define MFC_BANK1_ALLOC_CTX	0
+ #define MFC_BANK2_ALLOC_CTX	1
+diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
+index aaa1f6f25a29..4d507fef0c84 100644
+--- a/drivers/media/platform/sh_veu.c
++++ b/drivers/media/platform/sh_veu.c
+@@ -1179,6 +1179,7 @@ static int sh_veu_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	*vdev = sh_veu_videodev;
++	vdev->v4l2_dev = &veu->v4l2_dev;
+ 	spin_lock_init(&veu->lock);
+ 	mutex_init(&veu->fop_lock);
+ 	vdev->lock = &veu->fop_lock;
+diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
+index b3db51c82bde..f9dfebb0bac2 100644
+--- a/drivers/media/platform/soc_camera/soc_camera.c
++++ b/drivers/media/platform/soc_camera/soc_camera.c
+@@ -1681,7 +1681,7 @@ eclkreg:
+ eaddpdev:
+ 	platform_device_put(sasc->pdev);
+ eallocpdev:
+-	devm_kfree(ici->v4l2_dev.dev, sasc);
++	devm_kfree(ici->v4l2_dev.dev, info);
+ 	dev_err(ici->v4l2_dev.dev, "group probe failed: %d\n", ret);
+ 
+ 	return ret;
+diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
+index bc08a829bc13..cc16e76a2493 100644
+--- a/drivers/media/v4l2-core/videobuf2-core.c
++++ b/drivers/media/v4l2-core/videobuf2-core.c
+@@ -3230,18 +3230,13 @@ int vb2_thread_stop(struct vb2_queue *q)
+ 
+ 	if (threadio == NULL)
+ 		return 0;
+-	call_void_qop(q, wait_finish, q);
+ 	threadio->stop = true;
+-	vb2_internal_streamoff(q, q->type);
+-	call_void_qop(q, wait_prepare, q);
++	/* Wake up all pending sleeps in the thread */
++	vb2_queue_error(q);
+ 	err = kthread_stop(threadio->thread);
+-	q->fileio = NULL;
+-	fileio->req.count = 0;
+-	vb2_reqbufs(q, &fileio->req);
+-	kfree(fileio);
++	__vb2_cleanup_fileio(q);
+ 	threadio->thread = NULL;
+ 	kfree(threadio);
+-	q->fileio = NULL;
+ 	q->threadio = NULL;
+ 	return err;
+ }
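
The reworked vb2_thread_stop() wakes the file-I/O thread out of its sleep with vb2_queue_error() before calling kthread_stop(), since joining a thread that is still blocked waiting for a buffer would deadlock. A userspace pthread sketch of that shutdown ordering; the mutex, condvar, and flag are stand-ins for the vb2 internals:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool stop;

static void *worker(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!stop)	/* stands in for the thread's wait for a buffer */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	pthread_mutex_lock(&lock);
	stop = true;			/* threadio->stop = true */
	pthread_cond_broadcast(&cond);	/* vb2_queue_error(): wake the sleeper */
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);		/* kthread_stop(): now safe to join */
	printf("worker joined\n");
	return 0;
}
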
+diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
+index b481d20c8372..69e0483adfee 100644
+--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
++++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
+@@ -632,8 +632,7 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
+ 	}
+ 
+ 	/* extract page list from userspace mapping */
+-	ret = vb2_dc_get_user_pages(start, pages, n_pages, vma,
+-				    dma_dir == DMA_FROM_DEVICE);
++	ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, dma_dir);
+ 	if (ret) {
+ 		unsigned long pfn;
+ 		if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index b1d583ba9674..334453fa7e9a 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -593,13 +593,12 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
+ 		rx_state = unlikely(reg_esr & FLEXCAN_ESR_RX_WRN) ?
+ 			   CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE;
+ 		new_state = max(tx_state, rx_state);
+-	} else if (unlikely(flt == FLEXCAN_ESR_FLT_CONF_PASSIVE)) {
++	} else {
+ 		__flexcan_get_berr_counter(dev, &bec);
+-		new_state = CAN_STATE_ERROR_PASSIVE;
++		new_state = flt == FLEXCAN_ESR_FLT_CONF_PASSIVE ?
++			    CAN_STATE_ERROR_PASSIVE : CAN_STATE_BUS_OFF;
+ 		rx_state = bec.rxerr >= bec.txerr ? new_state : 0;
+ 		tx_state = bec.rxerr <= bec.txerr ? new_state : 0;
+-	} else {
+-		new_state = CAN_STATE_BUS_OFF;
+ 	}
+ 
+ 	/* state hasn't changed */
+@@ -1159,12 +1158,19 @@ static int flexcan_probe(struct platform_device *pdev)
+ 	const struct flexcan_devtype_data *devtype_data;
+ 	struct net_device *dev;
+ 	struct flexcan_priv *priv;
++	struct regulator *reg_xceiver;
+ 	struct resource *mem;
+ 	struct clk *clk_ipg = NULL, *clk_per = NULL;
+ 	void __iomem *base;
+ 	int err, irq;
+ 	u32 clock_freq = 0;
+ 
++	reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
++	if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
++		return -EPROBE_DEFER;
++	else if (IS_ERR(reg_xceiver))
++		reg_xceiver = NULL;
++
+ 	if (pdev->dev.of_node)
+ 		of_property_read_u32(pdev->dev.of_node,
+ 						"clock-frequency", &clock_freq);
+@@ -1226,9 +1232,7 @@ static int flexcan_probe(struct platform_device *pdev)
+ 	priv->pdata = dev_get_platdata(&pdev->dev);
+ 	priv->devtype_data = devtype_data;
+ 
+-	priv->reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
+-	if (IS_ERR(priv->reg_xceiver))
+-		priv->reg_xceiver = NULL;
++	priv->reg_xceiver = reg_xceiver;
+ 
+ 	netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);
+ 
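
The probe rework above looks up the optional xceiver regulator before any other setup, so an -EPROBE_DEFER from a not-yet-bound supplier propagates cleanly and only a genuinely absent regulator is treated as optional. A kernel-style sketch of that pattern, reduced to the lookup itself (the rest of the probe body is elided):

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

static int example_probe(struct platform_device *pdev)
{
	struct regulator *reg;

	reg = devm_regulator_get(&pdev->dev, "xceiver");
	if (PTR_ERR(reg) == -EPROBE_DEFER)
		return -EPROBE_DEFER;	/* supplier not bound yet; retry later */
	else if (IS_ERR(reg))
		reg = NULL;		/* regulator truly absent: optional */

	/* ... allocations and registration happen only past this point ... */
	return 0;
}
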
+diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
+index cb366adc820b..f50a6bc5d06e 100644
+--- a/drivers/net/wireless/ath/ath9k/beacon.c
++++ b/drivers/net/wireless/ath/ath9k/beacon.c
+@@ -219,12 +219,15 @@ void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif)
+ 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ 	struct ath_vif *avp = (void *)vif->drv_priv;
+ 	struct ath_buf *bf = avp->av_bcbuf;
++	struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon;
+ 
+ 	ath_dbg(common, CONFIG, "Removing interface at beacon slot: %d\n",
+ 		avp->av_bslot);
+ 
+ 	tasklet_disable(&sc->bcon_tasklet);
+ 
++	cur_conf->enable_beacon &= ~BIT(avp->av_bslot);
++
+ 	if (bf && bf->bf_mpdu) {
+ 		struct sk_buff *skb = bf->bf_mpdu;
+ 		dma_unmap_single(sc->dev, bf->bf_buf_addr,
+@@ -521,8 +524,7 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc,
+ 	}
+ 
+ 	if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) {
+-		if ((vif->type != NL80211_IFTYPE_AP) ||
+-		    (sc->nbcnvifs > 1)) {
++		if (vif->type != NL80211_IFTYPE_AP) {
+ 			ath_dbg(common, CONFIG,
+ 				"An AP interface is already present !\n");
+ 			return false;
+@@ -616,12 +618,14 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
+ 	 * enabling/disabling SWBA.
+ 	 */
+ 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
+-		if (!bss_conf->enable_beacon &&
+-		    (sc->nbcnvifs <= 1)) {
+-			cur_conf->enable_beacon = false;
+-		} else if (bss_conf->enable_beacon) {
+-			cur_conf->enable_beacon = true;
+-			ath9k_cache_beacon_config(sc, ctx, bss_conf);
++		bool enabled = cur_conf->enable_beacon;
++
++		if (!bss_conf->enable_beacon) {
++			cur_conf->enable_beacon &= ~BIT(avp->av_bslot);
++		} else {
++			cur_conf->enable_beacon |= BIT(avp->av_bslot);
++			if (!enabled)
++				ath9k_cache_beacon_config(sc, ctx, bss_conf);
+ 		}
+ 	}
+ 
+diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
+index 2b79a568e803..d23737342f4f 100644
+--- a/drivers/net/wireless/ath/ath9k/common.h
++++ b/drivers/net/wireless/ath/ath9k/common.h
+@@ -54,7 +54,7 @@ struct ath_beacon_config {
+ 	u16 dtim_period;
+ 	u16 bmiss_timeout;
+ 	u8 dtim_count;
+-	bool enable_beacon;
++	u8 enable_beacon;
+ 	bool ibss_creator;
+ 	u32 nexttbtt;
+ 	u32 intval;
+diff --git a/drivers/net/wireless/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/brcm80211/brcmfmac/feature.c
+index defb7a44e0bc..7748a1ccf14f 100644
+--- a/drivers/net/wireless/brcm80211/brcmfmac/feature.c
++++ b/drivers/net/wireless/brcm80211/brcmfmac/feature.c
+@@ -126,7 +126,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
+ 	brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan");
+ 	if (drvr->bus_if->wowl_supported)
+ 		brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl");
+-	brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0);
++	if (drvr->bus_if->chip != BRCM_CC_43362_CHIP_ID)
++		brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0);
+ 
+ 	/* set chip related quirks */
+ 	switch (drvr->bus_if->chip) {
+diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
+index a6f22c32a279..3811878ab9cd 100644
+--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
++++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
+@@ -708,7 +708,6 @@ struct iwl_priv {
+ 	unsigned long reload_jiffies;
+ 	int reload_count;
+ 	bool ucode_loaded;
+-	bool init_ucode_run;		/* Don't run init uCode again */
+ 
+ 	u8 plcp_delta_threshold;
+ 
+diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
+index d5cee1530597..80b8094deed1 100644
+--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
++++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
+@@ -418,9 +418,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
+ 	if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len)
+ 		return 0;
+ 
+-	if (priv->init_ucode_run)
+-		return 0;
+-
+ 	iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
+ 				   calib_complete, ARRAY_SIZE(calib_complete),
+ 				   iwlagn_wait_calib, priv);
+@@ -440,8 +437,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
+ 	 */
+ 	ret = iwl_wait_notification(&priv->notif_wait, &calib_wait,
+ 					UCODE_CALIB_TIMEOUT);
+-	if (!ret)
+-		priv->init_ucode_run = true;
+ 
+ 	goto out;
+ 
+diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
+index e25faacf58b7..a5186bb7c63e 100644
+--- a/drivers/net/wireless/rtlwifi/pci.c
++++ b/drivers/net/wireless/rtlwifi/pci.c
+@@ -1118,12 +1118,22 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
+ 	/*This is for new trx flow*/
+ 	struct rtl_tx_buffer_desc *pbuffer_desc = NULL;
+ 	u8 temp_one = 1;
++	u8 *entry;
+ 
+ 	memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
+ 	ring = &rtlpci->tx_ring[BEACON_QUEUE];
+ 	pskb = __skb_dequeue(&ring->queue);
+-	if (pskb)
++	if (rtlpriv->use_new_trx_flow)
++		entry = (u8 *)(&ring->buffer_desc[ring->idx]);
++	else
++		entry = (u8 *)(&ring->desc[ring->idx]);
++	if (pskb) {
++		pci_unmap_single(rtlpci->pdev,
++				 rtlpriv->cfg->ops->get_desc(
++				 (u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
++				 pskb->len, PCI_DMA_TODEVICE);
+ 		kfree_skb(pskb);
++	}
+ 
+ 	/*NB: the beacon data buffer must be 32-bit aligned. */
+ 	pskb = ieee80211_beacon_get(hw, mac->vif);
+diff --git a/drivers/of/address.c b/drivers/of/address.c
+index ad2906919d45..78a7dcbec7d8 100644
+--- a/drivers/of/address.c
++++ b/drivers/of/address.c
+@@ -450,12 +450,17 @@ static struct of_bus *of_match_bus(struct device_node *np)
+ 	return NULL;
+ }
+ 
+-static int of_empty_ranges_quirk(void)
++static int of_empty_ranges_quirk(struct device_node *np)
+ {
+ 	if (IS_ENABLED(CONFIG_PPC)) {
+-		/* To save cycles, we cache the result */
++		/* To save cycles, we cache the result of the global "Mac" check */
+ 		static int quirk_state = -1;
+ 
++		/* PA-SEMI sdc DT bug */
++		if (of_device_is_compatible(np, "1682m-sdc"))
++			return true;
++
++		/* Make quirk cached */
+ 		if (quirk_state < 0)
+ 			quirk_state =
+ 				of_machine_is_compatible("Power Macintosh") ||
+@@ -490,7 +495,7 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
+ 	 * This code is only enabled on powerpc. --gcl
+ 	 */
+ 	ranges = of_get_property(parent, rprop, &rlen);
+-	if (ranges == NULL && !of_empty_ranges_quirk()) {
++	if (ranges == NULL && !of_empty_ranges_quirk(parent)) {
+ 		pr_debug("OF: no ranges; cannot translate\n");
+ 		return 1;
+ 	}
+diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
+index 17ca98657a28..aa2da59a154a 100644
+--- a/drivers/pci/host/pcie-designware.c
++++ b/drivers/pci/host/pcie-designware.c
+@@ -342,7 +342,7 @@ static const struct irq_domain_ops msi_domain_ops = {
+ 	.map = dw_pcie_msi_map,
+ };
+ 
+-int __init dw_pcie_host_init(struct pcie_port *pp)
++int dw_pcie_host_init(struct pcie_port *pp)
+ {
+ 	struct device_node *np = pp->dev->of_node;
+ 	struct platform_device *pdev = to_platform_device(pp->dev);
+diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c
+index 866465fd3dbf..020d78890719 100644
+--- a/drivers/pci/host/pcie-spear13xx.c
++++ b/drivers/pci/host/pcie-spear13xx.c
+@@ -269,7 +269,7 @@ static struct pcie_host_ops spear13xx_pcie_host_ops = {
+ 	.host_init = spear13xx_pcie_host_init,
+ };
+ 
+-static int __init spear13xx_add_pcie_port(struct pcie_port *pp,
++static int spear13xx_add_pcie_port(struct pcie_port *pp,
+ 					 struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+@@ -299,7 +299,7 @@ static int __init spear13xx_add_pcie_port(struct pcie_port *pp,
+ 	return 0;
+ }
+ 
+-static int __init spear13xx_pcie_probe(struct platform_device *pdev)
++static int spear13xx_pcie_probe(struct platform_device *pdev)
+ {
+ 	struct spear13xx_pcie *spear13xx_pcie;
+ 	struct pcie_port *pp;
+@@ -370,7 +370,7 @@ static const struct of_device_id spear13xx_pcie_of_match[] = {
+ };
+ MODULE_DEVICE_TABLE(of, spear13xx_pcie_of_match);
+ 
+-static struct platform_driver spear13xx_pcie_driver __initdata = {
++static struct platform_driver spear13xx_pcie_driver = {
+ 	.probe		= spear13xx_pcie_probe,
+ 	.driver = {
+ 		.name	= "spear-pcie",
+diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
+index 7d48ecae6695..788db48dbbad 100644
+--- a/drivers/pci/hotplug/cpci_hotplug_pci.c
++++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
+@@ -286,11 +286,12 @@ int cpci_configure_slot(struct slot *slot)
+ 	}
+ 	parent = slot->dev->bus;
+ 
+-	list_for_each_entry(dev, &parent->devices, bus_list)
++	list_for_each_entry(dev, &parent->devices, bus_list) {
+ 		if (PCI_SLOT(dev->devfn) != PCI_SLOT(slot->devfn))
+ 			continue;
+ 		if (pci_is_bridge(dev))
+ 			pci_hp_add_bridge(dev);
++	}
+ 
+ 
+ 	pci_assign_unassigned_bridge_resources(parent->self);
+diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
+index 3542150fc8a3..26b37b663a1d 100644
+--- a/drivers/pci/pci-acpi.c
++++ b/drivers/pci/pci-acpi.c
+@@ -248,6 +248,9 @@ int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
+ 	acpi_handle handle, phandle;
+ 	struct pci_bus *pbus;
+ 
++	if (acpi_pci_disabled)
++		return -ENODEV;
++
+ 	handle = NULL;
+ 	for (pbus = dev->bus; pbus; pbus = pbus->parent) {
+ 		handle = acpi_pci_get_bridge_handle(pbus);
+diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
+index c6849d9e86ce..167fe411ce2e 100644
+--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
++++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
+@@ -132,16 +132,8 @@ static const char *aer_agent_string[] = {
+ static void __print_tlp_header(struct pci_dev *dev,
+ 			       struct aer_header_log_regs *t)
+ {
+-	unsigned char *tlp = (unsigned char *)&t;
+-
+-	dev_err(&dev->dev, "  TLP Header:"
+-		" %02x%02x%02x%02x %02x%02x%02x%02x"
+-		" %02x%02x%02x%02x %02x%02x%02x%02x\n",
+-		*(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
+-		*(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4),
+-		*(tlp + 11), *(tlp + 10), *(tlp + 9),
+-		*(tlp + 8), *(tlp + 15), *(tlp + 14),
+-		*(tlp + 13), *(tlp + 12));
++	dev_err(&dev->dev, "  TLP Header: %08x %08x %08x %08x\n",
++		t->dw0, t->dw1, t->dw2, t->dw3);
+ }
+ 
+ static void __aer_print_error(struct pci_dev *dev,
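
The deleted __print_tlp_header() body cast &t — the address of the pointer argument itself — so the sixteen bytes it dumped came from the stack slot holding the pointer, not from the TLP header. A standalone reproduction of that class of bug; the struct and values are hypothetical:

#include <stdio.h>

struct hdr { unsigned int dw0, dw1, dw2, dw3; };

static void print_first_byte(struct hdr *t)
{
	unsigned char *wrong = (unsigned char *)&t;	/* address of the pointer */
	unsigned char *right = (unsigned char *)t;	/* address of the header */

	/* "wrong" walks the bytes of the pointer value, not the header. */
	printf("via &t: %02x  via t: %02x\n", wrong[0], right[0]);
}

int main(void)
{
	struct hdr h = { 0x4a000001, 0, 0, 0 };

	print_first_byte(&h);	/* "via t" prints 01 on little-endian */
	return 0;
}
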
+diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
+index 9cc047bc763b..7a56f443ffc0 100644
+--- a/drivers/scsi/be2iscsi/be_main.c
++++ b/drivers/scsi/be2iscsi/be_main.c
+@@ -5735,9 +5735,9 @@ free_port:
+ hba_free:
+ 	if (phba->msix_enabled)
+ 		pci_disable_msix(phba->pcidev);
+-	iscsi_host_remove(phba->shost);
+ 	pci_dev_put(phba->pcidev);
+ 	iscsi_host_free(phba->shost);
++	pci_set_drvdata(pcidev, NULL);
+ disable_pci:
+ 	pci_disable_device(pcidev);
+ 	return ret;
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index 17bb541f7cc2..71297017e499 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -1311,9 +1311,11 @@ scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
+ 				    "rejecting I/O to dead device\n");
+ 			ret = BLKPREP_KILL;
+ 			break;
+-		case SDEV_QUIESCE:
+ 		case SDEV_BLOCK:
+ 		case SDEV_CREATED_BLOCK:
++			ret = BLKPREP_DEFER;
++			break;
++		case SDEV_QUIESCE:
+ 			/*
+ 			 * If the devices is blocked we defer normal commands.
+ 			 */
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 8d27db47560c..22d67c034cdf 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -1185,7 +1185,7 @@ iscsit_handle_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ 	 * traditional iSCSI block I/O.
+ 	 */
+ 	if (iscsit_allocate_iovecs(cmd) < 0) {
+-		return iscsit_add_reject_cmd(cmd,
++		return iscsit_reject_cmd(cmd,
+ 				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
+ 	}
+ 	immed_data = cmd->immediate_data;
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index 4ddfa60c9222..6f8cf3a52861 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -247,8 +247,6 @@ static void n_tty_write_wakeup(struct tty_struct *tty)
+ 
+ static void n_tty_check_throttle(struct tty_struct *tty)
+ {
+-	if (tty->driver->type == TTY_DRIVER_TYPE_PTY)
+-		return;
+ 	/*
+ 	 * Check the remaining room for the input canonicalization
+ 	 * mode.  We don't want to throttle the driver if we're in
+@@ -1512,23 +1510,6 @@ n_tty_receive_char_lnext(struct tty_struct *tty, unsigned char c, char flag)
+ 		n_tty_receive_char_flagged(tty, c, flag);
+ }
+ 
+-/**
+- *	n_tty_receive_buf	-	data receive
+- *	@tty: terminal device
+- *	@cp: buffer
+- *	@fp: flag buffer
+- *	@count: characters
+- *
+- *	Called by the terminal driver when a block of characters has
+- *	been received. This function must be called from soft contexts
+- *	not from interrupt context. The driver is responsible for making
+- *	calls one at a time and in order (or using flush_to_ldisc)
+- *
+- *	n_tty_receive_buf()/producer path:
+- *		claims non-exclusive termios_rwsem
+- *		publishes read_head and canon_head
+- */
+-
+ static void
+ n_tty_receive_buf_real_raw(struct tty_struct *tty, const unsigned char *cp,
+ 			   char *fp, int count)
+@@ -1684,24 +1665,85 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ 	}
+ }
+ 
++/**
++ *	n_tty_receive_buf_common	-	process input
++ *	@tty: device to receive input
++ *	@cp: input chars
++ *	@fp: flags for each char (if NULL, all chars are TTY_NORMAL)
++ *	@count: number of input chars in @cp
++ *
++ *	Called by the terminal driver when a block of characters has
++ *	been received. This function must be called from soft contexts
++ *	not from interrupt context. The driver is responsible for making
++ *	calls one at a time and in order (or using flush_to_ldisc)
++ *
++ *	Returns the # of input chars from @cp which were processed.
++ *
++ *	In canonical mode, the maximum line length is 4096 chars (including
++ *	the line termination char); lines longer than 4096 chars are
++ *	truncated. After 4095 chars, input data is still processed but
++ *	not stored. Overflow processing ensures the tty can always
++ *	receive more input until at least one line can be read.
++ *
++ *	In non-canonical mode, the read buffer will only accept 4095 chars;
++ *	this provides the necessary space for a newline char if the input
++ *	mode is switched to canonical.
++ *
++ *	Note it is possible for the read buffer to _contain_ 4096 chars
++ *	in non-canonical mode: the read buffer could already contain the
++ *	maximum canon line of 4096 chars when the mode is switched to
++ *	non-canonical.
++ *
++ *	n_tty_receive_buf()/producer path:
++ *		claims non-exclusive termios_rwsem
++ *		publishes commit_head or canon_head
++ */
+ static int
+ n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp,
+ 			 char *fp, int count, int flow)
+ {
+ 	struct n_tty_data *ldata = tty->disc_data;
+-	int room, n, rcvd = 0;
++	int room, n, rcvd = 0, overflow;
+ 
+ 	down_read(&tty->termios_rwsem);
+ 
+ 	while (1) {
+-		room = receive_room(tty);
++		/*
++		 * When PARMRK is set, each input char may take up to 3 chars
++		 * in the read buf; reduce the buffer space avail by 3x
++		 *
++		 * If we are doing input canonicalization, and there are no
++		 * pending newlines, let characters through without limit, so
++		 * that erase characters will be handled.  Other excess
++		 * characters will be beeped.
++		 *
++		 * paired with store in *_copy_from_read_buf() -- guarantees
++		 * the consumer has loaded the data in read_buf up to the new
++		 * read_tail (so this producer will not overwrite unread data)
++		 */
++		size_t tail = ldata->read_tail;
++
++		room = N_TTY_BUF_SIZE - (ldata->read_head - tail);
++		if (I_PARMRK(tty))
++			room = (room + 2) / 3;
++		room--;
++		if (room <= 0) {
++			overflow = ldata->icanon && ldata->canon_head == tail;
++			if (overflow && room < 0)
++				ldata->read_head--;
++			room = overflow;
++			ldata->no_room = flow && !room;
++		} else
++			overflow = 0;
++
+ 		n = min(count, room);
+-		if (!n) {
+-			if (flow && !room)
+-				ldata->no_room = 1;
++		if (!n)
+ 			break;
+-		}
+-		__receive_buf(tty, cp, fp, n);
++
++		/* ignore parity errors if handling overflow */
++		if (!overflow || !fp || *fp != TTY_PARITY)
++			__receive_buf(tty, cp, fp, n);
++
+ 		cp += n;
+ 		if (fp)
+ 			fp += n;
+@@ -1710,7 +1752,17 @@ n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp,
+ 	}
+ 
+ 	tty->receive_room = room;
+-	n_tty_check_throttle(tty);
++
++	/* Unthrottle if handling overflow on pty */
++	if (tty->driver->type == TTY_DRIVER_TYPE_PTY) {
++		if (overflow) {
++			tty_set_flow_change(tty, TTY_UNTHROTTLE_SAFE);
++			tty_unthrottle_safe(tty);
++			__tty_set_flow_change(tty, 0);
++		}
++	} else
++		n_tty_check_throttle(tty);
++
+ 	up_read(&tty->termios_rwsem);
+ 
+ 	return rcvd;
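
The rewritten producer path computes its own room: the buffer size minus the head/tail distance, cut to a third under PARMRK (a marked character can expand to three bytes), minus one slot held in reserve. A standalone sketch of just that arithmetic; the canonical-mode overflow handling from the patch is omitted, and in the kernel head and tail are free-running counters so the subtraction wraps correctly:

#include <stdio.h>

#define N_TTY_BUF_SIZE 4096

static int receive_room(unsigned long head, unsigned long tail, int parmrk)
{
	int room = N_TTY_BUF_SIZE - (int)(head - tail);

	if (parmrk)		/* each input char may need up to 3 bytes */
		room = (room + 2) / 3;
	return room - 1;	/* keep one slot in reserve */
}

int main(void)
{
	printf("empty buffer:      %d\n", receive_room(0, 0, 0));	/* 4095 */
	printf("empty with PARMRK: %d\n", receive_room(0, 0, 1));	/* 1365 */
	printf("nearly full:       %d\n", receive_room(4000, 0, 0));	/* 95 */
	return 0;
}
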
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index e95c4971327b..0f63b93d8fc6 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -910,6 +910,9 @@ static void lpuart_setup_watermark(struct lpuart_port *sport)
+ 	writeb(val | UARTPFIFO_TXFE | UARTPFIFO_RXFE,
+ 			sport->port.membase + UARTPFIFO);
+ 
++	/* explicitly clear RDRF */
++	readb(sport->port.membase + UARTSR1);
++
+ 	/* flush Tx and Rx FIFO */
+ 	writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH,
+ 			sport->port.membase + UARTCFIFO);
+@@ -1095,6 +1098,8 @@ static int lpuart_startup(struct uart_port *port)
+ 	sport->txfifo_size = 0x1 << (((temp >> UARTPFIFO_TXSIZE_OFF) &
+ 		UARTPFIFO_FIFOSIZE_MASK) + 1);
+ 
++	sport->port.fifosize = sport->txfifo_size;
++
+ 	sport->rxfifo_size = 0x1 << (((temp >> UARTPFIFO_RXSIZE_OFF) &
+ 		UARTPFIFO_FIFOSIZE_MASK) + 1);
+ 
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index a7865c4b0498..0827d7c96527 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -387,6 +387,10 @@ static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
+ 		status = PORT_PLC;
+ 		port_change_bit = "link state";
+ 		break;
++	case USB_PORT_FEAT_C_PORT_CONFIG_ERROR:
++		status = PORT_CEC;
++		port_change_bit = "config error";
++		break;
+ 	default:
+ 		/* Should never happen */
+ 		return;
+@@ -588,6 +592,8 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
+ 			status |= USB_PORT_STAT_C_LINK_STATE << 16;
+ 		if ((raw_port_status & PORT_WRC))
+ 			status |= USB_PORT_STAT_C_BH_RESET << 16;
++		if ((raw_port_status & PORT_CEC))
++			status |= USB_PORT_STAT_C_CONFIG_ERROR << 16;
+ 	}
+ 
+ 	if (hcd->speed != HCD_USB3) {
+@@ -1005,6 +1011,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ 		case USB_PORT_FEAT_C_OVER_CURRENT:
+ 		case USB_PORT_FEAT_C_ENABLE:
+ 		case USB_PORT_FEAT_C_PORT_LINK_STATE:
++		case USB_PORT_FEAT_C_PORT_CONFIG_ERROR:
+ 			xhci_clear_port_change_bit(xhci, wValue, wIndex,
+ 					port_array[wIndex], temp);
+ 			break;
+@@ -1069,7 +1076,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
+ 	 */
+ 	status = bus_state->resuming_ports;
+ 
+-	mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC;
++	mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC;
+ 
+ 	spin_lock_irqsave(&xhci->lock, flags);
+ 	/* For each port, did anything change?  If so, set that bit in buf. */
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index fd53c9ebd662..2af32e26fafc 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -115,6 +115,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
+ 		xhci->quirks |= XHCI_LPM_SUPPORT;
+ 		xhci->quirks |= XHCI_INTEL_HOST;
++		xhci->quirks |= XHCI_AVOID_BEI;
+ 	}
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ 			pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
+@@ -130,7 +131,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 		 * PPT chipsets.
+ 		 */
+ 		xhci->quirks |= XHCI_SPURIOUS_REBOOT;
+-		xhci->quirks |= XHCI_AVOID_BEI;
+ 	}
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ 		pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 3086dec0ef53..8eb68a31cab6 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -604,6 +604,7 @@ static const struct usb_device_id id_table_combined[] = {
+ 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
+ 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
++	{ USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
+ 	/*
+ 	 * ELV devices:
+ 	 */
+@@ -1883,8 +1884,12 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
+ {
+ 	struct usb_device *udev = serial->dev;
+ 
+-	if ((udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) ||
+-	    (udev->product && !strcmp(udev->product, "BeagleBone/XDS100V2")))
++	if (udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems"))
++		return ftdi_jtag_probe(serial);
++
++	if (udev->product &&
++		(!strcmp(udev->product, "BeagleBone/XDS100V2") ||
++		 !strcmp(udev->product, "SNAP Connect E10")))
+ 		return ftdi_jtag_probe(serial);
+ 
+ 	return 0;
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 56b1b55c4751..4e4f46f3c89c 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -561,6 +561,12 @@
+  */
+ #define FTDI_NT_ORIONLXM_PID	0x7c90	/* OrionLXm Substation Automation Platform */
+ 
++/*
++ * Synapse Wireless product ids (FTDI_VID)
++ * http://www.synapse-wireless.com
++ */
++#define FTDI_SYNAPSE_SS200_PID 0x9090 /* SS200 - SNAP Stick 200 */
++
+ 
+ /********************************/
+ /** third-party VID/PID combos **/
+diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
+index dd97d8b572c3..4f7e072e4e00 100644
+--- a/drivers/usb/serial/keyspan_pda.c
++++ b/drivers/usb/serial/keyspan_pda.c
+@@ -61,6 +61,7 @@ struct keyspan_pda_private {
+ /* For Xircom PGSDB9 and older Entrega version of the same device */
+ #define XIRCOM_VENDOR_ID		0x085a
+ #define XIRCOM_FAKE_ID			0x8027
++#define XIRCOM_FAKE_ID_2		0x8025 /* "PGMFHUB" serial */
+ #define ENTREGA_VENDOR_ID		0x1645
+ #define ENTREGA_FAKE_ID			0x8093
+ 
+@@ -70,6 +71,7 @@ static const struct usb_device_id id_table_combined[] = {
+ #endif
+ #ifdef XIRCOM
+ 	{ USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) },
++	{ USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID_2) },
+ 	{ USB_DEVICE(ENTREGA_VENDOR_ID, ENTREGA_FAKE_ID) },
+ #endif
+ 	{ USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_ID) },
+@@ -93,6 +95,7 @@ static const struct usb_device_id id_table_fake[] = {
+ #ifdef XIRCOM
+ static const struct usb_device_id id_table_fake_xircom[] = {
+ 	{ USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) },
++	{ USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID_2) },
+ 	{ USB_DEVICE(ENTREGA_VENDOR_ID, ENTREGA_FAKE_ID) },
+ 	{ }
+ };
+diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
+index b812462083fc..94d96809e686 100644
+--- a/drivers/xen/Kconfig
++++ b/drivers/xen/Kconfig
+@@ -55,6 +55,23 @@ config XEN_BALLOON_MEMORY_HOTPLUG
+ 
+ 	  In that case step 3 should be omitted.
+ 
++config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
++	int "Hotplugged memory limit (in GiB) for a PV guest"
++	default 512 if X86_64
++	default 4 if X86_32
++	range 0 64 if X86_32
++	depends on XEN_HAVE_PVMMU
++	depends on XEN_BALLOON_MEMORY_HOTPLUG
++	help
++	  Maximum amount of memory (in GiB) that a PV guest can be
++	  expanded to when using memory hotplug.
++
++	  A PV guest can have more memory than this limit if it is
++	  started with a larger maximum.
++
++	  This value is used to allocate enough space in internal
++	  tables needed for physical memory administration.
++
+ config XEN_SCRUB_PAGES
+ 	bool "Scrub pages before returning them to system"
+ 	depends on XEN_BALLOON
+diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
+index 3860d02729dc..a325814341b7 100644
+--- a/drivers/xen/balloon.c
++++ b/drivers/xen/balloon.c
+@@ -230,6 +230,29 @@ static enum bp_state reserve_additional_memory(long credit)
+ 	balloon_hotplug = round_up(balloon_hotplug, PAGES_PER_SECTION);
+ 	nid = memory_add_physaddr_to_nid(hotplug_start_paddr);
+ 
++#ifdef CONFIG_XEN_HAVE_PVMMU
++        /*
++         * add_memory() will build page tables for the new memory so
++         * the p2m must contain invalid entries so the correct
++         * non-present PTEs will be written.
++         *
++         * If a failure occurs, the original (identity) p2m entries
++         * are not restored since this region is now known not to
++         * conflict with any devices.
++         */
++	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++		unsigned long pfn, i;
++
++		pfn = PFN_DOWN(hotplug_start_paddr);
++		for (i = 0; i < balloon_hotplug; i++) {
++			if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
++				pr_warn("set_phys_to_machine() failed, no memory added\n");
++				return BP_ECANCELED;
++			}
++                }
++	}
++#endif
++
+ 	rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT);
+ 
+ 	if (rc) {
+diff --git a/fs/aio.c b/fs/aio.c
+index c428871f1093..ebd0e9bf5323 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -740,6 +740,9 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
+ err_cleanup:
+ 	aio_nr_sub(ctx->max_reqs);
+ err_ctx:
++	atomic_set(&ctx->dead, 1);
++	if (ctx->mmap_size)
++		vm_munmap(ctx->mmap_base, ctx->mmap_size);
+ 	aio_free_ring(ctx);
+ err:
+ 	mutex_unlock(&ctx->ring_lock);
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 069ab24badaa..ef64903d6cb3 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -1266,21 +1266,13 @@ out:
+ }
+ 
+ static int insert_orphan_item(struct btrfs_trans_handle *trans,
+-			      struct btrfs_root *root, u64 offset)
++			      struct btrfs_root *root, u64 ino)
+ {
+ 	int ret;
+-	struct btrfs_path *path;
+-
+-	path = btrfs_alloc_path();
+-	if (!path)
+-		return -ENOMEM;
+ 
+-	ret = btrfs_find_item(root, path, BTRFS_ORPHAN_OBJECTID,
+-			offset, BTRFS_ORPHAN_ITEM_KEY, NULL);
+-	if (ret > 0)
+-		ret = btrfs_insert_orphan_item(trans, root, offset);
+-
+-	btrfs_free_path(path);
++	ret = btrfs_insert_orphan_item(trans, root, ino);
++	if (ret == -EEXIST)
++		ret = 0;
+ 
+ 	return ret;
+ }
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 74f12877493a..3e30d920f85a 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -1829,6 +1829,7 @@ refind_writable:
+ 			cifsFileInfo_put(inv_file);
+ 			spin_lock(&cifs_file_list_lock);
+ 			++refind;
++			inv_file = NULL;
+ 			goto refind_writable;
+ 		}
+ 	}
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 96b5d40a2ece..eab05e1aa587 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -684,7 +684,8 @@ smb2_clone_range(const unsigned int xid,
+ 
+ 			/* No need to change MaxChunks since already set to 1 */
+ 			chunk_sizes_updated = true;
+-		}
++		} else
++			goto cchunk_out;
+ 	}
+ 
+ cchunk_out:
+diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
+index 36b369697a13..5e7af1c69577 100644
+--- a/fs/ext4/indirect.c
++++ b/fs/ext4/indirect.c
+@@ -1393,10 +1393,7 @@ end_range:
+ 				 * to free. Everything was covered by the start
+ 				 * of the range.
+ 				 */
+-				return 0;
+-			} else {
+-				/* Shared branch grows from an indirect block */
+-				partial2--;
++				goto do_indirects;
+ 			}
+ 		} else {
+ 			/*
+@@ -1427,56 +1424,96 @@ end_range:
+ 	/* Punch happened within the same level (n == n2) */
+ 	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
+ 	partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
+-	/*
+-	 * ext4_find_shared returns Indirect structure which
+-	 * points to the last element which should not be
+-	 * removed by truncate. But this is end of the range
+-	 * in punch_hole so we need to point to the next element
+-	 */
+-	partial2->p++;
+-	while ((partial > chain) || (partial2 > chain2)) {
+-		/* We're at the same block, so we're almost finished */
+-		if ((partial->bh && partial2->bh) &&
+-		    (partial->bh->b_blocknr == partial2->bh->b_blocknr)) {
+-			if ((partial > chain) && (partial2 > chain2)) {
++
++	/* Free top, but only if partial2 isn't its subtree. */
++	if (nr) {
++		int level = min(partial - chain, partial2 - chain2);
++		int i;
++		int subtree = 1;
++
++		for (i = 0; i <= level; i++) {
++			if (offsets[i] != offsets2[i]) {
++				subtree = 0;
++				break;
++			}
++		}
++
++		if (!subtree) {
++			if (partial == chain) {
++				/* Shared branch grows from the inode */
++				ext4_free_branches(handle, inode, NULL,
++						   &nr, &nr+1,
++						   (chain+n-1) - partial);
++				*partial->p = 0;
++			} else {
++				/* Shared branch grows from an indirect block */
++				BUFFER_TRACE(partial->bh, "get_write_access");
+ 				ext4_free_branches(handle, inode, partial->bh,
+-						   partial->p + 1,
+-						   partial2->p,
++						   partial->p,
++						   partial->p+1,
+ 						   (chain+n-1) - partial);
+-				BUFFER_TRACE(partial->bh, "call brelse");
+-				brelse(partial->bh);
+-				BUFFER_TRACE(partial2->bh, "call brelse");
+-				brelse(partial2->bh);
+ 			}
+-			return 0;
+ 		}
++	}
++
++	if (!nr2) {
+ 		/*
+-		 * Clear the ends of indirect blocks on the shared branch
+-		 * at the start of the range
++		 * ext4_find_shared returns Indirect structure which
++		 * points to the last element which should not be
++		 * removed by truncate. But this is end of the range
++		 * in punch_hole so we need to point to the next element
+ 		 */
+-		if (partial > chain) {
++		partial2->p++;
++	}
++
++	while (partial > chain || partial2 > chain2) {
++		int depth = (chain+n-1) - partial;
++		int depth2 = (chain2+n2-1) - partial2;
++
++		if (partial > chain && partial2 > chain2 &&
++		    partial->bh->b_blocknr == partial2->bh->b_blocknr) {
++			/*
++			 * We've converged on the same block. Clear the range,
++			 * then we're done.
++			 */
+ 			ext4_free_branches(handle, inode, partial->bh,
+-				   partial->p + 1,
+-				   (__le32 *)partial->bh->b_data+addr_per_block,
+-				   (chain+n-1) - partial);
++					   partial->p + 1,
++					   partial2->p,
++					   (chain+n-1) - partial);
+ 			BUFFER_TRACE(partial->bh, "call brelse");
+ 			brelse(partial->bh);
+-			partial--;
++			BUFFER_TRACE(partial2->bh, "call brelse");
++			brelse(partial2->bh);
++			return 0;
+ 		}
++
+ 		/*
+-		 * Clear the ends of indirect blocks on the shared branch
+-		 * at the end of the range
++		 * The start and end partial branches may not be at the same
++		 * level even though the punch happened within one level. So, we
++		 * give them a chance to arrive at the same level, then walk
++		 * them in step with each other until we converge on the same
++		 * block.
+ 		 */
+-		if (partial2 > chain2) {
++		if (partial > chain && depth <= depth2) {
++			ext4_free_branches(handle, inode, partial->bh,
++					   partial->p + 1,
++					   (__le32 *)partial->bh->b_data+addr_per_block,
++					   (chain+n-1) - partial);
++			BUFFER_TRACE(partial->bh, "call brelse");
++			brelse(partial->bh);
++			partial--;
++		}
++		if (partial2 > chain2 && depth2 <= depth) {
+ 			ext4_free_branches(handle, inode, partial2->bh,
+ 					   (__le32 *)partial2->bh->b_data,
+ 					   partial2->p,
+-					   (chain2+n-1) - partial2);
++					   (chain2+n2-1) - partial2);
+ 			BUFFER_TRACE(partial2->bh, "call brelse");
+ 			brelse(partial2->bh);
+ 			partial2--;
+ 		}
+ 	}
++	return 0;
+ 
+ do_indirects:
+ 	/* Kill the remaining (whole) subtrees */
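
The loop above first lets whichever partial chain sits deeper in the tree walk up on its own, then steps both in lockstep until they meet on a shared indirect block. A toy model of that convergence walk over two made-up root-to-leaf index paths; the node ids are hypothetical, and ext4 compares buffer block numbers rather than ids:

#include <stdio.h>

static int chain[] = { 1, 2, 4 };	/* start-of-range branch, depth 3 */
static int chain2[] = { 1, 3 };		/* end-of-range branch, depth 2 */

int main(void)
{
	int i = 2, j = 1;	/* cursors start at the leaf ends */

	while (i > 0 || j > 0) {
		int di = 2 - i, dj = 1 - j;	/* distance walked so far */

		if (i > 0 && j > 0 && chain[i] == chain2[j]) {
			printf("converged on node %d; clear between and stop\n",
			       chain[i]);
			return 0;
		}
		/* Let the deeper branch catch up, then walk both in step. */
		if (i > 0 && di <= dj)
			printf("free right of start node %d\n", chain[i--]);
		if (j > 0 && dj <= di)
			printf("free left of end node %d\n", chain2[j--]);
	}
	printf("reached the roots; remaining subtrees go to do_indirects\n");
	return 0;
}
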
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 1685b82a9ccd..7cfb905a1e90 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -3228,7 +3228,7 @@ alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
+ 	} else
+ 		nfs4_free_openowner(&oo->oo_owner);
+ 	spin_unlock(&clp->cl_lock);
+-	return oo;
++	return ret;
+ }
+ 
+ static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
+@@ -5065,7 +5065,7 @@ alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
+ 	} else
+ 		nfs4_free_lockowner(&lo->lo_owner);
+ 	spin_unlock(&clp->cl_lock);
+-	return lo;
++	return ret;
+ }
+ 
+ static void
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index 3950693dd0f6..7e5aa3908a84 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -2374,10 +2374,14 @@ out_dio:
+ 	/* buffered aio wouldn't have proper lock coverage today */
+ 	BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
+ 
++	if (unlikely(written <= 0))
++		goto no_sync;
++
+ 	if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
+ 	    ((file->f_flags & O_DIRECT) && !direct_io)) {
+-		ret = filemap_fdatawrite_range(file->f_mapping, *ppos,
+-					       *ppos + count - 1);
++		ret = filemap_fdatawrite_range(file->f_mapping,
++					       iocb->ki_pos - written,
++					       iocb->ki_pos - 1);
+ 		if (ret < 0)
+ 			written = ret;
+ 
+@@ -2388,10 +2392,12 @@ out_dio:
+ 		}
+ 
+ 		if (!ret)
+-			ret = filemap_fdatawait_range(file->f_mapping, *ppos,
+-						      *ppos + count - 1);
++			ret = filemap_fdatawait_range(file->f_mapping,
++						      iocb->ki_pos - written,
++						      iocb->ki_pos - 1);
+ 	}
+ 
++no_sync:
+ 	/*
+ 	 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
+ 	 * function pointer which is called when o_direct io completes so that
+diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
+index 13e974e6a889..3dd03af5310d 100644
+--- a/fs/xfs/xfs_file.c
++++ b/fs/xfs/xfs_file.c
+@@ -360,7 +360,8 @@ STATIC int				/* error (positive) */
+ xfs_zero_last_block(
+ 	struct xfs_inode	*ip,
+ 	xfs_fsize_t		offset,
+-	xfs_fsize_t		isize)
++	xfs_fsize_t		isize,
++	bool			*did_zeroing)
+ {
+ 	struct xfs_mount	*mp = ip->i_mount;
+ 	xfs_fileoff_t		last_fsb = XFS_B_TO_FSBT(mp, isize);
+@@ -388,6 +389,7 @@ xfs_zero_last_block(
+ 	zero_len = mp->m_sb.sb_blocksize - zero_offset;
+ 	if (isize + zero_len > offset)
+ 		zero_len = offset - isize;
++	*did_zeroing = true;
+ 	return xfs_iozero(ip, isize, zero_len);
+ }
+ 
+@@ -406,7 +408,8 @@ int					/* error (positive) */
+ xfs_zero_eof(
+ 	struct xfs_inode	*ip,
+ 	xfs_off_t		offset,		/* starting I/O offset */
+-	xfs_fsize_t		isize)		/* current inode size */
++	xfs_fsize_t		isize,		/* current inode size */
++	bool			*did_zeroing)
+ {
+ 	struct xfs_mount	*mp = ip->i_mount;
+ 	xfs_fileoff_t		start_zero_fsb;
+@@ -428,7 +431,7 @@ xfs_zero_eof(
+ 	 * We only zero a part of that block so it is handled specially.
+ 	 */
+ 	if (XFS_B_FSB_OFFSET(mp, isize) != 0) {
+-		error = xfs_zero_last_block(ip, offset, isize);
++		error = xfs_zero_last_block(ip, offset, isize, did_zeroing);
+ 		if (error)
+ 			return error;
+ 	}
+@@ -488,6 +491,7 @@ xfs_zero_eof(
+ 		if (error)
+ 			return error;
+ 
++		*did_zeroing = true;
+ 		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
+ 		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
+ 	}
+@@ -526,13 +530,15 @@ restart:
+ 	 * having to redo all checks before.
+ 	 */
+ 	if (*pos > i_size_read(inode)) {
++		bool	zero = false;
++
+ 		if (*iolock == XFS_IOLOCK_SHARED) {
+ 			xfs_rw_iunlock(ip, *iolock);
+ 			*iolock = XFS_IOLOCK_EXCL;
+ 			xfs_rw_ilock(ip, *iolock);
+ 			goto restart;
+ 		}
+-		error = xfs_zero_eof(ip, *pos, i_size_read(inode));
++		error = xfs_zero_eof(ip, *pos, i_size_read(inode), &zero);
+ 		if (error)
+ 			return error;
+ 	}
+diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
+index 4ed2ba9342dc..9176c4e728e1 100644
+--- a/fs/xfs/xfs_inode.h
++++ b/fs/xfs/xfs_inode.h
+@@ -377,8 +377,9 @@ int		xfs_droplink(struct xfs_trans *, struct xfs_inode *);
+ int		xfs_bumplink(struct xfs_trans *, struct xfs_inode *);
+ 
+ /* from xfs_file.c */
+-int		xfs_zero_eof(struct xfs_inode *, xfs_off_t, xfs_fsize_t);
+-int		xfs_iozero(struct xfs_inode *, loff_t, size_t);
++int	xfs_zero_eof(struct xfs_inode *ip, xfs_off_t offset,
++		     xfs_fsize_t isize, bool *did_zeroing);
++int	xfs_iozero(struct xfs_inode *ip, loff_t pos, size_t count);
+ 
+ 
+ #define IHOLD(ip) \
+diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
+index c50311cae1b1..17d057c8eb43 100644
+--- a/fs/xfs/xfs_iops.c
++++ b/fs/xfs/xfs_iops.c
+@@ -741,6 +741,7 @@ xfs_setattr_size(
+ 	int			error;
+ 	uint			lock_flags = 0;
+ 	uint			commit_flags = 0;
++	bool			did_zeroing = false;
+ 
+ 	trace_xfs_setattr(ip);
+ 
+@@ -784,20 +785,16 @@ xfs_setattr_size(
+ 		return error;
+ 
+ 	/*
+-	 * Now we can make the changes.  Before we join the inode to the
+-	 * transaction, take care of the part of the truncation that must be
+-	 * done without the inode lock.  This needs to be done before joining
+-	 * the inode to the transaction, because the inode cannot be unlocked
+-	 * once it is a part of the transaction.
++	 * File data changes must be complete before we start the transaction to
++	 * modify the inode.  This needs to be done before joining the inode to
++	 * the transaction because the inode cannot be unlocked once it is a
++	 * part of the transaction.
++	 *
++	 * Start with zeroing any data block beyond EOF that we may expose on
++	 * file extension.
+ 	 */
+ 	if (newsize > oldsize) {
+-		/*
+-		 * Do the first part of growing a file: zero any data in the
+-		 * last block that is beyond the old EOF.  We need to do this
+-		 * before the inode is joined to the transaction to modify
+-		 * i_size.
+-		 */
+-		error = xfs_zero_eof(ip, newsize, oldsize);
++		error = xfs_zero_eof(ip, newsize, oldsize, &did_zeroing);
+ 		if (error)
+ 			return error;
+ 	}
+@@ -807,23 +804,18 @@ xfs_setattr_size(
+ 	 * any previous writes that are beyond the on disk EOF and the new
+ 	 * EOF that have not been written out need to be written here.  If we
+ 	 * do not write the data out, we expose ourselves to the null files
+-	 * problem.
+-	 *
+-	 * Only flush from the on disk size to the smaller of the in memory
+-	 * file size or the new size as that's the range we really care about
+-	 * here and prevents waiting for other data not within the range we
+-	 * care about here.
++	 * problem. Note that this includes any block zeroing we did above;
++	 * otherwise those blocks may not be zeroed after a crash.
+ 	 */
+-	if (oldsize != ip->i_d.di_size && newsize > ip->i_d.di_size) {
++	if (newsize > ip->i_d.di_size &&
++	    (oldsize != ip->i_d.di_size || did_zeroing)) {
+ 		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
+ 						      ip->i_d.di_size, newsize);
+ 		if (error)
+ 			return error;
+ 	}
+ 
+-	/*
+-	 * Wait for all direct I/O to complete.
+-	 */
++	/* Now wait for all direct I/O to complete. */
+ 	inode_dio_wait(inode);
+ 
+ 	/*
+diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
+index c294e3e25e37..a1b25e35ea5f 100644
+--- a/include/linux/blk_types.h
++++ b/include/linux/blk_types.h
+@@ -181,7 +181,9 @@ enum rq_flag_bits {
+ 	__REQ_ELVPRIV,		/* elevator private data attached */
+ 	__REQ_FAILED,		/* set if the request failed */
+ 	__REQ_QUIET,		/* don't worry about errors */
+-	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
++	__REQ_PREEMPT,		/* set for "ide_preempt" requests and also
++				   for requests for which the SCSI "quiesce"
++				   state must be ignored. */
+ 	__REQ_ALLOCED,		/* request came from our alloc pool */
+ 	__REQ_COPY_USER,	/* contains copies of user pages */
+ 	__REQ_FLUSH_SEQ,	/* request for flush sequence */
+diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
+index ab70f3bc44ad..948df62ee5b7 100644
+--- a/include/linux/cpuidle.h
++++ b/include/linux/cpuidle.h
+@@ -68,7 +68,6 @@ struct cpuidle_device {
+ 	unsigned int		cpu;
+ 
+ 	int			last_residency;
+-	int			state_count;
+ 	struct cpuidle_state_usage	states_usage[CPUIDLE_STATE_MAX];
+ 	struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
+ 	struct cpuidle_driver_kobj *kobj_driver;
+diff --git a/include/linux/lcm.h b/include/linux/lcm.h
+index 7bf01d779b45..1ce79a7f1daa 100644
+--- a/include/linux/lcm.h
++++ b/include/linux/lcm.h
+@@ -4,5 +4,6 @@
+ #include <linux/compiler.h>
+ 
+ unsigned long lcm(unsigned long a, unsigned long b) __attribute_const__;
++unsigned long lcm_not_zero(unsigned long a, unsigned long b) __attribute_const__;
+ 
+ #endif /* _LCM_H */
+diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h
+index c57d8ea0716c..59a7889e15db 100644
+--- a/include/linux/sunrpc/debug.h
++++ b/include/linux/sunrpc/debug.h
+@@ -60,17 +60,17 @@ struct rpc_xprt;
+ #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
+ void		rpc_register_sysctl(void);
+ void		rpc_unregister_sysctl(void);
+-int		sunrpc_debugfs_init(void);
++void		sunrpc_debugfs_init(void);
+ void		sunrpc_debugfs_exit(void);
+-int		rpc_clnt_debugfs_register(struct rpc_clnt *);
++void		rpc_clnt_debugfs_register(struct rpc_clnt *);
+ void		rpc_clnt_debugfs_unregister(struct rpc_clnt *);
+-int		rpc_xprt_debugfs_register(struct rpc_xprt *);
++void		rpc_xprt_debugfs_register(struct rpc_xprt *);
+ void		rpc_xprt_debugfs_unregister(struct rpc_xprt *);
+ #else
+-static inline int
++static inline void
+ sunrpc_debugfs_init(void)
+ {
+-	return 0;
++	return;
+ }
+ 
+ static inline void
+@@ -79,10 +79,10 @@ sunrpc_debugfs_exit(void)
+ 	return;
+ }
+ 
+-static inline int
++static inline void
+ rpc_clnt_debugfs_register(struct rpc_clnt *clnt)
+ {
+-	return 0;
++	return;
+ }
+ 
+ static inline void
+@@ -91,10 +91,10 @@ rpc_clnt_debugfs_unregister(struct rpc_clnt *clnt)
+ 	return;
+ }
+ 
+-static inline int
++static inline void
+ rpc_xprt_debugfs_register(struct rpc_xprt *xprt)
+ {
+-	return 0;
++	return;
+ }
+ 
+ static inline void
+diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
+index 0c40c16174b4..a314afdbeb5c 100644
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -955,25 +955,6 @@ static void mark_nosave_pages(struct memory_bitmap *bm)
+ 	}
+ }
+ 
+-static bool is_nosave_page(unsigned long pfn)
+-{
+-	struct nosave_region *region;
+-
+-	list_for_each_entry(region, &nosave_regions, list) {
+-		if (pfn >= region->start_pfn && pfn < region->end_pfn) {
+-			pr_err("PM: %#010llx in e820 nosave region: "
+-			       "[mem %#010llx-%#010llx]\n",
+-			       (unsigned long long) pfn << PAGE_SHIFT,
+-			       (unsigned long long) region->start_pfn << PAGE_SHIFT,
+-			       ((unsigned long long) region->end_pfn << PAGE_SHIFT)
+-					- 1);
+-			return true;
+-		}
+-	}
+-
+-	return false;
+-}
+-
+ /**
+  *	create_basic_memory_bitmaps - create bitmaps needed for marking page
+  *	frames that should not be saved and free page frames.  The pointers
+@@ -2039,7 +2020,7 @@ static int mark_unsafe_pages(struct memory_bitmap *bm)
+ 	do {
+ 		pfn = memory_bm_next_pfn(bm);
+ 		if (likely(pfn != BM_END_OF_MAP)) {
+-			if (likely(pfn_valid(pfn)) && !is_nosave_page(pfn))
++			if (likely(pfn_valid(pfn)))
+ 				swsusp_set_page_free(pfn_to_page(pfn));
+ 			else
+ 				return -EFAULT;
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 44dfc8b46bd0..d400c827a94e 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -3079,6 +3079,8 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
+ 	} else {
+ 		if (dl_prio(oldprio))
+ 			p->dl.dl_boosted = 0;
++		if (rt_prio(oldprio))
++			p->rt.timeout = 0;
+ 		p->sched_class = &fair_sched_class;
+ 	}
+ 
+diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
+index eb682d5c697c..6aac4beedbbe 100644
+--- a/kernel/time/tick-broadcast-hrtimer.c
++++ b/kernel/time/tick-broadcast-hrtimer.c
+@@ -49,6 +49,7 @@ static void bc_set_mode(enum clock_event_mode mode,
+  */
+ static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
+ {
++	int bc_moved;
+ 	/*
+ 	 * We try to cancel the timer first. If the callback is on
+ 	 * flight on some other cpu then we let it handle it. If we
+@@ -60,9 +61,15 @@ static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
+ 	 * restart the timer because we are in the callback, but we
+ 	 * can set the expiry time and let the callback return
+ 	 * HRTIMER_RESTART.
++	 *
++	 * Since we are in the idle loop at this point and because
++	 * hrtimer_{start/cancel} functions call into tracing,
++	 * calls to these functions must be bound within RCU_NONIDLE.
+ 	 */
+-	if (hrtimer_try_to_cancel(&bctimer) >= 0) {
+-		hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED);
++	RCU_NONIDLE(bc_moved = (hrtimer_try_to_cancel(&bctimer) >= 0) ?
++		!hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED) :
++			0);
++	if (bc_moved) {
+ 		/* Bind the "device" to the cpu */
+ 		bc->bound_on = smp_processor_id();
+ 	} else if (bc->bound_on == smp_processor_id()) {
+diff --git a/lib/lcm.c b/lib/lcm.c
+index 51cc6b13cd52..399643c20880 100644
+--- a/lib/lcm.c
++++ b/lib/lcm.c
+@@ -12,3 +12,14 @@ unsigned long lcm(unsigned long a, unsigned long b)
+ 		return 0;
+ }
+ EXPORT_SYMBOL_GPL(lcm);
++
++unsigned long lcm_not_zero(unsigned long a, unsigned long b)
++{
++	unsigned long l = lcm(a, b);
++
++	if (l)
++		return l;
++
++	return (b ? : a);
++}
++EXPORT_SYMBOL_GPL(lcm_not_zero);
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index 9fab10795bea..65842d688b7c 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1092,6 +1092,10 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
+ 			return NULL;
+ 
+ 		arch_refresh_nodedata(nid, pgdat);
++	} else {
++		/* Reset the nr_zones and classzone_idx to 0 before reuse */
++		pgdat->nr_zones = 0;
++		pgdat->classzone_idx = 0;
+ 	}
+ 
+ 	/* we can use NODE_DATA(nid) from here */
+@@ -1977,15 +1981,6 @@ void try_offline_node(int nid)
+ 		if (is_vmalloc_addr(zone->wait_table))
+ 			vfree(zone->wait_table);
+ 	}
+-
+-	/*
+-	 * Since there is no way to guarentee the address of pgdat/zone is not
+-	 * on stack of any kernel threads or used by other kernel objects
+-	 * without reference counting or other symchronizing method, do not
+-	 * reset node_data and free pgdat here. Just reset it to 0 and reuse
+-	 * the memory when the node is online again.
+-	 */
+-	memset(pgdat, 0, sizeof(*pgdat));
+ }
+ EXPORT_SYMBOL(try_offline_node);
+ 
+diff --git a/mm/mmap.c b/mm/mmap.c
+index e5cc3ca1d869..0bc66f1e9637 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -780,10 +780,8 @@ again:			remove_next = 1 + (end > next->vm_end);
+ 
+ 			importer->anon_vma = exporter->anon_vma;
+ 			error = anon_vma_clone(importer, exporter);
+-			if (error) {
+-				importer->anon_vma = NULL;
++			if (error)
+ 				return error;
+-			}
+ 		}
+ 	}
+ 
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index 6f4335238e33..f24d4c9217a7 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -857,8 +857,11 @@ static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
+ 	 *                   bw * elapsed + write_bandwidth * (period - elapsed)
+ 	 * write_bandwidth = ---------------------------------------------------
+ 	 *                                          period
++	 *
++	 * @written may have decreased due to account_page_redirty().
++	 * Avoid underflowing @bw calculation.
+ 	 */
+-	bw = written - bdi->written_stamp;
++	bw = written - min(written, bdi->written_stamp);
+ 	bw *= HZ;
+ 	if (unlikely(elapsed > period)) {
+ 		do_div(bw, elapsed);
+@@ -922,7 +925,7 @@ static void global_update_bandwidth(unsigned long thresh,
+ 				    unsigned long now)
+ {
+ 	static DEFINE_SPINLOCK(dirty_lock);
+-	static unsigned long update_time;
++	static unsigned long update_time = INITIAL_JIFFIES;
+ 
+ 	/*
+ 	 * check locklessly first to optimize away locking for the most time
+diff --git a/mm/page_isolation.c b/mm/page_isolation.c
+index 72f5ac381ab3..755a42c76eb4 100644
+--- a/mm/page_isolation.c
++++ b/mm/page_isolation.c
+@@ -103,6 +103,7 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype)
+ 
+ 			if (!is_migrate_isolate_page(buddy)) {
+ 				__isolate_free_page(page, order);
++				kernel_map_pages(page, (1 << order), 1);
+ 				set_page_refcounted(page);
+ 				isolated_page = page;
+ 			}
+diff --git a/mm/rmap.c b/mm/rmap.c
+index 71cd5bd0c17d..ecb444a2ad50 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -287,6 +287,13 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
+ 	return 0;
+ 
+  enomem_failure:
++	/*
++	 * dst->anon_vma is dropped here otherwise its degree can be incorrectly
++	 * decremented in unlink_anon_vmas().
++	 * We can safely do this because callers of anon_vma_clone() don't care
++	 * about dst->anon_vma if anon_vma_clone() failed.
++	 */
++	dst->anon_vma = NULL;
+ 	unlink_anon_vmas(dst);
+ 	return -ENOMEM;
+ }
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index 33a2f201e460..74d30ec7b234 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -484,7 +484,7 @@ static int ceph_tcp_connect(struct ceph_connection *con)
+ 			       IPPROTO_TCP, &sock);
+ 	if (ret)
+ 		return ret;
+-	sock->sk->sk_allocation = GFP_NOFS | __GFP_MEMALLOC;
++	sock->sk->sk_allocation = GFP_NOFS;
+ 
+ #ifdef CONFIG_LOCKDEP
+ 	lockdep_set_class(&sock->sk->sk_lock, &socket_class);
+@@ -510,8 +510,6 @@ static int ceph_tcp_connect(struct ceph_connection *con)
+ 		return ret;
+ 	}
+ 
+-	sk_set_memalloc(sock->sk);
+-
+ 	con->sock = sock;
+ 	return 0;
+ }
+@@ -2798,11 +2796,8 @@ static void con_work(struct work_struct *work)
+ {
+ 	struct ceph_connection *con = container_of(work, struct ceph_connection,
+ 						   work.work);
+-	unsigned long pflags = current->flags;
+ 	bool fault;
+ 
+-	current->flags |= PF_MEMALLOC;
+-
+ 	mutex_lock(&con->mutex);
+ 	while (true) {
+ 		int ret;
+@@ -2856,8 +2851,6 @@ static void con_work(struct work_struct *work)
+ 		con_fault_finish(con);
+ 
+ 	con->ops->put(con);
+-
+-	tsk_restore_flags(current, pflags, PF_MEMALLOC);
+ }
+ 
+ /*
+diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
+index a48bad468880..7702978a4c99 100644
+--- a/net/mac80211/agg-rx.c
++++ b/net/mac80211/agg-rx.c
+@@ -49,8 +49,6 @@ static void ieee80211_free_tid_rx(struct rcu_head *h)
+ 		container_of(h, struct tid_ampdu_rx, rcu_head);
+ 	int i;
+ 
+-	del_timer_sync(&tid_rx->reorder_timer);
+-
+ 	for (i = 0; i < tid_rx->buf_size; i++)
+ 		__skb_queue_purge(&tid_rx->reorder_buf[i]);
+ 	kfree(tid_rx->reorder_buf);
+@@ -93,6 +91,12 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
+ 
+ 	del_timer_sync(&tid_rx->session_timer);
+ 
++	/* make sure ieee80211_sta_reorder_release() doesn't re-arm the timer */
++	spin_lock_bh(&tid_rx->reorder_lock);
++	tid_rx->removed = true;
++	spin_unlock_bh(&tid_rx->reorder_lock);
++	del_timer_sync(&tid_rx->reorder_timer);
++
+ 	call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx);
+ }
+ 
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index b448e8ff3213..909913d34d28 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -870,9 +870,10 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
+ 
+  set_release_timer:
+ 
+-		mod_timer(&tid_agg_rx->reorder_timer,
+-			  tid_agg_rx->reorder_time[j] + 1 +
+-			  HT_RX_REORDER_BUF_TIMEOUT);
++		if (!tid_agg_rx->removed)
++			mod_timer(&tid_agg_rx->reorder_timer,
++				  tid_agg_rx->reorder_time[j] + 1 +
++				  HT_RX_REORDER_BUF_TIMEOUT);
+ 	} else {
+ 		del_timer(&tid_agg_rx->reorder_timer);
+ 	}
+diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
+index 4f052bb2a5ad..075fd7858f2f 100644
+--- a/net/mac80211/sta_info.h
++++ b/net/mac80211/sta_info.h
+@@ -175,6 +175,7 @@ struct tid_ampdu_tx {
+  * @reorder_lock: serializes access to reorder buffer, see below.
+  * @auto_seq: used for offloaded BA sessions to automatically pick head_seq_and
+  *	and ssn.
++ * @removed: this session is removed (but might have been found due to RCU)
+  *
+  * This structure's lifetime is managed by RCU, assignments to
+  * the array holding it must hold the aggregation mutex.
+@@ -199,6 +200,7 @@ struct tid_ampdu_rx {
+ 	u16 timeout;
+ 	u8 dialog_token;
+ 	bool auto_seq;
++	bool removed;
+ };
+ 
+ /**
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 3f5d4d48f0cb..86e6cc5c0953 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -303,9 +303,7 @@ static int rpc_client_register(struct rpc_clnt *clnt,
+ 	struct super_block *pipefs_sb;
+ 	int err;
+ 
+-	err = rpc_clnt_debugfs_register(clnt);
+-	if (err)
+-		return err;
++	rpc_clnt_debugfs_register(clnt);
+ 
+ 	pipefs_sb = rpc_get_sb_net(net);
+ 	if (pipefs_sb) {
+diff --git a/net/sunrpc/debugfs.c b/net/sunrpc/debugfs.c
+index e811f390f9f6..82962f7e6e88 100644
+--- a/net/sunrpc/debugfs.c
++++ b/net/sunrpc/debugfs.c
+@@ -129,48 +129,52 @@ static const struct file_operations tasks_fops = {
+ 	.release	= tasks_release,
+ };
+ 
+-int
++void
+ rpc_clnt_debugfs_register(struct rpc_clnt *clnt)
+ {
+-	int len, err;
++	int len;
+ 	char name[24]; /* enough for "../../rpc_xprt/ + 8 hex digits + NULL */
++	struct rpc_xprt *xprt;
+ 
+ 	/* Already registered? */
+-	if (clnt->cl_debugfs)
+-		return 0;
++	if (clnt->cl_debugfs || !rpc_clnt_dir)
++		return;
+ 
+ 	len = snprintf(name, sizeof(name), "%x", clnt->cl_clid);
+ 	if (len >= sizeof(name))
+-		return -EINVAL;
++		return;
+ 
+ 	/* make the per-client dir */
+ 	clnt->cl_debugfs = debugfs_create_dir(name, rpc_clnt_dir);
+ 	if (!clnt->cl_debugfs)
+-		return -ENOMEM;
++		return;
+ 
+ 	/* make tasks file */
+-	err = -ENOMEM;
+ 	if (!debugfs_create_file("tasks", S_IFREG | S_IRUSR, clnt->cl_debugfs,
+ 				 clnt, &tasks_fops))
+ 		goto out_err;
+ 
+-	err = -EINVAL;
+ 	rcu_read_lock();
++	xprt = rcu_dereference(clnt->cl_xprt);
++	/* no "debugfs" dentry? Don't bother with the symlink. */
++	if (!xprt->debugfs) {
++		rcu_read_unlock();
++		return;
++	}
+ 	len = snprintf(name, sizeof(name), "../../rpc_xprt/%s",
+-			rcu_dereference(clnt->cl_xprt)->debugfs->d_name.name);
++			xprt->debugfs->d_name.name);
+ 	rcu_read_unlock();
++
+ 	if (len >= sizeof(name))
+ 		goto out_err;
+ 
+-	err = -ENOMEM;
+ 	if (!debugfs_create_symlink("xprt", clnt->cl_debugfs, name))
+ 		goto out_err;
+ 
+-	return 0;
++	return;
+ out_err:
+ 	debugfs_remove_recursive(clnt->cl_debugfs);
+ 	clnt->cl_debugfs = NULL;
+-	return err;
+ }
+ 
+ void
+@@ -226,33 +230,33 @@ static const struct file_operations xprt_info_fops = {
+ 	.release	= xprt_info_release,
+ };
+ 
+-int
++void
+ rpc_xprt_debugfs_register(struct rpc_xprt *xprt)
+ {
+ 	int len, id;
+ 	static atomic_t	cur_id;
+ 	char		name[9]; /* 8 hex digits + NULL term */
+ 
++	if (!rpc_xprt_dir)
++		return;
++
+ 	id = (unsigned int)atomic_inc_return(&cur_id);
+ 
+ 	len = snprintf(name, sizeof(name), "%x", id);
+ 	if (len >= sizeof(name))
+-		return -EINVAL;
++		return;
+ 
+ 	/* make the per-client dir */
+ 	xprt->debugfs = debugfs_create_dir(name, rpc_xprt_dir);
+ 	if (!xprt->debugfs)
+-		return -ENOMEM;
++		return;
+ 
+ 	/* make tasks file */
+ 	if (!debugfs_create_file("info", S_IFREG | S_IRUSR, xprt->debugfs,
+ 				 xprt, &xprt_info_fops)) {
+ 		debugfs_remove_recursive(xprt->debugfs);
+ 		xprt->debugfs = NULL;
+-		return -ENOMEM;
+ 	}
+-
+-	return 0;
+ }
+ 
+ void
+@@ -266,14 +270,17 @@ void __exit
+ sunrpc_debugfs_exit(void)
+ {
+ 	debugfs_remove_recursive(topdir);
++	topdir = NULL;
++	rpc_clnt_dir = NULL;
++	rpc_xprt_dir = NULL;
+ }
+ 
+-int __init
++void __init
+ sunrpc_debugfs_init(void)
+ {
+ 	topdir = debugfs_create_dir("sunrpc", NULL);
+ 	if (!topdir)
+-		goto out;
++		return;
+ 
+ 	rpc_clnt_dir = debugfs_create_dir("rpc_clnt", topdir);
+ 	if (!rpc_clnt_dir)
+@@ -283,10 +290,9 @@ sunrpc_debugfs_init(void)
+ 	if (!rpc_xprt_dir)
+ 		goto out_remove;
+ 
+-	return 0;
++	return;
+ out_remove:
+ 	debugfs_remove_recursive(topdir);
+ 	topdir = NULL;
+-out:
+-	return -ENOMEM;
++	rpc_clnt_dir = NULL;
+ }
+diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
+index e37fbed87956..ee5d3d253102 100644
+--- a/net/sunrpc/sunrpc_syms.c
++++ b/net/sunrpc/sunrpc_syms.c
+@@ -98,10 +98,7 @@ init_sunrpc(void)
+ 	if (err)
+ 		goto out4;
+ 
+-	err = sunrpc_debugfs_init();
+-	if (err)
+-		goto out5;
+-
++	sunrpc_debugfs_init();
+ #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
+ 	rpc_register_sysctl();
+ #endif
+@@ -109,8 +106,6 @@ init_sunrpc(void)
+ 	init_socket_xprt();	/* clnt sock transport */
+ 	return 0;
+ 
+-out5:
+-	unregister_rpc_pipefs();
+ out4:
+ 	unregister_pernet_subsys(&sunrpc_net_ops);
+ out3:
+diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
+index ebbefad21a37..5dcf7eac30a6 100644
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -1303,7 +1303,6 @@ static void xprt_init(struct rpc_xprt *xprt, struct net *net)
+  */
+ struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
+ {
+-	int err;
+ 	struct rpc_xprt	*xprt;
+ 	struct xprt_class *t;
+ 
+@@ -1344,11 +1343,7 @@ found:
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+ 
+-	err = rpc_xprt_debugfs_register(xprt);
+-	if (err) {
+-		xprt_destroy(xprt);
+-		return ERR_PTR(err);
+-	}
++	rpc_xprt_debugfs_register(xprt);
+ 
+ 	dprintk("RPC:       created transport %p with %u slots\n", xprt,
+ 			xprt->max_reqs);
+diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
+index 33db1ad4fd10..138949a31eab 100644
+--- a/security/selinux/selinuxfs.c
++++ b/security/selinux/selinuxfs.c
+@@ -152,7 +152,7 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf,
+ 		goto out;
+ 
+ 	/* No partial writes. */
+-	length = EINVAL;
++	length = -EINVAL;
+ 	if (*ppos != 0)
+ 		goto out;
+ 
+diff --git a/sound/firewire/bebob/bebob_maudio.c b/sound/firewire/bebob/bebob_maudio.c
+index a422aaa3bb0c..9ee25a63f684 100644
+--- a/sound/firewire/bebob/bebob_maudio.c
++++ b/sound/firewire/bebob/bebob_maudio.c
+@@ -96,10 +96,10 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit)
+ 	struct fw_device *device = fw_parent_device(unit);
+ 	int err, rcode;
+ 	u64 date;
+-	__be32 cues[3] = {
+-		MAUDIO_BOOTLOADER_CUE1,
+-		MAUDIO_BOOTLOADER_CUE2,
+-		MAUDIO_BOOTLOADER_CUE3
++	__le32 cues[3] = {
++		cpu_to_le32(MAUDIO_BOOTLOADER_CUE1),
++		cpu_to_le32(MAUDIO_BOOTLOADER_CUE2),
++		cpu_to_le32(MAUDIO_BOOTLOADER_CUE3)
+ 	};
+ 
+ 	/* check date of software used to build */
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index d3e2fc700c5d..172c89996d74 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -394,7 +394,7 @@ static void alc_auto_setup_eapd(struct hda_codec *codec, bool on)
+ {
+ 	/* We currently only handle front, HP */
+ 	static hda_nid_t pins[] = {
+-		0x0f, 0x10, 0x14, 0x15, 0
++		0x0f, 0x10, 0x14, 0x15, 0x17, 0
+ 	};
+ 	hda_nid_t *p;
+ 	for (p = pins; *p; p++)
+@@ -2910,6 +2910,8 @@ static void alc283_init(struct hda_codec *codec)
+ 
+ 	if (!hp_pin)
+ 		return;
++
++	msleep(30);
+ 	hp_pin_sense = snd_hda_jack_detect(codec, hp_pin);
+ 
+ 	/* Index 0x43 Direct Drive HP AMP LPM Control 1 */
+@@ -4289,6 +4291,7 @@ enum {
+ 	ALC269_FIXUP_QUANTA_MUTE,
+ 	ALC269_FIXUP_LIFEBOOK,
+ 	ALC269_FIXUP_LIFEBOOK_EXTMIC,
++	ALC269_FIXUP_LIFEBOOK_HP_PIN,
+ 	ALC269_FIXUP_AMIC,
+ 	ALC269_FIXUP_DMIC,
+ 	ALC269VB_FIXUP_AMIC,
+@@ -4440,6 +4443,13 @@ static const struct hda_fixup alc269_fixups[] = {
+ 			{ }
+ 		},
+ 	},
++	[ALC269_FIXUP_LIFEBOOK_HP_PIN] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x21, 0x0221102f }, /* HP out */
++			{ }
++		},
++	},
+ 	[ALC269_FIXUP_AMIC] = {
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+@@ -4915,6 +4925,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
+ 	SND_PCI_QUIRK(0x104d, 0x9099, "Sony VAIO S13", ALC275_FIXUP_SONY_DISABLE_AAMIX),
+ 	SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
++	SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
+ 	SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
+ 	SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_BXBT2807_MIC),
+@@ -4941,6 +4952,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
++	SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+ 	SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index dc9df007d3e3..337c317ead6f 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -192,6 +192,7 @@ static const struct rc_config {
+ 	{ USB_ID(0x041e, 0x3040), 2, 2, 6, 6,  2,  0x6e91 }, /* Live! 24-bit */
+ 	{ USB_ID(0x041e, 0x3042), 0, 1, 1, 1,  1,  0x000d }, /* Usb X-Fi S51 */
+ 	{ USB_ID(0x041e, 0x30df), 0, 1, 1, 1,  1,  0x000d }, /* Usb X-Fi S51 Pro */
++	{ USB_ID(0x041e, 0x3237), 0, 1, 1, 1,  1,  0x000d }, /* Usb X-Fi S51 Pro */
+ 	{ USB_ID(0x041e, 0x3048), 2, 2, 6, 6,  2,  0x6e91 }, /* Toshiba SB0500 */
+ };
+ 
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index ec83b11c5978..0fba701bc518 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -466,7 +466,7 @@ static struct kvm *kvm_create_vm(unsigned long type)
+ 	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
+ 
+ 	r = -ENOMEM;
+-	kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
++	kvm->memslots = kvm_kvzalloc(sizeof(struct kvm_memslots));
+ 	if (!kvm->memslots)
+ 		goto out_err_no_srcu;
+ 
+@@ -517,7 +517,7 @@ out_err_no_srcu:
+ out_err_no_disable:
+ 	for (i = 0; i < KVM_NR_BUSES; i++)
+ 		kfree(kvm->buses[i]);
+-	kfree(kvm->memslots);
++	kvfree(kvm->memslots);
+ 	kvm_arch_free_vm(kvm);
+ 	return ERR_PTR(r);
+ }
+@@ -573,7 +573,7 @@ static void kvm_free_physmem(struct kvm *kvm)
+ 	kvm_for_each_memslot(memslot, slots)
+ 		kvm_free_physmem_slot(kvm, memslot, NULL);
+ 
+-	kfree(kvm->memslots);
++	kvfree(kvm->memslots);
+ }
+ 
+ static void kvm_destroy_devices(struct kvm *kvm)
+@@ -865,10 +865,10 @@ int __kvm_set_memory_region(struct kvm *kvm,
+ 			goto out_free;
+ 	}
+ 
+-	slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
+-			GFP_KERNEL);
++	slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
+ 	if (!slots)
+ 		goto out_free;
++	memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
+ 
+ 	if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
+ 		slot = id_to_memslot(slots, mem->slot);
+@@ -911,7 +911,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
+ 	kvm_arch_commit_memory_region(kvm, mem, &old, change);
+ 
+ 	kvm_free_physmem_slot(kvm, &old, &new);
+-	kfree(old_memslots);
++	kvfree(old_memslots);
+ 
+ 	/*
+ 	 * IOMMU mapping:  New slots need to be mapped.  Old slots need to be
+@@ -930,7 +930,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
+ 	return 0;
+ 
+ out_slots:
+-	kfree(slots);
++	kvfree(slots);
+ out_free:
+ 	kvm_free_physmem_slot(kvm, &new, &old);
+ out:



* [gentoo-commits] proj/linux-patches:3.19 commit in: /
@ 2015-04-29 14:26 Mike Pagano
  0 siblings, 0 replies; 19+ messages in thread
From: Mike Pagano @ 2015-04-29 14:26 UTC (permalink / raw)
  To: gentoo-commits

commit:     44705e257afae352ada6c5bd078b9d9eafc0243b
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Apr 29 14:26:24 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Apr 29 14:26:24 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=44705e25

Linux patch 3.19.6

 0000_README             |    4 +
 1005_linux-3.19.6.patch | 1674 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1678 insertions(+)

diff --git a/0000_README b/0000_README
index 89efc26..25bc90f 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch:  1004_linux-3.19.5.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.19.5
 
+Patch:  1005_linux-3.19.6.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.19.6
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1005_linux-3.19.6.patch b/1005_linux-3.19.6.patch
new file mode 100644
index 0000000..e702f01
--- /dev/null
+++ b/1005_linux-3.19.6.patch
@@ -0,0 +1,1674 @@
+diff --git a/Makefile b/Makefile
+index 633b5f0f11a0..65c7c8756803 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 19
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Diseased Newt
+ 
+diff --git a/arch/arm/mm/hugetlbpage.c b/arch/arm/mm/hugetlbpage.c
+index 66781bf34077..c72412415093 100644
+--- a/arch/arm/mm/hugetlbpage.c
++++ b/arch/arm/mm/hugetlbpage.c
+@@ -36,12 +36,6 @@
+  * of type casting from pmd_t * to pte_t *.
+  */
+ 
+-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
+-			      int write)
+-{
+-	return ERR_PTR(-EINVAL);
+-}
+-
+ int pud_huge(pud_t pud)
+ {
+ 	return 0;
+diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
+index 023747bf4dd7..2de9d2e59d96 100644
+--- a/arch/arm64/mm/hugetlbpage.c
++++ b/arch/arm64/mm/hugetlbpage.c
+@@ -38,12 +38,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+ }
+ #endif
+ 
+-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
+-			      int write)
+-{
+-	return ERR_PTR(-EINVAL);
+-}
+-
+ int pmd_huge(pmd_t pmd)
+ {
+ 	return !(pmd_val(pmd) & PMD_TABLE_BIT);
+diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
+index 76069c18ee42..52b7604b5215 100644
+--- a/arch/ia64/mm/hugetlbpage.c
++++ b/arch/ia64/mm/hugetlbpage.c
+@@ -114,12 +114,6 @@ int pud_huge(pud_t pud)
+ 	return 0;
+ }
+ 
+-struct page *
+-follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
+-{
+-	return NULL;
+-}
+-
+ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+ 			unsigned long addr, unsigned long end,
+ 			unsigned long floor, unsigned long ceiling)
+diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
+index 3c32075d2945..7ca80ac42ed5 100644
+--- a/arch/metag/mm/hugetlbpage.c
++++ b/arch/metag/mm/hugetlbpage.c
+@@ -94,12 +94,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+ 	return 0;
+ }
+ 
+-struct page *follow_huge_addr(struct mm_struct *mm,
+-			      unsigned long address, int write)
+-{
+-	return ERR_PTR(-EINVAL);
+-}
+-
+ int pmd_huge(pmd_t pmd)
+ {
+ 	return pmd_page_shift(pmd) > PAGE_SHIFT;
+diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c
+index 4ec8ee10d371..06e0f421b41b 100644
+--- a/arch/mips/mm/hugetlbpage.c
++++ b/arch/mips/mm/hugetlbpage.c
+@@ -68,12 +68,6 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
+ 	return 0;
+ }
+ 
+-struct page *
+-follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
+-{
+-	return ERR_PTR(-EINVAL);
+-}
+-
+ int pmd_huge(pmd_t pmd)
+ {
+ 	return (pmd_val(pmd) & _PAGE_HUGE) != 0;
+@@ -83,15 +77,3 @@ int pud_huge(pud_t pud)
+ {
+ 	return (pud_val(pud) & _PAGE_HUGE) != 0;
+ }
+-
+-struct page *
+-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+-		pmd_t *pmd, int write)
+-{
+-	struct page *page;
+-
+-	page = pte_page(*(pte_t *)pmd);
+-	if (page)
+-		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
+-	return page;
+-}
+diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
+index 620d0ec93e6f..7e408bfc7948 100644
+--- a/arch/powerpc/mm/hugetlbpage.c
++++ b/arch/powerpc/mm/hugetlbpage.c
+@@ -714,6 +714,14 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ 	return NULL;
+ }
+ 
++struct page *
++follow_huge_pud(struct mm_struct *mm, unsigned long address,
++		pud_t *pud, int write)
++{
++	BUG();
++	return NULL;
++}
++
+ static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
+ 				      unsigned long sz)
+ {
+diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
+index 3c80d2e38f03..210ffede0153 100644
+--- a/arch/s390/mm/hugetlbpage.c
++++ b/arch/s390/mm/hugetlbpage.c
+@@ -192,12 +192,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+ 	return 0;
+ }
+ 
+-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
+-			      int write)
+-{
+-	return ERR_PTR(-EINVAL);
+-}
+-
+ int pmd_huge(pmd_t pmd)
+ {
+ 	if (!MACHINE_HAS_HPAGE)
+@@ -210,17 +204,3 @@ int pud_huge(pud_t pud)
+ {
+ 	return 0;
+ }
+-
+-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+-			     pmd_t *pmdp, int write)
+-{
+-	struct page *page;
+-
+-	if (!MACHINE_HAS_HPAGE)
+-		return NULL;
+-
+-	page = pmd_page(*pmdp);
+-	if (page)
+-		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
+-	return page;
+-}
+diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
+index d7762349ea48..534bc978af8a 100644
+--- a/arch/sh/mm/hugetlbpage.c
++++ b/arch/sh/mm/hugetlbpage.c
+@@ -67,12 +67,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+ 	return 0;
+ }
+ 
+-struct page *follow_huge_addr(struct mm_struct *mm,
+-			      unsigned long address, int write)
+-{
+-	return ERR_PTR(-EINVAL);
+-}
+-
+ int pmd_huge(pmd_t pmd)
+ {
+ 	return 0;
+@@ -82,9 +76,3 @@ int pud_huge(pud_t pud)
+ {
+ 	return 0;
+ }
+-
+-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+-			     pmd_t *pmd, int write)
+-{
+-	return NULL;
+-}
+diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
+index d329537739c6..4242eab12e10 100644
+--- a/arch/sparc/mm/hugetlbpage.c
++++ b/arch/sparc/mm/hugetlbpage.c
+@@ -215,12 +215,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ 	return entry;
+ }
+ 
+-struct page *follow_huge_addr(struct mm_struct *mm,
+-			      unsigned long address, int write)
+-{
+-	return ERR_PTR(-EINVAL);
+-}
+-
+ int pmd_huge(pmd_t pmd)
+ {
+ 	return 0;
+@@ -230,9 +224,3 @@ int pud_huge(pud_t pud)
+ {
+ 	return 0;
+ }
+-
+-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+-			     pmd_t *pmd, int write)
+-{
+-	return NULL;
+-}
+diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
+index 3270e0019266..8416240c322c 100644
+--- a/arch/tile/mm/hugetlbpage.c
++++ b/arch/tile/mm/hugetlbpage.c
+@@ -150,12 +150,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+ 	return NULL;
+ }
+ 
+-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
+-			      int write)
+-{
+-	return ERR_PTR(-EINVAL);
+-}
+-
+ int pmd_huge(pmd_t pmd)
+ {
+ 	return !!(pmd_val(pmd) & _PAGE_HUGE_PAGE);
+@@ -166,28 +160,6 @@ int pud_huge(pud_t pud)
+ 	return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
+ }
+ 
+-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+-			     pmd_t *pmd, int write)
+-{
+-	struct page *page;
+-
+-	page = pte_page(*(pte_t *)pmd);
+-	if (page)
+-		page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
+-	return page;
+-}
+-
+-struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
+-			     pud_t *pud, int write)
+-{
+-	struct page *page;
+-
+-	page = pte_page(*(pte_t *)pud);
+-	if (page)
+-		page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
+-	return page;
+-}
+-
+ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+ {
+ 	return 0;
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index d4c58d884838..312446418b36 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -2404,8 +2404,7 @@ static __init void nested_vmx_setup_ctls_msrs(void)
+ 
+ 	if (enable_ept) {
+ 		/* nested EPT: emulate EPT also to L1 */
+-		nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT |
+-			SECONDARY_EXEC_UNRESTRICTED_GUEST;
++		nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT;
+ 		nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
+ 			 VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
+ 			 VMX_EPT_INVEPT_BIT;
+@@ -2419,6 +2418,10 @@ static __init void nested_vmx_setup_ctls_msrs(void)
+ 	} else
+ 		nested_vmx_ept_caps = 0;
+ 
++	if (enable_unrestricted_guest)
++		nested_vmx_secondary_ctls_high |=
++			SECONDARY_EXEC_UNRESTRICTED_GUEST;
++
+ 	/* miscellaneous data */
+ 	rdmsr(MSR_IA32_VMX_MISC, nested_vmx_misc_low, nested_vmx_misc_high);
+ 	nested_vmx_misc_low &= VMX_MISC_SAVE_EFER_LMA;
+diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
+index 006cc914994b..9161f764121e 100644
+--- a/arch/x86/mm/hugetlbpage.c
++++ b/arch/x86/mm/hugetlbpage.c
+@@ -52,20 +52,8 @@ int pud_huge(pud_t pud)
+ 	return 0;
+ }
+ 
+-struct page *
+-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+-		pmd_t *pmd, int write)
+-{
+-	return NULL;
+-}
+ #else
+ 
+-struct page *
+-follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
+-{
+-	return ERR_PTR(-EINVAL);
+-}
+-
+ /*
+  * pmd_huge() returns 1 if @pmd is hugetlb related entry, that is normal
+  * hugetlb entry or non-present (migration or hwpoisoned) hugetlb entry.
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 0dceba1a2ba1..68ad39a4b221 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3797,7 +3797,8 @@ static inline int bond_slave_override(struct bonding *bond,
+ 	/* Find out if any slaves have the same mapping as this skb. */
+ 	bond_for_each_slave_rcu(bond, slave, iter) {
+ 		if (slave->queue_id == skb->queue_mapping) {
+-			if (bond_slave_can_tx(slave)) {
++			if (bond_slave_is_up(slave) &&
++			    slave->link == BOND_LINK_UP) {
+ 				bond_dev_queue_xmit(bond, skb, slave->dev);
+ 				return 0;
+ 			}
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+index c3a6072134f5..2559206d8704 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+@@ -531,20 +531,8 @@ struct bnx2x_fastpath {
+ 	struct napi_struct	napi;
+ 
+ #ifdef CONFIG_NET_RX_BUSY_POLL
+-	unsigned int state;
+-#define BNX2X_FP_STATE_IDLE		      0
+-#define BNX2X_FP_STATE_NAPI		(1 << 0)    /* NAPI owns this FP */
+-#define BNX2X_FP_STATE_POLL		(1 << 1)    /* poll owns this FP */
+-#define BNX2X_FP_STATE_DISABLED		(1 << 2)
+-#define BNX2X_FP_STATE_NAPI_YIELD	(1 << 3)    /* NAPI yielded this FP */
+-#define BNX2X_FP_STATE_POLL_YIELD	(1 << 4)    /* poll yielded this FP */
+-#define BNX2X_FP_OWNED	(BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
+-#define BNX2X_FP_YIELD	(BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
+-#define BNX2X_FP_LOCKED	(BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)
+-#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
+-	/* protect state */
+-	spinlock_t lock;
+-#endif /* CONFIG_NET_RX_BUSY_POLL */
++	unsigned long		busy_poll_state;
++#endif
+ 
+ 	union host_hc_status_block	status_blk;
+ 	/* chip independent shortcuts into sb structure */
+@@ -619,104 +607,83 @@ struct bnx2x_fastpath {
+ #define bnx2x_fp_qstats(bp, fp)	(&((bp)->fp_stats[(fp)->index].eth_q_stats))
+ 
+ #ifdef CONFIG_NET_RX_BUSY_POLL
+-static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
++
++enum bnx2x_fp_state {
++	BNX2X_STATE_FP_NAPI	= BIT(0), /* NAPI handler owns the queue */
++
++	BNX2X_STATE_FP_NAPI_REQ_BIT = 1, /* NAPI would like to own the queue */
++	BNX2X_STATE_FP_NAPI_REQ = BIT(1),
++
++	BNX2X_STATE_FP_POLL_BIT = 2,
++	BNX2X_STATE_FP_POLL     = BIT(2), /* busy_poll owns the queue */
++
++	BNX2X_STATE_FP_DISABLE_BIT = 3, /* queue is dismantled */
++};
++
++static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp)
+ {
+-	spin_lock_init(&fp->lock);
+-	fp->state = BNX2X_FP_STATE_IDLE;
++	WRITE_ONCE(fp->busy_poll_state, 0);
+ }
+ 
+ /* called from the device poll routine to get ownership of a FP */
+ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
+ {
+-	bool rc = true;
+-
+-	spin_lock_bh(&fp->lock);
+-	if (fp->state & BNX2X_FP_LOCKED) {
+-		WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
+-		fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
+-		rc = false;
+-	} else {
+-		/* we don't care if someone yielded */
+-		fp->state = BNX2X_FP_STATE_NAPI;
++	unsigned long prev, old = READ_ONCE(fp->busy_poll_state);
++
++	while (1) {
++		switch (old) {
++		case BNX2X_STATE_FP_POLL:
++			/* make sure bnx2x_fp_lock_poll() wont starve us */
++			set_bit(BNX2X_STATE_FP_NAPI_REQ_BIT,
++				&fp->busy_poll_state);
++			/* fallthrough */
++		case BNX2X_STATE_FP_POLL | BNX2X_STATE_FP_NAPI_REQ:
++			return false;
++		default:
++			break;
++		}
++		prev = cmpxchg(&fp->busy_poll_state, old, BNX2X_STATE_FP_NAPI);
++		if (unlikely(prev != old)) {
++			old = prev;
++			continue;
++		}
++		return true;
+ 	}
+-	spin_unlock_bh(&fp->lock);
+-	return rc;
+ }
+ 
+-/* returns true is someone tried to get the FP while napi had it */
+-static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
++static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
+ {
+-	bool rc = false;
+-
+-	spin_lock_bh(&fp->lock);
+-	WARN_ON(fp->state &
+-		(BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
+-
+-	if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
+-		rc = true;
+-
+-	/* state ==> idle, unless currently disabled */
+-	fp->state &= BNX2X_FP_STATE_DISABLED;
+-	spin_unlock_bh(&fp->lock);
+-	return rc;
++	smp_wmb();
++	fp->busy_poll_state = 0;
+ }
+ 
+ /* called from bnx2x_low_latency_poll() */
+ static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
+ {
+-	bool rc = true;
+-
+-	spin_lock_bh(&fp->lock);
+-	if ((fp->state & BNX2X_FP_LOCKED)) {
+-		fp->state |= BNX2X_FP_STATE_POLL_YIELD;
+-		rc = false;
+-	} else {
+-		/* preserve yield marks */
+-		fp->state |= BNX2X_FP_STATE_POLL;
+-	}
+-	spin_unlock_bh(&fp->lock);
+-	return rc;
++	return cmpxchg(&fp->busy_poll_state, 0, BNX2X_STATE_FP_POLL) == 0;
+ }
+ 
+-/* returns true if someone tried to get the FP while it was locked */
+-static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
++static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
+ {
+-	bool rc = false;
+-
+-	spin_lock_bh(&fp->lock);
+-	WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
+-
+-	if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
+-		rc = true;
+-
+-	/* state ==> idle, unless currently disabled */
+-	fp->state &= BNX2X_FP_STATE_DISABLED;
+-	spin_unlock_bh(&fp->lock);
+-	return rc;
++	smp_mb__before_atomic();
++	clear_bit(BNX2X_STATE_FP_POLL_BIT, &fp->busy_poll_state);
+ }
+ 
+-/* true if a socket is polling, even if it did not get the lock */
++/* true if a socket is polling */
+ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
+ {
+-	WARN_ON(!(fp->state & BNX2X_FP_OWNED));
+-	return fp->state & BNX2X_FP_USER_PEND;
++	return READ_ONCE(fp->busy_poll_state) & BNX2X_STATE_FP_POLL;
+ }
+ 
+ /* false if fp is currently owned */
+ static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+ {
+-	int rc = true;
+-
+-	spin_lock_bh(&fp->lock);
+-	if (fp->state & BNX2X_FP_OWNED)
+-		rc = false;
+-	fp->state |= BNX2X_FP_STATE_DISABLED;
+-	spin_unlock_bh(&fp->lock);
++	set_bit(BNX2X_STATE_FP_DISABLE_BIT, &fp->busy_poll_state);
++	return !bnx2x_fp_ll_polling(fp);
+ 
+-	return rc;
+ }
+ #else
+-static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
++static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp)
+ {
+ }
+ 
+@@ -725,9 +692,8 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
+ 	return true;
+ }
+ 
+-static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
++static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
+ {
+-	return false;
+ }
+ 
+ static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
+@@ -735,9 +701,8 @@ static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
+ 	return false;
+ }
+ 
+-static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
++static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
+ {
+-	return false;
+ }
+ 
+ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index e468ed3f210f..2b8e8b2ce0b6 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -1849,7 +1849,7 @@ static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
+ 	int i;
+ 
+ 	for_each_rx_queue_cnic(bp, i) {
+-		bnx2x_fp_init_lock(&bp->fp[i]);
++		bnx2x_fp_busy_poll_init(&bp->fp[i]);
+ 		napi_enable(&bnx2x_fp(bp, i, napi));
+ 	}
+ }
+@@ -1859,7 +1859,7 @@ static void bnx2x_napi_enable(struct bnx2x *bp)
+ 	int i;
+ 
+ 	for_each_eth_queue(bp, i) {
+-		bnx2x_fp_init_lock(&bp->fp[i]);
++		bnx2x_fp_busy_poll_init(&bp->fp[i]);
+ 		napi_enable(&bnx2x_fp(bp, i, napi));
+ 	}
+ }
+@@ -3191,9 +3191,10 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
+ 			}
+ 		}
+ 
++		bnx2x_fp_unlock_napi(fp);
++
+ 		/* Fall out from the NAPI loop if needed */
+-		if (!bnx2x_fp_unlock_napi(fp) &&
+-		    !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
++		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+ 
+ 			/* No need to update SB for FCoE L2 ring as long as
+ 			 * it's connected to the default SB and the SB
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 96bf01ba32dd..05ae12690117 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -17868,8 +17868,10 @@ static int tg3_init_one(struct pci_dev *pdev,
+ 	 */
+ 	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
+ 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
++		tg3_full_lock(tp, 0);
+ 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
+ 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
++		tg3_full_unlock(tp);
+ 	}
+ 
+ 	err = tg3_test_dma(tp);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
+index 5c93d1451c44..9842bf963648 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
+@@ -585,7 +585,8 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
+ 		 * on the host, we deprecate the error message for this
+ 		 * specific command/input_mod/opcode_mod/fw-status to be debug.
+ 		 */
+-		if (op == MLX4_CMD_SET_PORT && in_modifier == 1 &&
++		if (op == MLX4_CMD_SET_PORT &&
++		    (in_modifier == 1 || in_modifier == 2) &&
+ 		    op_modifier == 0 && context->fw_status == CMD_STAT_BAD_SIZE)
+ 			mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
+ 				 op, context->fw_status);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+index ac6a8f1eea6c..2617c9d68d9b 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -2627,13 +2627,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	netif_carrier_off(dev);
+ 	mlx4_en_set_default_moderation(priv);
+ 
+-	err = register_netdev(dev);
+-	if (err) {
+-		en_err(priv, "Netdev registration failed for port %d\n", port);
+-		goto out;
+-	}
+-	priv->registered = 1;
+-
+ 	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
+ 	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
+ 
+@@ -2673,6 +2666,14 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 		queue_delayed_work(mdev->workqueue, &priv->service_task,
+ 				   SERVICE_TASK_DELAY);
+ 
++	err = register_netdev(dev);
++	if (err) {
++		en_err(priv, "Netdev registration failed for port %d\n", port);
++		goto out;
++	}
++
++	priv->registered = 1;
++
+ 	return 0;
+ 
+ out:
+diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
+index 2f398fa4b9e6..24c028473383 100644
+--- a/drivers/net/ethernet/rocker/rocker.c
++++ b/drivers/net/ethernet/rocker/rocker.c
+@@ -4305,10 +4305,16 @@ static int rocker_port_master_changed(struct net_device *dev)
+ 	struct net_device *master = netdev_master_upper_dev_get(dev);
+ 	int err = 0;
+ 
++	/* There are currently three cases handled here:
++	 * 1. Joining a bridge
++	 * 2. Leaving a previously joined bridge
++	 * 3. Other, e.g. being added to or removed from a bond or openvswitch,
++	 *    in which case nothing is done
++	 */
+ 	if (master && master->rtnl_link_ops &&
+ 	    !strcmp(master->rtnl_link_ops->kind, "bridge"))
+ 		err = rocker_port_bridge_join(rocker_port, master);
+-	else
++	else if (rocker_port_is_bridged(rocker_port))
+ 		err = rocker_port_bridge_leave(rocker_port);
+ 
+ 	return err;
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 10f9e4021b5a..9a409a8f3b19 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1368,7 +1368,7 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
+ 	skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0,
+ 				  &peeked, &off, &err);
+ 	if (!skb)
+-		return 0;
++		return err;
+ 
+ 	ret = tun_put_user(tun, tfile, skb, to);
+ 	if (unlikely(ret < 0))
+diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
+index 5c55f11572ba..75d6f26729a3 100644
+--- a/drivers/net/usb/asix_common.c
++++ b/drivers/net/usb/asix_common.c
+@@ -188,6 +188,8 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ 		memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
+ 		skb_put(skb, sizeof(padbytes));
+ 	}
++
++	usbnet_set_skb_tx_stats(skb, 1, 0);
+ 	return skb;
+ }
+ 
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index 80a844e0ae03..c3e4da9e79ca 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -1172,17 +1172,17 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
+ 
+ 	/* return skb */
+ 	ctx->tx_curr_skb = NULL;
+-	dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
+ 
+ 	/* keep private stats: framing overhead and number of NTBs */
+ 	ctx->tx_overhead += skb_out->len - ctx->tx_curr_frame_payload;
+ 	ctx->tx_ntbs++;
+ 
+-	/* usbnet has already counted all the framing overhead.
++	/* usbnet will count all the framing overhead by default.
+ 	 * Adjust the stats so that the tx_bytes counter show real
+ 	 * payload data instead.
+ 	 */
+-	dev->net->stats.tx_bytes -= skb_out->len - ctx->tx_curr_frame_payload;
++	usbnet_set_skb_tx_stats(skb_out, n,
++				ctx->tx_curr_frame_payload - skb_out->len);
+ 
+ 	return skb_out;
+ 
+diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
+index b94a0fbb8b3b..953de13267df 100644
+--- a/drivers/net/usb/sr9800.c
++++ b/drivers/net/usb/sr9800.c
+@@ -144,6 +144,7 @@ static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ 		skb_put(skb, sizeof(padbytes));
+ 	}
+ 
++	usbnet_set_skb_tx_stats(skb, 1, 0);
+ 	return skb;
+ }
+ 
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index 3a6770a65d78..e7ed2513b1d1 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -1189,8 +1189,7 @@ static void tx_complete (struct urb *urb)
+ 	struct usbnet		*dev = entry->dev;
+ 
+ 	if (urb->status == 0) {
+-		if (!(dev->driver_info->flags & FLAG_MULTI_PACKET))
+-			dev->net->stats.tx_packets++;
++		dev->net->stats.tx_packets += entry->packets;
+ 		dev->net->stats.tx_bytes += entry->length;
+ 	} else {
+ 		dev->net->stats.tx_errors++;
+@@ -1348,7 +1347,19 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
+ 		} else
+ 			urb->transfer_flags |= URB_ZERO_PACKET;
+ 	}
+-	entry->length = urb->transfer_buffer_length = length;
++	urb->transfer_buffer_length = length;
++
++	if (info->flags & FLAG_MULTI_PACKET) {
++		/* Driver has set number of packets and a length delta.
++		 * Calculate the complete length and ensure that it's
++		 * positive.
++		 */
++		entry->length += length;
++		if (WARN_ON_ONCE(entry->length <= 0))
++			entry->length = length;
++	} else {
++		usbnet_set_skb_tx_stats(skb, 1, length);
++	}
+ 
+ 	spin_lock_irqsave(&dev->txq.lock, flags);
+ 	retval = usb_autopm_get_interface_async(dev->intf);
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index a8c755dcab14..6c83846f914c 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -1578,12 +1578,6 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
+ 	int err;
+ 	bool udp_sum = !udp_get_no_check6_tx(vs->sock->sk);
+ 
+-	skb = udp_tunnel_handle_offloads(skb, udp_sum);
+-	if (IS_ERR(skb)) {
+-		err = -EINVAL;
+-		goto err;
+-	}
+-
+ 	skb_scrub_packet(skb, xnet);
+ 
+ 	min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
+@@ -1603,6 +1597,12 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
+ 		goto err;
+ 	}
+ 
++	skb = udp_tunnel_handle_offloads(skb, udp_sum);
++	if (IS_ERR(skb)) {
++		err = -EINVAL;
++		goto err;
++	}
++
+ 	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
+ 	vxh->vx_flags = htonl(VXLAN_FLAGS);
+ 	vxh->vx_vni = vni;
+@@ -1628,10 +1628,6 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
+ 	int err;
+ 	bool udp_sum = !vs->sock->sk->sk_no_check_tx;
+ 
+-	skb = udp_tunnel_handle_offloads(skb, udp_sum);
+-	if (IS_ERR(skb))
+-		return PTR_ERR(skb);
+-
+ 	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ 			+ VXLAN_HLEN + sizeof(struct iphdr)
+ 			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
+@@ -1647,6 +1643,10 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
+ 	if (WARN_ON(!skb))
+ 		return -ENOMEM;
+ 
++	skb = udp_tunnel_handle_offloads(skb, udp_sum);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
+ 	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
+ 	vxh->vx_flags = htonl(VXLAN_FLAGS);
+ 	vxh->vx_vni = vni;
+diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
+index a5186bb7c63e..8c45cf44ce24 100644
+--- a/drivers/net/wireless/rtlwifi/pci.c
++++ b/drivers/net/wireless/rtlwifi/pci.c
+@@ -578,6 +578,13 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
+ 		else
+ 			entry = (u8 *)(&ring->desc[ring->idx]);
+ 
++		if (rtlpriv->cfg->ops->get_available_desc &&
++		    rtlpriv->cfg->ops->get_available_desc(hw, prio) <= 1) {
++			RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_DMESG,
++				 "no available desc!\n");
++			return;
++		}
++
+ 		if (!rtlpriv->cfg->ops->is_tx_desc_closed(hw, prio, ring->idx))
+ 			return;
+ 		ring->idx = (ring->idx + 1) % ring->entries;
+@@ -641,10 +648,9 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
+ 
+ 		ieee80211_tx_status_irqsafe(hw, skb);
+ 
+-		if ((ring->entries - skb_queue_len(&ring->queue))
+-				== 2) {
++		if ((ring->entries - skb_queue_len(&ring->queue)) <= 4) {
+ 
+-			RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
++			RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
+ 				 "more desc left, wake skb_queue@%d, ring->idx = %d, skb_queue_len = 0x%x\n",
+ 				 prio, ring->idx,
+ 				 skb_queue_len(&ring->queue));
+@@ -793,7 +799,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
+ 			rx_remained_cnt =
+ 				rtlpriv->cfg->ops->rx_desc_buff_remained_cnt(hw,
+ 								      hw_queue);
+-			if (rx_remained_cnt < 1)
++			if (rx_remained_cnt == 0)
+ 				return;
+ 
+ 		} else {	/* rx descriptor */
+@@ -845,18 +851,18 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
+ 			else
+ 				skb_reserve(skb, stats.rx_drvinfo_size +
+ 					    stats.rx_bufshift);
+-
+ 		} else {
+ 			RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ 				 "skb->end - skb->tail = %d, len is %d\n",
+ 				 skb->end - skb->tail, len);
+-			break;
++			dev_kfree_skb_any(skb);
++			goto new_trx_end;
+ 		}
+ 		/* handle command packet here */
+ 		if (rtlpriv->cfg->ops->rx_command_packet &&
+ 		    rtlpriv->cfg->ops->rx_command_packet(hw, stats, skb)) {
+ 				dev_kfree_skb_any(skb);
+-				goto end;
++				goto new_trx_end;
+ 		}
+ 
+ 		/*
+@@ -906,6 +912,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
+ 		} else {
+ 			dev_kfree_skb_any(skb);
+ 		}
++new_trx_end:
+ 		if (rtlpriv->use_new_trx_flow) {
+ 			rtlpci->rx_ring[hw_queue].next_rx_rp += 1;
+ 			rtlpci->rx_ring[hw_queue].next_rx_rp %=
+@@ -921,7 +928,6 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
+ 			rtlpriv->enter_ps = false;
+ 			schedule_work(&rtlpriv->works.lps_change_work);
+ 		}
+-end:
+ 		skb = new_skb;
+ no_new:
+ 		if (rtlpriv->use_new_trx_flow) {
+@@ -1695,6 +1701,15 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
+ 		}
+ 	}
+ 
++	if (rtlpriv->cfg->ops->get_available_desc &&
++	    rtlpriv->cfg->ops->get_available_desc(hw, hw_queue) == 0) {
++			RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
++				 "get_available_desc fail\n");
++			spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock,
++					       flags);
++			return skb->len;
++	}
++
+ 	if (ieee80211_is_data_qos(fc)) {
+ 		tid = rtl_get_tid(skb);
+ 		if (sta) {
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/sw.c
+index 9b5a7d5be121..c31c6bfb536d 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192ee/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192ee/sw.c
+@@ -113,8 +113,6 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
+ 				  RCR_HTC_LOC_CTRL		|
+ 				  RCR_AMF			|
+ 				  RCR_ACF			|
+-				  RCR_ADF			|
+-				  RCR_AICV			|
+ 				  RCR_ACRC32			|
+ 				  RCR_AB			|
+ 				  RCR_AM			|
+@@ -241,6 +239,7 @@ static struct rtl_hal_ops rtl8192ee_hal_ops = {
+ 	.set_desc = rtl92ee_set_desc,
+ 	.get_desc = rtl92ee_get_desc,
+ 	.is_tx_desc_closed = rtl92ee_is_tx_desc_closed,
++	.get_available_desc = rtl92ee_get_available_desc,
+ 	.tx_polling = rtl92ee_tx_polling,
+ 	.enable_hw_sec = rtl92ee_enable_hw_security_config,
+ 	.set_key = rtl92ee_set_key,
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c
+index 00690040be37..1f6d160877e1 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c
+@@ -707,7 +707,7 @@ static u16 get_desc_addr_fr_q_idx(u16 queue_index)
+ 	return desc_address;
+ }
+ 
+-void rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx)
++u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx)
+ {
+ 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+@@ -721,11 +721,12 @@ void rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx)
+ 	current_tx_write_point = (u16)((tmp_4byte) & 0x0fff);
+ 
+ 	point_diff = ((current_tx_read_point > current_tx_write_point) ?
+-		      (current_tx_read_point - current_tx_write_point) :
+-		      (TX_DESC_NUM_92E - current_tx_write_point +
++		      (current_tx_read_point - current_tx_write_point - 1) :
++		      (TX_DESC_NUM_92E - 1 - current_tx_write_point +
+ 		       current_tx_read_point));
+ 
+ 	rtlpci->tx_ring[q_idx].avl_desc = point_diff;
++	return point_diff;
+ }
+ 
+ void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw,
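The point_diff change above adopts the standard "keep one slot empty" ring-buffer convention, so that read == write unambiguously means an empty ring and a full ring is still distinguishable. A minimal standalone sketch of that arithmetic (names are illustrative, not taken from the driver):

#include <stdio.h>

/* Free slots in a "keep one slot empty" ring: read == write means
 * empty; the writer stops one slot behind the reader when full. */
static unsigned int ring_free_slots(unsigned int read_ptr,
				    unsigned int write_ptr,
				    unsigned int size)
{
	return (read_ptr > write_ptr) ?
	       (read_ptr - write_ptr - 1) :
	       (size - 1 - write_ptr + read_ptr);
}

int main(void)
{
	printf("%u\n", ring_free_slots(0, 0, 512)); /* empty ring: 511 usable */
	printf("%u\n", ring_free_slots(5, 4, 512)); /* full ring: 0 free */
	return 0;
}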
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h
+index 8effef9b13dd..b489dd9c8401 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h
++++ b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h
+@@ -831,7 +831,7 @@ void rtl92ee_rx_check_dma_ok(struct ieee80211_hw *hw, u8 *header_desc,
+ 			     u8 queue_index);
+ u16	rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw,
+ 					  u8 queue_index);
+-void rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 queue_index);
++u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 queue_index);
+ void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw,
+ 				 u8 *tx_bd_desc, u8 *desc, u8 queue_index,
+ 				 struct sk_buff *skb, dma_addr_t addr);
+diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
+index 6866dcf24340..27822fe34d9a 100644
+--- a/drivers/net/wireless/rtlwifi/wifi.h
++++ b/drivers/net/wireless/rtlwifi/wifi.h
+@@ -2161,6 +2161,7 @@ struct rtl_hal_ops {
+ 	void (*add_wowlan_pattern)(struct ieee80211_hw *hw,
+ 				   struct rtl_wow_pattern *rtl_pattern,
+ 				   u8 index);
++	u16 (*get_available_desc)(struct ieee80211_hw *hw, u8 q_idx);
+ };
+ 
+ struct rtl_intf_ops {
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index d8c10764f130..76ce69cc1382 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -1062,8 +1062,7 @@ err:
+ 
+ static int xennet_change_mtu(struct net_device *dev, int mtu)
+ {
+-	int max = xennet_can_sg(dev) ?
+-		XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN;
++	int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;
+ 
+ 	if (mtu > max)
+ 		return -EINVAL;
+@@ -1333,8 +1332,6 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
+ 	netdev->ethtool_ops = &xennet_ethtool_ops;
+ 	SET_NETDEV_DEV(netdev, &dev->dev);
+ 
+-	netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);
+-
+ 	np->netdev = netdev;
+ 
+ 	netif_carrier_off(netdev);
+diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
+index d02df7d0c629..57b7bc2e037a 100644
+--- a/drivers/staging/comedi/drivers/adv_pci1710.c
++++ b/drivers/staging/comedi/drivers/adv_pci1710.c
+@@ -455,7 +455,6 @@ static int pci171x_insn_read_ai(struct comedi_device *dev,
+ 				struct comedi_insn *insn, unsigned int *data)
+ {
+ 	struct pci1710_private *devpriv = dev->private;
+-	unsigned int chan = CR_CHAN(insn->chanspec);
+ 	int ret = 0;
+ 	int i;
+ 
+@@ -477,7 +476,7 @@ static int pci171x_insn_read_ai(struct comedi_device *dev,
+ 			break;
+ 
+ 		val = inw(dev->iobase + PCI171x_AD_DATA);
+-		ret = pci171x_ai_dropout(dev, s, chan, val);
++		ret = pci171x_ai_dropout(dev, s, 0, val);
+ 		if (ret)
+ 			break;
+ 
+diff --git a/fs/exec.c b/fs/exec.c
+index ad8798e26be9..4617a4ec52e3 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1259,6 +1259,53 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
+ 	spin_unlock(&p->fs->lock);
+ }
+ 
++static void bprm_fill_uid(struct linux_binprm *bprm)
++{
++	struct inode *inode;
++	unsigned int mode;
++	kuid_t uid;
++	kgid_t gid;
++
++	/* clear any previous set[ug]id data from a previous binary */
++	bprm->cred->euid = current_euid();
++	bprm->cred->egid = current_egid();
++
++	if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
++		return;
++
++	if (task_no_new_privs(current))
++		return;
++
++	inode = file_inode(bprm->file);
++	mode = READ_ONCE(inode->i_mode);
++	if (!(mode & (S_ISUID|S_ISGID)))
++		return;
++
++	/* Be careful if suid/sgid is set */
++	mutex_lock(&inode->i_mutex);
++
++	/* reload atomically mode/uid/gid now that lock held */
++	mode = inode->i_mode;
++	uid = inode->i_uid;
++	gid = inode->i_gid;
++	mutex_unlock(&inode->i_mutex);
++
++	/* We ignore suid/sgid if there are no mappings for them in the ns */
++	if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
++		 !kgid_has_mapping(bprm->cred->user_ns, gid))
++		return;
++
++	if (mode & S_ISUID) {
++		bprm->per_clear |= PER_CLEAR_ON_SETID;
++		bprm->cred->euid = uid;
++	}
++
++	if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
++		bprm->per_clear |= PER_CLEAR_ON_SETID;
++		bprm->cred->egid = gid;
++	}
++}
++
+ /*
+  * Fill the binprm structure from the inode.
+  * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
+@@ -1267,36 +1314,9 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
+  */
+ int prepare_binprm(struct linux_binprm *bprm)
+ {
+-	struct inode *inode = file_inode(bprm->file);
+-	umode_t mode = inode->i_mode;
+ 	int retval;
+ 
+-
+-	/* clear any previous set[ug]id data from a previous binary */
+-	bprm->cred->euid = current_euid();
+-	bprm->cred->egid = current_egid();
+-
+-	if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) &&
+-	    !task_no_new_privs(current) &&
+-	    kuid_has_mapping(bprm->cred->user_ns, inode->i_uid) &&
+-	    kgid_has_mapping(bprm->cred->user_ns, inode->i_gid)) {
+-		/* Set-uid? */
+-		if (mode & S_ISUID) {
+-			bprm->per_clear |= PER_CLEAR_ON_SETID;
+-			bprm->cred->euid = inode->i_uid;
+-		}
+-
+-		/* Set-gid? */
+-		/*
+-		 * If setgid is set but no group execute bit then this
+-		 * is a candidate for mandatory locking, not a setgid
+-		 * executable.
+-		 */
+-		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
+-			bprm->per_clear |= PER_CLEAR_ON_SETID;
+-			bprm->cred->egid = inode->i_gid;
+-		}
+-	}
++	bprm_fill_uid(bprm);
+ 
+ 	/* fill in binprm security blob */
+ 	retval = security_bprm_set_creds(bprm);
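The bprm_fill_uid() rewrite above closes a race as well as tidying the code: the mode is sampled once without the lock to reject the common non-set-id case cheaply, then mode, uid and gid are reloaded together under i_mutex so a concurrent chmod/chown cannot slip in between the check and the credential computation. A minimal sketch of that check-then-reload pattern, with assumed names and a pthread mutex standing in for the inode lock:

#include <pthread.h>

struct attrs {
	pthread_mutex_t lock;
	unsigned int mode, uid, gid;
};

/* Returns 1 and fills a mutually consistent (mode, uid, gid) triple
 * when a set-id bit is on; returns 0 on the unlocked fast path. */
static int snapshot_if_setid(struct attrs *a, unsigned int *mode,
			     unsigned int *uid, unsigned int *gid)
{
	/* unlocked fast path: most binaries carry no set-id bits */
	if (!(__atomic_load_n(&a->mode, __ATOMIC_RELAXED) & 06000))
		return 0;

	/* slow path: reload all three atomically under the lock */
	pthread_mutex_lock(&a->lock);
	*mode = a->mode;
	*uid  = a->uid;
	*gid  = a->gid;
	pthread_mutex_unlock(&a->lock);
	return 1;
}

int main(void)
{
	struct attrs a = { PTHREAD_MUTEX_INITIALIZER, 04755, 0, 0 };
	unsigned int m, u, g;

	return snapshot_if_setid(&a, &m, &u, &g) ? 0 : 1;
}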
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index 431b7fc605c9..e235ec5f1f28 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -99,9 +99,9 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
+ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
+ 			      int write);
+ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+-				pmd_t *pmd, int write);
++				pmd_t *pmd, int flags);
+ struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
+-				pud_t *pud, int write);
++				pud_t *pud, int flags);
+ int pmd_huge(pmd_t pmd);
+ int pud_huge(pud_t pmd);
+ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
+@@ -133,8 +133,8 @@ static inline void hugetlb_report_meminfo(struct seq_file *m)
+ static inline void hugetlb_show_meminfo(void)
+ {
+ }
+-#define follow_huge_pmd(mm, addr, pmd, write)	NULL
+-#define follow_huge_pud(mm, addr, pud, write)	NULL
++#define follow_huge_pmd(mm, addr, pmd, flags)	NULL
++#define follow_huge_pud(mm, addr, pud, flags)	NULL
+ #define prepare_hugepage_range(file, addr, len)	(-EINVAL)
+ #define pmd_huge(x)	0
+ #define pud_huge(x)	0
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 52fd8e8694cf..840fb7f7c3de 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -2159,6 +2159,12 @@ void netdev_freemem(struct net_device *dev);
+ void synchronize_net(void);
+ int init_dummy_netdev(struct net_device *dev);
+ 
++DECLARE_PER_CPU(int, xmit_recursion);
++static inline int dev_recursion_level(void)
++{
++	return this_cpu_read(xmit_recursion);
++}
++
+ struct net_device *dev_get_by_index(struct net *net, int ifindex);
+ struct net_device *__dev_get_by_index(struct net *net, int ifindex);
+ struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
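dev_recursion_level() above exposes the per-CPU xmit_recursion counter that net/core/dev.c (patched further down) now exports, letting tunnel code ask whether it is running inside a nested transmit. A standalone sketch of the idea, with thread-local storage standing in for per-CPU data and illustrative names:

#include <stdio.h>

static _Thread_local int xmit_recursion;

static int dev_recursion_level(void)
{
	return xmit_recursion;
}

static void xmit(int tunnels_left)
{
	xmit_recursion++;
	if (dev_recursion_level() > 1)
		printf("nested xmit, level %d: skip socket-based choices\n",
		       xmit_recursion);
	if (tunnels_left)
		xmit(tunnels_left - 1);	/* a tunnel re-enters the stack */
	xmit_recursion--;
}

int main(void)
{
	xmit(2);
	return 0;
}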
+diff --git a/include/linux/swapops.h b/include/linux/swapops.h
+index 6adfb7bfbf44..e288d5c016a7 100644
+--- a/include/linux/swapops.h
++++ b/include/linux/swapops.h
+@@ -137,6 +137,8 @@ static inline void make_migration_entry_read(swp_entry_t *entry)
+ 	*entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
+ }
+ 
++extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
++					spinlock_t *ptl);
+ extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
+ 					unsigned long address);
+ extern void migration_entry_wait_huge(struct vm_area_struct *vma,
+@@ -150,6 +152,8 @@ static inline int is_migration_entry(swp_entry_t swp)
+ }
+ #define migration_entry_to_page(swp) NULL
+ static inline void make_migration_entry_read(swp_entry_t *entryp) { }
++static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
++					spinlock_t *ptl) { }
+ static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
+ 					 unsigned long address) { }
+ static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
+diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
+index d9a4905e01d0..6e0ce8c7b8cb 100644
+--- a/include/linux/usb/usbnet.h
++++ b/include/linux/usb/usbnet.h
+@@ -227,9 +227,23 @@ struct skb_data {	/* skb->cb is one of these */
+ 	struct urb		*urb;
+ 	struct usbnet		*dev;
+ 	enum skb_state		state;
+-	size_t			length;
++	long			length;
++	unsigned long		packets;
+ };
+ 
++/* Drivers that set FLAG_MULTI_PACKET must call this in their
++ * tx_fixup method before returning an skb.
++ */
++static inline void
++usbnet_set_skb_tx_stats(struct sk_buff *skb,
++			unsigned long packets, long bytes_delta)
++{
++	struct skb_data *entry = (struct skb_data *) skb->cb;
++
++	entry->packets = packets;
++	entry->length = bytes_delta;
++}
++
+ extern int usbnet_open(struct net_device *net);
+ extern int usbnet_stop(struct net_device *net);
+ extern netdev_tx_t usbnet_start_xmit(struct sk_buff *skb,
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 09cf5aebb283..c0c26c3deeb5 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -453,22 +453,6 @@ static __inline__ void inet_reset_saddr(struct sock *sk)
+ 
+ #endif
+ 
+-static inline int sk_mc_loop(struct sock *sk)
+-{
+-	if (!sk)
+-		return 1;
+-	switch (sk->sk_family) {
+-	case AF_INET:
+-		return inet_sk(sk)->mc_loop;
+-#if IS_ENABLED(CONFIG_IPV6)
+-	case AF_INET6:
+-		return inet6_sk(sk)->mc_loop;
+-#endif
+-	}
+-	WARN_ON(1);
+-	return 1;
+-}
+-
+ bool ip_call_ra_chain(struct sk_buff *skb);
+ 
+ /*
+diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
+index 1d09b46c1e48..eda131d179d9 100644
+--- a/include/net/ip6_route.h
++++ b/include/net/ip6_route.h
+@@ -174,7 +174,8 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
+ 
+ static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
+ {
+-	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
++	struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
++				inet6_sk(skb->sk) : NULL;
+ 
+ 	return (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) ?
+ 	       skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 2210fec65669..45b54d3fcb04 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1812,6 +1812,8 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
+ 
+ struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
+ 
++bool sk_mc_loop(struct sock *sk);
++
+ static inline bool sk_can_gso(const struct sock *sk)
+ {
+ 	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index a28e09c7825d..36508e69e92a 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1380,7 +1380,8 @@ peek_stack:
+ 			/* tell verifier to check for equivalent states
+ 			 * after every call and jump
+ 			 */
+-			env->explored_states[t + 1] = STATE_LIST_MARK;
++			if (t + 1 < insn_cnt)
++				env->explored_states[t + 1] = STATE_LIST_MARK;
+ 		} else {
+ 			/* conditional jump with two edges */
+ 			ret = push_insn(t, t + 1, FALLTHROUGH, env);
+diff --git a/mm/gup.c b/mm/gup.c
+index 9b2afbfe67e3..e29c3745a893 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -167,10 +167,10 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
+ 	if (pud_none(*pud))
+ 		return no_page_table(vma, flags);
+ 	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
+-		if (flags & FOLL_GET)
+-			return NULL;
+-		page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
+-		return page;
++		page = follow_huge_pud(mm, address, pud, flags);
++		if (page)
++			return page;
++		return no_page_table(vma, flags);
+ 	}
+ 	if (unlikely(pud_bad(*pud)))
+ 		return no_page_table(vma, flags);
+@@ -179,19 +179,10 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
+ 	if (pmd_none(*pmd))
+ 		return no_page_table(vma, flags);
+ 	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
+-		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
+-		if (flags & FOLL_GET) {
+-			/*
+-			 * Refcount on tail pages are not well-defined and
+-			 * shouldn't be taken. The caller should handle a NULL
+-			 * return when trying to follow tail pages.
+-			 */
+-			if (PageHead(page))
+-				get_page(page);
+-			else
+-				page = NULL;
+-		}
+-		return page;
++		page = follow_huge_pmd(mm, address, pmd, flags);
++		if (page)
++			return page;
++		return no_page_table(vma, flags);
+ 	}
+ 	if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
+ 		return no_page_table(vma, flags);
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 267e41971100..a2bfd02e289f 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3700,44 +3700,64 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+ 	return (pte_t *) pmd;
+ }
+ 
+-struct page *
+-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+-		pmd_t *pmd, int write)
+-{
+-	struct page *page;
++#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
+ 
+-	if (!pmd_present(*pmd))
+-		return NULL;
+-	page = pte_page(*(pte_t *)pmd);
+-	if (page)
+-		page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
+-	return page;
++/*
++ * These functions are overwritable if your architecture needs its own
++ * behavior.
++ */
++struct page * __weak
++follow_huge_addr(struct mm_struct *mm, unsigned long address,
++			      int write)
++{
++	return ERR_PTR(-EINVAL);
+ }
+ 
+-struct page *
+-follow_huge_pud(struct mm_struct *mm, unsigned long address,
+-		pud_t *pud, int write)
++struct page * __weak
++follow_huge_pmd(struct mm_struct *mm, unsigned long address,
++		pmd_t *pmd, int flags)
+ {
+-	struct page *page;
+-
+-	page = pte_page(*(pte_t *)pud);
+-	if (page)
+-		page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
++	struct page *page = NULL;
++	spinlock_t *ptl;
++retry:
++	ptl = pmd_lockptr(mm, pmd);
++	spin_lock(ptl);
++	/*
++	 * make sure that the address range covered by this pmd is not
++	 * unmapped from other threads.
++	 */
++	if (!pmd_huge(*pmd))
++		goto out;
++	if (pmd_present(*pmd)) {
++		page = pte_page(*(pte_t *)pmd) +
++			((address & ~PMD_MASK) >> PAGE_SHIFT);
++		if (flags & FOLL_GET)
++			get_page(page);
++	} else {
++		if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
++			spin_unlock(ptl);
++			__migration_entry_wait(mm, (pte_t *)pmd, ptl);
++			goto retry;
++		}
++		/*
++		 * hwpoisoned entry is treated as no_page_table in
++		 * follow_page_mask().
++		 */
++	}
++out:
++	spin_unlock(ptl);
+ 	return page;
+ }
+ 
+-#else /* !CONFIG_ARCH_WANT_GENERAL_HUGETLB */
+-
+-/* Can be overriden by architectures */
+ struct page * __weak
+ follow_huge_pud(struct mm_struct *mm, unsigned long address,
+-	       pud_t *pud, int write)
++		pud_t *pud, int flags)
+ {
+-	BUG();
+-	return NULL;
+-}
++	if (flags & FOLL_GET)
++		return NULL;
+ 
+-#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
++	return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
++}
+ 
+ #ifdef CONFIG_MEMORY_FAILURE
+ 
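The new follow_huge_pmd() above is an instance of the lock, check, wait, retry pattern: when the entry is mid-migration the lock is dropped, the caller sleeps until the migration settles, and the lookup restarts instead of returning a stale page. A minimal sketch with assumed types, using a condition variable in place of the migration-entry wait:

#include <pthread.h>
#include <stdbool.h>

struct entry {
	pthread_mutex_t lock;
	pthread_cond_t  settled;
	bool            present;
	bool            migrating;
	void           *page;
};

static void *lookup(struct entry *e)
{
	void *page = NULL;

	pthread_mutex_lock(&e->lock);
	while (e->migrating) {
		/* drops the lock while sleeping, retakes it on wakeup;
		 * the loop then re-checks the state from scratch */
		pthread_cond_wait(&e->settled, &e->lock);
	}
	if (e->present)
		page = e->page;	/* stable under the lock */
	pthread_mutex_unlock(&e->lock);
	return page;
}

int main(void)
{
	struct entry e = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
			   true, false, (void *)&e };

	return lookup(&e) ? 0 : 1;
}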
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 344cdf692fc8..be6d1edcfcb7 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -229,7 +229,7 @@ static void remove_migration_ptes(struct page *old, struct page *new)
+  * get to the page and wait until migration is finished.
+  * When we return from this function the fault will be retried.
+  */
+-static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
++void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
+ 				spinlock_t *ptl)
+ {
+ 	pte_t pte;
+@@ -1268,7 +1268,8 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
+ 			goto put_and_set;
+ 
+ 		if (PageHuge(page)) {
+-			isolate_huge_page(page, &pagelist);
++			if (PageHead(page))
++				isolate_huge_page(page, &pagelist);
+ 			goto put_and_set;
+ 		}
+ 
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 4ff46f8054d4..5dd905ca2654 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2821,7 +2821,9 @@ static void skb_update_prio(struct sk_buff *skb)
+ #define skb_update_prio(skb)
+ #endif
+ 
+-static DEFINE_PER_CPU(int, xmit_recursion);
++DEFINE_PER_CPU(int, xmit_recursion);
++EXPORT_SYMBOL(xmit_recursion);
++
+ #define RECURSION_LIMIT 10
+ 
+ /**
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 62c67bebcaf5..39c444c1206d 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -4141,18 +4141,20 @@ EXPORT_SYMBOL(skb_try_coalesce);
+  */
+ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
+ {
+-	if (xnet)
+-		skb_orphan(skb);
+ 	skb->tstamp.tv64 = 0;
+ 	skb->pkt_type = PACKET_HOST;
+ 	skb->skb_iif = 0;
+ 	skb->ignore_df = 0;
+ 	skb_dst_drop(skb);
+-	skb->mark = 0;
+-	skb_init_secmark(skb);
+ 	secpath_reset(skb);
+ 	nf_reset(skb);
+ 	nf_reset_trace(skb);
++
++	if (!xnet)
++		return;
++
++	skb_orphan(skb);
++	skb->mark = 0;
+ }
+ EXPORT_SYMBOL_GPL(skb_scrub_packet);
+ 
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 1c7a33db1314..a91f99f26420 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -651,6 +651,25 @@ static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
+ 		sock_reset_flag(sk, bit);
+ }
+ 
++bool sk_mc_loop(struct sock *sk)
++{
++	if (dev_recursion_level())
++		return false;
++	if (!sk)
++		return true;
++	switch (sk->sk_family) {
++	case AF_INET:
++		return inet_sk(sk)->mc_loop;
++#if IS_ENABLED(CONFIG_IPV6)
++	case AF_INET6:
++		return inet6_sk(sk)->mc_loop;
++#endif
++	}
++	WARN_ON(1);
++	return true;
++}
++EXPORT_SYMBOL(sk_mc_loop);
++
+ /*
+  *	This is meant for all protocols to use and covers goings on
+  *	at the socket level. Everything here is generic.
+diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
+index 394a200f93c1..69711d81a88b 100644
+--- a/net/ipv4/geneve.c
++++ b/net/ipv4/geneve.c
+@@ -121,10 +121,6 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
+ 	int min_headroom;
+ 	int err;
+ 
+-	skb = udp_tunnel_handle_offloads(skb, !gs->sock->sk->sk_no_check_tx);
+-	if (IS_ERR(skb))
+-		return PTR_ERR(skb);
+-
+ 	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ 			+ GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
+ 			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
+@@ -139,6 +135,10 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
+ 	if (unlikely(!skb))
+ 		return -ENOMEM;
+ 
++	skb = udp_tunnel_handle_offloads(skb, !gs->sock->sk->sk_no_check_tx);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
+ 	gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
+ 	geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
+ 
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 075ab4d5af5e..08ccca6a8035 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3104,10 +3104,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
+ 			if (!first_ackt.v64)
+ 				first_ackt = last_ackt;
+ 
+-			if (!(sacked & TCPCB_SACKED_ACKED))
++			if (!(sacked & TCPCB_SACKED_ACKED)) {
+ 				reord = min(pkts_acked, reord);
+-			if (!after(scb->end_seq, tp->high_seq))
+-				flag |= FLAG_ORIG_SACK_ACKED;
++				if (!after(scb->end_seq, tp->high_seq))
++					flag |= FLAG_ORIG_SACK_ACKED;
++			}
+ 		}
+ 
+ 		if (sacked & TCPCB_SACKED_ACKED)
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index d22f54482bab..982347eee104 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1516,7 +1516,7 @@ void tcp_v4_early_demux(struct sk_buff *skb)
+ 		skb->sk = sk;
+ 		skb->destructor = sock_edemux;
+ 		if (sk->sk_state != TCP_TIME_WAIT) {
+-			struct dst_entry *dst = sk->sk_rx_dst;
++			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
+ 
+ 			if (dst)
+ 				dst = dst_check(dst, 0);
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 9790f396ce5e..9f29453049dc 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2931,6 +2931,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
+ 	}
+ #endif
+ 
++	/* Do not fool tcpdump (if any), clean our debris */
++	skb->tstamp.tv64 = 0;
+ 	return skb;
+ }
+ EXPORT_SYMBOL(tcp_make_synack);
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 3f5aa9959076..0bf56e562f76 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -541,7 +541,8 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
+ {
+ 	struct sk_buff *frag;
+ 	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
+-	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
++	struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
++				inet6_sk(skb->sk) : NULL;
+ 	struct ipv6hdr *tmp_hdr;
+ 	struct frag_hdr *fh;
+ 	unsigned int mtu, hlen, left, len;
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index 682866777d53..d375ce60463e 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -1216,7 +1216,14 @@ static void ndisc_router_discovery(struct sk_buff *skb)
+ 	if (rt)
+ 		rt6_set_expires(rt, jiffies + (HZ * lifetime));
+ 	if (ra_msg->icmph.icmp6_hop_limit) {
+-		in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
++		/* Only set hop_limit on the interface if it is higher than
++		 * the current hop_limit.
++		 */
++		if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) {
++			in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
++		} else {
++			ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n");
++		}
+ 		if (rt)
+ 			dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
+ 				       ra_msg->icmph.icmp6_hop_limit);
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 9c0b54e87b47..b89979312fbb 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1409,6 +1409,15 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
+ 	TCP_SKB_CB(skb)->sacked = 0;
+ }
+ 
++static void tcp_v6_restore_cb(struct sk_buff *skb)
++{
++	/* We need to move header back to the beginning if xfrm6_policy_check()
++	 * and tcp_v6_fill_cb() are going to be called again.
++	 */
++	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
++		sizeof(struct inet6_skb_parm));
++}
++
+ static int tcp_v6_rcv(struct sk_buff *skb)
+ {
+ 	const struct tcphdr *th;
+@@ -1541,6 +1550,7 @@ do_time_wait:
+ 			inet_twsk_deschedule(tw, &tcp_death_row);
+ 			inet_twsk_put(tw);
+ 			sk = sk2;
++			tcp_v6_restore_cb(skb);
+ 			goto process;
+ 		}
+ 		/* Fall through to ACK */
+@@ -1549,6 +1559,7 @@ do_time_wait:
+ 		tcp_v6_timewait_ack(sk, skb);
+ 		break;
+ 	case TCP_TW_RST:
++		tcp_v6_restore_cb(skb);
+ 		goto no_tcp_socket;
+ 	case TCP_TW_SUCCESS:
+ 		;
+@@ -1583,7 +1594,7 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
+ 		skb->sk = sk;
+ 		skb->destructor = sock_edemux;
+ 		if (sk->sk_state != TCP_TIME_WAIT) {
+-			struct dst_entry *dst = sk->sk_rx_dst;
++			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
+ 
+ 			if (dst)
+ 				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
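tcp_v6_restore_cb() above exists because skb->cb is a scratch area shared between layers: tcp_v6_fill_cb() parks the IPv6 per-packet data at a fixed offset inside it, and those bytes have to be moved back before any path that re-reads the IPv6 layout (the xfrm policy check, or a second pass through fill_cb). A standalone sketch of that overlay-and-restore, with made-up sizes:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char cb[48];

	memcpy(cb, "ipv6-parm", 10);	/* the IPv6 layer's view of cb */

	/* TCP takes over the cb: park the IPv6 data at a known offset */
	memmove(cb + 24, cb, 10);
	memset(cb, 0, 24);		/* TCP scribbles on the front */

	/* restore before re-running consumers of the IPv6 layout */
	memmove(cb, cb + 24, 10);
	printf("%s\n", cb);		/* prints: ipv6-parm */
	return 0;
}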
+diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
+index 2034c6d9cb5a..296cc246f0a3 100644
+--- a/net/openvswitch/vport.c
++++ b/net/openvswitch/vport.c
+@@ -274,10 +274,8 @@ void ovs_vport_del(struct vport *vport)
+ 	ASSERT_OVSL();
+ 
+ 	hlist_del_rcu(&vport->hash_node);
+-
+-	vport->ops->destroy(vport);
+-
+ 	module_put(vport->ops->owner);
++	vport->ops->destroy(vport);
+ }
+ 
+ /**



* [gentoo-commits] proj/linux-patches:3.19 commit in: /
@ 2015-05-07 15:11 Mike Pagano
  0 siblings, 0 replies; 19+ messages in thread
From: Mike Pagano @ 2015-05-07 15:11 UTC (permalink / raw
  To: gentoo-commits

commit:     69b8ee16143c8d5a5a7497e0e238eb0cf1e1dfa4
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu May  7 15:11:42 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu May  7 15:11:42 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=69b8ee16

Linux patch 3.19.7

---
 0000_README             |    4 +
 1006_linux-3.19.7.patch | 6301 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6305 insertions(+)

diff --git a/0000_README b/0000_README
index 25bc90f..7728485 100644
--- a/0000_README
+++ b/0000_README
@@ -67,6 +67,10 @@ Patch:  1005_linux-3.19.6.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.19.6
 
+Patch:  1006_linux-3.19.7.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.19.7
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1006_linux-3.19.7.patch b/1006_linux-3.19.7.patch
new file mode 100644
index 0000000..deffed6
--- /dev/null
+++ b/1006_linux-3.19.7.patch
@@ -0,0 +1,6301 @@
+diff --git a/Documentation/virtual/kvm/devices/s390_flic.txt b/Documentation/virtual/kvm/devices/s390_flic.txt
+index 4ceef53164b0..d1ad9d5cae46 100644
+--- a/Documentation/virtual/kvm/devices/s390_flic.txt
++++ b/Documentation/virtual/kvm/devices/s390_flic.txt
+@@ -27,6 +27,9 @@ Groups:
+     Copies all floating interrupts into a buffer provided by userspace.
+     When the buffer is too small it returns -ENOMEM, which is the indication
+     for userspace to try again with a bigger buffer.
++    -ENOBUFS is returned when the allocation of a kernelspace buffer has
++    failed.
++    -EFAULT is returned when copying data to userspace failed.
+     All interrupts remain pending, i.e. are not deleted from the list of
+     currently pending interrupts.
+     attr->addr contains the userspace address of the buffer into which all
+diff --git a/Makefile b/Makefile
+index 65c7c8756803..69952c1404b2 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 19
+-SUBLEVEL = 6
++SUBLEVEL = 7
+ EXTRAVERSION =
+ NAME = Diseased Newt
+ 
+diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+index fec1fca2ad66..6c4bc53cbf4e 100644
+--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
++++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+@@ -167,7 +167,13 @@
+ 
+ 			macb1: ethernet@f802c000 {
+ 				phy-mode = "rmii";
++				#address-cells = <1>;
++				#size-cells = <0>;
+ 				status = "okay";
++
++				ethernet-phy@1 {
++					reg = <0x1>;
++				};
+ 			};
+ 
+ 			dbgu: serial@ffffee00 {
+diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi
+index a5441d5482a6..3cc8b8320345 100644
+--- a/arch/arm/boot/dts/dove.dtsi
++++ b/arch/arm/boot/dts/dove.dtsi
+@@ -154,7 +154,7 @@
+ 
+ 			uart2: serial@12200 {
+ 				compatible = "ns16550a";
+-				reg = <0x12000 0x100>;
++				reg = <0x12200 0x100>;
+ 				reg-shift = <2>;
+ 				interrupts = <9>;
+ 				clocks = <&core_clk 0>;
+@@ -163,7 +163,7 @@
+ 
+ 			uart3: serial@12300 {
+ 				compatible = "ns16550a";
+-				reg = <0x12100 0x100>;
++				reg = <0x12300 0x100>;
+ 				reg-shift = <2>;
+ 				interrupts = <10>;
+ 				clocks = <&core_clk 0>;
+diff --git a/arch/arm/boot/dts/exynos5250-spring.dts b/arch/arm/boot/dts/exynos5250-spring.dts
+index f02775487cd4..c41600e587e0 100644
+--- a/arch/arm/boot/dts/exynos5250-spring.dts
++++ b/arch/arm/boot/dts/exynos5250-spring.dts
+@@ -429,7 +429,6 @@
+ &mmc_0 {
+ 	status = "okay";
+ 	num-slots = <1>;
+-	supports-highspeed;
+ 	broken-cd;
+ 	card-detect-delay = <200>;
+ 	samsung,dw-mshc-ciu-div = <3>;
+@@ -437,11 +436,8 @@
+ 	samsung,dw-mshc-ddr-timing = <1 2>;
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_cd &sd0_bus4 &sd0_bus8>;
+-
+-	slot@0 {
+-		reg = <0>;
+-		bus-width = <8>;
+-	};
++	bus-width = <8>;
++	cap-mmc-highspeed;
+ };
+ 
+ /*
+@@ -451,7 +447,6 @@
+ &mmc_1 {
+ 	status = "okay";
+ 	num-slots = <1>;
+-	supports-highspeed;
+ 	broken-cd;
+ 	card-detect-delay = <200>;
+ 	samsung,dw-mshc-ciu-div = <3>;
+@@ -459,11 +454,8 @@
+ 	samsung,dw-mshc-ddr-timing = <1 2>;
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&sd1_clk &sd1_cmd &sd1_cd &sd1_bus4>;
+-
+-	slot@0 {
+-		reg = <0>;
+-		bus-width = <4>;
+-	};
++	bus-width = <4>;
++	cap-sd-highspeed;
+ };
+ 
+ &pinctrl_0 {
+diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
+index afb9cafd3786..674d03f4ba15 100644
+--- a/arch/arm/include/asm/elf.h
++++ b/arch/arm/include/asm/elf.h
+@@ -115,7 +115,7 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
+    the loader.  We need to make sure that it is out of the way of the program
+    that it will "exec", and that there is sufficient room for the brk.  */
+ 
+-#define ELF_ET_DYN_BASE	(2 * TASK_SIZE / 3)
++#define ELF_ET_DYN_BASE	(TASK_SIZE / 3 * 2)
+ 
+ /* When the program starts, a1 contains a pointer to a function to be 
+    registered with atexit, as per the SVR4 ABI.  A value of 0 means we 
+diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
+index 1bca8f8af442..5de64c0e477d 100644
+--- a/arch/arm/include/asm/kvm_mmu.h
++++ b/arch/arm/include/asm/kvm_mmu.h
+@@ -128,29 +128,28 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
+ 	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
+ })
+ 
++#define kvm_pgd_index(addr)			pgd_index(addr)
++
+ static inline bool kvm_page_empty(void *ptr)
+ {
+ 	struct page *ptr_page = virt_to_page(ptr);
+ 	return page_count(ptr_page) == 1;
+ }
+ 
+-
+ #define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
+ #define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
+ #define kvm_pud_table_empty(kvm, pudp) (0)
+ 
+ #define KVM_PREALLOC_LEVEL	0
+ 
+-static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd)
++static inline void *kvm_get_hwpgd(struct kvm *kvm)
+ {
+-	return 0;
++	return kvm->arch.pgd;
+ }
+ 
+-static inline void kvm_free_hwpgd(struct kvm *kvm) { }
+-
+-static inline void *kvm_get_hwpgd(struct kvm *kvm)
++static inline unsigned int kvm_get_hwpgd_size(void)
+ {
+-	return kvm->arch.pgd;
++	return PTRS_PER_S2_PGD * sizeof(pgd_t);
+ }
+ 
+ struct kvm;
+@@ -186,7 +185,7 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+ 
+ 	bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
+ 
+-	VM_BUG_ON(size & PAGE_MASK);
++	VM_BUG_ON(size & ~PAGE_MASK);
+ 
+ 	if (!need_flush && !icache_is_pipt())
+ 		goto vipt_cache;
+diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
+index 09ee408c1a67..b404cf886029 100644
+--- a/arch/arm/include/uapi/asm/kvm.h
++++ b/arch/arm/include/uapi/asm/kvm.h
+@@ -193,8 +193,14 @@ struct kvm_arch_memory_slot {
+ #define KVM_ARM_IRQ_CPU_IRQ		0
+ #define KVM_ARM_IRQ_CPU_FIQ		1
+ 
+-/* Highest supported SPI, from VGIC_NR_IRQS */
++/*
++ * This used to hold the highest supported SPI, but it is now obsolete
++ * and only here to provide source code level compatibility with older
++ * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS.
++ */
++#ifndef __KERNEL__
+ #define KVM_ARM_IRQ_GIC_MAX		127
++#endif
+ 
+ /* PSCI interface */
+ #define KVM_PSCI_FN_BASE		0x95c1ba5e
+diff --git a/arch/arm/kernel/hibernate.c b/arch/arm/kernel/hibernate.c
+index c4cc50e58c13..cfb354ff2a60 100644
+--- a/arch/arm/kernel/hibernate.c
++++ b/arch/arm/kernel/hibernate.c
+@@ -22,6 +22,7 @@
+ #include <asm/suspend.h>
+ #include <asm/memory.h>
+ #include <asm/sections.h>
++#include "reboot.h"
+ 
+ int pfn_is_nosave(unsigned long pfn)
+ {
+@@ -61,7 +62,7 @@ static int notrace arch_save_image(unsigned long unused)
+ 
+ 	ret = swsusp_save();
+ 	if (ret == 0)
+-		soft_restart(virt_to_phys(cpu_resume));
++		_soft_restart(virt_to_phys(cpu_resume), false);
+ 	return ret;
+ }
+ 
+@@ -86,7 +87,7 @@ static void notrace arch_restore_image(void *unused)
+ 	for (pbe = restore_pblist; pbe; pbe = pbe->next)
+ 		copy_page(pbe->orig_address, pbe->address);
+ 
+-	soft_restart(virt_to_phys(cpu_resume));
++	_soft_restart(virt_to_phys(cpu_resume), false);
+ }
+ 
+ static u64 resume_stack[PAGE_SIZE/2/sizeof(u64)] __nosavedata;
+diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
+index fdfa3a78ec8c..2bf1a162defb 100644
+--- a/arch/arm/kernel/process.c
++++ b/arch/arm/kernel/process.c
+@@ -41,6 +41,7 @@
+ #include <asm/system_misc.h>
+ #include <asm/mach/time.h>
+ #include <asm/tls.h>
++#include "reboot.h"
+ 
+ #ifdef CONFIG_CC_STACKPROTECTOR
+ #include <linux/stackprotector.h>
+@@ -95,7 +96,7 @@ static void __soft_restart(void *addr)
+ 	BUG();
+ }
+ 
+-void soft_restart(unsigned long addr)
++void _soft_restart(unsigned long addr, bool disable_l2)
+ {
+ 	u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);
+ 
+@@ -104,7 +105,7 @@ void soft_restart(unsigned long addr)
+ 	local_fiq_disable();
+ 
+ 	/* Disable the L2 if we're the last man standing. */
+-	if (num_online_cpus() == 1)
++	if (disable_l2)
+ 		outer_disable();
+ 
+ 	/* Change to the new stack and continue with the reset. */
+@@ -114,6 +115,11 @@ void soft_restart(unsigned long addr)
+ 	BUG();
+ }
+ 
++void soft_restart(unsigned long addr)
++{
++	_soft_restart(addr, num_online_cpus() == 1);
++}
++
+ /*
+  * Function pointers to optional machine specific functions
+  */
+diff --git a/arch/arm/kernel/reboot.h b/arch/arm/kernel/reboot.h
+new file mode 100644
+index 000000000000..c87f05816d6b
+--- /dev/null
++++ b/arch/arm/kernel/reboot.h
+@@ -0,0 +1,6 @@
++#ifndef REBOOT_H
++#define REBOOT_H
++
++extern void _soft_restart(unsigned long addr, bool disable_l2);
++
++#endif
+diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
+index 0b0d58a905c4..3ec96878e1c3 100644
+--- a/arch/arm/kvm/arm.c
++++ b/arch/arm/kvm/arm.c
+@@ -644,8 +644,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
+ 		if (!irqchip_in_kernel(kvm))
+ 			return -ENXIO;
+ 
+-		if (irq_num < VGIC_NR_PRIVATE_IRQS ||
+-		    irq_num > KVM_ARM_IRQ_GIC_MAX)
++		if (irq_num < VGIC_NR_PRIVATE_IRQS)
+ 			return -EINVAL;
+ 
+ 		return kvm_vgic_inject_irq(kvm, 0, irq_num, level);
+diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
+index 136662547ca6..9ec6dfee6a9c 100644
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -251,7 +251,7 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
+ 	phys_addr_t addr = start, end = start + size;
+ 	phys_addr_t next;
+ 
+-	pgd = pgdp + pgd_index(addr);
++	pgd = pgdp + kvm_pgd_index(addr);
+ 	do {
+ 		next = kvm_pgd_addr_end(addr, end);
+ 		if (!pgd_none(*pgd))
+@@ -316,7 +316,7 @@ static void stage2_flush_memslot(struct kvm *kvm,
+ 	phys_addr_t next;
+ 	pgd_t *pgd;
+ 
+-	pgd = kvm->arch.pgd + pgd_index(addr);
++	pgd = kvm->arch.pgd + kvm_pgd_index(addr);
+ 	do {
+ 		next = kvm_pgd_addr_end(addr, end);
+ 		stage2_flush_puds(kvm, pgd, addr, next);
+@@ -593,6 +593,20 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
+ 				     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
+ }
+ 
++/* Free the HW pgd, one page at a time */
++static void kvm_free_hwpgd(void *hwpgd)
++{
++	free_pages_exact(hwpgd, kvm_get_hwpgd_size());
++}
++
++/* Allocate the HW PGD, making sure that each page gets its own refcount */
++static void *kvm_alloc_hwpgd(void)
++{
++	unsigned int size = kvm_get_hwpgd_size();
++
++	return alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
++}
++
+ /**
+  * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
+  * @kvm:	The KVM struct pointer for the VM.
+@@ -606,15 +620,31 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
+  */
+ int kvm_alloc_stage2_pgd(struct kvm *kvm)
+ {
+-	int ret;
+ 	pgd_t *pgd;
++	void *hwpgd;
+ 
+ 	if (kvm->arch.pgd != NULL) {
+ 		kvm_err("kvm_arch already initialized?\n");
+ 		return -EINVAL;
+ 	}
+ 
++	hwpgd = kvm_alloc_hwpgd();
++	if (!hwpgd)
++		return -ENOMEM;
++
++	/* When the kernel uses more levels of page tables than the
++	 * guest, we allocate a fake PGD and pre-populate it to point
++	 * to the next-level page table, which will be the real
++	 * initial page table pointed to by the VTTBR.
++	 *
++	 * When KVM_PREALLOC_LEVEL==2, we allocate a single page for
++	 * the PMD and the kernel will use folded pud.
++	 * When KVM_PREALLOC_LEVEL==1, we allocate 2 consecutive PUD
++	 * pages.
++	 */
+ 	if (KVM_PREALLOC_LEVEL > 0) {
++		int i;
++
+ 		/*
+ 		 * Allocate fake pgd for the page table manipulation macros to
+ 		 * work.  This is not used by the hardware and we have no
+@@ -622,30 +652,32 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
+ 		 */
+ 		pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
+ 				       GFP_KERNEL | __GFP_ZERO);
++
++		if (!pgd) {
++			kvm_free_hwpgd(hwpgd);
++			return -ENOMEM;
++		}
++
++		/* Plug the HW PGD into the fake one. */
++		for (i = 0; i < PTRS_PER_S2_PGD; i++) {
++			if (KVM_PREALLOC_LEVEL == 1)
++				pgd_populate(NULL, pgd + i,
++					     (pud_t *)hwpgd + i * PTRS_PER_PUD);
++			else if (KVM_PREALLOC_LEVEL == 2)
++				pud_populate(NULL, pud_offset(pgd, 0) + i,
++					     (pmd_t *)hwpgd + i * PTRS_PER_PMD);
++		}
+ 	} else {
+ 		/*
+ 		 * Allocate actual first-level Stage-2 page table used by the
+ 		 * hardware for Stage-2 page table walks.
+ 		 */
+-		pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER);
++		pgd = (pgd_t *)hwpgd;
+ 	}
+ 
+-	if (!pgd)
+-		return -ENOMEM;
+-
+-	ret = kvm_prealloc_hwpgd(kvm, pgd);
+-	if (ret)
+-		goto out_err;
+-
+ 	kvm_clean_pgd(pgd);
+ 	kvm->arch.pgd = pgd;
+ 	return 0;
+-out_err:
+-	if (KVM_PREALLOC_LEVEL > 0)
+-		kfree(pgd);
+-	else
+-		free_pages((unsigned long)pgd, S2_PGD_ORDER);
+-	return ret;
+ }
+ 
+ /**
+@@ -746,11 +778,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
+ 		return;
+ 
+ 	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+-	kvm_free_hwpgd(kvm);
++	kvm_free_hwpgd(kvm_get_hwpgd(kvm));
+ 	if (KVM_PREALLOC_LEVEL > 0)
+ 		kfree(kvm->arch.pgd);
+-	else
+-		free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
++
+ 	kvm->arch.pgd = NULL;
+ }
+ 
+@@ -760,7 +791,7 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
+ 	pgd_t *pgd;
+ 	pud_t *pud;
+ 
+-	pgd = kvm->arch.pgd + pgd_index(addr);
++	pgd = kvm->arch.pgd + kvm_pgd_index(addr);
+ 	if (WARN_ON(pgd_none(*pgd))) {
+ 		if (!cache)
+ 			return NULL;
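The fake-PGD comment in kvm_alloc_stage2_pgd() above describes plugging the real hardware table into a software-only top level so that generic page-table walkers see the number of levels they expect. A toy sketch of prepopulating one such indirection level (illustrative only, far simpler than the real stage-2 code):

#include <stdlib.h>

#define SLOTS 16

int main(void)
{
	/* the "hardware" table: the one actually handed to the MMU */
	long *hw = calloc(SLOTS * SLOTS, sizeof(long));
	/* the "fake" top level: gives walkers one extra level */
	long **fake = malloc(SLOTS * sizeof(long *));

	if (!hw || !fake)
		return 1;
	for (int i = 0; i < SLOTS; i++)
		fake[i] = hw + i * SLOTS;	/* plug hw into the fake pgd */

	fake[3][7] = 42;	/* a two-level walk lands in the hw table */

	free(fake);
	free(hw);
	return 0;
}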
+diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
+index d8ab605a44fa..e8ba3c692527 100644
+--- a/arch/arm/mach-mvebu/pmsu.c
++++ b/arch/arm/mach-mvebu/pmsu.c
+@@ -415,6 +415,9 @@ static __init int armada_38x_cpuidle_init(void)
+ 	void __iomem *mpsoc_base;
+ 	u32 reg;
+ 
++	pr_warn("CPU idle is currently broken on Armada 38x: disabling");
++	return 0;
++
+ 	np = of_find_compatible_node(NULL, NULL,
+ 				     "marvell,armada-380-coherency-fabric");
+ 	if (!np)
+@@ -476,6 +479,16 @@ static int __init mvebu_v7_cpu_pm_init(void)
+ 		return 0;
+ 	of_node_put(np);
+ 
++	/*
++	 * Currently the CPU idle support for Armada 38x is broken, as
++	 * the CPU hotplug uses some of the CPU idle functions it is
++	 * broken too, so let's disable it
++	 */
++	if (of_machine_is_compatible("marvell,armada380")) {
++		cpu_hotplug_disable();
++		pr_warn("CPU hotplug support is currently broken on Armada 38x: disabling");
++	}
++
+ 	if (of_machine_is_compatible("marvell,armadaxp"))
+ 		ret = armada_xp_cpuidle_init();
+ 	else if (of_machine_is_compatible("marvell,armada370"))
+@@ -489,7 +502,8 @@ static int __init mvebu_v7_cpu_pm_init(void)
+ 		return ret;
+ 
+ 	mvebu_v7_pmsu_enable_l2_powerdown_onidle();
+-	platform_device_register(&mvebu_v7_cpuidle_device);
++	if (mvebu_v7_cpuidle_device.name)
++		platform_device_register(&mvebu_v7_cpuidle_device);
+ 	cpu_pm_register_notifier(&mvebu_v7_cpu_pm_notifier);
+ 
+ 	return 0;
+diff --git a/arch/arm/mach-s3c64xx/crag6410.h b/arch/arm/mach-s3c64xx/crag6410.h
+index 7bc66682687e..dcbe17f5e5f8 100644
+--- a/arch/arm/mach-s3c64xx/crag6410.h
++++ b/arch/arm/mach-s3c64xx/crag6410.h
+@@ -14,6 +14,7 @@
+ #include <mach/gpio-samsung.h>
+ 
+ #define GLENFARCLAS_PMIC_IRQ_BASE	IRQ_BOARD_START
++#define BANFF_PMIC_IRQ_BASE		(IRQ_BOARD_START + 64)
+ 
+ #define PCA935X_GPIO_BASE		GPIO_BOARD_START
+ #define CODEC_GPIO_BASE			(GPIO_BOARD_START + 8)
+diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
+index 10b913baab28..65c426bc45f7 100644
+--- a/arch/arm/mach-s3c64xx/mach-crag6410.c
++++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
+@@ -554,6 +554,7 @@ static struct wm831x_touch_pdata touch_pdata = {
+ 
+ static struct wm831x_pdata crag_pmic_pdata = {
+ 	.wm831x_num = 1,
++	.irq_base = BANFF_PMIC_IRQ_BASE,
+ 	.gpio_base = BANFF_PMIC_GPIO_BASE,
+ 	.soft_shutdown = true,
+ 
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index b1f9a20a3677..840c232dd456 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -306,6 +306,27 @@ config ARM64_ERRATUM_832075
+ 
+ 	  If unsure, say Y.
+ 
++config ARM64_ERRATUM_845719
++	bool "Cortex-A53: 845719: a load might read incorrect data"
++	depends on COMPAT
++	default y
++	help
++	  This option adds an alternative code sequence to work around ARM
++	  erratum 845719 on Cortex-A53 parts up to r0p4.
++
++	  When running a compat (AArch32) userspace on an affected Cortex-A53
++	  part, a load at EL0 from a virtual address that matches the bottom 32
++	  bits of the virtual address used by a recent load at (AArch64) EL1
++	  might return incorrect data.
++
++	  The workaround is to write the contextidr_el1 register on exception
++	  return to a 32-bit task.
++	  Please note that this does not necessarily enable the workaround,
++	  as it depends on the alternative framework, which will only patch
++	  the kernel if an affected CPU is detected.
++
++	  If unsure, say Y.
++
+ endmenu
+ 
+ 
+@@ -416,6 +437,10 @@ config HOTPLUG_CPU
+ 
+ source kernel/Kconfig.preempt
+ 
++config UP_LATE_INIT
++       def_bool y
++       depends on !SMP
++
+ config HZ
+ 	int
+ 	default 100
+diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
+index 07547ccc1f2b..5fe4befda1a5 100644
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -23,8 +23,9 @@
+ 
+ #define ARM64_WORKAROUND_CLEAN_CACHE		0
+ #define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE	1
++#define ARM64_WORKAROUND_845719			2
+ 
+-#define ARM64_NCAPS				2
++#define ARM64_NCAPS				3
+ 
+ #ifndef __ASSEMBLY__
+ 
+diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
+index adcf49547301..df150ae862f2 100644
+--- a/arch/arm64/include/asm/kvm_mmu.h
++++ b/arch/arm64/include/asm/kvm_mmu.h
+@@ -137,6 +137,8 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
+ #define PTRS_PER_S2_PGD		(1 << PTRS_PER_S2_PGD_SHIFT)
+ #define S2_PGD_ORDER		get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
+ 
++#define kvm_pgd_index(addr)	(((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
++
+ /*
+  * If we are concatenating first level stage-2 page tables, we would have less
+  * than or equal to 16 pointers in the fake PGD, because that's what the
+@@ -150,43 +152,6 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
+ #define KVM_PREALLOC_LEVEL	(0)
+ #endif
+ 
+-/**
+- * kvm_prealloc_hwpgd - allocate inital table for VTTBR
+- * @kvm:	The KVM struct pointer for the VM.
+- * @pgd:	The kernel pseudo pgd
+- *
+- * When the kernel uses more levels of page tables than the guest, we allocate
+- * a fake PGD and pre-populate it to point to the next-level page table, which
+- * will be the real initial page table pointed to by the VTTBR.
+- *
+- * When KVM_PREALLOC_LEVEL==2, we allocate a single page for the PMD and
+- * the kernel will use folded pud.  When KVM_PREALLOC_LEVEL==1, we
+- * allocate 2 consecutive PUD pages.
+- */
+-static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd)
+-{
+-	unsigned int i;
+-	unsigned long hwpgd;
+-
+-	if (KVM_PREALLOC_LEVEL == 0)
+-		return 0;
+-
+-	hwpgd = __get_free_pages(GFP_KERNEL | __GFP_ZERO, PTRS_PER_S2_PGD_SHIFT);
+-	if (!hwpgd)
+-		return -ENOMEM;
+-
+-	for (i = 0; i < PTRS_PER_S2_PGD; i++) {
+-		if (KVM_PREALLOC_LEVEL == 1)
+-			pgd_populate(NULL, pgd + i,
+-				     (pud_t *)hwpgd + i * PTRS_PER_PUD);
+-		else if (KVM_PREALLOC_LEVEL == 2)
+-			pud_populate(NULL, pud_offset(pgd, 0) + i,
+-				     (pmd_t *)hwpgd + i * PTRS_PER_PMD);
+-	}
+-
+-	return 0;
+-}
+-
+ static inline void *kvm_get_hwpgd(struct kvm *kvm)
+ {
+ 	pgd_t *pgd = kvm->arch.pgd;
+@@ -203,12 +168,11 @@ static inline void *kvm_get_hwpgd(struct kvm *kvm)
+ 	return pmd_offset(pud, 0);
+ }
+ 
+-static inline void kvm_free_hwpgd(struct kvm *kvm)
++static inline unsigned int kvm_get_hwpgd_size(void)
+ {
+-	if (KVM_PREALLOC_LEVEL > 0) {
+-		unsigned long hwpgd = (unsigned long)kvm_get_hwpgd(kvm);
+-		free_pages(hwpgd, PTRS_PER_S2_PGD_SHIFT);
+-	}
++	if (KVM_PREALLOC_LEVEL > 0)
++		return PTRS_PER_S2_PGD * PAGE_SIZE;
++	return PTRS_PER_S2_PGD * sizeof(pgd_t);
+ }
+ 
+ static inline bool kvm_page_empty(void *ptr)
+diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h
+index 59e282311b58..8dcd61e32176 100644
+--- a/arch/arm64/include/asm/smp_plat.h
++++ b/arch/arm64/include/asm/smp_plat.h
+@@ -40,4 +40,6 @@ static inline u32 mpidr_hash_size(void)
+ extern u64 __cpu_logical_map[NR_CPUS];
+ #define cpu_logical_map(cpu)    __cpu_logical_map[cpu]
+ 
++void __init do_post_cpus_up_work(void);
++
+ #endif /* __ASM_SMP_PLAT_H */
+diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
+index 8e38878c87c6..d9e9822efcee 100644
+--- a/arch/arm64/include/uapi/asm/kvm.h
++++ b/arch/arm64/include/uapi/asm/kvm.h
+@@ -179,8 +179,14 @@ struct kvm_arch_memory_slot {
+ #define KVM_ARM_IRQ_CPU_IRQ		0
+ #define KVM_ARM_IRQ_CPU_FIQ		1
+ 
+-/* Highest supported SPI, from VGIC_NR_IRQS */
++/*
++ * This used to hold the highest supported SPI, but it is now obsolete
++ * and only here to provide source code level compatibility with older
++ * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS.
++ */
++#ifndef __KERNEL__
+ #define KVM_ARM_IRQ_GIC_MAX		127
++#endif
+ 
+ /* PSCI interface */
+ #define KVM_PSCI_FN_BASE		0x95c1ba5e
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index fa62637e63a8..ad6d52392bb1 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -88,7 +88,16 @@ struct arm64_cpu_capabilities arm64_errata[] = {
+ 	/* Cortex-A57 r0p0 - r1p2 */
+ 		.desc = "ARM erratum 832075",
+ 		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
+-		MIDR_RANGE(MIDR_CORTEX_A57, 0x00, 0x12),
++		MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
++			   (1 << MIDR_VARIANT_SHIFT) | 2),
++	},
++#endif
++#ifdef CONFIG_ARM64_ERRATUM_845719
++	{
++	/* Cortex-A53 r0p[01234] */
++		.desc = "ARM erratum 845719",
++		.capability = ARM64_WORKAROUND_845719,
++		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
+ 	},
+ #endif
+ 	{
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index fd4fa374e5d2..9b870a2815e6 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -21,8 +21,10 @@
+ #include <linux/init.h>
+ #include <linux/linkage.h>
+ 
++#include <asm/alternative-asm.h>
+ #include <asm/assembler.h>
+ #include <asm/asm-offsets.h>
++#include <asm/cpufeature.h>
+ #include <asm/errno.h>
+ #include <asm/esr.h>
+ #include <asm/thread_info.h>
+@@ -120,6 +122,24 @@
+ 	ct_user_enter
+ 	ldr	x23, [sp, #S_SP]		// load return stack pointer
+ 	msr	sp_el0, x23
++
++#ifdef CONFIG_ARM64_ERRATUM_845719
++	alternative_insn						\
++	"nop",								\
++	"tbz x22, #4, 1f",						\
++	ARM64_WORKAROUND_845719
++#ifdef CONFIG_PID_IN_CONTEXTIDR
++	alternative_insn						\
++	"nop; nop",							\
++	"mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:",		\
++	ARM64_WORKAROUND_845719
++#else
++	alternative_insn						\
++	"nop",								\
++	"msr contextidr_el1, xzr; 1:",					\
++	ARM64_WORKAROUND_845719
++#endif
++#endif
+ 	.endif
+ 	msr	elr_el1, x21			// set up the return data
+ 	msr	spsr_el1, x22
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index 8ce88e08c030..98af7da1b8b5 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -426,6 +426,7 @@ __create_page_tables:
+ 	 */
+ 	mov	x0, x25
+ 	add	x1, x26, #SWAPPER_DIR_SIZE
++	dmb	sy
+ 	bl	__inval_cache_range
+ 
+ 	mov	lr, x27
+diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
+index 20fe2932ad0c..79fedd8dbdd7 100644
+--- a/arch/arm64/kernel/setup.c
++++ b/arch/arm64/kernel/setup.c
+@@ -206,6 +206,18 @@ static void __init smp_build_mpidr_hash(void)
+ }
+ #endif
+ 
++void __init do_post_cpus_up_work(void)
++{
++	apply_alternatives_all();
++}
++
++#ifdef CONFIG_UP_LATE_INIT
++void __init up_late_init(void)
++{
++	do_post_cpus_up_work();
++}
++#endif /* CONFIG_UP_LATE_INIT */
++
+ static void __init setup_processor(void)
+ {
+ 	struct cpu_info *cpu_info;
+diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
+index 7ae6ee085261..e1b857fa2a1d 100644
+--- a/arch/arm64/kernel/smp.c
++++ b/arch/arm64/kernel/smp.c
+@@ -310,7 +310,7 @@ void cpu_die(void)
+ void __init smp_cpus_done(unsigned int max_cpus)
+ {
+ 	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
+-	apply_alternatives_all();
++	do_post_cpus_up_work();
+ }
+ 
+ void __init smp_prepare_boot_cpu(void)
+diff --git a/arch/c6x/kernel/time.c b/arch/c6x/kernel/time.c
+index 356ee84cad95..04845aaf5985 100644
+--- a/arch/c6x/kernel/time.c
++++ b/arch/c6x/kernel/time.c
+@@ -49,7 +49,7 @@ u64 sched_clock(void)
+ 	return (tsc * sched_clock_multiplier) >> SCHED_CLOCK_SHIFT;
+ }
+ 
+-void time_init(void)
++void __init time_init(void)
+ {
+ 	u64 tmp = (u64)NSEC_PER_SEC << SCHED_CLOCK_SHIFT;
+ 
+diff --git a/arch/mips/include/asm/asm-eva.h b/arch/mips/include/asm/asm-eva.h
+index e41c56e375b1..1e38f0e1ea3e 100644
+--- a/arch/mips/include/asm/asm-eva.h
++++ b/arch/mips/include/asm/asm-eva.h
+@@ -11,6 +11,36 @@
+ #define __ASM_ASM_EVA_H
+ 
+ #ifndef __ASSEMBLY__
++
++/* Kernel variants */
++
++#define kernel_cache(op, base)		"cache " op ", " base "\n"
++#define kernel_ll(reg, addr)		"ll " reg ", " addr "\n"
++#define kernel_sc(reg, addr)		"sc " reg ", " addr "\n"
++#define kernel_lw(reg, addr)		"lw " reg ", " addr "\n"
++#define kernel_lwl(reg, addr)		"lwl " reg ", " addr "\n"
++#define kernel_lwr(reg, addr)		"lwr " reg ", " addr "\n"
++#define kernel_lh(reg, addr)		"lh " reg ", " addr "\n"
++#define kernel_lb(reg, addr)		"lb " reg ", " addr "\n"
++#define kernel_lbu(reg, addr)		"lbu " reg ", " addr "\n"
++#define kernel_sw(reg, addr)		"sw " reg ", " addr "\n"
++#define kernel_swl(reg, addr)		"swl " reg ", " addr "\n"
++#define kernel_swr(reg, addr)		"swr " reg ", " addr "\n"
++#define kernel_sh(reg, addr)		"sh " reg ", " addr "\n"
++#define kernel_sb(reg, addr)		"sb " reg ", " addr "\n"
++
++#ifdef CONFIG_32BIT
++/*
++ * No 'sd' or 'ld' instructions in 32-bit but the code will
++ * do the correct thing
++ */
++#define kernel_sd(reg, addr)		user_sw(reg, addr)
++#define kernel_ld(reg, addr)		user_lw(reg, addr)
++#else
++#define kernel_sd(reg, addr)		"sd " reg", " addr "\n"
++#define kernel_ld(reg, addr)		"ld " reg", " addr "\n"
++#endif /* CONFIG_32BIT */
++
+ #ifdef CONFIG_EVA
+ 
+ #define __BUILD_EVA_INSN(insn, reg, addr)				\
+@@ -41,37 +71,60 @@
+ 
+ #else
+ 
+-#define user_cache(op, base)		"cache " op ", " base "\n"
+-#define user_ll(reg, addr)		"ll " reg ", " addr "\n"
+-#define user_sc(reg, addr)		"sc " reg ", " addr "\n"
+-#define user_lw(reg, addr)		"lw " reg ", " addr "\n"
+-#define user_lwl(reg, addr)		"lwl " reg ", " addr "\n"
+-#define user_lwr(reg, addr)		"lwr " reg ", " addr "\n"
+-#define user_lh(reg, addr)		"lh " reg ", " addr "\n"
+-#define user_lb(reg, addr)		"lb " reg ", " addr "\n"
+-#define user_lbu(reg, addr)		"lbu " reg ", " addr "\n"
+-#define user_sw(reg, addr)		"sw " reg ", " addr "\n"
+-#define user_swl(reg, addr)		"swl " reg ", " addr "\n"
+-#define user_swr(reg, addr)		"swr " reg ", " addr "\n"
+-#define user_sh(reg, addr)		"sh " reg ", " addr "\n"
+-#define user_sb(reg, addr)		"sb " reg ", " addr "\n"
++#define user_cache(op, base)		kernel_cache(op, base)
++#define user_ll(reg, addr)		kernel_ll(reg, addr)
++#define user_sc(reg, addr)		kernel_sc(reg, addr)
++#define user_lw(reg, addr)		kernel_lw(reg, addr)
++#define user_lwl(reg, addr)		kernel_lwl(reg, addr)
++#define user_lwr(reg, addr)		kernel_lwr(reg, addr)
++#define user_lh(reg, addr)		kernel_lh(reg, addr)
++#define user_lb(reg, addr)		kernel_lb(reg, addr)
++#define user_lbu(reg, addr)		kernel_lbu(reg, addr)
++#define user_sw(reg, addr)		kernel_sw(reg, addr)
++#define user_swl(reg, addr)		kernel_swl(reg, addr)
++#define user_swr(reg, addr)		kernel_swr(reg, addr)
++#define user_sh(reg, addr)		kernel_sh(reg, addr)
++#define user_sb(reg, addr)		kernel_sb(reg, addr)
+ 
+ #ifdef CONFIG_32BIT
+-/*
+- * No 'sd' or 'ld' instructions in 32-bit but the code will
+- * do the correct thing
+- */
+-#define user_sd(reg, addr)		user_sw(reg, addr)
+-#define user_ld(reg, addr)		user_lw(reg, addr)
++#define user_sd(reg, addr)		kernel_sw(reg, addr)
++#define user_ld(reg, addr)		kernel_lw(reg, addr)
+ #else
+-#define user_sd(reg, addr)		"sd " reg", " addr "\n"
+-#define user_ld(reg, addr)		"ld " reg", " addr "\n"
++#define user_sd(reg, addr)		kernel_sd(reg, addr)
++#define user_ld(reg, addr)		kernel_ld(reg, addr)
+ #endif /* CONFIG_32BIT */
+ 
+ #endif /* CONFIG_EVA */
+ 
+ #else /* __ASSEMBLY__ */
+ 
++#define kernel_cache(op, base)		cache op, base
++#define kernel_ll(reg, addr)		ll reg, addr
++#define kernel_sc(reg, addr)		sc reg, addr
++#define kernel_lw(reg, addr)		lw reg, addr
++#define kernel_lwl(reg, addr)		lwl reg, addr
++#define kernel_lwr(reg, addr)		lwr reg, addr
++#define kernel_lh(reg, addr)		lh reg, addr
++#define kernel_lb(reg, addr)		lb reg, addr
++#define kernel_lbu(reg, addr)		lbu reg, addr
++#define kernel_sw(reg, addr)		sw reg, addr
++#define kernel_swl(reg, addr)		swl reg, addr
++#define kernel_swr(reg, addr)		swr reg, addr
++#define kernel_sh(reg, addr)		sh reg, addr
++#define kernel_sb(reg, addr)		sb reg, addr
++
++#ifdef CONFIG_32BIT
++/*
++ * No 'sd' or 'ld' instructions in 32-bit but the code will
++ * do the correct thing
++ */
++#define kernel_sd(reg, addr)		user_sw(reg, addr)
++#define kernel_ld(reg, addr)		user_lw(reg, addr)
++#else
++#define kernel_sd(reg, addr)		sd reg, addr
++#define kernel_ld(reg, addr)		ld reg, addr
++#endif /* CONFIG_32BIT */
++
+ #ifdef CONFIG_EVA
+ 
+ #define __BUILD_EVA_INSN(insn, reg, addr)			\
+@@ -101,31 +154,27 @@
+ #define user_sd(reg, addr)		user_sw(reg, addr)
+ #else
+ 
+-#define user_cache(op, base)		cache op, base
+-#define user_ll(reg, addr)		ll reg, addr
+-#define user_sc(reg, addr)		sc reg, addr
+-#define user_lw(reg, addr)		lw reg, addr
+-#define user_lwl(reg, addr)		lwl reg, addr
+-#define user_lwr(reg, addr)		lwr reg, addr
+-#define user_lh(reg, addr)		lh reg, addr
+-#define user_lb(reg, addr)		lb reg, addr
+-#define user_lbu(reg, addr)		lbu reg, addr
+-#define user_sw(reg, addr)		sw reg, addr
+-#define user_swl(reg, addr)		swl reg, addr
+-#define user_swr(reg, addr)		swr reg, addr
+-#define user_sh(reg, addr)		sh reg, addr
+-#define user_sb(reg, addr)		sb reg, addr
++#define user_cache(op, base)		kernel_cache(op, base)
++#define user_ll(reg, addr)		kernel_ll(reg, addr)
++#define user_sc(reg, addr)		kernel_sc(reg, addr)
++#define user_lw(reg, addr)		kernel_lw(reg, addr)
++#define user_lwl(reg, addr)		kernel_lwl(reg, addr)
++#define user_lwr(reg, addr)		kernel_lwr(reg, addr)
++#define user_lh(reg, addr)		kernel_lh(reg, addr)
++#define user_lb(reg, addr)		kernel_lb(reg, addr)
++#define user_lbu(reg, addr)		kernel_lbu(reg, addr)
++#define user_sw(reg, addr)		kernel_sw(reg, addr)
++#define user_swl(reg, addr)		kernel_swl(reg, addr)
++#define user_swr(reg, addr)		kernel_swr(reg, addr)
++#define user_sh(reg, addr)		kernel_sh(reg, addr)
++#define user_sb(reg, addr)		kernel_sb(reg, addr)
+ 
+ #ifdef CONFIG_32BIT
+-/*
+- * No 'sd' or 'ld' instructions in 32-bit but the code will
+- * do the correct thing
+- */
+-#define user_sd(reg, addr)		user_sw(reg, addr)
+-#define user_ld(reg, addr)		user_lw(reg, addr)
++#define user_sd(reg, addr)		kernel_sw(reg, addr)
++#define user_ld(reg, addr)		kernel_lw(reg, addr)
+ #else
+-#define user_sd(reg, addr)		sd reg, addr
+-#define user_ld(reg, addr)		ld reg, addr
++#define user_sd(reg, addr)		kernel_sd(reg, addr)
++#define user_ld(reg, addr)		kernel_ld(reg, addr)
+ #endif /* CONFIG_32BIT */
+ 
+ #endif /* CONFIG_EVA */
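
With EVA disabled, the user_* accessors above are pure aliases: the instruction
mnemonics are spelled out once in the kernel_* macros and both families stay in
lockstep by construction. A minimal standalone sketch of that aliasing pattern,
with illustrative names rather than the kernel header's full set:

/*
 * Standalone illustration (not the kernel header): the mnemonic string
 * lives only in the kernel_* macro; the user_* accessor collapses onto
 * it when the EVA variant is not compiled in.
 */
#include <stdio.h>

#define kernel_lw(reg, addr)    "lw " reg ", " addr "\n"

#ifdef CONFIG_EVA
#define user_lw(reg, addr)      "lwe " reg ", " addr "\n"  /* EVA load */
#else
#define user_lw(reg, addr)      kernel_lw(reg, addr)       /* plain alias */
#endif

int main(void)
{
        /* Without -DCONFIG_EVA both names emit the identical instruction. */
        printf("%s", user_lw("$t0", "0($a0)"));
        return 0;
}

Compiling with -DCONFIG_EVA would swap in the EVA mnemonic; without it the two
names cannot drift apart, which is the point of the normalisation above.
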
+diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
+index affebb78f5d6..270ecc148443 100644
+--- a/arch/mips/include/asm/fpu.h
++++ b/arch/mips/include/asm/fpu.h
+@@ -169,6 +169,7 @@ static inline void lose_fpu(int save)
+ 		}
+ 		disable_msa();
+ 		clear_thread_flag(TIF_USEDMSA);
++		__disable_fpu();
+ 	} else if (is_fpu_owner()) {
+ 		if (save)
+ 			_save_fp(current);
+diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
+index f2c249796ea8..4e3205a3bee2 100644
+--- a/arch/mips/include/asm/kvm_host.h
++++ b/arch/mips/include/asm/kvm_host.h
+@@ -321,6 +321,7 @@ enum mips_mmu_types {
+ #define T_TRAP			13	/* Trap instruction */
+ #define T_VCEI			14	/* Virtual coherency exception */
+ #define T_FPE			15	/* Floating point exception */
++#define T_MSADIS		21	/* MSA disabled exception */
+ #define T_WATCH			23	/* Watch address reference */
+ #define T_VCED			31	/* Virtual coherency data */
+ 
+@@ -577,6 +578,7 @@ struct kvm_mips_callbacks {
+ 	int (*handle_syscall)(struct kvm_vcpu *vcpu);
+ 	int (*handle_res_inst)(struct kvm_vcpu *vcpu);
+ 	int (*handle_break)(struct kvm_vcpu *vcpu);
++	int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
+ 	int (*vm_init)(struct kvm *kvm);
+ 	int (*vcpu_init)(struct kvm_vcpu *vcpu);
+ 	int (*vcpu_setup)(struct kvm_vcpu *vcpu);
+diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
+index fb3e8dfd1ff6..838d3a6a5b7d 100644
+--- a/arch/mips/kvm/emulate.c
++++ b/arch/mips/kvm/emulate.c
+@@ -2176,6 +2176,7 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause,
+ 		case T_SYSCALL:
+ 		case T_BREAK:
+ 		case T_RES_INST:
++		case T_MSADIS:
+ 			break;
+ 
+ 		case T_COP_UNUSABLE:
+diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
+index 270bbd41769e..39074fb83bad 100644
+--- a/arch/mips/kvm/mips.c
++++ b/arch/mips/kvm/mips.c
+@@ -1119,6 +1119,10 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+ 		ret = kvm_mips_callbacks->handle_break(vcpu);
+ 		break;
+ 
++	case T_MSADIS:
++		ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
++		break;
++
+ 	default:
+ 		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x  BadVaddr: %#lx Status: %#lx\n",
+ 			exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
+diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
+index fd7257b70e65..4372cc86650c 100644
+--- a/arch/mips/kvm/trap_emul.c
++++ b/arch/mips/kvm/trap_emul.c
+@@ -330,6 +330,33 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
+ 	return ret;
+ }
+ 
++static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
++{
++	struct kvm_run *run = vcpu->run;
++	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
++	unsigned long cause = vcpu->arch.host_cp0_cause;
++	enum emulation_result er = EMULATE_DONE;
++	int ret = RESUME_GUEST;
++
++	/* No MSA supported in guest, guest reserved instruction exception */
++	er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
++
++	switch (er) {
++	case EMULATE_DONE:
++		ret = RESUME_GUEST;
++		break;
++
++	case EMULATE_FAIL:
++		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
++		ret = RESUME_HOST;
++		break;
++
++	default:
++		BUG();
++	}
++	return ret;
++}
++
+ static int kvm_trap_emul_vm_init(struct kvm *kvm)
+ {
+ 	return 0;
+@@ -470,6 +497,7 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
+ 	.handle_syscall = kvm_trap_emul_handle_syscall,
+ 	.handle_res_inst = kvm_trap_emul_handle_res_inst,
+ 	.handle_break = kvm_trap_emul_handle_break,
++	.handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,
+ 
+ 	.vm_init = kvm_trap_emul_vm_init,
+ 	.vcpu_init = kvm_trap_emul_vcpu_init,
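
Wiring up T_MSADIS follows the backend's existing shape: kvm_mips_handle_exit()
switches on the exception code and forwards to a per-backend callback, so a new
exception type costs one table slot plus one case label. A standalone sketch of
that dispatch shape, using made-up handlers and the cause codes from the header
above:

#include <stdio.h>

enum { RESUME_GUEST, RESUME_HOST };

/* One slot per exception type, mirroring struct kvm_mips_callbacks. */
struct callbacks {
        int (*handle_break)(void);
        int (*handle_msa_disabled)(void);
};

static int do_break(void)        { puts("breakpoint");        return RESUME_GUEST; }
static int do_msa_disabled(void) { puts("MSA disabled -> RI"); return RESUME_GUEST; }

static const struct callbacks cb = {
        .handle_break        = do_break,
        .handle_msa_disabled = do_msa_disabled,
};

static int handle_exit(int exccode)
{
        switch (exccode) {
        case 9:  return cb.handle_break();          /* T_BP */
        case 21: return cb.handle_msa_disabled();   /* T_MSADIS */
        default: return RESUME_HOST;                /* unknown: bail to host */
        }
}

int main(void)
{
        return handle_exit(21);   /* dispatches to the new MSA handler */
}
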
+diff --git a/arch/mips/loongson/loongson-3/irq.c b/arch/mips/loongson/loongson-3/irq.c
+index 21221edda7a9..0f75b6b3d218 100644
+--- a/arch/mips/loongson/loongson-3/irq.c
++++ b/arch/mips/loongson/loongson-3/irq.c
+@@ -44,6 +44,7 @@ void mach_irq_dispatch(unsigned int pending)
+ 
+ static struct irqaction cascade_irqaction = {
+ 	.handler = no_action,
++	.flags = IRQF_NO_SUSPEND,
+ 	.name = "cascade",
+ };
+ 
+diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
+index 8fddd2cdbff7..efe366d618b1 100644
+--- a/arch/mips/mti-malta/malta-memory.c
++++ b/arch/mips/mti-malta/malta-memory.c
+@@ -53,6 +53,12 @@ fw_memblock_t * __init fw_getmdesc(int eva)
+ 		pr_warn("memsize not set in YAMON, set to default (32Mb)\n");
+ 		physical_memsize = 0x02000000;
+ 	} else {
++		if (memsize > (256 << 20)) { /* memsize should be capped to 256M */
++			pr_warn("Unsupported memsize value (0x%lx) detected! "
++				"Using 0x10000000 (256M) instead\n",
++				memsize);
++			memsize = 256 << 20;
++		}
+ 		/* If ememsize is set, then set physical_memsize to that */
+ 		physical_memsize = ememsize ? : memsize;
+ 	}
+diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S
+index 32a7c828f073..e7567c8a9e79 100644
+--- a/arch/mips/power/hibernate.S
++++ b/arch/mips/power/hibernate.S
+@@ -30,6 +30,8 @@ LEAF(swsusp_arch_suspend)
+ END(swsusp_arch_suspend)
+ 
+ LEAF(swsusp_arch_resume)
++	/* Avoid TLB mismatch during and after kernel resume */
++	jal local_flush_tlb_all
+ 	PTR_L t0, restore_pblist
+ 0:
+ 	PTR_L t1, PBE_ADDRESS(t0)   /* source */
+@@ -43,7 +45,6 @@ LEAF(swsusp_arch_resume)
+ 	bne t1, t3, 1b
+ 	PTR_L t0, PBE_NEXT(t0)
+ 	bnez t0, 0b
+-	jal local_flush_tlb_all /* Avoid TLB mismatch after kernel resume */
+ 	PTR_LA t0, saved_regs
+ 	PTR_L ra, PT_R31(t0)
+ 	PTR_L sp, PT_R29(t0)
+diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
+index 40198d50b4c2..8005b79ecbcf 100644
+--- a/arch/powerpc/kernel/cacheinfo.c
++++ b/arch/powerpc/kernel/cacheinfo.c
+@@ -61,12 +61,22 @@ struct cache_type_info {
+ };
+ 
+ /* These are used to index the cache_type_info array. */
+-#define CACHE_TYPE_UNIFIED     0
+-#define CACHE_TYPE_INSTRUCTION 1
+-#define CACHE_TYPE_DATA        2
++#define CACHE_TYPE_UNIFIED     0 /* cache-size, cache-block-size, etc. */
++#define CACHE_TYPE_UNIFIED_D   1 /* d-cache-size, d-cache-block-size, etc */
++#define CACHE_TYPE_INSTRUCTION 2
++#define CACHE_TYPE_DATA        3
+ 
+ static const struct cache_type_info cache_type_info[] = {
+ 	{
++		/* Embedded systems that use cache-size, cache-block-size,
++		 * etc. for the Unified (typically L2) cache. */
++		.name            = "Unified",
++		.size_prop       = "cache-size",
++		.line_size_props = { "cache-line-size",
++				     "cache-block-size", },
++		.nr_sets_prop    = "cache-sets",
++	},
++	{
+ 		/* PowerPC Processor binding says the [di]-cache-*
+ 		 * must be equal on unified caches, so just use
+ 		 * d-cache properties. */
+@@ -293,7 +303,8 @@ static struct cache *cache_find_first_sibling(struct cache *cache)
+ {
+ 	struct cache *iter;
+ 
+-	if (cache->type == CACHE_TYPE_UNIFIED)
++	if (cache->type == CACHE_TYPE_UNIFIED ||
++	    cache->type == CACHE_TYPE_UNIFIED_D)
+ 		return cache;
+ 
+ 	list_for_each_entry(iter, &cache_list, list)
+@@ -324,16 +335,29 @@ static bool cache_node_is_unified(const struct device_node *np)
+ 	return of_get_property(np, "cache-unified", NULL);
+ }
+ 
+-static struct cache *cache_do_one_devnode_unified(struct device_node *node,
+-						  int level)
++/*
++ * Unified caches can have two different sets of tags.  Most embedded
++ * use cache-size, etc. for the unified cache size, but open firmware systems
++ * use d-cache-size, etc.   Check on initialization for which type we have, and
++ * return the appropriate structure type.  Assume it's embedded if it isn't
++ * open firmware.  If it's yet a 3rd type, then there will be missing entries
++ * in /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
++ * to be extended further.
++ */
++static int cache_is_unified_d(const struct device_node *np)
+ {
+-	struct cache *cache;
++	return of_get_property(np,
++		cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
++		CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
++}
+ 
++static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level)
++{
+ 	pr_debug("creating L%d ucache for %s\n", level, node->full_name);
+ 
+-	cache = new_cache(CACHE_TYPE_UNIFIED, level, node);
+-
+-	return cache;
++	return new_cache(cache_is_unified_d(node), level, node);
+ }
+ 
+ static struct cache *cache_do_one_devnode_split(struct device_node *node,
+diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
+index 2396dda282cd..ead55351b254 100644
+--- a/arch/powerpc/perf/callchain.c
++++ b/arch/powerpc/perf/callchain.c
+@@ -243,7 +243,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+ 	sp = regs->gpr[1];
+ 	perf_callchain_store(entry, next_ip);
+ 
+-	for (;;) {
++	while (entry->nr < PERF_MAX_STACK_DEPTH) {
+ 		fp = (unsigned long __user *) sp;
+ 		if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
+ 			return;
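
The callchain fix replaces an unbounded for (;;) with a walk capped at
PERF_MAX_STACK_DEPTH, so a corrupt user stack whose frame pointers form a cycle
can no longer spin the sampler forever. A userspace sketch of why the bound
matters, using a deliberately circular frame list:

#include <stdio.h>

#define MAX_DEPTH 4   /* stands in for PERF_MAX_STACK_DEPTH */

struct frame {
        const struct frame *next;
        unsigned long ip;
};

static void walk(const struct frame *fp)
{
        unsigned int nr = 0;

        /* Bounded like the fixed loop: a cycle cannot hang the walker. */
        while (nr < MAX_DEPTH && fp) {
                printf("ip=%#lx\n", fp->ip);
                fp = fp->next;
                nr++;
        }
}

int main(void)
{
        struct frame a, b;

        a.next = &b; a.ip = 0x1000;
        b.next = &a; b.ip = 0x2000;   /* deliberately circular */
        walk(&a);                     /* prints MAX_DEPTH entries, then stops */
        return 0;
}
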
+diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
+index 4c11421847be..3af8324c122e 100644
+--- a/arch/powerpc/platforms/cell/interrupt.c
++++ b/arch/powerpc/platforms/cell/interrupt.c
+@@ -163,7 +163,7 @@ static unsigned int iic_get_irq(void)
+ 
+ void iic_setup_cpu(void)
+ {
+-	out_be64(this_cpu_ptr(&cpu_iic.regs->prio), 0xff);
++	out_be64(&this_cpu_ptr(&cpu_iic)->regs->prio, 0xff);
+ }
+ 
+ u8 iic_get_target_id(int cpu)
+diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
+index c7c8720aa39f..63db1b03e756 100644
+--- a/arch/powerpc/platforms/cell/iommu.c
++++ b/arch/powerpc/platforms/cell/iommu.c
+@@ -197,7 +197,7 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
+ 
+ 	io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
+ 
+-	for (i = 0; i < npages; i++, uaddr += tbl->it_page_shift)
++	for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift))
+ 		io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask);
+ 
+ 	mb();
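
The iommu change fixes a units bug: it_page_shift is a shift count, so the
per-PTE stride is (1 << shift) bytes, not shift bytes. The corrected arithmetic,
reduced to a standalone check:

#include <stdio.h>

int main(void)
{
        unsigned long uaddr = 0;
        unsigned long page_shift = 12;   /* 4 KiB pages, like it_page_shift */

        /* Stride by the page size, not by the shift count itself. */
        for (int i = 0; i < 3; i++, uaddr += (1UL << page_shift))
                printf("pte %d maps %#lx\n", i, uaddr);
        return 0;
}
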
+diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
+index 6a9a255d8058..2578b148ddc4 100644
+--- a/arch/powerpc/platforms/powernv/pci-ioda.c
++++ b/arch/powerpc/platforms/powernv/pci-ioda.c
+@@ -1750,7 +1750,8 @@ static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
+ 				region.start += phb->ioda.io_segsize;
+ 				index++;
+ 			}
+-		} else if (res->flags & IORESOURCE_MEM) {
++		} else if ((res->flags & IORESOURCE_MEM) &&
++			   !pnv_pci_is_mem_pref_64(res->flags)) {
+ 			region.start = res->start -
+ 				       hose->mem_offset[0] -
+ 				       phb->ioda.m32_pci_base;
+diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
+index 1c4c5accd220..d3236c9e226b 100644
+--- a/arch/s390/kernel/suspend.c
++++ b/arch/s390/kernel/suspend.c
+@@ -138,6 +138,8 @@ int pfn_is_nosave(unsigned long pfn)
+ {
+ 	unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
+ 	unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end));
++	unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
++	unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));
+ 
+ 	/* Always save lowcore pages (LC protection might be enabled). */
+ 	if (pfn <= LC_PAGES)
+@@ -145,6 +147,8 @@ int pfn_is_nosave(unsigned long pfn)
+ 	if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
+ 		return 1;
+ 	/* Skip memory holes and read-only pages (NSS, DCSS, ...). */
++	if (pfn >= stext_pfn && pfn <= eshared_pfn)
++		return ipl_info.type == IPL_TYPE_NSS ? 1 : 0;
+ 	if (tprot(PFN_PHYS(pfn)))
+ 		return 1;
+ 	return 0;
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index f512cffbf84e..07da75c385e6 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -17,6 +17,7 @@
+ #include <linux/signal.h>
+ #include <linux/slab.h>
+ #include <linux/bitmap.h>
++#include <linux/vmalloc.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/uaccess.h>
+ #include "kvm-s390.h"
+@@ -1131,7 +1132,6 @@ struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
+ 
+ 	if ((!schid && !cr6) || (schid && cr6))
+ 		return NULL;
+-	mutex_lock(&kvm->lock);
+ 	fi = &kvm->arch.float_int;
+ 	spin_lock(&fi->lock);
+ 	inti = NULL;
+@@ -1159,7 +1159,6 @@ struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
+ 	if (list_empty(&fi->list))
+ 		atomic_set(&fi->active, 0);
+ 	spin_unlock(&fi->lock);
+-	mutex_unlock(&kvm->lock);
+ 	return inti;
+ }
+ 
+@@ -1172,7 +1171,6 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
+ 	int sigcpu;
+ 	int rc = 0;
+ 
+-	mutex_lock(&kvm->lock);
+ 	fi = &kvm->arch.float_int;
+ 	spin_lock(&fi->lock);
+ 	if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) {
+@@ -1225,7 +1223,6 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
+ 	kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
+ unlock_fi:
+ 	spin_unlock(&fi->lock);
+-	mutex_unlock(&kvm->lock);
+ 	return rc;
+ }
+ 
+@@ -1287,10 +1284,10 @@ int kvm_s390_inject_vm(struct kvm *kvm,
+ 	return rc;
+ }
+ 
+-void kvm_s390_reinject_io_int(struct kvm *kvm,
++int kvm_s390_reinject_io_int(struct kvm *kvm,
+ 			      struct kvm_s390_interrupt_info *inti)
+ {
+-	__inject_vm(kvm, inti);
++	return __inject_vm(kvm, inti);
+ }
+ 
+ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
+@@ -1379,7 +1376,6 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm)
+ 	struct kvm_s390_float_interrupt *fi;
+ 	struct kvm_s390_interrupt_info	*n, *inti = NULL;
+ 
+-	mutex_lock(&kvm->lock);
+ 	fi = &kvm->arch.float_int;
+ 	spin_lock(&fi->lock);
+ 	list_for_each_entry_safe(inti, n, &fi->list, list) {
+@@ -1389,66 +1385,68 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm)
+ 	fi->irq_count = 0;
+ 	atomic_set(&fi->active, 0);
+ 	spin_unlock(&fi->lock);
+-	mutex_unlock(&kvm->lock);
+ }
+ 
+-static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
+-				   u8 *addr)
++static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
++		       struct kvm_s390_irq *irq)
+ {
+-	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
+-	struct kvm_s390_irq irq = {0};
+-
+-	irq.type = inti->type;
++	irq->type = inti->type;
+ 	switch (inti->type) {
+ 	case KVM_S390_INT_PFAULT_INIT:
+ 	case KVM_S390_INT_PFAULT_DONE:
+ 	case KVM_S390_INT_VIRTIO:
+ 	case KVM_S390_INT_SERVICE:
+-		irq.u.ext = inti->ext;
++		irq->u.ext = inti->ext;
+ 		break;
+ 	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+-		irq.u.io = inti->io;
++		irq->u.io = inti->io;
+ 		break;
+ 	case KVM_S390_MCHK:
+-		irq.u.mchk = inti->mchk;
++		irq->u.mchk = inti->mchk;
+ 		break;
+-	default:
+-		return -EINVAL;
+ 	}
+-
+-	if (copy_to_user(uptr, &irq, sizeof(irq)))
+-		return -EFAULT;
+-
+-	return 0;
+ }
+ 
+-static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
++static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
+ {
+ 	struct kvm_s390_interrupt_info *inti;
+ 	struct kvm_s390_float_interrupt *fi;
++	struct kvm_s390_irq *buf;
++	int max_irqs;
+ 	int ret = 0;
+ 	int n = 0;
+ 
+-	mutex_lock(&kvm->lock);
++	if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
++		return -EINVAL;
++
++	/*
++	 * We are already using -ENOMEM to signal
++	 * userspace it may retry with a bigger buffer,
++	 * so we need to use something else for this case
++	 */
++	buf = vzalloc(len);
++	if (!buf)
++		return -ENOBUFS;
++
++	max_irqs = len / sizeof(struct kvm_s390_irq);
++
+ 	fi = &kvm->arch.float_int;
+ 	spin_lock(&fi->lock);
+-
+ 	list_for_each_entry(inti, &fi->list, list) {
+-		if (len < sizeof(struct kvm_s390_irq)) {
++		if (n == max_irqs) {
+ 			/* signal userspace to try again */
+ 			ret = -ENOMEM;
+ 			break;
+ 		}
+-		ret = copy_irq_to_user(inti, buf);
+-		if (ret)
+-			break;
+-		buf += sizeof(struct kvm_s390_irq);
+-		len -= sizeof(struct kvm_s390_irq);
++		inti_to_irq(inti, &buf[n]);
+ 		n++;
+ 	}
+-
+ 	spin_unlock(&fi->lock);
+-	mutex_unlock(&kvm->lock);
++	if (!ret && n > 0) {
++		if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
++			ret = -EFAULT;
++	}
++	vfree(buf);
+ 
+ 	return ret < 0 ? ret : n;
+ }
+@@ -1459,7 +1457,7 @@ static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+ 
+ 	switch (attr->group) {
+ 	case KVM_DEV_FLIC_GET_ALL_IRQS:
+-		r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr,
++		r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
+ 					  attr->attr);
+ 		break;
+ 	default:
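
The get_all_floating_irqs() rework is driven by a locking rule: copy_to_user()
can fault and sleep, so it must never run under a spinlock. The new code
snapshots the interrupt list into a vzalloc()ed buffer while locked and copies
to userspace only after unlocking. A userspace analogue of that
snapshot-then-copy pattern, with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int shared[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
static int shared_n = 8;

static int snapshot_then_copy(int *dst, int max)
{
        int *buf = calloc(max, sizeof(*buf));
        int n;

        if (!buf)
                return -1;

        pthread_mutex_lock(&lock);            /* critical section: memcpy only */
        n = shared_n < max ? shared_n : max;
        memcpy(buf, shared, n * sizeof(*buf));
        pthread_mutex_unlock(&lock);

        memcpy(dst, buf, n * sizeof(*dst));   /* slow copy, lock not held */
        free(buf);
        return n;
}

int main(void)
{
        int out[8];

        printf("copied %d entries\n", snapshot_then_copy(out, 8));
        return 0;
}

Keeping the locked region down to a memcpy is also why the kvm->lock mutex
around these paths could be dropped in the same patch.
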
+diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
+index a8f3d9b71c11..2c1ea12ed55d 100644
+--- a/arch/s390/kvm/kvm-s390.h
++++ b/arch/s390/kvm/kvm-s390.h
+@@ -146,8 +146,8 @@ int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
+ int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
+ struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
+ 						    u64 cr6, u64 schid);
+-void kvm_s390_reinject_io_int(struct kvm *kvm,
+-			      struct kvm_s390_interrupt_info *inti);
++int kvm_s390_reinject_io_int(struct kvm *kvm,
++			     struct kvm_s390_interrupt_info *inti);
+ int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);
+ 
+ /* implemented in intercept.c */
+diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
+index 1be578d64dfc..1edbf953cf0e 100644
+--- a/arch/s390/kvm/priv.c
++++ b/arch/s390/kvm/priv.c
+@@ -229,18 +229,19 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
+ 	struct kvm_s390_interrupt_info *inti;
+ 	unsigned long len;
+ 	u32 tpi_data[3];
+-	int cc, rc;
++	int rc;
+ 	u64 addr;
+ 
+-	rc = 0;
+ 	addr = kvm_s390_get_base_disp_s(vcpu);
+ 	if (addr & 3)
+ 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+-	cc = 0;
++
+ 	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
+-	if (!inti)
+-		goto no_interrupt;
+-	cc = 1;
++	if (!inti) {
++		kvm_s390_set_psw_cc(vcpu, 0);
++		return 0;
++	}
++
+ 	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
+ 	tpi_data[1] = inti->io.io_int_parm;
+ 	tpi_data[2] = inti->io.io_int_word;
+@@ -251,30 +252,38 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
+ 		 */
+ 		len = sizeof(tpi_data) - 4;
+ 		rc = write_guest(vcpu, addr, &tpi_data, len);
+-		if (rc)
+-			return kvm_s390_inject_prog_cond(vcpu, rc);
++		if (rc) {
++			rc = kvm_s390_inject_prog_cond(vcpu, rc);
++			goto reinject_interrupt;
++		}
+ 	} else {
+ 		/*
+ 		 * Store the three-word I/O interruption code into
+ 		 * the appropriate lowcore area.
+ 		 */
+ 		len = sizeof(tpi_data);
+-		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len))
++		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
++			/* failed writes to the low core are not recoverable */
+ 			rc = -EFAULT;
++			goto reinject_interrupt;
++		}
+ 	}
++
++	/* irq was successfully handed to the guest */
++	kfree(inti);
++	kvm_s390_set_psw_cc(vcpu, 1);
++	return 0;
++reinject_interrupt:
+ 	/*
+ 	 * If we encounter a problem storing the interruption code, the
+ 	 * instruction is suppressed from the guest's view: reinject the
+ 	 * interrupt.
+ 	 */
+-	if (!rc)
++	if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
+ 		kfree(inti);
+-	else
+-		kvm_s390_reinject_io_int(vcpu->kvm, inti);
+-no_interrupt:
+-	/* Set condition code and we're done. */
+-	if (!rc)
+-		kvm_s390_set_psw_cc(vcpu, cc);
++		rc = -EFAULT;
++	}
++	/* don't set the cc, a pgm irq was injected or we drop to user space */
+ 	return rc ? -EFAULT : 0;
+ }
+ 
+@@ -462,6 +471,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
+ 	for (n = mem->count - 1; n > 0 ; n--)
+ 		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));
+ 
++	memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
+ 	mem->vm[0].cpus_total = cpus;
+ 	mem->vm[0].cpus_configured = cpus;
+ 	mem->vm[0].cpus_standby = 0;
+diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
+index 47f29b1d1846..e7814b74caf8 100644
+--- a/arch/x86/include/asm/insn.h
++++ b/arch/x86/include/asm/insn.h
+@@ -69,7 +69,7 @@ struct insn {
+ 	const insn_byte_t *next_byte;
+ };
+ 
+-#define MAX_INSN_SIZE	16
++#define MAX_INSN_SIZE	15
+ 
+ #define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6)
+ #define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3)
+diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
+index a1410db38a1a..653dfa7662e1 100644
+--- a/arch/x86/include/asm/mwait.h
++++ b/arch/x86/include/asm/mwait.h
+@@ -30,6 +30,14 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
+ 		     :: "a" (eax), "c" (ecx));
+ }
+ 
++static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
++{
++	trace_hardirqs_on();
++	/* "mwait %eax, %ecx;" */
++	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
++		     :: "a" (eax), "c" (ecx));
++}
++
+ /*
+  * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
+  * which can obviate IPI to trigger checking of need_resched.
+diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
+index 073983398364..666bcf14ce10 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
+@@ -557,6 +557,8 @@ struct event_constraint intel_core2_pebs_event_constraints[] = {
+ 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
+ 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */
+ 	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
++	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
++	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
+ 	EVENT_CONSTRAINT_END
+ };
+ 
+@@ -564,6 +566,8 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
+ 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
+ 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
+ 	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
++	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
++	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
+ 	EVENT_CONSTRAINT_END
+ };
+ 
+@@ -587,6 +591,8 @@ struct event_constraint intel_nehalem_pebs_event_constraints[] = {
+ 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
+ 	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
+ 	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
++	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
++	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
+ 	EVENT_CONSTRAINT_END
+ };
+ 
+@@ -602,6 +608,8 @@ struct event_constraint intel_westmere_pebs_event_constraints[] = {
+ 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
+ 	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
+ 	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
++	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
++	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
+ 	EVENT_CONSTRAINT_END
+ };
+ 
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index e127ddaa2d5a..6ad8a6396b75 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -24,6 +24,7 @@
+ #include <asm/syscalls.h>
+ #include <asm/idle.h>
+ #include <asm/uaccess.h>
++#include <asm/mwait.h>
+ #include <asm/i387.h>
+ #include <asm/fpu-internal.h>
+ #include <asm/debugreg.h>
+@@ -398,6 +399,53 @@ static void amd_e400_idle(void)
+ 		default_idle();
+ }
+ 
++/*
++ * Intel Core2 and older machines prefer MWAIT over HALT for C1.
++ * We can't rely on cpuidle installing MWAIT, because it will not load
++ * on systems that support only C1 -- so the boot default must be MWAIT.
++ *
++ * Some AMD machines are the opposite, they depend on using HALT.
++ *
++ * So for default C1, which is used during boot until cpuidle loads,
++ * use MWAIT-C1 on Intel HW that has it, else use HALT.
++ */
++static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
++{
++	if (c->x86_vendor != X86_VENDOR_INTEL)
++		return 0;
++
++	if (!cpu_has(c, X86_FEATURE_MWAIT))
++		return 0;
++
++	return 1;
++}
++
++/*
++ * MONITOR/MWAIT with no hints, used for the default C1 state.
++ * This invokes MWAIT with interrupts enabled and no flags,
++ * which is backwards compatible with the original MWAIT implementation.
++ */
++
++static void mwait_idle(void)
++{
++	if (!current_set_polling_and_test()) {
++		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
++			smp_mb(); /* quirk */
++			clflush((void *)&current_thread_info()->flags);
++			smp_mb(); /* quirk */
++		}
++
++		__monitor((void *)&current_thread_info()->flags, 0, 0);
++		if (!need_resched())
++			__sti_mwait(0, 0);
++		else
++			local_irq_enable();
++	} else {
++		local_irq_enable();
++	}
++	__current_clr_polling();
++}
++
+ void select_idle_routine(const struct cpuinfo_x86 *c)
+ {
+ #ifdef CONFIG_SMP
+@@ -411,6 +459,9 @@ void select_idle_routine(const struct cpuinfo_x86 *c)
+ 		/* E400: APIC timer interrupt does not wake up CPU from C1e */
+ 		pr_info("using AMD E400 aware idle routine\n");
+ 		x86_idle = amd_e400_idle;
++	} else if (prefer_mwait_c1_over_halt(c)) {
++		pr_info("using mwait in idle threads\n");
++		x86_idle = mwait_idle;
+ 	} else
+ 		x86_idle = default_idle;
+ }
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 64d76c102230..5ec6a9969581 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -5781,7 +5781,6 @@ int kvm_arch_init(void *opaque)
+ 	kvm_set_mmio_spte_mask();
+ 
+ 	kvm_x86_ops = ops;
+-	kvm_init_msr_list();
+ 
+ 	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
+ 			PT_DIRTY_MASK, PT64_NX_MASK, 0);
+@@ -7218,7 +7217,14 @@ void kvm_arch_hardware_disable(void)
+ 
+ int kvm_arch_hardware_setup(void)
+ {
+-	return kvm_x86_ops->hardware_setup();
++	int r;
++
++	r = kvm_x86_ops->hardware_setup();
++	if (r != 0)
++		return r;
++
++	kvm_init_msr_list();
++	return 0;
+ }
+ 
+ void kvm_arch_hardware_unsetup(void)
+diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
+index 1313ae6b478b..85994f5d48e4 100644
+--- a/arch/x86/lib/insn.c
++++ b/arch/x86/lib/insn.c
+@@ -52,6 +52,13 @@
+  */
+ void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
+ {
++	/*
++	 * Instructions longer than MAX_INSN_SIZE (15 bytes) are invalid
++	 * even if the input buffer is long enough to hold them.
++	 */
++	if (buf_len > MAX_INSN_SIZE)
++		buf_len = MAX_INSN_SIZE;
++
+ 	memset(insn, 0, sizeof(*insn));
+ 	insn->kaddr = kaddr;
+ 	insn->end_kaddr = kaddr + buf_len;
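
Paired with the MAX_INSN_SIZE change from 16 to 15 earlier in this patch, the
clamp encodes the architectural rule that any x86 instruction longer than 15
bytes is invalid, however large the caller's buffer is. The check is a simple
saturating limit:

#include <stdio.h>

#define MAX_INSN_SIZE 15   /* architectural limit; longer raises #UD */

static int usable_len(int buf_len)
{
        return buf_len > MAX_INSN_SIZE ? MAX_INSN_SIZE : buf_len;
}

int main(void)
{
        printf("%d %d\n", usable_len(8), usable_len(64));   /* prints: 8 15 */
        return 0;
}
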
+diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
+index e31d4949124a..87be10e8b57a 100644
+--- a/arch/xtensa/Kconfig
++++ b/arch/xtensa/Kconfig
+@@ -428,6 +428,36 @@ config DEFAULT_MEM_SIZE
+ 
+ 	  If unsure, leave the default value here.
+ 
++config XTFPGA_LCD
++	bool "Enable XTFPGA LCD driver"
++	depends on XTENSA_PLATFORM_XTFPGA
++	default n
++	help
++	  There's a 2x16 LCD on most XTFPGA boards; the kernel may output
++	  progress messages there during bootup/shutdown. It may be useful
++	  during board bringup.
++
++	  If unsure, say N.
++
++config XTFPGA_LCD_BASE_ADDR
++	hex "XTFPGA LCD base address"
++	depends on XTFPGA_LCD
++	default "0x0d0c0000"
++	help
++	  Base address of the LCD controller inside the KIO region.
++	  Different boards in the XTFPGA family have the LCD controller at
++	  different addresses. Please consult the prototyping user guide for
++	  your board for the correct address. A wrong address here may lead
++	  to a hardware lockup.
++
++config XTFPGA_LCD_8BIT_ACCESS
++	bool "Use 8-bit access to XTFPGA LCD"
++	depends on XTFPGA_LCD
++	default n
++	help
++	  The LCD may be connected with a 4- or 8-bit interface; 8-bit access
++	  may only be used with an 8-bit interface. Please consult the
++	  prototyping user guide for your board for the correct interface
++	  width.
++
+ endmenu
+ 
+ menu "Executable file formats"
+diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
+index db5bb72e2f4e..62d84657c60b 100644
+--- a/arch/xtensa/include/uapi/asm/unistd.h
++++ b/arch/xtensa/include/uapi/asm/unistd.h
+@@ -715,7 +715,7 @@ __SYSCALL(323, sys_process_vm_writev, 6)
+ __SYSCALL(324, sys_name_to_handle_at, 5)
+ #define __NR_open_by_handle_at			325
+ __SYSCALL(325, sys_open_by_handle_at, 3)
+-#define __NR_sync_file_range			326
++#define __NR_sync_file_range2			326
+ __SYSCALL(326, sys_sync_file_range2, 6)
+ #define __NR_perf_event_open			327
+ __SYSCALL(327, sys_perf_event_open, 5)
+diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
+index d05f8feeb8d7..17b1ef3232e4 100644
+--- a/arch/xtensa/platforms/iss/network.c
++++ b/arch/xtensa/platforms/iss/network.c
+@@ -349,8 +349,8 @@ static void iss_net_timer(unsigned long priv)
+ {
+ 	struct iss_net_private *lp = (struct iss_net_private *)priv;
+ 
+-	spin_lock(&lp->lock);
+ 	iss_net_poll();
++	spin_lock(&lp->lock);
+ 	mod_timer(&lp->timer, jiffies + lp->timer_val);
+ 	spin_unlock(&lp->lock);
+ }
+@@ -361,7 +361,7 @@ static int iss_net_open(struct net_device *dev)
+ 	struct iss_net_private *lp = netdev_priv(dev);
+ 	int err;
+ 
+-	spin_lock(&lp->lock);
++	spin_lock_bh(&lp->lock);
+ 
+ 	err = lp->tp.open(lp);
+ 	if (err < 0)
+@@ -376,9 +376,11 @@ static int iss_net_open(struct net_device *dev)
+ 	while ((err = iss_net_rx(dev)) > 0)
+ 		;
+ 
+-	spin_lock(&opened_lock);
++	spin_unlock_bh(&lp->lock);
++	spin_lock_bh(&opened_lock);
+ 	list_add(&lp->opened_list, &opened);
+-	spin_unlock(&opened_lock);
++	spin_unlock_bh(&opened_lock);
++	spin_lock_bh(&lp->lock);
+ 
+ 	init_timer(&lp->timer);
+ 	lp->timer_val = ISS_NET_TIMER_VALUE;
+@@ -387,7 +389,7 @@ static int iss_net_open(struct net_device *dev)
+ 	mod_timer(&lp->timer, jiffies + lp->timer_val);
+ 
+ out:
+-	spin_unlock(&lp->lock);
++	spin_unlock_bh(&lp->lock);
+ 	return err;
+ }
+ 
+@@ -395,7 +397,7 @@ static int iss_net_close(struct net_device *dev)
+ {
+ 	struct iss_net_private *lp = netdev_priv(dev);
+ 	netif_stop_queue(dev);
+-	spin_lock(&lp->lock);
++	spin_lock_bh(&lp->lock);
+ 
+ 	spin_lock(&opened_lock);
+ 	list_del(&opened);
+@@ -405,18 +407,17 @@ static int iss_net_close(struct net_device *dev)
+ 
+ 	lp->tp.close(lp);
+ 
+-	spin_unlock(&lp->lock);
++	spin_unlock_bh(&lp->lock);
+ 	return 0;
+ }
+ 
+ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ 	struct iss_net_private *lp = netdev_priv(dev);
+-	unsigned long flags;
+ 	int len;
+ 
+ 	netif_stop_queue(dev);
+-	spin_lock_irqsave(&lp->lock, flags);
++	spin_lock_bh(&lp->lock);
+ 
+ 	len = lp->tp.write(lp, &skb);
+ 
+@@ -438,7 +439,7 @@ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		pr_err("%s: %s failed(%d)\n", dev->name, __func__, len);
+ 	}
+ 
+-	spin_unlock_irqrestore(&lp->lock, flags);
++	spin_unlock_bh(&lp->lock);
+ 
+ 	dev_kfree_skb(skb);
+ 	return NETDEV_TX_OK;
+@@ -466,9 +467,9 @@ static int iss_net_set_mac(struct net_device *dev, void *addr)
+ 
+ 	if (!is_valid_ether_addr(hwaddr->sa_data))
+ 		return -EADDRNOTAVAIL;
+-	spin_lock(&lp->lock);
++	spin_lock_bh(&lp->lock);
+ 	memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN);
+-	spin_unlock(&lp->lock);
++	spin_unlock_bh(&lp->lock);
+ 	return 0;
+ }
+ 
+@@ -520,11 +521,11 @@ static int iss_net_configure(int index, char *init)
+ 	*lp = (struct iss_net_private) {
+ 		.device_list		= LIST_HEAD_INIT(lp->device_list),
+ 		.opened_list		= LIST_HEAD_INIT(lp->opened_list),
+-		.lock			= __SPIN_LOCK_UNLOCKED(lp.lock),
+ 		.dev			= dev,
+ 		.index			= index,
+-		};
++	};
+ 
++	spin_lock_init(&lp->lock);
+ 	/*
+ 	 * If this name ends up conflicting with an existing registered
+ 	 * netdevice, that is OK, register_netdev{,ice}() will notice this
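
The lock conversions in this driver follow the standard rule for data shared
with a timer: the callback runs in softirq context, so process-context paths
must use spin_lock_bh() or the timer can interrupt a lock holder on the same
CPU and deadlock; hoisting iss_net_poll() out from under the lock closes the
same hazard from the other direction. A kernel-style sketch of the rule, using
the 3.19-era timer API and illustrative names, not this driver's code:

#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

static DEFINE_SPINLOCK(demo_lock);
static struct timer_list demo_timer;
static unsigned long hits;

static void demo_timer_fn(unsigned long data)   /* runs in softirq context */
{
        spin_lock(&demo_lock);                  /* plain lock is fine here */
        hits++;
        mod_timer(&demo_timer, jiffies + HZ);
        spin_unlock(&demo_lock);
}

static void demo_reset(void)                    /* process context */
{
        spin_lock_bh(&demo_lock);               /* block local softirqs */
        hits = 0;
        spin_unlock_bh(&demo_lock);
}

static void demo_start(void)
{
        setup_timer(&demo_timer, demo_timer_fn, 0);
        mod_timer(&demo_timer, jiffies + HZ);
}
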
+diff --git a/arch/xtensa/platforms/xtfpga/Makefile b/arch/xtensa/platforms/xtfpga/Makefile
+index b9ae206340cd..7839d38b2337 100644
+--- a/arch/xtensa/platforms/xtfpga/Makefile
++++ b/arch/xtensa/platforms/xtfpga/Makefile
+@@ -6,4 +6,5 @@
+ #
+ # Note 2! The CFLAGS definitions are in the main makefile...
+ 
+-obj-y			= setup.o lcd.o
++obj-y			+= setup.o
++obj-$(CONFIG_XTFPGA_LCD) += lcd.o
+diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
+index 6edd20bb4565..4e0af2662a21 100644
+--- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
++++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
+@@ -40,9 +40,6 @@
+ 
+ /* UART */
+ #define DUART16552_PADDR	(XCHAL_KIO_PADDR + 0x0D050020)
+-/* LCD instruction and data addresses. */
+-#define LCD_INSTR_ADDR		((char *)IOADDR(0x0D040000))
+-#define LCD_DATA_ADDR		((char *)IOADDR(0x0D040004))
+ 
+ /* Misc. */
+ #define XTFPGA_FPGAREGS_VADDR	IOADDR(0x0D020000)
+diff --git a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
+index 0e435645af5a..4c8541ed1139 100644
+--- a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
++++ b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
+@@ -11,10 +11,25 @@
+ #ifndef __XTENSA_XTAVNET_LCD_H
+ #define __XTENSA_XTAVNET_LCD_H
+ 
++#ifdef CONFIG_XTFPGA_LCD
+ /* Display string STR at position POS on the LCD. */
+ void lcd_disp_at_pos(char *str, unsigned char pos);
+ 
+ /* Shift the contents of the LCD display left or right. */
+ void lcd_shiftleft(void);
+ void lcd_shiftright(void);
++#else
++static inline void lcd_disp_at_pos(char *str, unsigned char pos)
++{
++}
++
++static inline void lcd_shiftleft(void)
++{
++}
++
++static inline void lcd_shiftright(void)
++{
++}
++#endif
++
+ #endif
+diff --git a/arch/xtensa/platforms/xtfpga/lcd.c b/arch/xtensa/platforms/xtfpga/lcd.c
+index 2872301598df..4dc0c1b43f4b 100644
+--- a/arch/xtensa/platforms/xtfpga/lcd.c
++++ b/arch/xtensa/platforms/xtfpga/lcd.c
+@@ -1,50 +1,63 @@
+ /*
+- * Driver for the LCD display on the Tensilica LX60 Board.
++ * Driver for the LCD display on the Tensilica XTFPGA board family.
++ * http://www.mytechcorp.com/cfdata/productFile/File1/MOC-16216B-B-A0A04.pdf
+  *
+  * This file is subject to the terms and conditions of the GNU General Public
+  * License.  See the file "COPYING" in the main directory of this archive
+  * for more details.
+  *
+  * Copyright (C) 2001, 2006 Tensilica Inc.
++ * Copyright (C) 2015 Cadence Design Systems Inc.
+  */
+ 
+-/*
+- *
+- * FIXME: this code is from the examples from the LX60 user guide.
+- *
+- * The lcd_pause function does busy waiting, which is probably not
+- * great. Maybe the code could be changed to use kernel timers, or
+- * change the hardware to not need to wait.
+- */
+-
++#include <linux/delay.h>
+ #include <linux/init.h>
+ #include <linux/io.h>
+ 
+ #include <platform/hardware.h>
+ #include <platform/lcd.h>
+-#include <linux/delay.h>
+ 
+-#define LCD_PAUSE_ITERATIONS	4000
++/* LCD instruction and data addresses. */
++#define LCD_INSTR_ADDR		((char *)IOADDR(CONFIG_XTFPGA_LCD_BASE_ADDR))
++#define LCD_DATA_ADDR		(LCD_INSTR_ADDR + 4)
++
+ #define LCD_CLEAR		0x1
+ #define LCD_DISPLAY_ON		0xc
+ 
+ /* 8bit and 2 lines display */
+ #define LCD_DISPLAY_MODE8BIT	0x38
++#define LCD_DISPLAY_MODE4BIT	0x28
+ #define LCD_DISPLAY_POS		0x80
+ #define LCD_SHIFT_LEFT		0x18
+ #define LCD_SHIFT_RIGHT		0x1c
+ 
++static void lcd_put_byte(u8 *addr, u8 data)
++{
++#ifdef CONFIG_XTFPGA_LCD_8BIT_ACCESS
++	ACCESS_ONCE(*addr) = data;
++#else
++	ACCESS_ONCE(*addr) = data & 0xf0;
++	ACCESS_ONCE(*addr) = (data << 4) & 0xf0;
++#endif
++}
++
+ static int __init lcd_init(void)
+ {
+-	*LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
++	ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
+ 	mdelay(5);
+-	*LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
++	ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
+ 	udelay(200);
+-	*LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
++	ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
++	udelay(50);
++#ifndef CONFIG_XTFPGA_LCD_8BIT_ACCESS
++	ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE4BIT;
++	udelay(50);
++	lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT);
+ 	udelay(50);
+-	*LCD_INSTR_ADDR = LCD_DISPLAY_ON;
++#endif
++	lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_ON);
+ 	udelay(50);
+-	*LCD_INSTR_ADDR = LCD_CLEAR;
++	lcd_put_byte(LCD_INSTR_ADDR, LCD_CLEAR);
+ 	mdelay(10);
+ 	lcd_disp_at_pos("XTENSA LINUX", 0);
+ 	return 0;
+@@ -52,10 +65,10 @@ static int __init lcd_init(void)
+ 
+ void lcd_disp_at_pos(char *str, unsigned char pos)
+ {
+-	*LCD_INSTR_ADDR = LCD_DISPLAY_POS | pos;
++	lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_POS | pos);
+ 	udelay(100);
+ 	while (*str != 0) {
+-		*LCD_DATA_ADDR = *str;
++		lcd_put_byte(LCD_DATA_ADDR, *str);
+ 		udelay(200);
+ 		str++;
+ 	}
+@@ -63,13 +76,13 @@ void lcd_disp_at_pos(char *str, unsigned char pos)
+ 
+ void lcd_shiftleft(void)
+ {
+-	*LCD_INSTR_ADDR = LCD_SHIFT_LEFT;
++	lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_LEFT);
+ 	udelay(50);
+ }
+ 
+ void lcd_shiftright(void)
+ {
+-	*LCD_INSTR_ADDR = LCD_SHIFT_RIGHT;
++	lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_RIGHT);
+ 	udelay(50);
+ }
+ 
+diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
+index 755b90c40ddf..c0b39f304ea3 100644
+--- a/drivers/acpi/acpica/tbinstal.c
++++ b/drivers/acpi/acpica/tbinstal.c
+@@ -346,7 +346,6 @@ acpi_tb_install_standard_table(acpi_physical_address address,
+ 				 */
+ 				acpi_tb_uninstall_table(&new_table_desc);
+ 				*table_index = i;
+-				(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+ 				return_ACPI_STATUS(AE_OK);
+ 			}
+ 		}
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index dc4d8960684a..6b69f7bf2500 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -298,7 +298,11 @@ bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent)
+ 	struct acpi_device_physical_node *pn;
+ 	bool offline = true;
+ 
+-	mutex_lock(&adev->physical_node_lock);
++	/*
++	 * acpi_container_offline() calls this for all of the container's
++	 * children under the container's physical_node_lock lock.
++	 */
++	mutex_lock_nested(&adev->physical_node_lock, SINGLE_DEPTH_NESTING);
+ 
+ 	list_for_each_entry(pn, &adev->physical_node_list, node)
+ 		if (device_supports_offline(pn->dev) && !pn->dev->offline) {
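
mutex_lock_nested() with SINGLE_DEPTH_NESTING changes nothing at runtime on
non-debug kernels; it is a lockdep annotation telling the validator that taking
a child's lock while holding the parent's lock of the same class is a
deliberate, bounded nesting rather than a recursive deadlock. A kernel-style
sketch of the annotation, with an illustrative structure rather than the ACPI
one:

#include <linux/lockdep.h>
#include <linux/mutex.h>

struct node {
        struct mutex lock;      /* every node shares one lockdep class */
        struct node *parent;
};

static void visit_child(struct node *child)
{
        /*
         * The caller already holds child->parent->lock.  Without the
         * annotation, lockdep would flag this same-class acquisition
         * as a potential self-deadlock.
         */
        mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
        /* ... operate on the child ... */
        mutex_unlock(&child->lock);
}
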
+diff --git a/drivers/base/bus.c b/drivers/base/bus.c
+index 876bae5ade33..79bc203f51ef 100644
+--- a/drivers/base/bus.c
++++ b/drivers/base/bus.c
+@@ -515,11 +515,11 @@ int bus_add_device(struct device *dev)
+ 			goto out_put;
+ 		error = device_add_groups(dev, bus->dev_groups);
+ 		if (error)
+-			goto out_groups;
++			goto out_id;
+ 		error = sysfs_create_link(&bus->p->devices_kset->kobj,
+ 						&dev->kobj, dev_name(dev));
+ 		if (error)
+-			goto out_id;
++			goto out_groups;
+ 		error = sysfs_create_link(&dev->kobj,
+ 				&dev->bus->p->subsys.kobj, "subsystem");
+ 		if (error)
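
The bus.c fix restores the invariant behind the goto-unwind idiom: cleanup
labels must undo resources in exactly the reverse order they were acquired, and
each failure site jumps to the label that releases only what already exists. A
small userspace illustration of the idiom, with hypothetical allocations:

#include <stdlib.h>

static int setup(void)
{
        void *id = NULL, *groups = NULL, *link = NULL;

        id = malloc(16);
        if (!id)
                goto out;
        groups = malloc(16);
        if (!groups)
                goto out_id;         /* only id exists at this point */
        link = malloc(16);
        if (!link)
                goto out_groups;     /* undo groups first, then id */

        /* success: caller would keep all three (freed here for the demo) */
        free(link);
        free(groups);
        free(id);
        return 0;

out_groups:
        free(groups);
out_id:
        free(id);
out:
        return -1;
}

int main(void)
{
        return setup() ? 1 : 0;
}
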
+diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
+index 6e64563361f0..9c2ba1c97c42 100644
+--- a/drivers/base/cacheinfo.c
++++ b/drivers/base/cacheinfo.c
+@@ -62,15 +62,21 @@ static int cache_setup_of_node(unsigned int cpu)
+ 		return -ENOENT;
+ 	}
+ 
+-	while (np && index < cache_leaves(cpu)) {
++	while (index < cache_leaves(cpu)) {
+ 		this_leaf = this_cpu_ci->info_list + index;
+ 		if (this_leaf->level != 1)
+ 			np = of_find_next_cache_node(np);
+ 		else
+ 			np = of_node_get(np);/* cpu node itself */
++		if (!np)
++			break;
+ 		this_leaf->of_node = np;
+ 		index++;
+ 	}
++
++	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
++		return -ENOENT;
++
+ 	return 0;
+ }
+ 
+@@ -189,8 +195,11 @@ static int detect_cache_attributes(unsigned int cpu)
+ 	 * will be set up here only if they are not populated already
+ 	 */
+ 	ret = cache_shared_cpu_map_setup(cpu);
+-	if (ret)
++	if (ret) {
++		pr_warn("Unable to detect cache hierarchy from DT for CPU %d\n",
++			cpu);
+ 		goto free_ci;
++	}
+ 	return 0;
+ 
+ free_ci:
+diff --git a/drivers/base/platform.c b/drivers/base/platform.c
+index 9421fed40905..e68ab79df28b 100644
+--- a/drivers/base/platform.c
++++ b/drivers/base/platform.c
+@@ -101,6 +101,15 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
+ 	}
+ 
+ 	r = platform_get_resource(dev, IORESOURCE_IRQ, num);
++	/*
++	 * The resources may pass trigger flags to the irqs that need
++	 * to be set up. It so happens that the trigger flags for
++	 * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
++	 * settings.
++	 */
++	if (r && r->flags & IORESOURCE_BITS)
++		irqd_set_trigger_type(irq_get_irq_data(r->start),
++				      r->flags & IORESOURCE_BITS);
+ 
+ 	return r ? r->start : -ENXIO;
+ #endif
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index de4c8499cbac..288547a3c566 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -65,6 +65,7 @@ static const struct usb_device_id ath3k_table[] = {
+ 	/* Atheros AR3011 with sflash firmware*/
+ 	{ USB_DEVICE(0x0489, 0xE027) },
+ 	{ USB_DEVICE(0x0489, 0xE03D) },
++	{ USB_DEVICE(0x04F2, 0xAFF1) },
+ 	{ USB_DEVICE(0x0930, 0x0215) },
+ 	{ USB_DEVICE(0x0CF3, 0x3002) },
+ 	{ USB_DEVICE(0x0CF3, 0xE019) },
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index c91ec52a8948..b0449bb18873 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -153,6 +153,7 @@ static const struct usb_device_id blacklist_table[] = {
+ 	/* Atheros 3011 with sflash firmware */
+ 	{ USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
+ 	{ USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
++	{ USB_DEVICE(0x04f2, 0xaff1), .driver_info = BTUSB_IGNORE },
+ 	{ USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
+ 	{ USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
+ 	{ USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE },
+diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
+index a23ac0c724f0..0b7c3e8840ba 100644
+--- a/drivers/clk/at91/clk-usb.c
++++ b/drivers/clk/at91/clk-usb.c
+@@ -56,22 +56,55 @@ static unsigned long at91sam9x5_clk_usb_recalc_rate(struct clk_hw *hw,
+ 	return DIV_ROUND_CLOSEST(parent_rate, (usbdiv + 1));
+ }
+ 
+-static long at91sam9x5_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
+-					  unsigned long *parent_rate)
++static long at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw,
++					      unsigned long rate,
++					      unsigned long min_rate,
++					      unsigned long max_rate,
++					      unsigned long *best_parent_rate,
++					      struct clk_hw **best_parent_hw)
+ {
+-	unsigned long div;
++	struct clk *parent = NULL;
++	long best_rate = -EINVAL;
++	unsigned long tmp_rate;
++	int best_diff = -1;
++	int tmp_diff;
++	int i;
+ 
+-	if (!rate)
+-		return -EINVAL;
++	for (i = 0; i < __clk_get_num_parents(hw->clk); i++) {
++		int div;
+ 
+-	if (rate >= *parent_rate)
+-		return *parent_rate;
++		parent = clk_get_parent_by_index(hw->clk, i);
++		if (!parent)
++			continue;
++
++		for (div = 1; div < SAM9X5_USB_MAX_DIV + 2; div++) {
++			unsigned long tmp_parent_rate;
++
++			tmp_parent_rate = rate * div;
++			tmp_parent_rate = __clk_round_rate(parent,
++							   tmp_parent_rate);
++			tmp_rate = DIV_ROUND_CLOSEST(tmp_parent_rate, div);
++			if (tmp_rate < rate)
++				tmp_diff = rate - tmp_rate;
++			else
++				tmp_diff = tmp_rate - rate;
++
++			if (best_diff < 0 || best_diff > tmp_diff) {
++				best_rate = tmp_rate;
++				best_diff = tmp_diff;
++				*best_parent_rate = tmp_parent_rate;
++				*best_parent_hw = __clk_get_hw(parent);
++			}
++
++			if (!best_diff || tmp_rate < rate)
++				break;
++		}
+ 
+-	div = DIV_ROUND_CLOSEST(*parent_rate, rate);
+-	if (div > SAM9X5_USB_MAX_DIV + 1)
+-		div = SAM9X5_USB_MAX_DIV + 1;
++		if (!best_diff)
++			break;
++	}
+ 
+-	return DIV_ROUND_CLOSEST(*parent_rate, div);
++	return best_rate;
+ }
+ 
+ static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index)
+@@ -121,7 +154,7 @@ static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
+ 
+ static const struct clk_ops at91sam9x5_usb_ops = {
+ 	.recalc_rate = at91sam9x5_clk_usb_recalc_rate,
+-	.round_rate = at91sam9x5_clk_usb_round_rate,
++	.determine_rate = at91sam9x5_clk_usb_determine_rate,
+ 	.get_parent = at91sam9x5_clk_usb_get_parent,
+ 	.set_parent = at91sam9x5_clk_usb_set_parent,
+ 	.set_rate = at91sam9x5_clk_usb_set_rate,
+@@ -159,7 +192,7 @@ static const struct clk_ops at91sam9n12_usb_ops = {
+ 	.disable = at91sam9n12_clk_usb_disable,
+ 	.is_enabled = at91sam9n12_clk_usb_is_enabled,
+ 	.recalc_rate = at91sam9x5_clk_usb_recalc_rate,
+-	.round_rate = at91sam9x5_clk_usb_round_rate,
++	.determine_rate = at91sam9x5_clk_usb_determine_rate,
+ 	.set_rate = at91sam9x5_clk_usb_set_rate,
+ };
+ 
+@@ -179,7 +212,8 @@ at91sam9x5_clk_register_usb(struct at91_pmc *pmc, const char *name,
+ 	init.ops = &at91sam9x5_usb_ops;
+ 	init.parent_names = parent_names;
+ 	init.num_parents = num_parents;
+-	init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
++	init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
++		     CLK_SET_RATE_PARENT;
+ 
+ 	usb->hw.init = &init;
+ 	usb->pmc = pmc;
+@@ -207,7 +241,7 @@ at91sam9n12_clk_register_usb(struct at91_pmc *pmc, const char *name,
+ 	init.ops = &at91sam9n12_usb_ops;
+ 	init.parent_names = &parent_name;
+ 	init.num_parents = 1;
+-	init.flags = CLK_SET_RATE_GATE;
++	init.flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT;
+ 
+ 	usb->hw.init = &init;
+ 	usb->pmc = pmc;
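
Moving from .round_rate to .determine_rate lets the USB clock choose both a
parent and a parent rate: the callback tries every (parent, divisor) pair,
rounds the implied parent rate through the parent itself, and keeps the
candidate closest to the request. The search, stripped to a standalone sketch
with made-up rates and plain integer division standing in for
__clk_round_rate():

#include <stdio.h>

#define MAX_DIV 16

int main(void)
{
        const unsigned long parent_rates[] = { 480000000UL, 12000000UL };
        unsigned long want = 48000000UL;
        unsigned long best = 0, best_diff = ~0UL;

        for (unsigned int i = 0; i < 2 && best_diff; i++) {
                for (unsigned int div = 1; div <= MAX_DIV; div++) {
                        unsigned long r = parent_rates[i] / div;
                        unsigned long d = r > want ? r - want : want - r;

                        if (d < best_diff) {
                                best = r;
                                best_diff = d;
                        }
                        /* rates only fall as div grows: stop once past it */
                        if (!best_diff || r < want)
                                break;
                }
        }
        printf("best=%lu Hz (error %lu Hz)\n", best, best_diff);
        return 0;
}

The early exits mirror the driver's: an exact match ends the whole search, and
once a divisor undershoots the request, larger divisors can only do worse.
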
+diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
+index 08b8b3729f53..4fe9c01a14b4 100644
+--- a/drivers/clk/qcom/clk-rcg2.c
++++ b/drivers/clk/qcom/clk-rcg2.c
+@@ -242,7 +242,7 @@ static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
+ 	mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK;
+ 	cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
+ 	cfg |= rcg->parent_map[f->src] << CFG_SRC_SEL_SHIFT;
+-	if (rcg->mnd_width && f->n)
++	if (rcg->mnd_width && f->n && (f->m != f->n))
+ 		cfg |= CFG_MODE_DUAL_EDGE;
+ 	ret = regmap_update_bits(rcg->clkr.regmap,
+ 			rcg->cmd_rcgr + CFG_REG, mask, cfg);
+diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
+index afed5eb0691e..2d12fee408a2 100644
+--- a/drivers/clk/qcom/gcc-ipq806x.c
++++ b/drivers/clk/qcom/gcc-ipq806x.c
+@@ -514,8 +514,8 @@ static struct freq_tbl clk_tbl_gsbi_qup[] = {
+ 	{ 10800000, P_PXO,  1, 2,  5 },
+ 	{ 15060000, P_PLL8, 1, 2, 51 },
+ 	{ 24000000, P_PLL8, 4, 1,  4 },
++	{ 25000000, P_PXO,  1, 0,  0 },
+ 	{ 25600000, P_PLL8, 1, 1, 15 },
+-	{ 27000000, P_PXO,  1, 0,  0 },
+ 	{ 48000000, P_PLL8, 4, 1,  2 },
+ 	{ 51200000, P_PLL8, 1, 2, 15 },
+ 	{ }
+diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
+index 88e8c6bbd77f..4bc7ccd7e494 100644
+--- a/drivers/clk/samsung/clk-exynos4.c
++++ b/drivers/clk/samsung/clk-exynos4.c
+@@ -1354,7 +1354,7 @@ static struct samsung_pll_clock exynos4x12_plls[nr_plls] __initdata = {
+ 			VPLL_LOCK, VPLL_CON0, NULL),
+ };
+ 
+-static void __init exynos4_core_down_clock(enum exynos4_soc soc)
++static void __init exynos4x12_core_down_clock(void)
+ {
+ 	unsigned int tmp;
+ 
+@@ -1373,11 +1373,9 @@ static void __init exynos4_core_down_clock(enum exynos4_soc soc)
+ 	__raw_writel(tmp, reg_base + PWR_CTRL1);
+ 
+ 	/*
+-	 * Disable the clock up feature on Exynos4x12, in case it was
+-	 * enabled by bootloader.
++	 * Disable the clock up feature in case it was enabled by bootloader.
+ 	 */
+-	if (exynos4_soc == EXYNOS4X12)
+-		__raw_writel(0x0, reg_base + E4X12_PWR_CTRL2);
++	__raw_writel(0x0, reg_base + E4X12_PWR_CTRL2);
+ }
+ 
+ /* register exynos4 clocks */
+@@ -1474,7 +1472,8 @@ static void __init exynos4_clk_init(struct device_node *np,
+ 	samsung_clk_register_alias(ctx, exynos4_aliases,
+ 			ARRAY_SIZE(exynos4_aliases));
+ 
+-	exynos4_core_down_clock(soc);
++	if (soc == EXYNOS4X12)
++		exynos4x12_core_down_clock();
+ 	exynos4_clk_sleep_init();
+ 
+ 	samsung_clk_of_add_provider(np, ctx);
+diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
+index 97dc8595c3cd..c51f7c84c163 100644
+--- a/drivers/clk/tegra/clk.c
++++ b/drivers/clk/tegra/clk.c
+@@ -272,7 +272,7 @@ void __init tegra_add_of_provider(struct device_node *np)
+ 	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+ 
+ 	rst_ctlr.of_node = np;
+-	rst_ctlr.nr_resets = clk_num * 32;
++	rst_ctlr.nr_resets = periph_banks * 32;
+ 	reset_controller_register(&rst_ctlr);
+ }
+ 
+diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
+index f79dd410dede..af3b77b14835 100644
+--- a/drivers/crypto/omap-aes.c
++++ b/drivers/crypto/omap-aes.c
+@@ -554,15 +554,23 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
+ 	return err;
+ }
+ 
+-static int omap_aes_check_aligned(struct scatterlist *sg)
++static int omap_aes_check_aligned(struct scatterlist *sg, int total)
+ {
++	int len = 0;
++
+ 	while (sg) {
+ 		if (!IS_ALIGNED(sg->offset, 4))
+ 			return -1;
+ 		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
+ 			return -1;
++
++		len += sg->length;
+ 		sg = sg_next(sg);
+ 	}
++
++	if (len != total)
++		return -1;
++
+ 	return 0;
+ }
+ 
+@@ -633,8 +641,8 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
+ 	dd->in_sg = req->src;
+ 	dd->out_sg = req->dst;
+ 
+-	if (omap_aes_check_aligned(dd->in_sg) ||
+-	    omap_aes_check_aligned(dd->out_sg)) {
++	if (omap_aes_check_aligned(dd->in_sg, dd->total) ||
++	    omap_aes_check_aligned(dd->out_sg, dd->total)) {
+ 		if (omap_aes_copy_sgs(dd))
+ 			pr_err("Failed to copy SGs for unaligned cases\n");
+ 		dd->sgs_copied = 1;
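
The extra total argument closes a gap in the alignment check: a scatterlist can
be well aligned yet shorter than the request, and the DMA path must not run
past the data actually supplied. A userspace sketch of the added coverage test,
with a simplified list type:

#include <stdio.h>

struct sg {
        int length;
        const struct sg *next;
};

static int check_covers(const struct sg *sg, int total)
{
        int len = 0;

        for (; sg; sg = sg->next)
                len += sg->length;   /* alignment checks omitted here */
        return len == total ? 0 : -1;
}

int main(void)
{
        struct sg b = { 16, NULL };
        struct sg a = { 16, &b };

        /* 0 for a matching total, -1 for a short list */
        printf("%d %d\n", check_covers(&a, 32), check_covers(&a, 48));
        return 0;
}
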
+diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
+index 7bc3e9b288f3..1be85afe4a00 100644
+--- a/drivers/gpio/gpio-mvebu.c
++++ b/drivers/gpio/gpio-mvebu.c
+@@ -313,11 +313,13 @@ static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
+ {
+ 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ 	struct mvebu_gpio_chip *mvchip = gc->private;
++	struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ 	u32 mask = 1 << (d->irq - gc->irq_base);
+ 
+ 	irq_gc_lock(gc);
+-	gc->mask_cache &= ~mask;
+-	writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip));
++	ct->mask_cache_priv &= ~mask;
++
++	writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_edge_mask(mvchip));
+ 	irq_gc_unlock(gc);
+ }
+ 
+@@ -325,11 +327,13 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
+ {
+ 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ 	struct mvebu_gpio_chip *mvchip = gc->private;
++	struct irq_chip_type *ct = irq_data_get_chip_type(d);
++
+ 	u32 mask = 1 << (d->irq - gc->irq_base);
+ 
+ 	irq_gc_lock(gc);
+-	gc->mask_cache |= mask;
+-	writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip));
++	ct->mask_cache_priv |= mask;
++	writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_edge_mask(mvchip));
+ 	irq_gc_unlock(gc);
+ }
+ 
+@@ -337,11 +341,13 @@ static void mvebu_gpio_level_irq_mask(struct irq_data *d)
+ {
+ 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ 	struct mvebu_gpio_chip *mvchip = gc->private;
++	struct irq_chip_type *ct = irq_data_get_chip_type(d);
++
+ 	u32 mask = 1 << (d->irq - gc->irq_base);
+ 
+ 	irq_gc_lock(gc);
+-	gc->mask_cache &= ~mask;
+-	writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip));
++	ct->mask_cache_priv &= ~mask;
++	writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_level_mask(mvchip));
+ 	irq_gc_unlock(gc);
+ }
+ 
+@@ -349,11 +355,13 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
+ {
+ 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ 	struct mvebu_gpio_chip *mvchip = gc->private;
++	struct irq_chip_type *ct = irq_data_get_chip_type(d);
++
+ 	u32 mask = 1 << (d->irq - gc->irq_base);
+ 
+ 	irq_gc_lock(gc);
+-	gc->mask_cache |= mask;
+-	writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip));
++	ct->mask_cache_priv |= mask;
++	writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_level_mask(mvchip));
+ 	irq_gc_unlock(gc);
+ }
+ 
+diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
+index faf1c0c5ab2e..e5b19c8a0265 100644
+--- a/drivers/gpu/drm/i2c/adv7511.c
++++ b/drivers/gpu/drm/i2c/adv7511.c
+@@ -33,6 +33,7 @@ struct adv7511 {
+ 
+ 	unsigned int current_edid_segment;
+ 	uint8_t edid_buf[256];
++	bool edid_read;
+ 
+ 	wait_queue_head_t wq;
+ 	struct drm_encoder *encoder;
+@@ -379,69 +380,71 @@ static bool adv7511_hpd(struct adv7511 *adv7511)
+ 	return false;
+ }
+ 
+-static irqreturn_t adv7511_irq_handler(int irq, void *devid)
+-{
+-	struct adv7511 *adv7511 = devid;
+-
+-	if (adv7511_hpd(adv7511))
+-		drm_helper_hpd_irq_event(adv7511->encoder->dev);
+-
+-	wake_up_all(&adv7511->wq);
+-
+-	return IRQ_HANDLED;
+-}
+-
+-static unsigned int adv7511_is_interrupt_pending(struct adv7511 *adv7511,
+-						 unsigned int irq)
++static int adv7511_irq_process(struct adv7511 *adv7511)
+ {
+ 	unsigned int irq0, irq1;
+-	unsigned int pending;
+ 	int ret;
+ 
+ 	ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
+ 	if (ret < 0)
+-		return 0;
++		return ret;
++
+ 	ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(1), &irq1);
+ 	if (ret < 0)
+-		return 0;
++		return ret;
++
++	regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
++	regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
++
++	if (irq0 & ADV7511_INT0_HDP)
++		drm_helper_hpd_irq_event(adv7511->encoder->dev);
++
++	if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
++		adv7511->edid_read = true;
++
++		if (adv7511->i2c_main->irq)
++			wake_up_all(&adv7511->wq);
++	}
++
++	return 0;
++}
+ 
+-	pending = (irq1 << 8) | irq0;
++static irqreturn_t adv7511_irq_handler(int irq, void *devid)
++{
++	struct adv7511 *adv7511 = devid;
++	int ret;
+ 
+-	return pending & irq;
++	ret = adv7511_irq_process(adv7511);
++	return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
+ }
+ 
+-static int adv7511_wait_for_interrupt(struct adv7511 *adv7511, int irq,
+-				      int timeout)
++/* -----------------------------------------------------------------------------
++ * EDID retrieval
++ */
++
++static int adv7511_wait_for_edid(struct adv7511 *adv7511, int timeout)
+ {
+-	unsigned int pending;
+ 	int ret;
+ 
+ 	if (adv7511->i2c_main->irq) {
+ 		ret = wait_event_interruptible_timeout(adv7511->wq,
+-				adv7511_is_interrupt_pending(adv7511, irq),
+-				msecs_to_jiffies(timeout));
+-		if (ret <= 0)
+-			return 0;
+-		pending = adv7511_is_interrupt_pending(adv7511, irq);
++				adv7511->edid_read, msecs_to_jiffies(timeout));
+ 	} else {
+-		if (timeout < 25)
+-			timeout = 25;
+-		do {
+-			pending = adv7511_is_interrupt_pending(adv7511, irq);
+-			if (pending)
++		for (; timeout > 0; timeout -= 25) {
++			ret = adv7511_irq_process(adv7511);
++			if (ret < 0)
+ 				break;
++
++			if (adv7511->edid_read)
++				break;
++
+ 			msleep(25);
+-			timeout -= 25;
+-		} while (timeout >= 25);
++		}
+ 	}
+ 
+-	return pending;
++	return adv7511->edid_read ? 0 : -EIO;
+ }
+ 
+-/* -----------------------------------------------------------------------------
+- * EDID retrieval
+- */
+-
+ static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
+ 				  size_t len)
+ {
+@@ -463,19 +466,14 @@ static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
+ 			return ret;
+ 
+ 		if (status != 2) {
++			adv7511->edid_read = false;
+ 			regmap_write(adv7511->regmap, ADV7511_REG_EDID_SEGMENT,
+ 				     block);
+-			ret = adv7511_wait_for_interrupt(adv7511,
+-					ADV7511_INT0_EDID_READY |
+-					ADV7511_INT1_DDC_ERROR, 200);
+-
+-			if (!(ret & ADV7511_INT0_EDID_READY))
+-				return -EIO;
++			ret = adv7511_wait_for_edid(adv7511, 200);
++			if (ret < 0)
++				return ret;
+ 		}
+ 
+-		regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
+-			     ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
+-
+ 		/* Break this apart, hopefully more I2C controllers will
+ 		 * support 64 byte transfers than 256 byte transfers
+ 		 */
+@@ -528,7 +526,9 @@ static int adv7511_get_modes(struct drm_encoder *encoder,
+ 	/* Reading the EDID only works if the device is powered */
+ 	if (adv7511->dpms_mode != DRM_MODE_DPMS_ON) {
+ 		regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
+-			     ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
++			     ADV7511_INT0_EDID_READY);
++		regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
++			     ADV7511_INT1_DDC_ERROR);
+ 		regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ 				   ADV7511_POWER_POWER_DOWN, 0);
+ 		adv7511->current_edid_segment = -1;
+@@ -563,7 +563,9 @@ static void adv7511_encoder_dpms(struct drm_encoder *encoder, int mode)
+ 		adv7511->current_edid_segment = -1;
+ 
+ 		regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
+-			     ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
++			     ADV7511_INT0_EDID_READY);
++		regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
++			     ADV7511_INT1_DDC_ERROR);
+ 		regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ 				   ADV7511_POWER_POWER_DOWN, 0);
+ 		/*
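
The adv7511 hunks above replace a generic wait-for-interrupt helper with an EDID-specific wait that works both with a wired interrupt line (sleep on a waitqueue until the IRQ handler sets a flag) and without one (poll and ack the status registers in 25 ms steps until the timeout runs out). A minimal userspace sketch of the polling half of that pattern; process_events() and the edid_read flag are hypothetical stand-ins for the driver's internals:

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static bool edid_read;

    /* Stand-in for adv7511_irq_process(): read and ack status bits. */
    static int process_events(void)
    {
            static int calls;

            if (++calls >= 3)       /* pretend the 3rd poll finds EDID */
                    edid_read = true;
            return 0;
    }

    /* Poll-mode wait: step the timeout down in 25 ms slices, bailing
     * out early on an error or once the flag is set. */
    static int wait_for_edid(int timeout_ms)
    {
            for (; timeout_ms > 0; timeout_ms -= 25) {
                    if (process_events() < 0)
                            break;
                    if (edid_read)
                            break;
                    usleep(25 * 1000);
            }
            return edid_read ? 0 : -1;
    }

    int main(void)
    {
            printf("wait_for_edid: %d\n", wait_for_edid(200));
            return 0;
    }
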
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index 391d47e1f131..34d0e58f6421 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -1040,7 +1040,7 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
+ 		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);
+ 
+ 	s->media_max_req_count	= I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
+-	s->gfx_max_req_count	= I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
++	s->gfx_max_req_count	= I915_READ(GEN7_GFX_MAX_REQ_COUNT);
+ 
+ 	s->render_hwsp		= I915_READ(RENDER_HWS_PGA_GEN7);
+ 	s->ecochk		= I915_READ(GAM_ECOCHK);
+@@ -1122,7 +1122,7 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
+ 		I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);
+ 
+ 	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
+-	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->gfx_max_req_count);
++	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
+ 
+ 	I915_WRITE(RENDER_HWS_PGA_GEN7,	s->render_hwsp);
+ 	I915_WRITE(GAM_ECOCHK,		s->ecochk);
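
Both i915 hunks fix the same copy-paste slip: the save path read GEN7_MEDIA_MAX_REQ_COUNT into gfx_max_req_count, and the restore path wrote gfx_max_req_count back into the MEDIA register. Keeping each register paired with its save slot in one table makes this class of bug hard to write; a hypothetical sketch with a two-entry fake register file:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t regs[2] = { 0x11, 0x22 };   /* fake register file */

    struct s0ix_state {
            uint32_t media_max_req_count;
            uint32_t gfx_max_req_count;
    };

    /* One table pairs each register with its save slot, so the save
     * and restore loops can never disagree about which value goes
     * where. */
    static const struct { int reg; size_t off; } map[] = {
            { 0, offsetof(struct s0ix_state, media_max_req_count) },
            { 1, offsetof(struct s0ix_state, gfx_max_req_count)   },
    };

    static void save(struct s0ix_state *s)
    {
            for (size_t i = 0; i < 2; i++)
                    *(uint32_t *)((char *)s + map[i].off) = regs[map[i].reg];
    }

    static void restore(const struct s0ix_state *s)
    {
            for (size_t i = 0; i < 2; i++)
                    regs[map[i].reg] =
                            *(const uint32_t *)((const char *)s + map[i].off);
    }

    int main(void)
    {
            struct s0ix_state s;

            save(&s);
            regs[0] = regs[1] = 0;
            restore(&s);
            printf("media=%#x gfx=%#x\n", (unsigned)regs[0], (unsigned)regs[1]);
            return 0;
    }
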
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index 1464bc1f8943..247cc18a6f9b 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -3717,14 +3717,12 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
+ 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+-		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+-		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
++		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
+ 	I915_WRITE16(IMR, dev_priv->irq_mask);
+ 
+ 	I915_WRITE16(IER,
+ 		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ 		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+-		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+ 		     I915_USER_INTERRUPT);
+ 	POSTING_READ16(IER);
+ 
+@@ -3886,14 +3884,12 @@ static int i915_irq_postinstall(struct drm_device *dev)
+ 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+-		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+-		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
++		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
+ 
+ 	enable_mask =
+ 		I915_ASLE_INTERRUPT |
+ 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+-		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+ 		I915_USER_INTERRUPT;
+ 
+ 	if (I915_HAS_HOTPLUG(dev)) {
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 172de3b3433b..6e7a6f03fca3 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -1713,6 +1713,7 @@ enum punit_power_well {
+ #define   GMBUS_CYCLE_INDEX	(2<<25)
+ #define   GMBUS_CYCLE_STOP	(4<<25)
+ #define   GMBUS_BYTE_COUNT_SHIFT 16
++#define   GMBUS_BYTE_COUNT_MAX   256U
+ #define   GMBUS_SLAVE_INDEX_SHIFT 8
+ #define   GMBUS_SLAVE_ADDR_SHIFT 1
+ #define   GMBUS_SLAVE_READ	(1<<0)
+diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
+index b31088a551f2..56e437e31580 100644
+--- a/drivers/gpu/drm/i915/intel_i2c.c
++++ b/drivers/gpu/drm/i915/intel_i2c.c
+@@ -270,18 +270,17 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
+ }
+ 
+ static int
+-gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+-		u32 gmbus1_index)
++gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
++		      unsigned short addr, u8 *buf, unsigned int len,
++		      u32 gmbus1_index)
+ {
+ 	int reg_offset = dev_priv->gpio_mmio_base;
+-	u16 len = msg->len;
+-	u8 *buf = msg->buf;
+ 
+ 	I915_WRITE(GMBUS1 + reg_offset,
+ 		   gmbus1_index |
+ 		   GMBUS_CYCLE_WAIT |
+ 		   (len << GMBUS_BYTE_COUNT_SHIFT) |
+-		   (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
++		   (addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ 		   GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+ 	while (len) {
+ 		int ret;
+@@ -303,11 +302,35 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+ }
+ 
+ static int
+-gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
++gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
++		u32 gmbus1_index)
+ {
+-	int reg_offset = dev_priv->gpio_mmio_base;
+-	u16 len = msg->len;
+ 	u8 *buf = msg->buf;
++	unsigned int rx_size = msg->len;
++	unsigned int len;
++	int ret;
++
++	do {
++		len = min(rx_size, GMBUS_BYTE_COUNT_MAX);
++
++		ret = gmbus_xfer_read_chunk(dev_priv, msg->addr,
++					    buf, len, gmbus1_index);
++		if (ret)
++			return ret;
++
++		rx_size -= len;
++		buf += len;
++	} while (rx_size != 0);
++
++	return 0;
++}
++
++static int
++gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
++		       unsigned short addr, u8 *buf, unsigned int len)
++{
++	int reg_offset = dev_priv->gpio_mmio_base;
++	unsigned int chunk_size = len;
+ 	u32 val, loop;
+ 
+ 	val = loop = 0;
+@@ -319,8 +342,8 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
+ 	I915_WRITE(GMBUS3 + reg_offset, val);
+ 	I915_WRITE(GMBUS1 + reg_offset,
+ 		   GMBUS_CYCLE_WAIT |
+-		   (msg->len << GMBUS_BYTE_COUNT_SHIFT) |
+-		   (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
++		   (chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
++		   (addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ 		   GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+ 	while (len) {
+ 		int ret;
+@@ -337,6 +360,29 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
+ 		if (ret)
+ 			return ret;
+ 	}
++
++	return 0;
++}
++
++static int
++gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
++{
++	u8 *buf = msg->buf;
++	unsigned int tx_size = msg->len;
++	unsigned int len;
++	int ret;
++
++	do {
++		len = min(tx_size, GMBUS_BYTE_COUNT_MAX);
++
++		ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len);
++		if (ret)
++			return ret;
++
++		buf += len;
++		tx_size -= len;
++	} while (tx_size != 0);
++
+ 	return 0;
+ }
+ 
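
The GMBUS byte-count field can only encode transfers up to GMBUS_BYTE_COUNT_MAX (256) bytes, so the intel_i2c hunks split arbitrary-length i2c messages into bounded chunks, advancing the buffer and stopping on the first error. The chunking loop is a generic pattern; a minimal sketch where do_chunk() is a hypothetical stand-in for the per-chunk hardware transfer:

    #include <stdio.h>

    #define CHUNK_MAX 256u   /* mirrors GMBUS_BYTE_COUNT_MAX */

    /* Stand-in for the per-chunk hardware transfer: 0 on success,
     * negative errno-style value on failure. */
    static int do_chunk(const unsigned char *buf, unsigned int len)
    {
            printf("transfer %u bytes at %p\n", len, (const void *)buf);
            return 0;
    }

    /* Split a message of arbitrary length into <= CHUNK_MAX pieces. */
    static int xfer(const unsigned char *buf, unsigned int size)
    {
            do {
                    unsigned int len = size < CHUNK_MAX ? size : CHUNK_MAX;
                    int ret = do_chunk(buf, len);

                    if (ret)
                            return ret;
                    buf += len;
                    size -= len;
            } while (size != 0);
            return 0;
    }

    int main(void)
    {
            unsigned char msg[600];

            return xfer(msg, sizeof(msg));  /* chunks: 256 + 256 + 88 */
    }
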
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index 86807ee91bd1..9bd56116fd5a 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -330,8 +330,10 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
+ 		misc |= ATOM_COMPOSITESYNC;
+ 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ 		misc |= ATOM_INTERLACE;
+-	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ 		misc |= ATOM_DOUBLE_CLOCK_MODE;
++	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++		misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
+ 
+ 	args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
+ 	args.ucCRTC = radeon_crtc->crtc_id;
+@@ -374,8 +376,10 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
+ 		misc |= ATOM_COMPOSITESYNC;
+ 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ 		misc |= ATOM_INTERLACE;
+-	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ 		misc |= ATOM_DOUBLE_CLOCK_MODE;
++	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++		misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
+ 
+ 	args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
+ 	args.ucCRTC = radeon_crtc->crtc_id;
+diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
+index 433f72a1c006..995e2a0cf096 100644
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -135,7 +135,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+ 			   GFP_KERNEL);
+ 	if (!open_info) {
+ 		err = -ENOMEM;
+-		goto error0;
++		goto error_gpadl;
+ 	}
+ 
+ 	init_completion(&open_info->waitevent);
+@@ -151,7 +151,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+ 
+ 	if (userdatalen > MAX_USER_DEFINED_BYTES) {
+ 		err = -EINVAL;
+-		goto error0;
++		goto error_gpadl;
+ 	}
+ 
+ 	if (userdatalen)
+@@ -195,6 +195,9 @@ error1:
+ 	list_del(&open_info->msglistentry);
+ 	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+ 
++error_gpadl:
++	vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
++
+ error0:
+ 	free_pages((unsigned long)out,
+ 		get_order(send_ringbuffer_size + recv_ringbuffer_size));
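
The vmbus_open() fix adds an error_gpadl label so that failures occurring after the GPADL has been established tear it down before falling through into the earlier cleanup. Kernel error paths conventionally unwind in reverse acquisition order through stacked labels; a minimal sketch of the idiom with hypothetical acquire/release pairs:

    #include <stdio.h>

    static int acquire_a(void) { puts("A acquired"); return 0; }
    static int acquire_b(void) { puts("B acquired"); return 0; }
    static int acquire_c(void) { puts("C failed");   return -1; }
    static void release_a(void) { puts("A released"); }
    static void release_b(void) { puts("B released"); }

    /* Each failure jumps to the label that undoes everything acquired
     * so far; later labels fall through into earlier ones. */
    static int open_channel(void)
    {
            int err;

            err = acquire_a();
            if (err)
                    goto out;
            err = acquire_b();
            if (err)
                    goto err_a;
            err = acquire_c();
            if (err)
                    goto err_b;
            return 0;

    err_b:
            release_b();
    err_a:
            release_a();
    out:
            return err;
    }

    int main(void) { return open_channel() ? 1 : 0; }
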
+diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
+index 92462843db66..648cea462269 100644
+--- a/drivers/i2c/busses/i2c-rk3x.c
++++ b/drivers/i2c/busses/i2c-rk3x.c
+@@ -800,7 +800,7 @@ static int rk3x_i2c_xfer(struct i2c_adapter *adap,
+ 	clk_disable(i2c->clk);
+ 	spin_unlock_irqrestore(&i2c->lock, flags);
+ 
+-	return ret;
++	return ret < 0 ? ret : num;
+ }
+ 
+ static u32 rk3x_i2c_func(struct i2c_adapter *adap)
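
The rk3x fix addresses the master_xfer contract: on success the handler must return the number of messages transferred, not 0, because callers such as i2c_transfer() compare the return value against the message count. A tiny sketch of the convention, with the hardware work reduced to a hypothetical failure flag:

    #include <stdio.h>

    /* master_xfer-style handler: negative errno on failure, otherwise
     * the number of messages completed. */
    static int xfer(int num, int fail)
    {
            int ret = fail ? -5 /* -EIO */ : 0;

            /* ... per-message hardware work would happen here ... */
            return ret < 0 ? ret : num;
    }

    int main(void)
    {
            printf("success: %d (expect 2)\n", xfer(2, 0));
            printf("failure: %d (expect -5)\n", xfer(2, 1));
            return 0;
    }
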
+diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
+index 63663332391d..c421e1159037 100644
+--- a/drivers/i2c/i2c-core.c
++++ b/drivers/i2c/i2c-core.c
+@@ -596,6 +596,7 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
+ 	adap->bus_recovery_info->set_scl(adap, 1);
+ 	return i2c_generic_recovery(adap);
+ }
++EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery);
+ 
+ int i2c_generic_gpio_recovery(struct i2c_adapter *adap)
+ {
+@@ -610,6 +611,7 @@ int i2c_generic_gpio_recovery(struct i2c_adapter *adap)
+ 
+ 	return ret;
+ }
++EXPORT_SYMBOL_GPL(i2c_generic_gpio_recovery);
+ 
+ int i2c_recover_bus(struct i2c_adapter *adap)
+ {
+@@ -619,6 +621,7 @@ int i2c_recover_bus(struct i2c_adapter *adap)
+ 	dev_dbg(&adap->dev, "Trying i2c bus recovery\n");
+ 	return adap->bus_recovery_info->recover_bus(adap);
+ }
++EXPORT_SYMBOL_GPL(i2c_recover_bus);
+ 
+ static int i2c_device_probe(struct device *dev)
+ {
+diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
+index 593f7ca9adc7..06cc1ff088f1 100644
+--- a/drivers/i2c/i2c-mux.c
++++ b/drivers/i2c/i2c-mux.c
+@@ -32,8 +32,9 @@ struct i2c_mux_priv {
+ 	struct i2c_algorithm algo;
+ 
+ 	struct i2c_adapter *parent;
+-	void *mux_priv;	/* the mux chip/device */
+-	u32  chan_id;	/* the channel id */
++	struct device *mux_dev;
++	void *mux_priv;
++	u32 chan_id;
+ 
+ 	int (*select)(struct i2c_adapter *, void *mux_priv, u32 chan_id);
+ 	int (*deselect)(struct i2c_adapter *, void *mux_priv, u32 chan_id);
+@@ -119,6 +120,7 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
+ 
+ 	/* Set up private adapter data */
+ 	priv->parent = parent;
++	priv->mux_dev = mux_dev;
+ 	priv->mux_priv = mux_priv;
+ 	priv->chan_id = chan_id;
+ 	priv->select = select;
+@@ -203,7 +205,7 @@ void i2c_del_mux_adapter(struct i2c_adapter *adap)
+ 	char symlink_name[20];
+ 
+ 	snprintf(symlink_name, sizeof(symlink_name), "channel-%u", priv->chan_id);
+-	sysfs_remove_link(&adap->dev.parent->kobj, symlink_name);
++	sysfs_remove_link(&priv->mux_dev->kobj, symlink_name);
+ 
+ 	sysfs_remove_link(&priv->adap.dev.kobj, "mux_device");
+ 	i2c_del_adapter(adap);
+diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
+index 8c014b5dab4c..38acb3cfc545 100644
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -99,12 +99,15 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
+ 	if (dmasync)
+ 		dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
+ 
++	if (!size)
++		return ERR_PTR(-EINVAL);
++
+ 	/*
+ 	 * If the combination of the addr and size requested for this memory
+ 	 * region causes an integer overflow, return error.
+ 	 */
+-	if ((PAGE_ALIGN(addr + size) <= size) ||
+-	    (PAGE_ALIGN(addr + size) <= addr))
++	if (((addr + size) < addr) ||
++	    PAGE_ALIGN(addr + size) < (addr + size))
+ 		return ERR_PTR(-EINVAL);
+ 
+ 	if (!can_do_mlock())
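
The rewritten ib_umem_get() validation tests the two failure modes directly: wrap-around of addr + size itself, and wrap-around introduced by rounding the end of the region up to a page boundary, plus an explicit rejection of zero-length regions. A userspace sketch of the same checks, with PAGE_ALIGN written out since it is a kernel macro:

    #include <stdio.h>

    #define PAGE_SIZE 4096ul
    /* Kernel's PAGE_ALIGN: round up to the next page boundary. */
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    static int range_ok(unsigned long addr, unsigned long size)
    {
            if (!size)
                    return 0;   /* empty regions are invalid */
            if (addr + size < addr)
                    return 0;   /* addr + size wrapped around */
            if (PAGE_ALIGN(addr + size) < addr + size)
                    return 0;   /* rounding to a page wrapped around */
            return 1;
    }

    int main(void)
    {
            printf("%d\n", range_ok(0x1000, 0x2000));   /* 1: fine */
            printf("%d\n", range_ok(~0ul - 10, 100));   /* 0: sum wraps */
            printf("%d\n", range_ok(~0ul - 10, 5));     /* 0: align wraps */
            return 0;
    }
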
+diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
+index c880329b4d64..51722be3e5e9 100644
+--- a/drivers/infiniband/hw/mlx4/qp.c
++++ b/drivers/infiniband/hw/mlx4/qp.c
+@@ -2564,8 +2564,7 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
+ 
+ 	memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
+ 
+-	*lso_hdr_sz  = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
+-				   wr->wr.ud.hlen);
++	*lso_hdr_sz  = cpu_to_be32(wr->wr.ud.mss << 16 | wr->wr.ud.hlen);
+ 	*lso_seg_len = halign;
+ 	return 0;
+ }
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index 20e859a6f1a6..76eb57b31a59 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -409,8 +409,8 @@ int iser_send_command(struct iscsi_conn *conn,
+ 	if (scsi_prot_sg_count(sc)) {
+ 		prot_buf->buf  = scsi_prot_sglist(sc);
+ 		prot_buf->size = scsi_prot_sg_count(sc);
+-		prot_buf->data_len = data_buf->data_len >>
+-				     ilog2(sc->device->sector_size) * 8;
++		prot_buf->data_len = (data_buf->data_len >>
++				     ilog2(sc->device->sector_size)) * 8;
+ 	}
+ 
+ 	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
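
The iser fix is a pure operator-precedence bug: in C, * binds tighter than >>, so data_len >> ilog2(x) * 8 shifts by eight times the intended amount. A short demonstration with safe shift widths (the real bug's shift count could even exceed the type width, which is undefined behavior):

    #include <stdio.h>

    int main(void)
    {
            unsigned int data_len = 4096, shift = 2;

            /* '*' binds tighter than '>>': shifts by 16, not by 2. */
            unsigned int wrong = data_len >> shift * 8;
            unsigned int right = (data_len >> shift) * 8;

            printf("wrong=%u right=%u\n", wrong, right);
            /* prints: wrong=0 right=8192 */
            return 0;
    }
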
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index dafb3c531f96..0b57ba2c22cb 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -222,7 +222,7 @@ fail:
+ static void
+ isert_free_rx_descriptors(struct isert_conn *isert_conn)
+ {
+-	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
++	struct ib_device *ib_dev = isert_conn->conn_device->ib_device;
+ 	struct iser_rx_desc *rx_desc;
+ 	int i;
+ 
+@@ -719,8 +719,8 @@ out:
+ static void
+ isert_connect_release(struct isert_conn *isert_conn)
+ {
+-	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ 	struct isert_device *device = isert_conn->conn_device;
++	struct ib_device *ib_dev = device->ib_device;
+ 
+ 	isert_dbg("conn %p\n", isert_conn);
+ 
+@@ -728,7 +728,8 @@ isert_connect_release(struct isert_conn *isert_conn)
+ 		isert_conn_free_fastreg_pool(isert_conn);
+ 
+ 	isert_free_rx_descriptors(isert_conn);
+-	rdma_destroy_id(isert_conn->conn_cm_id);
++	if (isert_conn->conn_cm_id)
++		rdma_destroy_id(isert_conn->conn_cm_id);
+ 
+ 	if (isert_conn->conn_qp) {
+ 		struct isert_comp *comp = isert_conn->conn_qp->recv_cq->cq_context;
+@@ -878,12 +879,15 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id,
+ 	return 0;
+ }
+ 
+-static void
++static int
+ isert_connect_error(struct rdma_cm_id *cma_id)
+ {
+ 	struct isert_conn *isert_conn = cma_id->qp->qp_context;
+ 
++	isert_conn->conn_cm_id = NULL;
+ 	isert_put_conn(isert_conn);
++
++	return -1;
+ }
+ 
+ static int
+@@ -912,7 +916,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ 	case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
+ 	case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
+ 	case RDMA_CM_EVENT_CONNECT_ERROR:
+-		isert_connect_error(cma_id);
++		ret = isert_connect_error(cma_id);
+ 		break;
+ 	default:
+ 		isert_err("Unhandled RDMA CMA event: %d\n", event->event);
+@@ -1848,11 +1852,13 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
+ 	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
+ 	spin_unlock_bh(&cmd->istate_lock);
+ 
+-	if (ret)
++	if (ret) {
++		target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+ 		transport_send_check_condition_and_sense(se_cmd,
+ 							 se_cmd->pi_err, 0);
+-	else
++	} else {
+ 		target_execute_cmd(se_cmd);
++	}
+ }
+ 
+ static void
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index 6e22682c8255..991dc6b20a58 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -893,6 +893,21 @@ static psmouse_ret_t elantech_process_byte(struct psmouse *psmouse)
+ }
+ 
+ /*
++ * This writes the reg_07 value again to the hardware at the end of every
++ * set_rate call because the register loses its value. reg_07 allows setting
++ * absolute mode on v4 hardware
++ */
++static void elantech_set_rate_restore_reg_07(struct psmouse *psmouse,
++		unsigned int rate)
++{
++	struct elantech_data *etd = psmouse->private;
++
++	etd->original_set_rate(psmouse, rate);
++	if (elantech_write_reg(psmouse, 0x07, etd->reg_07))
++		psmouse_err(psmouse, "restoring reg_07 failed\n");
++}
++
++/*
+  * Put the touchpad into absolute mode
+  */
+ static int elantech_set_absolute_mode(struct psmouse *psmouse)
+@@ -1094,6 +1109,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
+  * Asus K53SV              0x450f01        78, 15, 0c      2 hw buttons
+  * Asus G46VW              0x460f02        00, 18, 0c      2 hw buttons
+  * Asus G750JX             0x360f00        00, 16, 0c      2 hw buttons
++ * Asus TP500LN            0x381f17        10, 14, 0e      clickpad
++ * Asus X750JN             0x381f17        10, 14, 0e      clickpad
+  * Asus UX31               0x361f00        20, 15, 0e      clickpad
+  * Asus UX32VD             0x361f02        00, 15, 0e      clickpad
+  * Avatar AVIU-145A2       0x361f00        ?               clickpad
+@@ -1635,6 +1652,11 @@ int elantech_init(struct psmouse *psmouse)
+ 		goto init_fail;
+ 	}
+ 
++	if (etd->fw_version == 0x381f17) {
++		etd->original_set_rate = psmouse->set_rate;
++		psmouse->set_rate = elantech_set_rate_restore_reg_07;
++	}
++
+ 	if (elantech_set_input_params(psmouse)) {
+ 		psmouse_err(psmouse, "failed to query touchpad range.\n");
+ 		goto init_fail;
+diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
+index 6f3afec02f03..f965d1569cc3 100644
+--- a/drivers/input/mouse/elantech.h
++++ b/drivers/input/mouse/elantech.h
+@@ -142,6 +142,7 @@ struct elantech_data {
+ 	struct finger_pos mt[ETP_MAX_FINGERS];
+ 	unsigned char parity[256];
+ 	int (*send_cmd)(struct psmouse *psmouse, unsigned char c, unsigned char *param);
++	void (*original_set_rate)(struct psmouse *psmouse, unsigned int rate);
+ };
+ 
+ #ifdef CONFIG_MOUSE_PS2_ELANTECH
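
The elantech change saves the original psmouse set_rate handler in the driver's private data and installs a wrapper that calls it and then rewrites reg_07, which the rate change clobbers on the affected firmware. Wrapping a function pointer while keeping the original for chaining is a common C decoration pattern; a hypothetical sketch:

    #include <stdio.h>

    struct dev {
            void (*set_rate)(struct dev *d, unsigned int rate);
            void (*original_set_rate)(struct dev *d, unsigned int rate);
            unsigned int reg_07;   /* value the hardware forgets */
    };

    static void hw_set_rate(struct dev *d, unsigned int rate)
    {
            printf("rate=%u (reg_07 lost)\n", rate);
            d->reg_07 = 0;
    }

    /* Wrapper: run the saved handler, then restore the register. */
    static void set_rate_restore_reg_07(struct dev *d, unsigned int rate)
    {
            d->original_set_rate(d, rate);
            d->reg_07 = 0x01;
            printf("reg_07 restored to %#x\n", d->reg_07);
    }

    int main(void)
    {
            struct dev d = { .set_rate = hw_set_rate, .reg_07 = 0x01 };

            /* Install the wrapper, keeping the original for chaining. */
            d.original_set_rate = d.set_rate;
            d.set_rate = set_rate_restore_reg_07;

            d.set_rate(&d, 100);
            return 0;
    }
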
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 08981be7baa1..a9f2266049af 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -920,11 +920,10 @@ static int crypt_convert(struct crypt_config *cc,
+ 
+ 		switch (r) {
+ 		/* async */
++		case -EINPROGRESS:
+ 		case -EBUSY:
+ 			wait_for_completion(&ctx->restart);
+ 			reinit_completion(&ctx->restart);
+-			/* fall through*/
+-		case -EINPROGRESS:
+ 			ctx->req = NULL;
+ 			ctx->cc_sector++;
+ 			continue;
+@@ -1315,10 +1314,8 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
+ 	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
+ 	struct crypt_config *cc = io->cc;
+ 
+-	if (error == -EINPROGRESS) {
+-		complete(&ctx->restart);
++	if (error == -EINPROGRESS)
+ 		return;
+-	}
+ 
+ 	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
+ 		error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
+@@ -1329,12 +1326,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
+ 	crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
+ 
+ 	if (!atomic_dec_and_test(&ctx->cc_pending))
+-		return;
++		goto done;
+ 
+ 	if (bio_data_dir(io->base_bio) == READ)
+ 		kcryptd_crypt_read_done(io);
+ 	else
+ 		kcryptd_crypt_write_io_submit(io, 1);
++done:
++	if (!completion_done(&ctx->restart))
++		complete(&ctx->restart);
+ }
+ 
+ static void kcryptd_crypt(struct work_struct *work)
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 709755fb6d7b..193feb92c320 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -248,6 +248,7 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
+ 	const int rw = bio_data_dir(bio);
+ 	struct mddev *mddev = q->queuedata;
+ 	unsigned int sectors;
++	int cpu;
+ 
+ 	if (mddev == NULL || mddev->pers == NULL
+ 	    || !mddev->ready) {
+@@ -283,7 +284,10 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
+ 	sectors = bio_sectors(bio);
+ 	mddev->pers->make_request(mddev, bio);
+ 
+-	generic_start_io_acct(rw, sectors, &mddev->gendisk->part0);
++	cpu = part_stat_lock();
++	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
++	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
++	part_stat_unlock();
+ 
+ 	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
+ 		wake_up(&mddev->sb_wait);
+diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
+index ba6b85de96d2..d5c12e5b6125 100644
+--- a/drivers/md/raid0.c
++++ b/drivers/md/raid0.c
+@@ -319,7 +319,7 @@ static struct strip_zone *find_zone(struct r0conf *conf,
+ 
+ /*
+  * remaps the bio to the target device. we separate two flows.
+- * power 2 flow and a general flow for the sake of perfromance
++ * power 2 flow and a general flow for the sake of performance
+ */
+ static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
+ 				sector_t sector, sector_t *sector_offset)
+@@ -537,6 +537,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
+ 			split = bio;
+ 		}
+ 
++		sector = bio->bi_iter.bi_sector;
+ 		zone = find_zone(mddev->private, &sector);
+ 		tmp_dev = map_sector(mddev, zone, sector, &sector);
+ 		split->bi_bdev = tmp_dev->bdev;
+diff --git a/drivers/media/rc/img-ir/img-ir-core.c b/drivers/media/rc/img-ir/img-ir-core.c
+index 77c78de4f5bf..7020659f23c2 100644
+--- a/drivers/media/rc/img-ir/img-ir-core.c
++++ b/drivers/media/rc/img-ir/img-ir-core.c
+@@ -146,7 +146,7 @@ static int img_ir_remove(struct platform_device *pdev)
+ {
+ 	struct img_ir_priv *priv = platform_get_drvdata(pdev);
+ 
+-	free_irq(priv->irq, img_ir_isr);
++	free_irq(priv->irq, priv);
+ 	img_ir_remove_hw(priv);
+ 	img_ir_remove_raw(priv);
+ 
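
The img-ir fix passes the same cookie to free_irq() that was given to request_irq(); the old code passed the ISR function instead, so the IRQ core could not match and remove the handler registration. A toy model of why the dev_id pointers must be identical (the single-slot registry below stands in for the kernel's per-line action list):

    #include <stdio.h>

    /* Toy IRQ core: one action slot keyed by dev_id. */
    static void *registered_dev_id;

    static int request_irq_toy(void *dev_id)
    {
            registered_dev_id = dev_id;
            return 0;
    }

    static int free_irq_toy(void *dev_id)
    {
            if (dev_id != registered_dev_id) {
                    puts("free_irq: no matching handler, nothing removed");
                    return -1;
            }
            registered_dev_id = NULL;
            puts("handler removed");
            return 0;
    }

    static void isr(void) { }

    int main(void)
    {
            int priv;   /* stands in for the driver's private state */

            request_irq_toy(&priv);
            free_irq_toy((void *)isr);   /* the bug: wrong cookie */
            free_irq_toy(&priv);         /* the fix: same cookie */
            return 0;
    }
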
+diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
+index a47629108c1b..5bd940fd7a50 100644
+--- a/drivers/media/usb/stk1160/stk1160-v4l.c
++++ b/drivers/media/usb/stk1160/stk1160-v4l.c
+@@ -244,6 +244,11 @@ static int stk1160_stop_streaming(struct stk1160 *dev)
+ 	if (mutex_lock_interruptible(&dev->v4l_lock))
+ 		return -ERESTARTSYS;
+ 
++	/*
++	 * Once URBs are cancelled, the URB complete handler
++	 * won't be running. This is required to safely release the
++	 * current buffer (dev->isoc_ctl.buf).
++	 */
+ 	stk1160_cancel_isoc(dev);
+ 
+ 	/*
+@@ -624,8 +629,16 @@ void stk1160_clear_queue(struct stk1160 *dev)
+ 		stk1160_info("buffer [%p/%d] aborted\n",
+ 				buf, buf->vb.v4l2_buf.index);
+ 	}
+-	/* It's important to clear current buffer */
+-	dev->isoc_ctl.buf = NULL;
++
++	/* It's important to release the current buffer */
++	if (dev->isoc_ctl.buf) {
++		buf = dev->isoc_ctl.buf;
++		dev->isoc_ctl.buf = NULL;
++
++		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
++		stk1160_info("buffer [%p/%d] aborted\n",
++				buf, buf->vb.v4l2_buf.index);
++	}
+ 	spin_unlock_irqrestore(&dev->buf_lock, flags);
+ }
+ 
+diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
+index fc145d202c46..922a750640e8 100644
+--- a/drivers/memstick/core/mspro_block.c
++++ b/drivers/memstick/core/mspro_block.c
+@@ -758,7 +758,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
+ 
+ 		if (error || (card->current_mrq.tpc == MSPRO_CMD_STOP)) {
+ 			if (msb->data_dir == READ) {
+-				for (cnt = 0; cnt < msb->current_seg; cnt++)
++				for (cnt = 0; cnt < msb->current_seg; cnt++) {
+ 					t_len += msb->req_sg[cnt].length
+ 						 / msb->page_size;
+ 
+@@ -766,6 +766,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
+ 						t_len += msb->current_page - 1;
+ 
+ 					t_len *= msb->page_size;
++				}
+ 			}
+ 		} else
+ 			t_len = blk_rq_bytes(msb->block_req);
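
The mspro_block hunk is a missing-braces bug: the for loop was meant to cover three statements, but without braces only the first was in its body, so the trailing page/length accounting ran once instead of per segment. A minimal reproduction of the shape of the bug (gcc's -Wmisleading-indentation flags exactly this):

    #include <stdio.h>

    int main(void)
    {
            int lens[] = { 4, 8, 16 };
            long without_braces = 0, with_braces = 0;

            /* Without braces only the first statement repeats; the
             * scaling step runs once, after the loop, despite the
             * indentation. */
            for (int i = 0; i < 3; i++)
                    without_braces += lens[i];
                    without_braces *= 2;

            /* With braces both statements repeat per iteration. */
            for (int i = 0; i < 3; i++) {
                    with_braces += lens[i];
                    with_braces *= 2;
            }

            printf("without braces: %ld\n", without_braces);  /* 56 */
            printf("with braces:    %ld\n", with_braces);     /* 96 */
            return 0;
    }
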
+diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
+index 2a87f69be53d..1aed3b7b8d9b 100644
+--- a/drivers/mfd/mfd-core.c
++++ b/drivers/mfd/mfd-core.c
+@@ -128,7 +128,7 @@ static int mfd_add_device(struct device *parent, int id,
+ 	int platform_id;
+ 	int r;
+ 
+-	if (id < 0)
++	if (id == PLATFORM_DEVID_AUTO)
+ 		platform_id = id;
+ 	else
+ 		platform_id = id + cell->id;
+diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
+index 15cb8b7ffc34..9bad746425cb 100644
+--- a/drivers/mmc/host/sunxi-mmc.c
++++ b/drivers/mmc/host/sunxi-mmc.c
+@@ -908,7 +908,9 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
+ 		return PTR_ERR(host->clk_mmc);
+ 	}
+ 
+-	host->reset = devm_reset_control_get(&pdev->dev, "ahb");
++	host->reset = devm_reset_control_get_optional(&pdev->dev, "ahb");
++	if (PTR_ERR(host->reset) == -EPROBE_DEFER)
++		return PTR_ERR(host->reset);
+ 
+ 	ret = clk_prepare_enable(host->clk_ahb);
+ 	if (ret) {
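
The sunxi-mmc change switches to the _optional variant of the reset-control lookup and defers probing only when the resource exists but its provider is not ready yet (-EPROBE_DEFER); a genuinely absent optional resource is not an error. A sketch of that probe-time decision using toy ERR_PTR/IS_ERR helpers in place of the kernel's:

    #include <stdio.h>

    #define EPROBE_DEFER 517

    /* Toy ERR_PTR convention: small negative values encode errors. */
    static void *ERR_PTR(long err) { return (void *)err; }
    static long PTR_ERR(const void *p) { return (long)p; }
    static int IS_ERR(const void *p)
    {
            return (unsigned long)p >= (unsigned long)-4095;
    }

    static int handle;   /* stands in for a real reset controller */

    /* scenario 0: present, 1: absent (optional -> NULL),
     * 2: provider not ready yet (must defer the whole probe). */
    static void *get_optional_reset(int scenario)
    {
            if (scenario == 0)
                    return &handle;
            if (scenario == 1)
                    return NULL;
            return ERR_PTR(-EPROBE_DEFER);
    }

    static int probe(int scenario)
    {
            void *rst = get_optional_reset(scenario);

            if (IS_ERR(rst) && PTR_ERR(rst) == -EPROBE_DEFER)
                    return -EPROBE_DEFER;   /* retry probe later */
            if (rst)
                    puts("reset line present, will deassert it");
            else
                    puts("no reset line, continuing without one");
            return 0;
    }

    int main(void)
    {
            for (int s = 0; s < 3; s++)
                    printf("probe(%d) = %d\n", s, probe(s));
            return 0;
    }
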
+diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
+index 9d2e16f3150a..b5e154856994 100644
+--- a/drivers/mtd/ubi/attach.c
++++ b/drivers/mtd/ubi/attach.c
+@@ -410,7 +410,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
+ 		second_is_newer = !second_is_newer;
+ 	} else {
+ 		dbg_bld("PEB %d CRC is OK", pnum);
+-		bitflips = !!err;
++		bitflips |= !!err;
+ 	}
+ 	mutex_unlock(&ubi->buf_mutex);
+ 
+diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
+index 3410ea8109f8..d347ecf1d27c 100644
+--- a/drivers/mtd/ubi/cdev.c
++++ b/drivers/mtd/ubi/cdev.c
+@@ -454,7 +454,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
+ 		/* Validate the request */
+ 		err = -EINVAL;
+ 		if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
+-		    req.bytes < 0 || req.lnum >= vol->usable_leb_size)
++		    req.bytes < 0 || req.bytes > vol->usable_leb_size)
+ 			break;
+ 
+ 		err = get_exclusive(ubi, desc);
+diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
+index a40020cf0923..8c74076efb9e 100644
+--- a/drivers/mtd/ubi/eba.c
++++ b/drivers/mtd/ubi/eba.c
+@@ -1362,7 +1362,8 @@ int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
+ 				 * during re-size.
+ 				 */
+ 				ubi_move_aeb_to_list(av, aeb, &ai->erase);
+-			vol->eba_tbl[aeb->lnum] = aeb->pnum;
++			else
++				vol->eba_tbl[aeb->lnum] = aeb->pnum;
+ 		}
+ 	}
+ 
+diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
+index 834f6fe1f5fa..4e07f81094ae 100644
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -1005,7 +1005,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
+ 				int shutdown)
+ {
+ 	int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
+-	int vol_id = -1, uninitialized_var(lnum);
++	int vol_id = -1, lnum = -1;
+ #ifdef CONFIG_MTD_UBI_FASTMAP
+ 	int anchor = wrk->anchor;
+ #endif
+diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
+index 83140cbb5f01..82f9c554021d 100644
+--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
+@@ -144,6 +144,11 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
+ 				     struct e1000_rx_ring *rx_ring,
+ 				     int *work_done, int work_to_do);
++static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
++					 struct e1000_rx_ring *rx_ring,
++					 int cleaned_count)
++{
++}
+ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+ 				   struct e1000_rx_ring *rx_ring,
+ 				   int cleaned_count);
+@@ -3548,8 +3553,11 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
+ 		msleep(1);
+ 	/* e1000_down has a dependency on max_frame_size */
+ 	hw->max_frame_size = max_frame;
+-	if (netif_running(netdev))
++	if (netif_running(netdev)) {
++		/* prevent buffers from being reallocated */
++		adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
+ 		e1000_down(adapter);
++	}
+ 
+ 	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+ 	 * means we reserve 2 more, this pushes us to allocate from the next
+diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
+index af829c578400..7ace07dad6a3 100644
+--- a/drivers/net/ethernet/marvell/pxa168_eth.c
++++ b/drivers/net/ethernet/marvell/pxa168_eth.c
+@@ -1508,7 +1508,8 @@ static int pxa168_eth_probe(struct platform_device *pdev)
+ 		np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ 		if (!np) {
+ 			dev_err(&pdev->dev, "missing phy-handle\n");
+-			return -EINVAL;
++			err = -EINVAL;
++			goto err_netdev;
+ 		}
+ 		of_property_read_u32(np, "reg", &pep->phy_addr);
+ 		pep->phy_intf = of_get_phy_mode(pdev->dev.of_node);
+@@ -1526,7 +1527,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
+ 	pep->smi_bus = mdiobus_alloc();
+ 	if (pep->smi_bus == NULL) {
+ 		err = -ENOMEM;
+-		goto err_base;
++		goto err_netdev;
+ 	}
+ 	pep->smi_bus->priv = pep;
+ 	pep->smi_bus->name = "pxa168_eth smi";
+@@ -1551,13 +1552,10 @@ err_mdiobus:
+ 	mdiobus_unregister(pep->smi_bus);
+ err_free_mdio:
+ 	mdiobus_free(pep->smi_bus);
+-err_base:
+-	iounmap(pep->base);
+ err_netdev:
+ 	free_netdev(dev);
+ err_clk:
+-	clk_disable(clk);
+-	clk_put(clk);
++	clk_disable_unprepare(clk);
+ 	return err;
+ }
+ 
+@@ -1574,13 +1572,9 @@ static int pxa168_eth_remove(struct platform_device *pdev)
+ 	if (pep->phy)
+ 		phy_disconnect(pep->phy);
+ 	if (pep->clk) {
+-		clk_disable(pep->clk);
+-		clk_put(pep->clk);
+-		pep->clk = NULL;
++		clk_disable_unprepare(pep->clk);
+ 	}
+ 
+-	iounmap(pep->base);
+-	pep->base = NULL;
+ 	mdiobus_unregister(pep->smi_bus);
+ 	mdiobus_free(pep->smi_bus);
+ 	unregister_netdev(dev);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+index 90e0f045a6bc..33637eb16ca0 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+@@ -983,20 +983,21 @@ static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc)
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
+ 
+ 	/* check if requested function is supported by the device */
+-	if ((hfunc == ETH_RSS_HASH_TOP &&
+-	     !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) ||
+-	    (hfunc == ETH_RSS_HASH_XOR &&
+-	     !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR)))
+-		return -EINVAL;
++	if (hfunc == ETH_RSS_HASH_TOP) {
++		if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP))
++			return -EINVAL;
++		if (!(dev->features & NETIF_F_RXHASH))
++			en_warn(priv, "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
++		return 0;
++	} else if (hfunc == ETH_RSS_HASH_XOR) {
++		if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR))
++			return -EINVAL;
++		if (dev->features & NETIF_F_RXHASH)
++			en_warn(priv, "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
++		return 0;
++	}
+ 
+-	priv->rss_hash_fn = hfunc;
+-	if (hfunc == ETH_RSS_HASH_TOP && !(dev->features & NETIF_F_RXHASH))
+-		en_warn(priv,
+-			"Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
+-	if (hfunc == ETH_RSS_HASH_XOR && (dev->features & NETIF_F_RXHASH))
+-		en_warn(priv,
+-			"Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
+-	return 0;
++	return -EINVAL;
+ }
+ 
+ static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
+@@ -1070,6 +1071,8 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
+ 		priv->prof->rss_rings = rss_rings;
+ 	if (key)
+ 		memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE);
++	if (hfunc !=  ETH_RSS_HASH_NO_CHANGE)
++		priv->rss_hash_fn = hfunc;
+ 
+ 	if (port_up) {
+ 		err = mlx4_en_start_port(dev);
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index af034dba9bd6..9d15566521a7 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -1716,6 +1716,7 @@ ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
+ {
+ 	/* note: a 0-length skb is used as an error indication */
+ 	if (skb->len > 0) {
++		skb_checksum_complete_unset(skb);
+ #ifdef CONFIG_PPP_MULTILINK
+ 		/* XXX do channel-level decompression here */
+ 		if (PPP_PROTO(skb) == PPP_MP)
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+index e06bafee37f9..5034660bf411 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+@@ -321,6 +321,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ 	{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
+ 	{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
+ 	{RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
++	{RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
+ 	{RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
+ 	{RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+ 	{RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+@@ -377,6 +378,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ 	{RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
+ 	{RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
+ 	{RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
++	{RTL_USB_DEVICE(0x2001, 0x330d, rtl92cu_hal_cfg)}, /*D-Link DWA-131 */
+ 	{RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
+ 	{RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/
+ 	{RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/
+diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
+index 7f1669cdea09..779dc2b2ca75 100644
+--- a/drivers/net/wireless/ti/wl18xx/debugfs.c
++++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
+@@ -136,7 +136,7 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, protection_filter, "%u");
+ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, accum_arp_pend_requests, "%u");
+ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, max_arp_queue_dep, "%u");
+ 
+-WL18XX_DEBUGFS_FWSTATS_FILE(rx_rate, rx_frames_per_rates, "%u");
++WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(rx_rate, rx_frames_per_rates, 50);
+ 
+ WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate,
+ 				  AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE);
+diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
+index 0f2cfb0d2a9e..bf14676e6515 100644
+--- a/drivers/net/wireless/ti/wlcore/debugfs.h
++++ b/drivers/net/wireless/ti/wlcore/debugfs.h
+@@ -26,8 +26,8 @@
+ 
+ #include "wlcore.h"
+ 
+-int wl1271_format_buffer(char __user *userbuf, size_t count,
+-			 loff_t *ppos, char *fmt, ...);
++__printf(4, 5) int wl1271_format_buffer(char __user *userbuf, size_t count,
++					loff_t *ppos, char *fmt, ...);
+ 
+ int wl1271_debugfs_init(struct wl1271 *wl);
+ void wl1271_debugfs_exit(struct wl1271 *wl);
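
Annotating wl1271_format_buffer with __printf(4, 5) tells the compiler which argument is the format string and where the variadic arguments begin, so mismatched format specifiers (like the array fed to a "%u" in the wl18xx hunk above) become compile-time -Wformat warnings. In GCC the kernel macro expands to the format attribute; a sketch with a hypothetical log_msg():

    #include <stdarg.h>
    #include <stdio.h>

    /* The kernel's __printf(a, b) expands to this GCC attribute. */
    #define __printf(a, b) __attribute__((format(printf, a, b)))

    __printf(1, 2)
    static void log_msg(const char *fmt, ...)
    {
            va_list ap;

            va_start(ap, fmt);
            vprintf(fmt, ap);
            va_end(ap);
    }

    int main(void)
    {
            log_msg("ok: %d\n", 42);
            /* With -Wformat (default in gcc), this would warn:
             *   log_msg("oops: %u\n", "not a number");
             */
            return 0;
    }
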
+diff --git a/drivers/nfc/st21nfcb/i2c.c b/drivers/nfc/st21nfcb/i2c.c
+index 01ba865863ee..b5eb3754578c 100644
+--- a/drivers/nfc/st21nfcb/i2c.c
++++ b/drivers/nfc/st21nfcb/i2c.c
+@@ -109,7 +109,7 @@ static int st21nfcb_nci_i2c_write(void *phy_id, struct sk_buff *skb)
+ 		return phy->ndlc->hard_fault;
+ 
+ 	r = i2c_master_send(client, skb->data, skb->len);
+-	if (r == -EREMOTEIO) {  /* Retry, chip was in standby */
++	if (r < 0) {  /* Retry, chip was in standby */
+ 		usleep_range(1000, 4000);
+ 		r = i2c_master_send(client, skb->data, skb->len);
+ 	}
+@@ -148,7 +148,7 @@ static int st21nfcb_nci_i2c_read(struct st21nfcb_i2c_phy *phy,
+ 	struct i2c_client *client = phy->i2c_dev;
+ 
+ 	r = i2c_master_recv(client, buf, ST21NFCB_NCI_I2C_MIN_SIZE);
+-	if (r == -EREMOTEIO) {  /* Retry, chip was in standby */
++	if (r < 0) {  /* Retry, chip was in standby */
+ 		usleep_range(1000, 4000);
+ 		r = i2c_master_recv(client, buf, ST21NFCB_NCI_I2C_MIN_SIZE);
+ 	}
+diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
+index 15c0fab2bfa1..bceb30b539f3 100644
+--- a/drivers/platform/x86/compal-laptop.c
++++ b/drivers/platform/x86/compal-laptop.c
+@@ -1026,9 +1026,9 @@ static int compal_probe(struct platform_device *pdev)
+ 	if (err)
+ 		return err;
+ 
+-	hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
+-						      "compal", data,
+-						      compal_hwmon_groups);
++	hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev,
++							   "compal", data,
++							   compal_hwmon_groups);
+ 	if (IS_ERR(hwmon_dev)) {
+ 		err = PTR_ERR(hwmon_dev);
+ 		goto remove;
+@@ -1036,7 +1036,9 @@ static int compal_probe(struct platform_device *pdev)
+ 
+ 	/* Power supply */
+ 	initialize_power_supply_data(data);
+-	power_supply_register(&compal_device->dev, &data->psy);
++	err = power_supply_register(&compal_device->dev, &data->psy);
++	if (err < 0)
++		goto remove;
+ 
+ 	platform_set_drvdata(pdev, data);
+ 
+diff --git a/drivers/power/ipaq_micro_battery.c b/drivers/power/ipaq_micro_battery.c
+index 9d694605cdb7..96b15e003f3f 100644
+--- a/drivers/power/ipaq_micro_battery.c
++++ b/drivers/power/ipaq_micro_battery.c
+@@ -226,6 +226,7 @@ static struct power_supply micro_ac_power = {
+ static int micro_batt_probe(struct platform_device *pdev)
+ {
+ 	struct micro_battery *mb;
++	int ret;
+ 
+ 	mb = devm_kzalloc(&pdev->dev, sizeof(*mb), GFP_KERNEL);
+ 	if (!mb)
+@@ -233,14 +234,30 @@ static int micro_batt_probe(struct platform_device *pdev)
+ 
+ 	mb->micro = dev_get_drvdata(pdev->dev.parent);
+ 	mb->wq = create_singlethread_workqueue("ipaq-battery-wq");
++	if (!mb->wq)
++		return -ENOMEM;
++
+ 	INIT_DELAYED_WORK(&mb->update, micro_battery_work);
+ 	platform_set_drvdata(pdev, mb);
+ 	queue_delayed_work(mb->wq, &mb->update, 1);
+-	power_supply_register(&pdev->dev, &micro_batt_power);
+-	power_supply_register(&pdev->dev, &micro_ac_power);
++
++	ret = power_supply_register(&pdev->dev, &micro_batt_power);
++	if (ret < 0)
++		goto batt_err;
++
++	ret = power_supply_register(&pdev->dev, &micro_ac_power);
++	if (ret < 0)
++		goto ac_err;
+ 
+ 	dev_info(&pdev->dev, "iPAQ micro battery driver\n");
+ 	return 0;
++
++ac_err:
++	power_supply_unregister(&micro_ac_power);
++batt_err:
++	cancel_delayed_work_sync(&mb->update);
++	destroy_workqueue(mb->wq);
++	return ret;
+ }
+ 
+ static int micro_batt_remove(struct platform_device *pdev)
+@@ -251,6 +268,7 @@ static int micro_batt_remove(struct platform_device *pdev)
+ 	power_supply_unregister(&micro_ac_power);
+ 	power_supply_unregister(&micro_batt_power);
+ 	cancel_delayed_work_sync(&mb->update);
++	destroy_workqueue(mb->wq);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/power/lp8788-charger.c b/drivers/power/lp8788-charger.c
+index 21fc233c7d61..176dab2e4c16 100644
+--- a/drivers/power/lp8788-charger.c
++++ b/drivers/power/lp8788-charger.c
+@@ -417,8 +417,10 @@ static int lp8788_psy_register(struct platform_device *pdev,
+ 	pchg->battery.num_properties = ARRAY_SIZE(lp8788_battery_prop);
+ 	pchg->battery.get_property = lp8788_battery_get_property;
+ 
+-	if (power_supply_register(&pdev->dev, &pchg->battery))
++	if (power_supply_register(&pdev->dev, &pchg->battery)) {
++		power_supply_unregister(&pchg->charger);
+ 		return -EPERM;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/power/twl4030_madc_battery.c b/drivers/power/twl4030_madc_battery.c
+index 7ef445a6cfa6..cf907609ec49 100644
+--- a/drivers/power/twl4030_madc_battery.c
++++ b/drivers/power/twl4030_madc_battery.c
+@@ -192,6 +192,7 @@ static int twl4030_madc_battery_probe(struct platform_device *pdev)
+ {
+ 	struct twl4030_madc_battery *twl4030_madc_bat;
+ 	struct twl4030_madc_bat_platform_data *pdata = pdev->dev.platform_data;
++	int ret = 0;
+ 
+ 	twl4030_madc_bat = kzalloc(sizeof(*twl4030_madc_bat), GFP_KERNEL);
+ 	if (!twl4030_madc_bat)
+@@ -216,9 +217,11 @@ static int twl4030_madc_battery_probe(struct platform_device *pdev)
+ 
+ 	twl4030_madc_bat->pdata = pdata;
+ 	platform_set_drvdata(pdev, twl4030_madc_bat);
+-	power_supply_register(&pdev->dev, &twl4030_madc_bat->psy);
++	ret = power_supply_register(&pdev->dev, &twl4030_madc_bat->psy);
++	if (ret < 0)
++		kfree(twl4030_madc_bat);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ static int twl4030_madc_battery_remove(struct platform_device *pdev)
+diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
+index 2d5ab6d969ec..454536c49315 100644
+--- a/drivers/scsi/mvsas/mv_sas.c
++++ b/drivers/scsi/mvsas/mv_sas.c
+@@ -441,14 +441,11 @@ static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
+ static int mvs_task_prep_ata(struct mvs_info *mvi,
+ 			     struct mvs_task_exec_info *tei)
+ {
+-	struct sas_ha_struct *sha = mvi->sas;
+ 	struct sas_task *task = tei->task;
+ 	struct domain_device *dev = task->dev;
+ 	struct mvs_device *mvi_dev = dev->lldd_dev;
+ 	struct mvs_cmd_hdr *hdr = tei->hdr;
+ 	struct asd_sas_port *sas_port = dev->port;
+-	struct sas_phy *sphy = dev->phy;
+-	struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
+ 	struct mvs_slot_info *slot;
+ 	void *buf_prd;
+ 	u32 tag = tei->tag, hdr_tag;
+@@ -468,7 +465,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
+ 	slot->tx = mvi->tx_prod;
+ 	del_q = TXQ_MODE_I | tag |
+ 		(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
+-		(MVS_PHY_ID << TXQ_PHY_SHIFT) |
++		((sas_port->phy_mask & TXQ_PHY_MASK) << TXQ_PHY_SHIFT) |
+ 		(mvi_dev->taskfileset << TXQ_SRS_SHIFT);
+ 	mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
+ 
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 05ea0d49a3a3..ad35ccdb92a2 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -3100,6 +3100,7 @@ static void scsi_disk_release(struct device *dev)
+ 	ida_remove(&sd_index_ida, sdkp->index);
+ 	spin_unlock(&sd_index_lock);
+ 
++	blk_integrity_unregister(disk);
+ 	disk->private_data = NULL;
+ 	put_disk(disk);
+ 	put_device(&sdkp->device->sdev_gendev);
+diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
+index 14c7d42a11c2..5c06d292b94c 100644
+--- a/drivers/scsi/sd_dif.c
++++ b/drivers/scsi/sd_dif.c
+@@ -77,7 +77,7 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
+ 
+ 		disk->integrity->flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
+ 
+-		if (!sdkp)
++		if (!sdkp->ATO)
+ 			return;
+ 
+ 		if (type == SD_DIF_TYPE3_PROTECTION)
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 4cff0ddc2c25..3483b1d21681 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -741,21 +741,22 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
+ 			if (bounce_sgl[j].length == PAGE_SIZE) {
+ 				/* full..move to next entry */
+ 				sg_kunmap_atomic(bounce_addr);
++				bounce_addr = 0;
+ 				j++;
++			}
+ 
+-				/* if we need to use another bounce buffer */
+-				if (srclen || i != orig_sgl_count - 1)
+-					bounce_addr = sg_kmap_atomic(bounce_sgl,j);
++			/* if we need to use another bounce buffer */
++			if (srclen && bounce_addr == 0)
++				bounce_addr = sg_kmap_atomic(bounce_sgl, j);
+ 
+-			} else if (srclen == 0 && i == orig_sgl_count - 1) {
+-				/* unmap the last bounce that is < PAGE_SIZE */
+-				sg_kunmap_atomic(bounce_addr);
+-			}
+ 		}
+ 
+ 		sg_kunmap_atomic(src_addr - orig_sgl[i].offset);
+ 	}
+ 
++	if (bounce_addr)
++		sg_kunmap_atomic(bounce_addr);
++
+ 	local_irq_restore(flags);
+ 
+ 	return total_copied;
+diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
+index fe1b7699fab6..308c2d39b7ce 100644
+--- a/drivers/spi/spi-imx.c
++++ b/drivers/spi/spi-imx.c
+@@ -371,8 +371,6 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
+ 	if (spi_imx->dma_is_inited) {
+ 		dma = readl(spi_imx->base + MX51_ECSPI_DMA);
+ 
+-		spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2;
+-		spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2;
+ 		spi_imx->rxt_wml = spi_imx_get_fifosize(spi_imx) / 2;
+ 		rx_wml_cfg = spi_imx->rx_wml << MX51_ECSPI_DMA_RX_WML_OFFSET;
+ 		tx_wml_cfg = spi_imx->tx_wml << MX51_ECSPI_DMA_TX_WML_OFFSET;
+@@ -869,6 +867,8 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
+ 	master->max_dma_len = MAX_SDMA_BD_BYTES;
+ 	spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX |
+ 					 SPI_MASTER_MUST_TX;
++	spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2;
++	spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2;
+ 	spi_imx->dma_is_inited = 1;
+ 
+ 	return 0;
+diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
+index 6941e04afb8c..5c854355f621 100644
+--- a/drivers/spi/spidev.c
++++ b/drivers/spi/spidev.c
+@@ -249,7 +249,10 @@ static int spidev_message(struct spidev_data *spidev,
+ 		k_tmp->len = u_tmp->len;
+ 
+ 		total += k_tmp->len;
+-		if (total > bufsiz) {
++		/* Check total length of transfers.  Also check each
++		 * transfer length to avoid arithmetic overflow.
++		 */
++		if (total > bufsiz || k_tmp->len > bufsiz) {
+ 			status = -EMSGSIZE;
+ 			goto done;
+ 		}
+diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
+index 7bdb62bf6b40..f83e00c78051 100644
+--- a/drivers/staging/android/sync.c
++++ b/drivers/staging/android/sync.c
+@@ -114,7 +114,7 @@ void sync_timeline_signal(struct sync_timeline *obj)
+ 	list_for_each_entry_safe(pt, next, &obj->active_list_head,
+ 				 active_list) {
+ 		if (fence_is_signaled_locked(&pt->base))
+-			list_del(&pt->active_list);
++			list_del_init(&pt->active_list);
+ 	}
+ 
+ 	spin_unlock_irqrestore(&obj->child_list_lock, flags);
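
The sync fix swaps list_del() for list_del_init(): plain list_del poisons the node's pointers, so a later list_empty() test or second deletion on the same node dereferences garbage, while list_del_init relinks the node to itself, leaving it in a safe "not on any list" state. A userspace sketch with the kernel's doubly-linked list reduced to its essentials (the poison values are toy constants):

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define LIST_POISON1 ((struct list_head *)0x100)
    #define LIST_POISON2 ((struct list_head *)0x122)

    static void INIT_LIST_HEAD(struct list_head *h)
    {
            h->next = h->prev = h;
    }

    static void list_add(struct list_head *n, struct list_head *head)
    {
            n->next = head->next;
            n->prev = head;
            head->next->prev = n;
            head->next = n;
    }

    static void __list_del(struct list_head *e)
    {
            e->prev->next = e->next;
            e->next->prev = e->prev;
    }

    /* list_del: unlink and poison; the node must not be used again. */
    static void list_del(struct list_head *e)
    {
            __list_del(e);
            e->next = LIST_POISON1;
            e->prev = LIST_POISON2;
    }

    /* list_del_init: unlink and re-init, so list_empty(e) stays true
     * and the node can safely be tested or deleted again. */
    static void list_del_init(struct list_head *e)
    {
            __list_del(e);
            INIT_LIST_HEAD(e);
    }

    static int list_empty(const struct list_head *h)
    {
            return h->next == h;
    }

    int main(void)
    {
            struct list_head head, node;

            INIT_LIST_HEAD(&head);
            list_add(&node, &head);
            list_del_init(&node);
            printf("empty after del_init: %d\n", list_empty(&node)); /* 1 */

            list_add(&node, &head);
            list_del(&node);
            /* list_empty(&node) would now read poisoned pointers. */
            return 0;
    }
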
+diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
+index 98325b7b4462..47de68b6334f 100644
+--- a/drivers/staging/panel/panel.c
++++ b/drivers/staging/panel/panel.c
+@@ -314,11 +314,11 @@ static unsigned char lcd_bits[LCD_PORTS][LCD_BITS][BIT_STATES];
+  * LCD types
+  */
+ #define LCD_TYPE_NONE		0
+-#define LCD_TYPE_OLD		1
+-#define LCD_TYPE_KS0074		2
+-#define LCD_TYPE_HANTRONIX	3
+-#define LCD_TYPE_NEXCOM		4
+-#define LCD_TYPE_CUSTOM		5
++#define LCD_TYPE_CUSTOM		1
++#define LCD_TYPE_OLD		2
++#define LCD_TYPE_KS0074		3
++#define LCD_TYPE_HANTRONIX	4
++#define LCD_TYPE_NEXCOM		5
+ 
+ /*
+  * keypad types
+@@ -481,7 +481,7 @@ MODULE_PARM_DESC(keypad_type,
+ static int lcd_type = NOT_SET;
+ module_param(lcd_type, int, 0000);
+ MODULE_PARM_DESC(lcd_type,
+-		 "LCD type: 0=none, 1=old //, 2=serial ks0074, 3=hantronix //, 4=nexcom //, 5=compiled-in");
++		 "LCD type: 0=none, 1=compiled-in, 2=old, 3=serial ks0074, 4=hantronix, 5=nexcom");
+ 
+ static int lcd_height = NOT_SET;
+ module_param(lcd_height, int, 0000);
+diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
+index b5b0155961f2..baaab3fcae8f 100644
+--- a/drivers/staging/vt6655/rxtx.c
++++ b/drivers/staging/vt6655/rxtx.c
+@@ -1309,10 +1309,18 @@ int vnt_generate_fifo_header(struct vnt_private *priv, u32 dma_idx,
+ 			    priv->hw->conf.chandef.chan->hw_value);
+ 	}
+ 
+-	if (current_rate > RATE_11M)
+-		pkt_type = (u8)priv->byPacketType;
+-	else
++	if (current_rate > RATE_11M) {
++		if (info->band == IEEE80211_BAND_5GHZ) {
++			pkt_type = PK_TYPE_11A;
++		} else {
++			if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
++				pkt_type = PK_TYPE_11GB;
++			else
++				pkt_type = PK_TYPE_11GA;
++		}
++	} else {
+ 		pkt_type = PK_TYPE_11B;
++	}
+ 
+ 	/*Set fifo controls */
+ 	if (pkt_type == PK_TYPE_11A)
+diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
+index d836de200a03..15cfcd6b9343 100644
+--- a/drivers/target/target_core_file.c
++++ b/drivers/target/target_core_file.c
+@@ -264,40 +264,32 @@ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
+ 	struct se_device *se_dev = cmd->se_dev;
+ 	struct fd_dev *dev = FD_DEV(se_dev);
+ 	struct file *prot_fd = dev->fd_prot_file;
+-	struct scatterlist *sg;
+ 	loff_t pos = (cmd->t_task_lba * se_dev->prot_length);
+ 	unsigned char *buf;
+-	u32 prot_size, len, size;
+-	int rc, ret = 1, i;
++	u32 prot_size;
++	int rc, ret = 1;
+ 
+ 	prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) *
+ 		     se_dev->prot_length;
+ 
+ 	if (!is_write) {
+-		fd_prot->prot_buf = vzalloc(prot_size);
++		fd_prot->prot_buf = kzalloc(prot_size, GFP_KERNEL);
+ 		if (!fd_prot->prot_buf) {
+ 			pr_err("Unable to allocate fd_prot->prot_buf\n");
+ 			return -ENOMEM;
+ 		}
+ 		buf = fd_prot->prot_buf;
+ 
+-		fd_prot->prot_sg_nents = cmd->t_prot_nents;
+-		fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist) *
+-					   fd_prot->prot_sg_nents, GFP_KERNEL);
++		fd_prot->prot_sg_nents = 1;
++		fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist),
++					   GFP_KERNEL);
+ 		if (!fd_prot->prot_sg) {
+ 			pr_err("Unable to allocate fd_prot->prot_sg\n");
+-			vfree(fd_prot->prot_buf);
++			kfree(fd_prot->prot_buf);
+ 			return -ENOMEM;
+ 		}
+-		size = prot_size;
+-
+-		for_each_sg(fd_prot->prot_sg, sg, fd_prot->prot_sg_nents, i) {
+-
+-			len = min_t(u32, PAGE_SIZE, size);
+-			sg_set_buf(sg, buf, len);
+-			size -= len;
+-			buf += len;
+-		}
++		sg_init_table(fd_prot->prot_sg, fd_prot->prot_sg_nents);
++		sg_set_buf(fd_prot->prot_sg, buf, prot_size);
+ 	}
+ 
+ 	if (is_write) {
+@@ -318,7 +310,7 @@ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
+ 
+ 	if (is_write || ret < 0) {
+ 		kfree(fd_prot->prot_sg);
+-		vfree(fd_prot->prot_buf);
++		kfree(fd_prot->prot_buf);
+ 	}
+ 
+ 	return ret;
+@@ -544,6 +536,56 @@ fd_execute_write_same(struct se_cmd *cmd)
+ 	return 0;
+ }
+ 
++static int
++fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb,
++		void *buf, size_t bufsize)
++{
++	struct fd_dev *fd_dev = FD_DEV(se_dev);
++	struct file *prot_fd = fd_dev->fd_prot_file;
++	sector_t prot_length, prot;
++	loff_t pos = lba * se_dev->prot_length;
++
++	if (!prot_fd) {
++		pr_err("Unable to locate fd_dev->fd_prot_file\n");
++		return -ENODEV;
++	}
++
++	prot_length = nolb * se_dev->prot_length;
++
++	for (prot = 0; prot < prot_length;) {
++		sector_t len = min_t(sector_t, bufsize, prot_length - prot);
++		ssize_t ret = kernel_write(prot_fd, buf, len, pos + prot);
++
++		if (ret != len) {
++			pr_err("vfs_write to prot file failed: %zd\n", ret);
++			return ret < 0 ? ret : -ENODEV;
++		}
++		prot += ret;
++	}
++
++	return 0;
++}
++
++static int
++fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
++{
++	void *buf;
++	int rc;
++
++	buf = (void *)__get_free_page(GFP_KERNEL);
++	if (!buf) {
++		pr_err("Unable to allocate FILEIO prot buf\n");
++		return -ENOMEM;
++	}
++	memset(buf, 0xff, PAGE_SIZE);
++
++	rc = fd_do_prot_fill(cmd->se_dev, lba, nolb, buf, PAGE_SIZE);
++
++	free_page((unsigned long)buf);
++
++	return rc;
++}
++
+ static sense_reason_t
+ fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb)
+ {
+@@ -551,6 +593,12 @@ fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb)
+ 	struct inode *inode = file->f_mapping->host;
+ 	int ret;
+ 
++	if (cmd->se_dev->dev_attrib.pi_prot_type) {
++		ret = fd_do_prot_unmap(cmd, lba, nolb);
++		if (ret)
++			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
++	}
++
+ 	if (S_ISBLK(inode->i_mode)) {
+ 		/* The backend is block device, use discard */
+ 		struct block_device *bdev = inode->i_bdev;
+@@ -653,11 +701,11 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ 						 0, fd_prot.prot_sg, 0);
+ 			if (rc) {
+ 				kfree(fd_prot.prot_sg);
+-				vfree(fd_prot.prot_buf);
++				kfree(fd_prot.prot_buf);
+ 				return rc;
+ 			}
+ 			kfree(fd_prot.prot_sg);
+-			vfree(fd_prot.prot_buf);
++			kfree(fd_prot.prot_buf);
+ 		}
+ 	} else {
+ 		memset(&fd_prot, 0, sizeof(struct fd_prot));
+@@ -673,7 +721,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ 						  0, fd_prot.prot_sg, 0);
+ 			if (rc) {
+ 				kfree(fd_prot.prot_sg);
+-				vfree(fd_prot.prot_buf);
++				kfree(fd_prot.prot_buf);
+ 				return rc;
+ 			}
+ 		}
+@@ -709,7 +757,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ 
+ 	if (ret < 0) {
+ 		kfree(fd_prot.prot_sg);
+-		vfree(fd_prot.prot_buf);
++		kfree(fd_prot.prot_buf);
+ 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ 	}
+ 
+@@ -873,48 +921,28 @@ static int fd_init_prot(struct se_device *dev)
+ 
+ static int fd_format_prot(struct se_device *dev)
+ {
+-	struct fd_dev *fd_dev = FD_DEV(dev);
+-	struct file *prot_fd = fd_dev->fd_prot_file;
+-	sector_t prot_length, prot;
+ 	unsigned char *buf;
+-	loff_t pos = 0;
+ 	int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
+-	int rc, ret = 0, size, len;
++	int ret;
+ 
+ 	if (!dev->dev_attrib.pi_prot_type) {
+ 		pr_err("Unable to format_prot while pi_prot_type == 0\n");
+ 		return -ENODEV;
+ 	}
+-	if (!prot_fd) {
+-		pr_err("Unable to locate fd_dev->fd_prot_file\n");
+-		return -ENODEV;
+-	}
+ 
+ 	buf = vzalloc(unit_size);
+ 	if (!buf) {
+ 		pr_err("Unable to allocate FILEIO prot buf\n");
+ 		return -ENOMEM;
+ 	}
+-	prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length;
+-	size = prot_length;
+ 
+ 	pr_debug("Using FILEIO prot_length: %llu\n",
+-		 (unsigned long long)prot_length);
++		 (unsigned long long)(dev->transport->get_blocks(dev) + 1) *
++					dev->prot_length);
+ 
+ 	memset(buf, 0xff, unit_size);
+-	for (prot = 0; prot < prot_length; prot += unit_size) {
+-		len = min(unit_size, size);
+-		rc = kernel_write(prot_fd, buf, len, pos);
+-		if (rc != len) {
+-			pr_err("vfs_write to prot file failed: %d\n", rc);
+-			ret = -ENODEV;
+-			goto out;
+-		}
+-		pos += len;
+-		size -= len;
+-	}
+-
+-out:
++	ret = fd_do_prot_fill(dev, 0, dev->transport->get_blocks(dev) + 1,
++			      buf, unit_size);
+ 	vfree(buf);
+ 	return ret;
+ }
+diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
+index 36b471389169..48de05366645 100644
+--- a/drivers/target/target_core_sbc.c
++++ b/drivers/target/target_core_sbc.c
+@@ -299,7 +299,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
+ 	return 0;
+ }
+ 
+-static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
++static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success)
+ {
+ 	unsigned char *buf, *addr;
+ 	struct scatterlist *sg;
+@@ -363,7 +363,7 @@ sbc_execute_rw(struct se_cmd *cmd)
+ 			       cmd->data_direction);
+ }
+ 
+-static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
++static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
+ {
+ 	struct se_device *dev = cmd->se_dev;
+ 
+@@ -386,7 +386,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
+ 	return TCM_NO_SENSE;
+ }
+ 
+-static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
++static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success)
+ {
+ 	struct se_device *dev = cmd->se_dev;
+ 	struct scatterlist *write_sg = NULL, *sg;
+@@ -401,11 +401,16 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
+ 
+ 	/*
+ 	 * Handle early failure in transport_generic_request_failure(),
+-	 * which will not have taken ->caw_mutex yet..
++	 * which will not have taken ->caw_sem yet..
+ 	 */
+-	if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
++	if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg))
+ 		return TCM_NO_SENSE;
+ 	/*
++	 * Handle special case for zero-length COMPARE_AND_WRITE
++	 */
++	if (!cmd->data_length)
++		goto out;
++	/*
+ 	 * Immediately exit + release dev->caw_sem if command has already
+ 	 * been failed with a non-zero SCSI status.
+ 	 */
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index ac3cbabdbdf0..f786de0290db 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -1615,11 +1615,11 @@ void transport_generic_request_failure(struct se_cmd *cmd,
+ 	transport_complete_task_attr(cmd);
+ 	/*
+ 	 * Handle special case for COMPARE_AND_WRITE failure, where the
+-	 * callback is expected to drop the per device ->caw_mutex.
++	 * callback is expected to drop the per device ->caw_sem.
+ 	 */
+ 	if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
+ 	     cmd->transport_complete_callback)
+-		cmd->transport_complete_callback(cmd);
++		cmd->transport_complete_callback(cmd, false);
+ 
+ 	switch (sense_reason) {
+ 	case TCM_NON_EXISTENT_LUN:
+@@ -1975,8 +1975,12 @@ static void target_complete_ok_work(struct work_struct *work)
+ 	if (cmd->transport_complete_callback) {
+ 		sense_reason_t rc;
+ 
+-		rc = cmd->transport_complete_callback(cmd);
++		rc = cmd->transport_complete_callback(cmd, true);
+ 		if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
++			if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
++			    !cmd->data_length)
++				goto queue_rsp;
++
+ 			return;
+ 		} else if (rc) {
+ 			ret = transport_send_check_condition_and_sense(cmd,
+@@ -1990,6 +1994,7 @@ static void target_complete_ok_work(struct work_struct *work)
+ 		}
+ 	}
+ 
++queue_rsp:
+ 	switch (cmd->data_direction) {
+ 	case DMA_FROM_DEVICE:
+ 		spin_lock(&cmd->se_lun->lun_sep_lock);
+@@ -2094,6 +2099,16 @@ static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
+ static inline void transport_free_pages(struct se_cmd *cmd)
+ {
+ 	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
++		/*
++		 * Release special case READ buffer payload required for
++		 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
++		 */
++		if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
++			transport_free_sgl(cmd->t_bidi_data_sg,
++					   cmd->t_bidi_data_nents);
++			cmd->t_bidi_data_sg = NULL;
++			cmd->t_bidi_data_nents = 0;
++		}
+ 		transport_reset_sgl_orig(cmd);
+ 		return;
+ 	}
+@@ -2246,6 +2261,7 @@ sense_reason_t
+ transport_generic_new_cmd(struct se_cmd *cmd)
+ {
+ 	int ret = 0;
++	bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
+ 
+ 	/*
+ 	 * Determine is the TCM fabric module has already allocated physical
+@@ -2254,7 +2270,6 @@ transport_generic_new_cmd(struct se_cmd *cmd)
+ 	 */
+ 	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
+ 	    cmd->data_length) {
+-		bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
+ 
+ 		if ((cmd->se_cmd_flags & SCF_BIDI) ||
+ 		    (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
+@@ -2285,6 +2300,20 @@ transport_generic_new_cmd(struct se_cmd *cmd)
+ 				       cmd->data_length, zero_flag);
+ 		if (ret < 0)
+ 			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
++	} else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
++		    cmd->data_length) {
++		/*
++		 * Special case for COMPARE_AND_WRITE with fabrics
++		 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
++		 */
++		u32 caw_length = cmd->t_task_nolb *
++				 cmd->se_dev->dev_attrib.block_size;
++
++		ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
++				       &cmd->t_bidi_data_nents,
++				       caw_length, zero_flag);
++		if (ret < 0)
++			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ 	}
+ 	/*
+ 	 * If this command is not a write we can execute it right here,
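
Taken together, these target_core hunks thread a `success` flag through ->transport_complete_callback() so COMPARE_AND_WRITE can tell the failure path (which must still drop dev->caw_sem) from normal completion. A minimal standalone model of that callback shape (names are hypothetical; the real code releases a semaphore, not a plain flag):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-in for struct se_cmd; illustration only. */
    struct cmd {
        int sem_held;   /* models the held dev->caw_sem */
        int (*complete_cb)(struct cmd *c, bool success);
    };

    /* The callback runs its success-only work conditionally, but must
     * release the lock on both paths. */
    static int caw_callback(struct cmd *c, bool success)
    {
        if (success)
            printf("success path: process the read payload\n");
        c->sem_held = 0;    /* always drop the lock */
        return 0;
    }

    int main(void)
    {
        struct cmd c = { .sem_held = 1, .complete_cb = caw_callback };

        c.complete_cb(&c, false);               /* failure path still unlocks */
        printf("sem_held = %d\n", c.sem_held);  /* 0 */
        return 0;
    }
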
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index a051a7a2b1bd..a81f9dd7ee97 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -245,7 +245,7 @@ static void wdm_int_callback(struct urb *urb)
+ 	case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
+ 		dev_dbg(&desc->intf->dev,
+ 			"NOTIFY_RESPONSE_AVAILABLE received: index %d len %d",
+-			dr->wIndex, dr->wLength);
++			le16_to_cpu(dr->wIndex), le16_to_cpu(dr->wLength));
+ 		break;
+ 
+ 	case USB_CDC_NOTIFY_NETWORK_CONNECTION:
+@@ -262,7 +262,9 @@ static void wdm_int_callback(struct urb *urb)
+ 		clear_bit(WDM_POLL_RUNNING, &desc->flags);
+ 		dev_err(&desc->intf->dev,
+ 			"unknown notification %d received: index %d len %d\n",
+-			dr->bNotificationType, dr->wIndex, dr->wLength);
++			dr->bNotificationType,
++			le16_to_cpu(dr->wIndex),
++			le16_to_cpu(dr->wLength));
+ 		goto exit;
+ 	}
+ 
+@@ -408,7 +410,7 @@ static ssize_t wdm_write
+ 			     USB_RECIP_INTERFACE);
+ 	req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND;
+ 	req->wValue = 0;
+-	req->wIndex = desc->inum;
++	req->wIndex = desc->inum; /* already converted */
+ 	req->wLength = cpu_to_le16(count);
+ 	set_bit(WDM_IN_USE, &desc->flags);
+ 	desc->outbuf = buf;
+@@ -422,7 +424,7 @@ static ssize_t wdm_write
+ 		rv = usb_translate_errors(rv);
+ 	} else {
+ 		dev_dbg(&desc->intf->dev, "Tx URB has been submitted index=%d",
+-			req->wIndex);
++			le16_to_cpu(req->wIndex));
+ 	}
+ out:
+ 	usb_autopm_put_interface(desc->intf);
+@@ -820,7 +822,7 @@ static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor
+ 	desc->irq->bRequestType = (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE);
+ 	desc->irq->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE;
+ 	desc->irq->wValue = 0;
+-	desc->irq->wIndex = desc->inum;
++	desc->irq->wIndex = desc->inum; /* already converted */
+ 	desc->irq->wLength = cpu_to_le16(desc->wMaxCommand);
+ 
+ 	usb_fill_control_urb(
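
The cdc-wdm hunks convert the little-endian wIndex/wLength wire fields with le16_to_cpu() before logging them, so the printed values are correct on big-endian hosts too. A standalone illustration of the conversion (this helper is a userspace stand-in, not the kernel's __le16-typed one):

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace stand-in for the kernel's le16_to_cpu(): assemble the
     * value from explicit byte positions so it works on any host order. */
    static uint16_t le16_to_cpu(const uint8_t b[2])
    {
        return (uint16_t)(b[0] | (b[1] << 8));
    }

    int main(void)
    {
        uint8_t wire_wLength[2] = { 0x34, 0x12 };       /* 0x1234 on the wire */

        printf("len %u\n", le16_to_cpu(wire_wLength));  /* prints 4660 */
        return 0;
    }
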
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index b4bfa3ac4b12..95409aacc076 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -3397,10 +3397,10 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
+ 	if (status) {
+ 		dev_dbg(&port_dev->dev, "can't resume, status %d\n", status);
+ 	} else {
+-		/* drive resume for at least 20 msec */
++		/* drive resume for USB_RESUME_TIMEOUT msec */
+ 		dev_dbg(&udev->dev, "usb %sresume\n",
+ 				(PMSG_IS_AUTO(msg) ? "auto-" : ""));
+-		msleep(25);
++		msleep(USB_RESUME_TIMEOUT);
+ 
+ 		/* Virtual root hubs can trigger on GET_PORT_STATUS to
+ 		 * stop resume signaling.  Then finish the resume
+diff --git a/drivers/usb/gadget/legacy/printer.c b/drivers/usb/gadget/legacy/printer.c
+index 90545980542f..6385c198c134 100644
+--- a/drivers/usb/gadget/legacy/printer.c
++++ b/drivers/usb/gadget/legacy/printer.c
+@@ -1031,6 +1031,15 @@ unknown:
+ 		break;
+ 	}
+ 	/* host either stalls (value < 0) or reports success */
++	if (value >= 0) {
++		req->length = value;
++		req->zero = value < wLength;
++		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
++		if (value < 0) {
++			ERROR(dev, "%s:%d Error!\n", __func__, __LINE__);
++			req->status = 0;
++		}
++	}
+ 	return value;
+ }
+ 
+diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
+index 38bfeedae1d0..002ac2944d24 100644
+--- a/drivers/usb/host/ehci-hcd.c
++++ b/drivers/usb/host/ehci-hcd.c
+@@ -792,12 +792,12 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
+ 					ehci->reset_done[i] == 0))
+ 				continue;
+ 
+-			/* start 20 msec resume signaling from this port,
+-			 * and make hub_wq collect PORT_STAT_C_SUSPEND to
+-			 * stop that signaling.  Use 5 ms extra for safety,
+-			 * like usb_port_resume() does.
++			/* start USB_RESUME_TIMEOUT msec resume signaling from
++			 * this port, and make hub_wq collect
++			 * PORT_STAT_C_SUSPEND to stop that signaling.
+ 			 */
+-			ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);
++			ehci->reset_done[i] = jiffies +
++				msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ 			set_bit(i, &ehci->resuming_ports);
+ 			ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
+ 			usb_hcd_start_port_resume(&hcd->self, i);
+diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
+index 118edb7bdca2..d96d69f805ea 100644
+--- a/drivers/usb/host/ehci-hub.c
++++ b/drivers/usb/host/ehci-hub.c
+@@ -471,10 +471,13 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
+ 		ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
+ 	}
+ 
+-	/* msleep for 20ms only if code is trying to resume port */
++	/*
++	 * msleep for USB_RESUME_TIMEOUT ms only if code is trying to resume
++	 * port
++	 */
+ 	if (resume_needed) {
+ 		spin_unlock_irq(&ehci->lock);
+-		msleep(20);
++		msleep(USB_RESUME_TIMEOUT);
+ 		spin_lock_irq(&ehci->lock);
+ 		if (ehci->shutdown)
+ 			goto shutdown;
+@@ -942,7 +945,7 @@ int ehci_hub_control(
+ 			temp &= ~PORT_WAKE_BITS;
+ 			ehci_writel(ehci, temp | PORT_RESUME, status_reg);
+ 			ehci->reset_done[wIndex] = jiffies
+-					+ msecs_to_jiffies(20);
++					+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ 			set_bit(wIndex, &ehci->resuming_ports);
+ 			usb_hcd_start_port_resume(&hcd->self, wIndex);
+ 			break;
+diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
+index ecf02b2623e8..c4794e378ea7 100644
+--- a/drivers/usb/host/fotg210-hcd.c
++++ b/drivers/usb/host/fotg210-hcd.c
+@@ -1595,7 +1595,7 @@ static int fotg210_hub_control(
+ 			/* resume signaling for 20 msec */
+ 			fotg210_writel(fotg210, temp | PORT_RESUME, status_reg);
+ 			fotg210->reset_done[wIndex] = jiffies
+-					+ msecs_to_jiffies(20);
++					+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ 			break;
+ 		case USB_PORT_FEAT_C_SUSPEND:
+ 			clear_bit(wIndex, &fotg210->port_c_suspend);
+diff --git a/drivers/usb/host/fusbh200-hcd.c b/drivers/usb/host/fusbh200-hcd.c
+index 664d2aa1239c..c9eb18b9973c 100644
+--- a/drivers/usb/host/fusbh200-hcd.c
++++ b/drivers/usb/host/fusbh200-hcd.c
+@@ -1550,10 +1550,9 @@ static int fusbh200_hub_control (
+ 			if ((temp & PORT_PE) == 0)
+ 				goto error;
+ 
+-			/* resume signaling for 20 msec */
+ 			fusbh200_writel(fusbh200, temp | PORT_RESUME, status_reg);
+ 			fusbh200->reset_done[wIndex] = jiffies
+-					+ msecs_to_jiffies(20);
++					+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ 			break;
+ 		case USB_PORT_FEAT_C_SUSPEND:
+ 			clear_bit(wIndex, &fusbh200->port_c_suspend);
+diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
+index 31c9c4d0fa0b..1613a1f69480 100644
+--- a/drivers/usb/host/isp116x-hcd.c
++++ b/drivers/usb/host/isp116x-hcd.c
+@@ -1487,7 +1487,7 @@ static int isp116x_bus_resume(struct usb_hcd *hcd)
+ 	spin_unlock_irq(&isp116x->lock);
+ 
+ 	hcd->state = HC_STATE_RESUMING;
+-	msleep(20);
++	msleep(USB_RESUME_TIMEOUT);
+ 
+ 	/* Go operational */
+ 	spin_lock_irq(&isp116x->lock);
+diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
+index 036924e640f5..3fc560c30d08 100644
+--- a/drivers/usb/host/oxu210hp-hcd.c
++++ b/drivers/usb/host/oxu210hp-hcd.c
+@@ -2500,11 +2500,12 @@ static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd)
+ 					|| oxu->reset_done[i] != 0)
+ 				continue;
+ 
+-			/* start 20 msec resume signaling from this port,
+-			 * and make hub_wq collect PORT_STAT_C_SUSPEND to
++			/* start USB_RESUME_TIMEOUT msec resume signaling from this
++			 * port, and make hub_wq collect PORT_STAT_C_SUSPEND to
+ 			 * stop that signaling.
+ 			 */
+-			oxu->reset_done[i] = jiffies + msecs_to_jiffies(20);
++			oxu->reset_done[i] = jiffies +
++				msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ 			oxu_dbg(oxu, "port %d remote wakeup\n", i + 1);
+ 			mod_timer(&hcd->rh_timer, oxu->reset_done[i]);
+ 		}
+diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
+index c4bcfaedeec9..2dcf197d304c 100644
+--- a/drivers/usb/host/r8a66597-hcd.c
++++ b/drivers/usb/host/r8a66597-hcd.c
+@@ -2300,7 +2300,7 @@ static int r8a66597_bus_resume(struct usb_hcd *hcd)
+ 		rh->port &= ~USB_PORT_STAT_SUSPEND;
+ 		rh->port |= USB_PORT_STAT_C_SUSPEND << 16;
+ 		r8a66597_mdfy(r8a66597, RESUME, RESUME | UACT, dvstctr_reg);
+-		msleep(50);
++		msleep(USB_RESUME_TIMEOUT);
+ 		r8a66597_mdfy(r8a66597, UACT, RESUME | UACT, dvstctr_reg);
+ 	}
+ 
+diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
+index 25fb1da8d3d7..573355498193 100644
+--- a/drivers/usb/host/sl811-hcd.c
++++ b/drivers/usb/host/sl811-hcd.c
+@@ -1259,7 +1259,7 @@ sl811h_hub_control(
+ 			sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
+ 
+ 			mod_timer(&sl811->timer, jiffies
+-					+ msecs_to_jiffies(20));
++					+ msecs_to_jiffies(USB_RESUME_TIMEOUT));
+ 			break;
+ 		case USB_PORT_FEAT_POWER:
+ 			port_power(sl811, 0);
+diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
+index 93e17b12fb33..98c66d88ebde 100644
+--- a/drivers/usb/host/uhci-hub.c
++++ b/drivers/usb/host/uhci-hub.c
+@@ -165,7 +165,7 @@ static void uhci_check_ports(struct uhci_hcd *uhci)
+ 				/* Port received a wakeup request */
+ 				set_bit(port, &uhci->resuming_ports);
+ 				uhci->ports_timeout = jiffies +
+-						msecs_to_jiffies(25);
++					msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ 				usb_hcd_start_port_resume(
+ 						&uhci_to_hcd(uhci)->self, port);
+ 
+@@ -337,7 +337,8 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ 			uhci_finish_suspend(uhci, port, port_addr);
+ 
+ 			/* USB v2.0 7.1.7.5 */
+-			uhci->ports_timeout = jiffies + msecs_to_jiffies(50);
++			uhci->ports_timeout = jiffies +
++				msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ 			break;
+ 		case USB_PORT_FEAT_POWER:
+ 			/* UHCI has no power switching */
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 2a924d500d8a..7dd25cedef94 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1574,7 +1574,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
+ 		} else {
+ 			xhci_dbg(xhci, "resume HS port %d\n", port_id);
+ 			bus_state->resume_done[faked_port_index] = jiffies +
+-				msecs_to_jiffies(20);
++				msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ 			set_bit(faked_port_index, &bus_state->resuming_ports);
+ 			mod_timer(&hcd->rh_timer,
+ 				  bus_state->resume_done[faked_port_index]);
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index 34cce3e38c49..fa5185868c05 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -99,6 +99,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/io.h>
+ #include <linux/dma-mapping.h>
++#include <linux/usb.h>
+ 
+ #include "musb_core.h"
+ 
+@@ -562,7 +563,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
+ 						(USB_PORT_STAT_C_SUSPEND << 16)
+ 						| MUSB_PORT_STAT_RESUME;
+ 				musb->rh_timer = jiffies
+-						 + msecs_to_jiffies(20);
++					+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
+ 				musb->need_finish_resume = 1;
+ 
+ 				musb->xceiv->otg->state = OTG_STATE_A_HOST;
+@@ -1596,16 +1597,30 @@ irqreturn_t musb_interrupt(struct musb *musb)
+ 		is_host_active(musb) ? "host" : "peripheral",
+ 		musb->int_usb, musb->int_tx, musb->int_rx);
+ 
+-	/* the core can interrupt us for multiple reasons; docs have
+-	 * a generic interrupt flowchart to follow
++	/**
++	 * According to Mentor Graphics' documentation, flowchart on page 98,
++	 * IRQ should be handled as follows:
++	 *
++	 * . Resume IRQ
++	 * . Session Request IRQ
++	 * . VBUS Error IRQ
++	 * . Suspend IRQ
++	 * . Connect IRQ
++	 * . Disconnect IRQ
++	 * . Reset/Babble IRQ
++	 * . SOF IRQ (we're not using this one)
++	 * . Endpoint 0 IRQ
++	 * . TX Endpoints
++	 * . RX Endpoints
++	 *
++	 * We will be following that flowchart in order to avoid any problems
++	 * that might arise with the internal Finite State Machine.
+ 	 */
++
+ 	if (musb->int_usb)
+ 		retval |= musb_stage0_irq(musb, musb->int_usb,
+ 				devctl);
+ 
+-	/* "stage 1" is handling endpoint irqs */
+-
+-	/* handle endpoint 0 first */
+ 	if (musb->int_tx & 1) {
+ 		if (is_host_active(musb))
+ 			retval |= musb_h_ep0_irq(musb);
+@@ -1613,37 +1628,31 @@ irqreturn_t musb_interrupt(struct musb *musb)
+ 			retval |= musb_g_ep0_irq(musb);
+ 	}
+ 
+-	/* RX on endpoints 1-15 */
+-	reg = musb->int_rx >> 1;
++	reg = musb->int_tx >> 1;
+ 	ep_num = 1;
+ 	while (reg) {
+ 		if (reg & 1) {
+-			/* musb_ep_select(musb->mregs, ep_num); */
+-			/* REVISIT just retval = ep->rx_irq(...) */
+ 			retval = IRQ_HANDLED;
+ 			if (is_host_active(musb))
+-				musb_host_rx(musb, ep_num);
++				musb_host_tx(musb, ep_num);
+ 			else
+-				musb_g_rx(musb, ep_num);
++				musb_g_tx(musb, ep_num);
+ 		}
+-
+ 		reg >>= 1;
+ 		ep_num++;
+ 	}
+ 
+-	/* TX on endpoints 1-15 */
+-	reg = musb->int_tx >> 1;
++	reg = musb->int_rx >> 1;
+ 	ep_num = 1;
+ 	while (reg) {
+ 		if (reg & 1) {
+-			/* musb_ep_select(musb->mregs, ep_num); */
+-			/* REVISIT just retval |= ep->tx_irq(...) */
+ 			retval = IRQ_HANDLED;
+ 			if (is_host_active(musb))
+-				musb_host_tx(musb, ep_num);
++				musb_host_rx(musb, ep_num);
+ 			else
+-				musb_g_tx(musb, ep_num);
++				musb_g_rx(musb, ep_num);
+ 		}
++
+ 		reg >>= 1;
+ 		ep_num++;
+ 	}
+@@ -2460,7 +2469,7 @@ static int musb_resume(struct device *dev)
+ 	if (musb->need_finish_resume) {
+ 		musb->need_finish_resume = 0;
+ 		schedule_delayed_work(&musb->finish_resume_work,
+-				      msecs_to_jiffies(20));
++				      msecs_to_jiffies(USB_RESUME_TIMEOUT));
+ 	}
+ 
+ 	/*
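
The reordered musb IRQ path walks each endpoint interrupt mask with the same test-lowest-bit-and-shift loop, now visiting TX endpoints before RX. A standalone sketch of that bitmask walk (the mask value is made up for illustration):

    #include <stdio.h>

    int main(void)
    {
        /* Models musb->int_tx: bit N set means endpoint N raised an IRQ.
         * Bit 0 (endpoint 0) is handled separately, so shift it out first. */
        unsigned int reg = 0x8a >> 1;   /* endpoints 1, 3 and 7 pending */
        int ep_num = 1;

        while (reg) {
            if (reg & 1)
                printf("servicing endpoint %d\n", ep_num);
            reg >>= 1;
            ep_num++;
        }
        return 0;
    }
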
+diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
+index b072420e44f5..a9c47315f06b 100644
+--- a/drivers/usb/musb/musb_virthub.c
++++ b/drivers/usb/musb/musb_virthub.c
+@@ -137,7 +137,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
+ 		/* later, GetPortStatus will stop RESUME signaling */
+ 		musb->port1_status |= MUSB_PORT_STAT_RESUME;
+ 		schedule_delayed_work(&musb->finish_resume_work,
+-				      msecs_to_jiffies(20));
++				      msecs_to_jiffies(USB_RESUME_TIMEOUT));
+ 	}
+ }
+ 
+diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
+index 2f9735b35338..d1cd6b50f520 100644
+--- a/drivers/usb/phy/phy.c
++++ b/drivers/usb/phy/phy.c
+@@ -81,7 +81,9 @@ static void devm_usb_phy_release(struct device *dev, void *res)
+ 
+ static int devm_usb_phy_match(struct device *dev, void *res, void *match_data)
+ {
+-	return res == match_data;
++	struct usb_phy **phy = res;
++
++	return *phy == match_data;
+ }
+ 
+ /**
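
The devm_usb_phy_match() fix is the classic devres pitfall: the managed-resource area stores a pointer to the phy, so the match callback must compare *phy rather than the area address itself. A standalone model of the broken and fixed comparisons (names are illustrative):

    #include <stdio.h>

    struct usb_phy { int id; };

    /* Broken: compares the devres area address with the phy pointer. */
    static int match_broken(void *res, void *match_data)
    {
        return res == match_data;
    }

    /* Fixed: the area holds a struct usb_phy *, so dereference it. */
    static int match_fixed(void *res, void *match_data)
    {
        struct usb_phy **phy = res;

        return *phy == match_data;
    }

    int main(void)
    {
        struct usb_phy phy = { .id = 1 };
        struct usb_phy *area = &phy;    /* the devres area stores the pointer */

        printf("broken: %d\n", match_broken(&area, &phy));  /* 0: never matches */
        printf("fixed:  %d\n", match_fixed(&area, &phy));   /* 1 */
        return 0;
    }
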
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index 995986b8e36b..d925f55e4857 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -862,6 +862,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ 	    i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
+ 		int elf_prot = 0, elf_flags;
+ 		unsigned long k, vaddr;
++		unsigned long total_size = 0;
+ 
+ 		if (elf_ppnt->p_type != PT_LOAD)
+ 			continue;
+@@ -924,10 +925,16 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ #else
+ 			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
+ #endif
++			total_size = total_mapping_size(elf_phdata,
++							loc->elf_ex.e_phnum);
++			if (!total_size) {
++				error = -EINVAL;
++				goto out_free_dentry;
++			}
+ 		}
+ 
+ 		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
+-				elf_prot, elf_flags, 0);
++				elf_prot, elf_flags, total_size);
+ 		if (BAD_ADDR(error)) {
+ 			retval = IS_ERR((void *)error) ?
+ 				PTR_ERR((void*)error) : -EINVAL;
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index a684086c3c81..4623a55d9c5b 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -6967,12 +6967,11 @@ static int __btrfs_free_reserved_extent(struct btrfs_root *root,
+ 		return -ENOSPC;
+ 	}
+ 
+-	if (btrfs_test_opt(root, DISCARD))
+-		ret = btrfs_discard_extent(root, start, len, NULL);
+-
+ 	if (pin)
+ 		pin_down_extent(root, cache, start, len, 1);
+ 	else {
++		if (btrfs_test_opt(root, DISCARD))
++			ret = btrfs_discard_extent(root, start, len, NULL);
+ 		btrfs_add_free_space(cache, start, len);
+ 		btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
+ 	}
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index d49fe8a0f6b5..09a566acc10f 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -2897,6 +2897,9 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 len,
+ 	if (src == dst)
+ 		return -EINVAL;
+ 
++	if (len == 0)
++		return 0;
++
+ 	btrfs_double_lock(src, loff, dst, dst_loff, len);
+ 
+ 	ret = extent_same_check_offsets(src, loff, len);
+@@ -3626,6 +3629,11 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
+ 	if (off + len == src->i_size)
+ 		len = ALIGN(src->i_size, bs) - off;
+ 
++	if (len == 0) {
++		ret = 0;
++		goto out_unlock;
++	}
++
+ 	/* verify the end result is block aligned */
+ 	if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) ||
+ 	    !IS_ALIGNED(destoff, bs))
+diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
+index 47b19465f0dc..01bad724b5f7 100644
+--- a/fs/btrfs/xattr.c
++++ b/fs/btrfs/xattr.c
+@@ -360,22 +360,42 @@ const struct xattr_handler *btrfs_xattr_handlers[] = {
+ /*
+  * Check if the attribute is in a supported namespace.
+  *
+- * This applied after the check for the synthetic attributes in the system
++ * This is applied after the check for the synthetic attributes in the system
+  * namespace.
+  */
+-static bool btrfs_is_valid_xattr(const char *name)
++static int btrfs_is_valid_xattr(const char *name)
+ {
+-	return !strncmp(name, XATTR_SECURITY_PREFIX,
+-			XATTR_SECURITY_PREFIX_LEN) ||
+-	       !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) ||
+-	       !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
+-	       !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) ||
+-		!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN);
++	int len = strlen(name);
++	int prefixlen = 0;
++
++	if (!strncmp(name, XATTR_SECURITY_PREFIX,
++			XATTR_SECURITY_PREFIX_LEN))
++		prefixlen = XATTR_SECURITY_PREFIX_LEN;
++	else if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
++		prefixlen = XATTR_SYSTEM_PREFIX_LEN;
++	else if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
++		prefixlen = XATTR_TRUSTED_PREFIX_LEN;
++	else if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
++		prefixlen = XATTR_USER_PREFIX_LEN;
++	else if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
++		prefixlen = XATTR_BTRFS_PREFIX_LEN;
++	else
++		return -EOPNOTSUPP;
++
++	/*
++	 * The name cannot consist of just a prefix
++	 */
++	if (len <= prefixlen)
++		return -EINVAL;
++
++	return 0;
+ }
+ 
+ ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
+ 		       void *buffer, size_t size)
+ {
++	int ret;
++
+ 	/*
+ 	 * If this is a request for a synthetic attribute in the system.*
+ 	 * namespace use the generic infrastructure to resolve a handler
+@@ -384,8 +404,9 @@ ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
+ 	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
+ 		return generic_getxattr(dentry, name, buffer, size);
+ 
+-	if (!btrfs_is_valid_xattr(name))
+-		return -EOPNOTSUPP;
++	ret = btrfs_is_valid_xattr(name);
++	if (ret)
++		return ret;
+ 	return __btrfs_getxattr(dentry->d_inode, name, buffer, size);
+ }
+ 
+@@ -393,6 +414,7 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ 		   size_t size, int flags)
+ {
+ 	struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root;
++	int ret;
+ 
+ 	/*
+ 	 * The permission on security.* and system.* is not checked
+@@ -409,8 +431,9 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ 	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
+ 		return generic_setxattr(dentry, name, value, size, flags);
+ 
+-	if (!btrfs_is_valid_xattr(name))
+-		return -EOPNOTSUPP;
++	ret = btrfs_is_valid_xattr(name);
++	if (ret)
++		return ret;
+ 
+ 	if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
+ 		return btrfs_set_prop(dentry->d_inode, name,
+@@ -426,6 +449,7 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ int btrfs_removexattr(struct dentry *dentry, const char *name)
+ {
+ 	struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root;
++	int ret;
+ 
+ 	/*
+ 	 * The permission on security.* and system.* is not checked
+@@ -442,8 +466,9 @@ int btrfs_removexattr(struct dentry *dentry, const char *name)
+ 	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
+ 		return generic_removexattr(dentry, name);
+ 
+-	if (!btrfs_is_valid_xattr(name))
+-		return -EOPNOTSUPP;
++	ret = btrfs_is_valid_xattr(name);
++	if (ret)
++		return ret;
+ 
+ 	if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
+ 		return btrfs_set_prop(dentry->d_inode, name,
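
With this change a caller can distinguish an unsupported namespace from a name that is nothing but a supported prefix: the former still returns -EOPNOTSUPP, the latter now returns -EINVAL. A reduced standalone model of the check (single namespace only; the function name is hypothetical):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Simplified model of the patched check: only the "user." namespace. */
    static int is_valid_xattr(const char *name)
    {
        const char *prefix = "user.";
        size_t plen = strlen(prefix);

        if (strncmp(name, prefix, plen) != 0)
            return -EOPNOTSUPP;     /* unsupported namespace */
        if (strlen(name) <= plen)
            return -EINVAL;         /* the prefix alone is not a valid name */
        return 0;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               is_valid_xattr("user.comment"),  /* 0 */
               is_valid_xattr("user."),         /* -EINVAL */
               is_valid_xattr("foo.bar"));      /* -EOPNOTSUPP */
        return 0;
    }
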
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 2291923dae4e..5e8ae7811220 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -1865,7 +1865,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ 			  struct inode *inode)
+ {
+ 	struct inode *dir = dentry->d_parent->d_inode;
+-	struct buffer_head *bh;
++	struct buffer_head *bh = NULL;
+ 	struct ext4_dir_entry_2 *de;
+ 	struct ext4_dir_entry_tail *t;
+ 	struct super_block *sb;
+@@ -1889,14 +1889,14 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ 			return retval;
+ 		if (retval == 1) {
+ 			retval = 0;
+-			return retval;
++			goto out;
+ 		}
+ 	}
+ 
+ 	if (is_dx(dir)) {
+ 		retval = ext4_dx_add_entry(handle, dentry, inode);
+ 		if (!retval || (retval != ERR_BAD_DX_DIR))
+-			return retval;
++			goto out;
+ 		ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
+ 		dx_fallback++;
+ 		ext4_mark_inode_dirty(handle, dir);
+@@ -1908,14 +1908,15 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ 			return PTR_ERR(bh);
+ 
+ 		retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
+-		if (retval != -ENOSPC) {
+-			brelse(bh);
+-			return retval;
+-		}
++		if (retval != -ENOSPC)
++			goto out;
+ 
+ 		if (blocks == 1 && !dx_fallback &&
+-		    EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX))
+-			return make_indexed_dir(handle, dentry, inode, bh);
++		    EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) {
++			retval = make_indexed_dir(handle, dentry, inode, bh);
++			bh = NULL; /* make_indexed_dir releases bh */
++			goto out;
++		}
+ 		brelse(bh);
+ 	}
+ 	bh = ext4_append(handle, dir, &block);
+@@ -1931,6 +1932,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ 	}
+ 
+ 	retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
++out:
+ 	brelse(bh);
+ 	if (retval == 0)
+ 		ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
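
The ext4_add_entry() rework converts the scattered brelse()/return pairs into a single out: label, setting bh to NULL when a callee such as make_indexed_dir() consumes the buffer. A standalone sketch of that single-exit cleanup idiom (the malloc stands in for a buffer_head reference; illustration only):

    #include <stdio.h>
    #include <stdlib.h>

    /* Single-exit cleanup as in the patched ext4_add_entry(): every path
     * funnels through `out:` so the buffer is released exactly once. */
    static int add_entry(int fail_early)
    {
        int ret = 0;
        char *bh = malloc(32);      /* stands in for a buffer_head */

        if (!bh)
            return -1;

        if (fail_early) {
            ret = -1;
            goto out;               /* no separate free() before return */
        }

        /* A callee that consumes the buffer would be followed by
         * bh = NULL, mirroring the "make_indexed_dir releases bh" case. */
    out:
        free(bh);                   /* free(NULL) is a safe no-op */
        return ret;
    }

    int main(void)
    {
        printf("%d %d\n", add_entry(0), add_entry(1));
        return 0;
    }
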
+diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
+index d12ff4e2dbe7..251a1f04be2f 100644
+--- a/fs/lockd/svcsubs.c
++++ b/fs/lockd/svcsubs.c
+@@ -31,7 +31,7 @@
+ static struct hlist_head	nlm_files[FILE_NRHASH];
+ static DEFINE_MUTEX(nlm_file_mutex);
+ 
+-#ifdef NFSD_DEBUG
++#ifdef CONFIG_SUNRPC_DEBUG
+ static inline void nlm_debug_print_fh(char *msg, struct nfs_fh *f)
+ {
+ 	u32 *fhp = (u32*)f->data;
+diff --git a/fs/namei.c b/fs/namei.c
+index bc35b02883bb..3abfbda53574 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1585,7 +1585,8 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
+ 
+ 	if (should_follow_link(path->dentry, follow)) {
+ 		if (nd->flags & LOOKUP_RCU) {
+-			if (unlikely(unlazy_walk(nd, path->dentry))) {
++			if (unlikely(nd->path.mnt != path->mnt ||
++				     unlazy_walk(nd, path->dentry))) {
+ 				err = -ECHILD;
+ 				goto out_err;
+ 			}
+@@ -3028,7 +3029,8 @@ finish_lookup:
+ 
+ 	if (should_follow_link(path->dentry, !symlink_ok)) {
+ 		if (nd->flags & LOOKUP_RCU) {
+-			if (unlikely(unlazy_walk(nd, path->dentry))) {
++			if (unlikely(nd->path.mnt != path->mnt ||
++				     unlazy_walk(nd, path->dentry))) {
+ 				error = -ECHILD;
+ 				goto out;
+ 			}
+diff --git a/fs/namespace.c b/fs/namespace.c
+index cd1e9681a0cf..07d0562290a5 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -1323,14 +1323,15 @@ static inline void namespace_lock(void)
+ 	down_write(&namespace_sem);
+ }
+ 
++enum umount_tree_flags {
++	UMOUNT_SYNC = 1,
++	UMOUNT_PROPAGATE = 2,
++};
+ /*
+  * mount_lock must be held
+  * namespace_sem must be held for write
+- * how = 0 => just this tree, don't propagate
+- * how = 1 => propagate; we know that nobody else has reference to any victims
+- * how = 2 => lazy umount
+  */
+-void umount_tree(struct mount *mnt, int how)
++static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
+ {
+ 	HLIST_HEAD(tmp_list);
+ 	struct mount *p;
+@@ -1344,7 +1345,7 @@ void umount_tree(struct mount *mnt, int how)
+ 	hlist_for_each_entry(p, &tmp_list, mnt_hash)
+ 		list_del_init(&p->mnt_child);
+ 
+-	if (how)
++	if (how & UMOUNT_PROPAGATE)
+ 		propagate_umount(&tmp_list);
+ 
+ 	hlist_for_each_entry(p, &tmp_list, mnt_hash) {
+@@ -1352,7 +1353,7 @@ void umount_tree(struct mount *mnt, int how)
+ 		list_del_init(&p->mnt_list);
+ 		__touch_mnt_namespace(p->mnt_ns);
+ 		p->mnt_ns = NULL;
+-		if (how < 2)
++		if (how & UMOUNT_SYNC)
+ 			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
+ 		if (mnt_has_parent(p)) {
+ 			hlist_del_init(&p->mnt_mp_list);
+@@ -1457,14 +1458,14 @@ static int do_umount(struct mount *mnt, int flags)
+ 
+ 	if (flags & MNT_DETACH) {
+ 		if (!list_empty(&mnt->mnt_list))
+-			umount_tree(mnt, 2);
++			umount_tree(mnt, UMOUNT_PROPAGATE);
+ 		retval = 0;
+ 	} else {
+ 		shrink_submounts(mnt);
+ 		retval = -EBUSY;
+ 		if (!propagate_mount_busy(mnt, 2)) {
+ 			if (!list_empty(&mnt->mnt_list))
+-				umount_tree(mnt, 1);
++				umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
+ 			retval = 0;
+ 		}
+ 	}
+@@ -1496,7 +1497,7 @@ void __detach_mounts(struct dentry *dentry)
+ 	lock_mount_hash();
+ 	while (!hlist_empty(&mp->m_list)) {
+ 		mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
+-		umount_tree(mnt, 2);
++		umount_tree(mnt, 0);
+ 	}
+ 	unlock_mount_hash();
+ 	put_mountpoint(mp);
+@@ -1658,7 +1659,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
+ out:
+ 	if (res) {
+ 		lock_mount_hash();
+-		umount_tree(res, 0);
++		umount_tree(res, UMOUNT_SYNC);
+ 		unlock_mount_hash();
+ 	}
+ 	return q;
+@@ -1682,7 +1683,7 @@ void drop_collected_mounts(struct vfsmount *mnt)
+ {
+ 	namespace_lock();
+ 	lock_mount_hash();
+-	umount_tree(real_mount(mnt), 0);
++	umount_tree(real_mount(mnt), UMOUNT_SYNC);
+ 	unlock_mount_hash();
+ 	namespace_unlock();
+ }
+@@ -1865,7 +1866,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
+  out_cleanup_ids:
+ 	while (!hlist_empty(&tree_list)) {
+ 		child = hlist_entry(tree_list.first, struct mount, mnt_hash);
+-		umount_tree(child, 0);
++		umount_tree(child, UMOUNT_SYNC);
+ 	}
+ 	unlock_mount_hash();
+ 	cleanup_group_ids(source_mnt, NULL);
+@@ -2045,7 +2046,7 @@ static int do_loopback(struct path *path, const char *old_name,
+ 	err = graft_tree(mnt, parent, mp);
+ 	if (err) {
+ 		lock_mount_hash();
+-		umount_tree(mnt, 0);
++		umount_tree(mnt, UMOUNT_SYNC);
+ 		unlock_mount_hash();
+ 	}
+ out2:
+@@ -2416,7 +2417,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
+ 	while (!list_empty(&graveyard)) {
+ 		mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
+ 		touch_mnt_namespace(mnt->mnt_ns);
+-		umount_tree(mnt, 1);
++		umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
+ 	}
+ 	unlock_mount_hash();
+ 	namespace_unlock();
+@@ -2487,7 +2488,7 @@ static void shrink_submounts(struct mount *mnt)
+ 			m = list_first_entry(&graveyard, struct mount,
+ 						mnt_expire);
+ 			touch_mnt_namespace(m->mnt_ns);
+-			umount_tree(m, 1);
++			umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC);
+ 		}
+ 	}
+ }
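
For reference, the old magic `how` values map onto the new flags as sketched below at most call sites; note the patch intentionally changes __detach_mounts() from lazy (2) to no flags, so that one site does not follow the mechanical mapping. The helper here is hypothetical, not part of the patch:

    #include <stdio.h>

    enum umount_tree_flags {
        UMOUNT_SYNC = 1,
        UMOUNT_PROPAGATE = 2,
    };

    /* How the old integer "how" argument translates to the new flags. */
    static int old_how_to_flags(int how)
    {
        switch (how) {
        case 0: return UMOUNT_SYNC;                     /* just this tree */
        case 1: return UMOUNT_PROPAGATE | UMOUNT_SYNC;  /* normal umount */
        case 2: return UMOUNT_PROPAGATE;                /* lazy umount */
        }
        return 0;
    }

    int main(void)
    {
        for (int how = 0; how <= 2; how++)
            printf("how=%d -> flags=%d\n", how, old_how_to_flags(how));
        return 0;
    }
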
+diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
+index cb4376b78ed9..5303646f1e7a 100644
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -7338,6 +7338,11 @@ nfs4_stat_to_errno(int stat)
+ 	.p_name   = #proc,					\
+ }
+ 
++#define STUB(proc)		\
++[NFSPROC4_CLNT_##proc] = {	\
++	.p_name = #proc,	\
++}
++
+ struct rpc_procinfo	nfs4_procedures[] = {
+ 	PROC(READ,		enc_read,		dec_read),
+ 	PROC(WRITE,		enc_write,		dec_write),
+@@ -7390,6 +7395,7 @@ struct rpc_procinfo	nfs4_procedures[] = {
+ 	PROC(SECINFO_NO_NAME,	enc_secinfo_no_name,	dec_secinfo_no_name),
+ 	PROC(TEST_STATEID,	enc_test_stateid,	dec_test_stateid),
+ 	PROC(FREE_STATEID,	enc_free_stateid,	dec_free_stateid),
++	STUB(GETDEVICELIST),
+ 	PROC(BIND_CONN_TO_SESSION,
+ 			enc_bind_conn_to_session, dec_bind_conn_to_session),
+ 	PROC(DESTROY_CLIENTID,	enc_destroy_clientid,	dec_destroy_clientid),
+diff --git a/fs/nfs/read.c b/fs/nfs/read.c
+index c91a4799c562..beff2769c5c5 100644
+--- a/fs/nfs/read.c
++++ b/fs/nfs/read.c
+@@ -269,7 +269,7 @@ int nfs_readpage(struct file *file, struct page *page)
+ 	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
+ 		page, PAGE_CACHE_SIZE, page_file_index(page));
+ 	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
+-	nfs_inc_stats(inode, NFSIOS_READPAGES);
++	nfs_add_stats(inode, NFSIOS_READPAGES, 1);
+ 
+ 	/*
+ 	 * Try to flush any pending writes to the file..
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index af3af685a9e3..d489ff3f438f 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -575,7 +575,7 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, st
+ 	int ret;
+ 
+ 	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
+-	nfs_inc_stats(inode, NFSIOS_WRITEPAGES);
++	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
+ 
+ 	nfs_pageio_cond_complete(pgio, page_file_index(page));
+ 	ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index ac71d13c69ef..2cd2d64ff1e8 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1028,6 +1028,8 @@ nfsd4_fallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 		dprintk("NFSD: nfsd4_fallocate: couldn't process stateid!\n");
+ 		return status;
+ 	}
++	if (!file)
++		return nfserr_bad_stateid;
+ 
+ 	status = nfsd4_vfs_fallocate(rqstp, &cstate->current_fh, file,
+ 				     fallocate->falloc_offset,
+@@ -1067,6 +1069,8 @@ nfsd4_seek(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 		dprintk("NFSD: nfsd4_seek: couldn't process stateid!\n");
+ 		return status;
+ 	}
++	if (!file)
++		return nfserr_bad_stateid;
+ 
+ 	switch (seek->seek_whence) {
+ 	case NFS4_CONTENT_DATA:
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 7cfb905a1e90..ddacc436a77a 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -1162,7 +1162,7 @@ hash_sessionid(struct nfs4_sessionid *sessionid)
+ 	return sid->sequence % SESSION_HASH_SIZE;
+ }
+ 
+-#ifdef NFSD_DEBUG
++#ifdef CONFIG_SUNRPC_DEBUG
+ static inline void
+ dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
+ {
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 15f7b73e0c0f..281d12640c0b 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -3246,6 +3246,7 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
+ 	unsigned long maxcount;
+ 	struct xdr_stream *xdr = &resp->xdr;
+ 	struct file *file = read->rd_filp;
++	struct svc_fh *fhp = read->rd_fhp;
+ 	int starting_len = xdr->buf->len;
+ 	struct raparms *ra;
+ 	__be32 *p;
+@@ -3269,12 +3270,15 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
+ 	maxcount = min_t(unsigned long, maxcount, (xdr->buf->buflen - xdr->buf->len));
+ 	maxcount = min_t(unsigned long, maxcount, read->rd_length);
+ 
+-	if (!read->rd_filp) {
++	if (read->rd_filp)
++		err = nfsd_permission(resp->rqstp, fhp->fh_export,
++				fhp->fh_dentry,
++				NFSD_MAY_READ|NFSD_MAY_OWNER_OVERRIDE);
++	else
+ 		err = nfsd_get_tmp_read_open(resp->rqstp, read->rd_fhp,
+ 						&file, &ra);
+-		if (err)
+-			goto err_truncate;
+-	}
++	if (err)
++		goto err_truncate;
+ 
+ 	if (file->f_op->splice_read && test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags))
+ 		err = nfsd4_encode_splice_read(resp, read, file, maxcount);
+diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
+index 33a46a8dfaf7..403d8e14fcaf 100644
+--- a/fs/nfsd/nfsd.h
++++ b/fs/nfsd/nfsd.h
+@@ -24,7 +24,7 @@
+ #include "export.h"
+ 
+ #undef ifdebug
+-#ifdef NFSD_DEBUG
++#ifdef CONFIG_SUNRPC_DEBUG
+ # define ifdebug(flag)		if (nfsd_debug & NFSDDBG_##flag)
+ #else
+ # define ifdebug(flag)		if (0)
+diff --git a/fs/open.c b/fs/open.c
+index 813be037b412..d45a7b919504 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -570,6 +570,7 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
+ 	uid = make_kuid(current_user_ns(), user);
+ 	gid = make_kgid(current_user_ns(), group);
+ 
++retry_deleg:
+ 	newattrs.ia_valid =  ATTR_CTIME;
+ 	if (user != (uid_t) -1) {
+ 		if (!uid_valid(uid))
+@@ -586,7 +587,6 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
+ 	if (!S_ISDIR(inode->i_mode))
+ 		newattrs.ia_valid |=
+ 			ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
+-retry_deleg:
+ 	mutex_lock(&inode->i_mutex);
+ 	error = security_path_chown(path, uid, gid);
+ 	if (!error)
+diff --git a/fs/pnode.h b/fs/pnode.h
+index 4a246358b031..16afc3d6d2f2 100644
+--- a/fs/pnode.h
++++ b/fs/pnode.h
+@@ -47,7 +47,6 @@ int get_dominating_id(struct mount *mnt, const struct path *root);
+ unsigned int mnt_get_count(struct mount *mnt);
+ void mnt_set_mountpoint(struct mount *, struct mountpoint *,
+ 			struct mount *);
+-void umount_tree(struct mount *, int);
+ struct mount *copy_tree(struct mount *, struct dentry *, int);
+ bool is_path_reachable(struct mount *, struct dentry *,
+ 			 const struct path *root);
+diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
+index bbef17368e49..9f2847cdf3ce 100644
+--- a/include/acpi/actypes.h
++++ b/include/acpi/actypes.h
+@@ -199,9 +199,29 @@ typedef int s32;
+ typedef s32 acpi_native_int;
+ 
+ typedef u32 acpi_size;
++
++#ifdef ACPI_32BIT_PHYSICAL_ADDRESS
++
++/*
++ * OSPMs can define this to shrink the size of the structures for a
++ * 32-bit non-PAE environment. The ASL compiler may always define this
++ * to generate 32-bit OSPM-compliant tables.
++ */
+ typedef u32 acpi_io_address;
+ typedef u32 acpi_physical_address;
+ 
++#else				/* ACPI_32BIT_PHYSICAL_ADDRESS */
++
++/*
++ * It is reported that, after some calculations, the physical addresses
++ * can wrap past the 32-bit boundary in a 32-bit PAE environment.
++ * https://bugzilla.kernel.org/show_bug.cgi?id=87971
++ */
++typedef u64 acpi_io_address;
++typedef u64 acpi_physical_address;
++
++#endif				/* ACPI_32BIT_PHYSICAL_ADDRESS */
++
+ #define ACPI_MAX_PTR                    ACPI_UINT32_MAX
+ #define ACPI_SIZE_MAX                   ACPI_UINT32_MAX
+ 
+diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
+index 5f8cc1fa3278..9e1ed2e380b6 100644
+--- a/include/acpi/platform/acenv.h
++++ b/include/acpi/platform/acenv.h
+@@ -76,6 +76,7 @@
+ #define ACPI_LARGE_NAMESPACE_NODE
+ #define ACPI_DATA_TABLE_DISASSEMBLY
+ #define ACPI_SINGLE_THREADED
++#define ACPI_32BIT_PHYSICAL_ADDRESS
+ #endif
+ 
+ /* acpi_exec configuration. Multithreaded with full AML debugger */
+diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
+index ac4888dc86bc..36e0f219e507 100644
+--- a/include/kvm/arm_vgic.h
++++ b/include/kvm/arm_vgic.h
+@@ -113,6 +113,7 @@ struct vgic_ops {
+ 	void	(*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr);
+ 	u64	(*get_elrsr)(const struct kvm_vcpu *vcpu);
+ 	u64	(*get_eisr)(const struct kvm_vcpu *vcpu);
++	void	(*clear_eisr)(struct kvm_vcpu *vcpu);
+ 	u32	(*get_interrupt_status)(const struct kvm_vcpu *vcpu);
+ 	void	(*enable_underflow)(struct kvm_vcpu *vcpu);
+ 	void	(*disable_underflow)(struct kvm_vcpu *vcpu);
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index bbfceb756452..33b52fb0e20f 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -48,7 +48,7 @@ struct bpf_map *bpf_map_get(struct fd f);
+ 
+ /* function argument constraints */
+ enum bpf_arg_type {
+-	ARG_ANYTHING = 0,	/* any argument is ok */
++	ARG_DONTCARE = 0,	/* unused argument in helper function */
+ 
+ 	/* the following constraints used to prototype
+ 	 * bpf_map_lookup/update/delete_elem() functions
+@@ -62,6 +62,8 @@ enum bpf_arg_type {
+ 	 */
+ 	ARG_PTR_TO_STACK,	/* any pointer to eBPF program stack */
+ 	ARG_CONST_STACK_SIZE,	/* number of bytes accessed from stack */
++
++	ARG_ANYTHING,		/* any (initialized) argument is ok */
+ };
+ 
+ /* type of values returned from helper functions */
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 85ab7d72b54c..5fcc6064247b 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -762,6 +762,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
+ 
+ struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
+ 			    int node);
++struct sk_buff *__build_skb(void *data, unsigned int frag_size);
+ struct sk_buff *build_skb(void *data, unsigned int frag_size);
+ static inline struct sk_buff *alloc_skb(unsigned int size,
+ 					gfp_t priority)
+@@ -2998,6 +2999,18 @@ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
+  */
+ #define CHECKSUM_BREAK 76
+ 
++/* Unset checksum-complete
++ *
++ * Unsetting checksum-complete is appropriate when the packet is being
++ * modified (uncompressed, for instance) and the checksum-complete
++ * value is invalidated.
++ */
++static inline void skb_checksum_complete_unset(struct sk_buff *skb)
++{
++	if (skb->ip_summed == CHECKSUM_COMPLETE)
++		skb->ip_summed = CHECKSUM_NONE;
++}
++
+ /* Validate (init) checksum based on checksum complete.
+  *
+  * Return values:
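
skb_checksum_complete_unset() exists so code that rewrites a packet's payload can invalidate a now-stale CHECKSUM_COMPLETE value instead of leaving the stack to trust it. A reduced standalone model of the helper (the enum values here are illustrative, not the kernel's ip_summed encoding):

    #include <stdio.h>

    enum { CHECKSUM_NONE, CHECKSUM_COMPLETE };  /* reduced ip_summed values */

    struct skb { int ip_summed; };

    /* Model of skb_checksum_complete_unset(): invalidate a full checksum
     * after the payload has been modified. */
    static void checksum_complete_unset(struct skb *skb)
    {
        if (skb->ip_summed == CHECKSUM_COMPLETE)
            skb->ip_summed = CHECKSUM_NONE;
    }

    int main(void)
    {
        struct skb skb = { .ip_summed = CHECKSUM_COMPLETE };

        /* ... payload decompressed in place here ... */
        checksum_complete_unset(&skb);
        printf("ip_summed=%d\n", skb.ip_summed);    /* 0 == CHECKSUM_NONE */
        return 0;
    }
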
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index 058a7698d7e3..335b413e5980 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -205,6 +205,32 @@ void usb_put_intf(struct usb_interface *intf);
+ #define USB_MAXINTERFACES	32
+ #define USB_MAXIADS		(USB_MAXINTERFACES/2)
+ 
++/*
++ * USB Resume Timer: Every Host controller driver should drive the resume
++ * signalling on the bus for the amount of time defined by this macro.
++ *
++ * That way we will have a 'stable' behavior among all HCDs supported by Linux.
++ *
++ * Note that the USB Specification states we should drive resume for *at least*
++ * 20 ms, but it doesn't give an upper bound. This creates two possible
++ * situations which we want to avoid:
++ *
++ * (a) sometimes an msleep(20) might expire slightly before 20 ms, which causes
++ * us to fail USB Electrical Tests, thus failing Certification
++ *
++ * (b) Some (many) devices actually need more than 20 ms of resume signalling,
++ * and while we can argue that's against the USB Specification, we don't have
++ * control over which devices a certification laboratory will be using for
++ * certification. If CertLab uses a device which was tested against Windows and
++ * that happens to have relaxed resume signalling rules, we might fall into
++ * situations where we fail interoperability and electrical tests.
++ *
++ * In order to avoid both conditions, we're using a 40 ms resume timeout, which
++ * should cope with both LPJ calibration errors and devices not following every
++ * detail of the USB Specification.
++ */
++#define USB_RESUME_TIMEOUT	40 /* ms */
++
+ /**
+  * struct usb_interface_cache - long-term representation of a device interface
+  * @num_altsetting: number of altsettings defined.
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index 672150b6aaf5..985ca4c907fe 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -524,7 +524,7 @@ struct se_cmd {
+ 	sense_reason_t		(*execute_cmd)(struct se_cmd *);
+ 	sense_reason_t		(*execute_rw)(struct se_cmd *, struct scatterlist *,
+ 					      u32, enum dma_data_direction);
+-	sense_reason_t (*transport_complete_callback)(struct se_cmd *);
++	sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool);
+ 
+ 	unsigned char		*t_task_cdb;
+ 	unsigned char		__t_task_cdb[TCM_MAX_COMMAND_SIZE];
+diff --git a/include/uapi/linux/nfsd/debug.h b/include/uapi/linux/nfsd/debug.h
+index 1fdc95bb2375..c021bbaf21b7 100644
+--- a/include/uapi/linux/nfsd/debug.h
++++ b/include/uapi/linux/nfsd/debug.h
+@@ -12,14 +12,6 @@
+ #include <linux/sunrpc/debug.h>
+ 
+ /*
+- * Enable debugging for nfsd.
+- * Requires RPC_DEBUG.
+- */
+-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
+-# define NFSD_DEBUG		1
+-#endif
+-
+-/*
+  * knfsd debug flags
+  */
+ #define NFSDDBG_SOCK		0x0001
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 36508e69e92a..5d8ea3d8a897 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -755,7 +755,7 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
+ 	enum bpf_reg_type expected_type;
+ 	int err = 0;
+ 
+-	if (arg_type == ARG_ANYTHING)
++	if (arg_type == ARG_DONTCARE)
+ 		return 0;
+ 
+ 	if (reg->type == NOT_INIT) {
+@@ -763,6 +763,9 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
+ 		return -EACCES;
+ 	}
+ 
++	if (arg_type == ARG_ANYTHING)
++		return 0;
++
+ 	if (arg_type == ARG_PTR_TO_STACK || arg_type == ARG_PTR_TO_MAP_KEY ||
+ 	    arg_type == ARG_PTR_TO_MAP_VALUE) {
+ 		expected_type = PTR_TO_STACK;
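
The verifier change is all about ordering: ARG_DONTCARE still short-circuits before any register inspection, while ARG_ANYTHING is now checked only after the NOT_INIT rejection, so "anything" means "any initialized value". A standalone model of that ordering (enums and return codes are simplified stand-ins):

    #include <stdio.h>

    enum arg_type { ARG_DONTCARE, ARG_ANYTHING, ARG_PTR_TO_STACK };
    enum reg_type { NOT_INIT, INITIALIZED };

    /* Mirrors the patched ordering: DONTCARE short-circuits before the
     * init check; ANYTHING only after the register is known initialized. */
    static int check_arg(enum reg_type reg, enum arg_type arg)
    {
        if (arg == ARG_DONTCARE)
            return 0;               /* unused argument: accept anything */
        if (reg == NOT_INIT)
            return -1;              /* EACCES in the real verifier */
        if (arg == ARG_ANYTHING)
            return 0;               /* initialized value of any kind */
        /* ... pointer-type checks would continue here ... */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", check_arg(NOT_INIT, ARG_DONTCARE));  /* 0: skipped */
        printf("%d\n", check_arg(NOT_INIT, ARG_ANYTHING));  /* -1: rejected */
        return 0;
    }
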
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index 1eb9d90c3af9..5009263eba97 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -697,6 +697,8 @@ static int ptrace_peek_siginfo(struct task_struct *child,
+ static int ptrace_resume(struct task_struct *child, long request,
+ 			 unsigned long data)
+ {
++	bool need_siglock;
++
+ 	if (!valid_signal(data))
+ 		return -EIO;
+ 
+@@ -724,8 +726,26 @@ static int ptrace_resume(struct task_struct *child, long request,
+ 		user_disable_single_step(child);
+ 	}
+ 
++	/*
++	 * Change ->exit_code and ->state under siglock to avoid the race
++	 * with wait_task_stopped() in between; a non-zero ->exit_code will
++	 * wrongly look like another report from tracee.
++	 *
++	 * Note that we need siglock even if ->exit_code == data and/or this
++	 * status was not reported yet, the new status must not be cleared by
++	 * wait_task_stopped() after resume.
++	 *
++	 * If data == 0 we do not care if wait_task_stopped() reports the old
++	 * status and clears the code too; this can't race with the tracee, it
++	 * takes siglock after resume.
++	 */
++	need_siglock = data && !thread_group_empty(current);
++	if (need_siglock)
++		spin_lock_irq(&child->sighand->siglock);
+ 	child->exit_code = data;
+ 	wake_up_state(child, __TASK_TRACED);
++	if (need_siglock)
++		spin_unlock_irq(&child->sighand->siglock);
+ 
+ 	return 0;
+ }
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index d2e151c83bd5..e896a58e18d8 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -2681,7 +2681,7 @@ static DEFINE_PER_CPU(unsigned int, current_context);
+ 
+ static __always_inline int trace_recursive_lock(void)
+ {
+-	unsigned int val = this_cpu_read(current_context);
++	unsigned int val = __this_cpu_read(current_context);
+ 	int bit;
+ 
+ 	if (in_interrupt()) {
+@@ -2698,18 +2698,17 @@ static __always_inline int trace_recursive_lock(void)
+ 		return 1;
+ 
+ 	val |= (1 << bit);
+-	this_cpu_write(current_context, val);
++	__this_cpu_write(current_context, val);
+ 
+ 	return 0;
+ }
+ 
+ static __always_inline void trace_recursive_unlock(void)
+ {
+-	unsigned int val = this_cpu_read(current_context);
++	unsigned int val = __this_cpu_read(current_context);
+ 
+-	val--;
+-	val &= this_cpu_read(current_context);
+-	this_cpu_write(current_context, val);
++	val &= val & (val - 1);
++	__this_cpu_write(current_context, val);
+ }
+ 
+ #else
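
The rewritten unlock relies on the standard clear-lowest-set-bit trick: since deeper contexts use lower bit numbers in current_context, val & (val - 1) releases exactly the innermost recursion level. A worked standalone example (the bit assignments in the comment are illustrative):

    #include <stdio.h>

    int main(void)
    {
        /* Recursion mask as in the ring buffer: inner contexts use lower
         * bits (e.g. a high bit for normal context, bit 0 for NMI), so
         * the lowest set bit is the most recently entered level. */
        unsigned int val = 0x9;     /* normal context + nested NMI */

        /* (val - 1) flips the lowest set bit and all bits below it;
         * ANDing with val clears exactly that bit. */
        val &= val - 1;
        printf("0x%x\n", val);      /* 0x8: only the outer level remains */
        return 0;
    }
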
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index b03a0ea77b99..6f7e40d6fdec 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -565,6 +565,7 @@ static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
+ static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
+ {
+ 	char *event = NULL, *sub = NULL, *match;
++	int ret;
+ 
+ 	/*
+ 	 * The buf format can be <subsystem>:<event-name>
+@@ -590,7 +591,13 @@ static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
+ 			event = NULL;
+ 	}
+ 
+-	return __ftrace_set_clr_event(tr, match, sub, event, set);
++	ret = __ftrace_set_clr_event(tr, match, sub, event, set);
++
++	/* Put back the colon to allow this to be called again */
++	if (buf)
++		*(buf - 1) = ':';
++
++	return ret;
+ }
+ 
+ /**
+diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
+index ba476009e5de..224af0a51803 100644
+--- a/kernel/trace/trace_functions_graph.c
++++ b/kernel/trace/trace_functions_graph.c
+@@ -1309,15 +1309,19 @@ void graph_trace_open(struct trace_iterator *iter)
+ {
+ 	/* pid and depth on the last trace processed */
+ 	struct fgraph_data *data;
++	gfp_t gfpflags;
+ 	int cpu;
+ 
+ 	iter->private = NULL;
+ 
+-	data = kzalloc(sizeof(*data), GFP_KERNEL);
++	/* We can be called in atomic context via ftrace_dump() */
++	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
++
++	data = kzalloc(sizeof(*data), gfpflags);
+ 	if (!data)
+ 		goto out_err;
+ 
+-	data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
++	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
+ 	if (!data->cpu_data)
+ 		goto out_err_free;
+ 
+diff --git a/lib/string.c b/lib/string.c
+index 10063300b830..643b0a90802c 100644
+--- a/lib/string.c
++++ b/lib/string.c
+@@ -610,7 +610,7 @@ EXPORT_SYMBOL(memset);
+ void memzero_explicit(void *s, size_t count)
+ {
+ 	memset(s, 0, count);
+-	OPTIMIZER_HIDE_VAR(s);
++	barrier();
+ }
+ EXPORT_SYMBOL(memzero_explicit);
+ 
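
barrier() is a full compiler barrier, so the memset() in memzero_explicit() can no longer be proven dead and eliminated, which is the whole point of the helper when scrubbing secrets. A standalone illustration of the pattern (the barrier definition matches the kernel's GCC flavor; the secret value is made up):

    #include <stdio.h>
    #include <string.h>

    /* Kernel-style compiler barrier: tells GCC/Clang that memory may be
     * read or written here, so the preceding memset cannot be elided. */
    #define barrier() __asm__ __volatile__("" ::: "memory")

    static void memzero_explicit(void *s, size_t count)
    {
        memset(s, 0, count);
        barrier();
    }

    int main(void)
    {
        char secret[16];

        snprintf(secret, sizeof(secret), "hunter2");
        /* Without the barrier, an optimizing compiler may drop this
         * memset because `secret` is never read again. */
        memzero_explicit(secret, sizeof(secret));
        return 0;
    }
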
+diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
+index c190d22b6b3d..ba2bd0a9a285 100644
+--- a/net/bridge/br_netfilter.c
++++ b/net/bridge/br_netfilter.c
+@@ -651,6 +651,13 @@ static int br_nf_forward_finish(struct sk_buff *skb)
+ 	struct net_device *in;
+ 
+ 	if (!IS_ARP(skb) && !IS_VLAN_ARP(skb)) {
++		int frag_max_size;
++
++		if (skb->protocol == htons(ETH_P_IP)) {
++			frag_max_size = IPCB(skb)->frag_max_size;
++			BR_INPUT_SKB_CB(skb)->frag_max_size = frag_max_size;
++		}
++
+ 		in = nf_bridge->physindev;
+ 		if (nf_bridge->mask & BRNF_PKT_TYPE) {
+ 			skb->pkt_type = PACKET_OTHERHOST;
+@@ -710,8 +717,14 @@ static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops,
+ 		nf_bridge->mask |= BRNF_PKT_TYPE;
+ 	}
+ 
+-	if (pf == NFPROTO_IPV4 && br_parse_ip_options(skb))
+-		return NF_DROP;
++	if (pf == NFPROTO_IPV4) {
++		int frag_max = BR_INPUT_SKB_CB(skb)->frag_max_size;
++
++		if (br_parse_ip_options(skb))
++			return NF_DROP;
++
++		IPCB(skb)->frag_max_size = frag_max;
++	}
+ 
+ 	/* The physdev module checks on this */
+ 	nf_bridge->mask |= BRNF_BRIDGED;
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 39c444c1206d..3b0a8b0442b3 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -278,13 +278,14 @@ nodata:
+ EXPORT_SYMBOL(__alloc_skb);
+ 
+ /**
+- * build_skb - build a network buffer
++ * __build_skb - build a network buffer
+  * @data: data buffer provided by caller
+- * @frag_size: size of fragment, or 0 if head was kmalloced
++ * @frag_size: size of data, or 0 if head was kmalloced
+  *
+  * Allocate a new &sk_buff. Caller provides space holding head and
+  * skb_shared_info. @data must have been allocated by kmalloc() only if
+- * @frag_size is 0, otherwise data should come from the page allocator.
++ * @frag_size is 0, otherwise data should come from the page allocator
++ *  or vmalloc()
+  * The return is the new skb buffer.
+  * On a failure the return is %NULL, and @data is not freed.
+  * Notes :
+@@ -295,7 +296,7 @@ EXPORT_SYMBOL(__alloc_skb);
+  *  before giving packet to stack.
+  *  RX rings only contains data buffers, not full skbs.
+  */
+-struct sk_buff *build_skb(void *data, unsigned int frag_size)
++struct sk_buff *__build_skb(void *data, unsigned int frag_size)
+ {
+ 	struct skb_shared_info *shinfo;
+ 	struct sk_buff *skb;
+@@ -309,7 +310,6 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
+ 
+ 	memset(skb, 0, offsetof(struct sk_buff, tail));
+ 	skb->truesize = SKB_TRUESIZE(size);
+-	skb->head_frag = frag_size != 0;
+ 	atomic_set(&skb->users, 1);
+ 	skb->head = data;
+ 	skb->data = data;
+@@ -326,6 +326,23 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
+ 
+ 	return skb;
+ }
++
++/* build_skb() is a wrapper over __build_skb() that specifically
++ * takes care of skb->head and skb->pfmemalloc.
++ * This means that if @frag_size is not zero, then @data must be backed
++ * by a page fragment, not kmalloc() or vmalloc()
++ */
++struct sk_buff *build_skb(void *data, unsigned int frag_size)
++{
++	struct sk_buff *skb = __build_skb(data, frag_size);
++
++	if (skb && frag_size) {
++		skb->head_frag = 1;
++		if (virt_to_head_page(data)->pfmemalloc)
++			skb->pfmemalloc = 1;
++	}
++	return skb;
++}
+ EXPORT_SYMBOL(build_skb);
+ 
+ struct netdev_alloc_cache {
+@@ -346,7 +363,8 @@ static struct page *__page_frag_refill(struct netdev_alloc_cache *nc,
+ 	gfp_t gfp = gfp_mask;
+ 
+ 	if (order) {
+-		gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
++		gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
++			    __GFP_NOMEMALLOC;
+ 		page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
+ 		nc->frag.size = PAGE_SIZE << (page ? order : 0);
+ 	}
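
After the split, __build_skb() makes no assumptions about where @data came from, and only the build_skb() wrapper may mark the head as a page fragment; vmalloc'ed buffers, as in the netlink hunk below, must call __build_skb() directly. A reduced standalone model of the wrapper relationship (structures and names are simplified stand-ins):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Reduced model of the flag bit the wrapper is responsible for. */
    struct skb { bool head_frag; void *head; };

    static struct skb *model_build_skb_core(void *data)  /* ~ __build_skb() */
    {
        struct skb *skb = calloc(1, sizeof(*skb));

        if (skb)
            skb->head = data;   /* no head_frag decision at this layer */
        return skb;
    }

    static struct skb *model_build_skb(void *data, unsigned int frag_size)
    {
        struct skb *skb = model_build_skb_core(data);

        /* Only the wrapper may assume page-fragment-backed data. */
        if (skb && frag_size)
            skb->head_frag = true;
        return skb;
    }

    int main(void)
    {
        char buf[64];
        struct skb *skb = model_build_skb(buf, sizeof(buf));

        printf("head_frag=%d\n", skb ? skb->head_frag : -1);
        free(skb);
        return 0;
    }
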
+diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
+index 787b3c294ce6..d5410b57da19 100644
+--- a/net/ipv4/ip_forward.c
++++ b/net/ipv4/ip_forward.c
+@@ -81,6 +81,9 @@ int ip_forward(struct sk_buff *skb)
+ 	if (skb->pkt_type != PACKET_HOST)
+ 		goto drop;
+ 
++	if (unlikely(skb->sk))
++		goto drop;
++
+ 	if (skb_warn_if_lro(skb))
+ 		goto drop;
+ 
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 9f29453049dc..e625be562d3c 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2753,39 +2753,65 @@ begin_fwd:
+ 	}
+ }
+ 
+-/* Send a fin.  The caller locks the socket for us.  This cannot be
+- * allowed to fail queueing a FIN frame under any circumstances.
++/* We allow FIN packets to exceed memory limits to expedite
++ * connection tear-down and (memory) recovery.
++ * Otherwise tcp_send_fin() could be tempted to either delay the FIN
++ * or even be forced to close the flow without any FIN.
++ */
++static void sk_forced_wmem_schedule(struct sock *sk, int size)
++{
++	int amt, status;
++
++	if (size <= sk->sk_forward_alloc)
++		return;
++	amt = sk_mem_pages(size);
++	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
++	sk_memory_allocated_add(sk, amt, &status);
++}
++
++/* Send a FIN. The caller locks the socket for us.
++ * We should try to send a FIN packet really hard, but eventually give up.
+  */
+ void tcp_send_fin(struct sock *sk)
+ {
++	struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
+ 	struct tcp_sock *tp = tcp_sk(sk);
+-	struct sk_buff *skb = tcp_write_queue_tail(sk);
+-	int mss_now;
+ 
+-	/* Optimization, tack on the FIN if we have a queue of
+-	 * unsent frames.  But be careful about outgoing SACKS
+-	 * and IP options.
++	/* Optimization, tack on the FIN if we have one skb in write queue and
++	 * this skb was not yet sent, or we are under memory pressure.
++	 * Note: in the latter case, FIN packet will be sent after a timeout,
++	 * as TCP stack thinks it has already been transmitted.
+ 	 */
+-	mss_now = tcp_current_mss(sk);
+-
+-	if (tcp_send_head(sk) != NULL) {
+-		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
+-		TCP_SKB_CB(skb)->end_seq++;
++	if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) {
++coalesce:
++		TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
++		TCP_SKB_CB(tskb)->end_seq++;
+ 		tp->write_seq++;
++		if (!tcp_send_head(sk)) {
++			/* This means tskb was already sent.
++			 * Pretend we included the FIN on previous transmit.
++			 * We need to set tp->snd_nxt to the value it would have
++			 * if FIN had been sent. This is because retransmit path
++			 * does not change tp->snd_nxt.
++			 */
++			tp->snd_nxt++;
++			return;
++		}
+ 	} else {
+-		/* Socket is locked, keep trying until memory is available. */
+-		for (;;) {
+-			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
+-			if (skb)
+-				break;
+-			yield();
++		skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
++		if (unlikely(!skb)) {
++			if (tskb)
++				goto coalesce;
++			return;
+ 		}
++		skb_reserve(skb, MAX_TCP_HEADER);
++		sk_forced_wmem_schedule(sk, skb->truesize);
+ 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
+ 		tcp_init_nondata_skb(skb, tp->write_seq,
+ 				     TCPHDR_ACK | TCPHDR_FIN);
+ 		tcp_queue_skb(sk, skb);
+ 	}
+-	__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
++	__tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
+ }
+ 
+ /* We get here when a process closes a file descriptor (either due to
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index bf4b84ff1c70..6e565ec5cfa4 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -2231,7 +2231,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
+ 		else
+ 			ssid_len = ssid[1];
+ 
+-		ieee80211_send_probe_req(sdata, sdata->vif.addr, NULL,
++		ieee80211_send_probe_req(sdata, sdata->vif.addr, dst,
+ 					 ssid + 2, ssid_len, NULL,
+ 					 0, (u32) -1, true, 0,
+ 					 ifmgd->associated->channel, false);
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 75532efa51cd..4b4a2a4418e4 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1616,13 +1616,11 @@ static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
+ 	if (data == NULL)
+ 		return NULL;
+ 
+-	skb = build_skb(data, size);
++	skb = __build_skb(data, size);
+ 	if (skb == NULL)
+ 		vfree(data);
+-	else {
+-		skb->head_frag = 0;
++	else
+ 		skb->destructor = netlink_skb_destructor;
+-	}
+ 
+ 	return skb;
+ }
+diff --git a/sound/pci/emu10k1/emuproc.c b/sound/pci/emu10k1/emuproc.c
+index 2ca9f2e93139..53745f4c2bf5 100644
+--- a/sound/pci/emu10k1/emuproc.c
++++ b/sound/pci/emu10k1/emuproc.c
+@@ -241,31 +241,22 @@ static void snd_emu10k1_proc_spdif_read(struct snd_info_entry *entry,
+ 	struct snd_emu10k1 *emu = entry->private_data;
+ 	u32 value;
+ 	u32 value2;
+-	unsigned long flags;
+ 	u32 rate;
+ 
+ 	if (emu->card_capabilities->emu_model) {
+-		spin_lock_irqsave(&emu->emu_lock, flags);
+ 		snd_emu1010_fpga_read(emu, 0x38, &value);
+-		spin_unlock_irqrestore(&emu->emu_lock, flags);
+ 		if ((value & 0x1) == 0) {
+-			spin_lock_irqsave(&emu->emu_lock, flags);
+ 			snd_emu1010_fpga_read(emu, 0x2a, &value);
+ 			snd_emu1010_fpga_read(emu, 0x2b, &value2);
+-			spin_unlock_irqrestore(&emu->emu_lock, flags);
+ 			rate = 0x1770000 / (((value << 5) | value2)+1);	
+ 			snd_iprintf(buffer, "ADAT Locked : %u\n", rate);
+ 		} else {
+ 			snd_iprintf(buffer, "ADAT Unlocked\n");
+ 		}
+-		spin_lock_irqsave(&emu->emu_lock, flags);
+ 		snd_emu1010_fpga_read(emu, 0x20, &value);
+-		spin_unlock_irqrestore(&emu->emu_lock, flags);
+ 		if ((value & 0x4) == 0) {
+-			spin_lock_irqsave(&emu->emu_lock, flags);
+ 			snd_emu1010_fpga_read(emu, 0x28, &value);
+ 			snd_emu1010_fpga_read(emu, 0x29, &value2);
+-			spin_unlock_irqrestore(&emu->emu_lock, flags);
+ 			rate = 0x1770000 / (((value << 5) | value2)+1);	
+ 			snd_iprintf(buffer, "SPDIF Locked : %d\n", rate);
+ 		} else {
+@@ -410,14 +401,11 @@ static void snd_emu_proc_emu1010_reg_read(struct snd_info_entry *entry,
+ {
+ 	struct snd_emu10k1 *emu = entry->private_data;
+ 	u32 value;
+-	unsigned long flags;
+ 	int i;
+ 	snd_iprintf(buffer, "EMU1010 Registers:\n\n");
+ 
+ 	for(i = 0; i < 0x40; i+=1) {
+-		spin_lock_irqsave(&emu->emu_lock, flags);
+ 		snd_emu1010_fpga_read(emu, i, &value);
+-		spin_unlock_irqrestore(&emu->emu_lock, flags);
+ 		snd_iprintf(buffer, "%02X: %08X, %02X\n", i, value, (value >> 8) & 0x7f);
+ 	}
+ }
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 172c89996d74..39e3640e30eb 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4946,12 +4946,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
++	SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
+ 	SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
++	SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+@@ -5452,6 +5454,8 @@ static int patch_alc269(struct hda_codec *codec)
+ 		break;
+ 	case 0x10ec0256:
+ 		spec->codec_variant = ALC269_TYPE_ALC256;
++		spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
++		alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
+ 		break;
+ 	}
+ 
+@@ -5465,8 +5469,8 @@ static int patch_alc269(struct hda_codec *codec)
+ 	if (err < 0)
+ 		goto error;
+ 
+-	if (!spec->gen.no_analog && spec->gen.beep_nid)
+-		set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT);
++	if (!spec->gen.no_analog && spec->gen.beep_nid && spec->gen.mixer_nid)
++		set_beep_amp(spec, spec->gen.mixer_nid, 0x04, HDA_INPUT);
+ 
+ 	codec->patch_ops = alc_patch_ops;
+ #ifdef CONFIG_PM
+diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
+index 7d3a6accaf9a..e770ee6f36da 100644
+--- a/sound/soc/codecs/cs4271.c
++++ b/sound/soc/codecs/cs4271.c
+@@ -561,10 +561,10 @@ static int cs4271_codec_probe(struct snd_soc_codec *codec)
+ 	if (gpio_is_valid(cs4271->gpio_nreset)) {
+ 		/* Reset codec */
+ 		gpio_direction_output(cs4271->gpio_nreset, 0);
+-		udelay(1);
++		mdelay(1);
+ 		gpio_set_value(cs4271->gpio_nreset, 1);
+ 		/* Give the codec time to wake up */
+-		udelay(1);
++		mdelay(1);
+ 	}
+ 
+ 	ret = regmap_update_bits(cs4271->regmap, CS4271_MODE2,
+diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
+index 30c673cdc12e..d043797b7979 100644
+--- a/sound/soc/codecs/pcm512x.c
++++ b/sound/soc/codecs/pcm512x.c
+@@ -261,9 +261,9 @@ static const struct soc_enum pcm512x_veds =
+ static const struct snd_kcontrol_new pcm512x_controls[] = {
+ SOC_DOUBLE_R_TLV("Digital Playback Volume", PCM512x_DIGITAL_VOLUME_2,
+ 		 PCM512x_DIGITAL_VOLUME_3, 0, 255, 1, digital_tlv),
+-SOC_DOUBLE_TLV("Playback Volume", PCM512x_ANALOG_GAIN_CTRL,
++SOC_DOUBLE_TLV("Analogue Playback Volume", PCM512x_ANALOG_GAIN_CTRL,
+ 	       PCM512x_LAGN_SHIFT, PCM512x_RAGN_SHIFT, 1, 1, analog_tlv),
+-SOC_DOUBLE_TLV("Playback Boost Volume", PCM512x_ANALOG_GAIN_BOOST,
++SOC_DOUBLE_TLV("Analogue Playback Boost Volume", PCM512x_ANALOG_GAIN_BOOST,
+ 	       PCM512x_AGBL_SHIFT, PCM512x_AGBR_SHIFT, 1, 0, boost_tlv),
+ SOC_DOUBLE("Digital Playback Switch", PCM512x_MUTE, PCM512x_RQML_SHIFT,
+ 	   PCM512x_RQMR_SHIFT, 1, 1),
+diff --git a/sound/soc/codecs/wm8741.c b/sound/soc/codecs/wm8741.c
+index 31bb4801a005..9e71c768966f 100644
+--- a/sound/soc/codecs/wm8741.c
++++ b/sound/soc/codecs/wm8741.c
+@@ -123,7 +123,7 @@ static struct {
+ };
+ 
+ static const unsigned int rates_11289[] = {
+-	44100, 88235,
++	44100, 88200,
+ };
+ 
+ static const struct snd_pcm_hw_constraint_list constraints_11289 = {
+@@ -150,7 +150,7 @@ static const struct snd_pcm_hw_constraint_list constraints_16384 = {
+ };
+ 
+ static const unsigned int rates_16934[] = {
+-	44100, 88235,
++	44100, 88200,
+ };
+ 
+ static const struct snd_pcm_hw_constraint_list constraints_16934 = {
+@@ -168,7 +168,7 @@ static const struct snd_pcm_hw_constraint_list constraints_18432 = {
+ };
+ 
+ static const unsigned int rates_22579[] = {
+-	44100, 88235, 1764000
++	44100, 88200, 176400
+ };
+ 
+ static const struct snd_pcm_hw_constraint_list constraints_22579 = {
+@@ -186,7 +186,7 @@ static const struct snd_pcm_hw_constraint_list constraints_24576 = {
+ };
+ 
+ static const unsigned int rates_36864[] = {
+-	48000, 96000, 19200
++	48000, 96000, 192000
+ };
+ 
+ static const struct snd_pcm_hw_constraint_list constraints_36864 = {
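
The wm8741 corrections above are straight arithmetic: each rates_*
array is named after its master clock (rates_11289 for 11.2896 MHz,
and so on), and every listed rate must divide evenly out of that
clock. A quick sanity check, assuming those conventional audio master
clock frequencies:

#include <stdio.h>

int main(void)
{
	const unsigned long mclk_11289 = 11289600;  /* 11.2896 MHz */
	const unsigned long mclk_22579 = 22579200;  /* 22.5792 MHz */
	const unsigned long mclk_36864 = 36864000;  /* 36.864 MHz  */

	printf("%lu\n", mclk_11289 / 128);  /* 88200, not 88235   */
	printf("%lu\n", mclk_22579 / 128);  /* 176400, not 1764000 */
	printf("%lu\n", mclk_36864 / 192);  /* 192000, not 19200   */
	return 0;
}
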
+diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c
+index 158cb3d1db70..f33143f917a3 100644
+--- a/sound/soc/davinci/davinci-evm.c
++++ b/sound/soc/davinci/davinci-evm.c
+@@ -431,18 +431,8 @@ static int davinci_evm_probe(struct platform_device *pdev)
+ 	return ret;
+ }
+ 
+-static int davinci_evm_remove(struct platform_device *pdev)
+-{
+-	struct snd_soc_card *card = platform_get_drvdata(pdev);
+-
+-	snd_soc_unregister_card(card);
+-
+-	return 0;
+-}
+-
+ static struct platform_driver davinci_evm_driver = {
+ 	.probe		= davinci_evm_probe,
+-	.remove		= davinci_evm_remove,
+ 	.driver		= {
+ 		.name	= "davinci_evm",
+ 		.pm	= &snd_soc_pm_ops,
+diff --git a/tools/lib/traceevent/kbuffer-parse.c b/tools/lib/traceevent/kbuffer-parse.c
+index dcc665228c71..deb3569ab004 100644
+--- a/tools/lib/traceevent/kbuffer-parse.c
++++ b/tools/lib/traceevent/kbuffer-parse.c
+@@ -372,7 +372,6 @@ translate_data(struct kbuffer *kbuf, void *data, void **rptr,
+ 	switch (type_len) {
+ 	case KBUFFER_TYPE_PADDING:
+ 		*length = read_4(kbuf, data);
+-		data += *length;
+ 		break;
+ 
+ 	case KBUFFER_TYPE_TIME_EXTEND:
+diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
+index 648e31ff4021..f97de8ee9a09 100644
+--- a/tools/perf/config/Makefile
++++ b/tools/perf/config/Makefile
+@@ -645,7 +645,7 @@ ifeq (${IS_64_BIT}, 1)
+       NO_PERF_READ_VDSO32 := 1
+     endif
+   endif
+-  ifneq (${IS_X86_64}, 1)
++  ifneq ($(ARCH), x86)
+     NO_PERF_READ_VDSOX32 := 1
+   endif
+   ifndef NO_PERF_READ_VDSOX32
+@@ -693,7 +693,7 @@ sysconfdir = $(prefix)/etc
+ ETC_PERFCONFIG = etc/perfconfig
+ endif
+ ifndef lib
+-ifeq ($(IS_X86_64),1)
++ifeq ($(ARCH)$(IS_64_BIT), x861)
+ lib = lib64
+ else
+ lib = lib
+diff --git a/tools/perf/tests/make b/tools/perf/tests/make
+index 69a71ff84e01..f8b24a2c5a96 100644
+--- a/tools/perf/tests/make
++++ b/tools/perf/tests/make
+@@ -5,7 +5,7 @@ include config/Makefile.arch
+ 
+ # FIXME looks like x86 is the only arch running tests ;-)
+ # we need some IS_(32/64) flag to make this generic
+-ifeq ($(IS_X86_64),1)
++ifeq ($(ARCH)$(IS_64_BIT), x861)
+ lib = lib64
+ else
+ lib = lib
+diff --git a/tools/perf/util/cloexec.c b/tools/perf/util/cloexec.c
+index 6da965bdbc2c..85b523885f9d 100644
+--- a/tools/perf/util/cloexec.c
++++ b/tools/perf/util/cloexec.c
+@@ -7,6 +7,12 @@
+ 
+ static unsigned long flag = PERF_FLAG_FD_CLOEXEC;
+ 
++int __weak sched_getcpu(void)
++{
++	errno = ENOSYS;
++	return -1;
++}
++
+ static int perf_flag_probe(void)
+ {
+ 	/* use 'safest' configuration as used in perf_evsel__fallback() */
+diff --git a/tools/perf/util/cloexec.h b/tools/perf/util/cloexec.h
+index 94a5a7d829d5..68888c29b04a 100644
+--- a/tools/perf/util/cloexec.h
++++ b/tools/perf/util/cloexec.h
+@@ -3,4 +3,10 @@
+ 
+ unsigned long perf_event_open_cloexec_flag(void);
+ 
++#ifdef __GLIBC_PREREQ
++#if !__GLIBC_PREREQ(2, 6)
++extern int sched_getcpu(void) __THROW;
++#endif
++#endif
++
+ #endif /* __PERF_CLOEXEC_H */
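
The cloexec change above relies on a common portability pattern:
define a weak stub so perf still links against a pre-2.6 glibc that
lacks sched_getcpu(), while a strong definition provided by the C
library overrides it where one exists. A standalone sketch of the
pattern, assuming GCC/clang weak-symbol semantics (the exact override
behavior depends on how the binary is linked):

#include <errno.h>
#include <stdio.h>

/* Weak fallback, used only when no strong sched_getcpu() definition
 * is linked in; mirrors the stub added to cloexec.c above. */
int __attribute__((weak)) sched_getcpu(void)
{
	errno = ENOSYS;
	return -1;
}

int main(void)
{
	int cpu = sched_getcpu();

	if (cpu < 0)
		perror("sched_getcpu");
	else
		printf("running on cpu %d\n", cpu);
	return 0;
}
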
+diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
+index 06fcd1bf98b6..eafee111563b 100644
+--- a/tools/perf/util/symbol-elf.c
++++ b/tools/perf/util/symbol-elf.c
+@@ -69,6 +69,10 @@ static inline uint8_t elf_sym__type(const GElf_Sym *sym)
+ 	return GELF_ST_TYPE(sym->st_info);
+ }
+ 
++#ifndef STT_GNU_IFUNC
++#define STT_GNU_IFUNC 10
++#endif
++
+ static inline int elf_sym__is_function(const GElf_Sym *sym)
+ {
+ 	return (elf_sym__type(sym) == STT_FUNC ||
+diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
+index d1b3a361e526..4039854560d0 100644
+--- a/tools/power/x86/turbostat/Makefile
++++ b/tools/power/x86/turbostat/Makefile
+@@ -1,8 +1,12 @@
+ CC		= $(CROSS_COMPILE)gcc
+-BUILD_OUTPUT	:= $(PWD)
++BUILD_OUTPUT	:= $(CURDIR)
+ PREFIX		:= /usr
+ DESTDIR		:=
+ 
++ifeq ("$(origin O)", "command line")
++	BUILD_OUTPUT := $(O)
++endif
++
+ turbostat : turbostat.c
+ CFLAGS +=	-Wall
+ CFLAGS +=	-DMSRHEADER='"../../../../arch/x86/include/uapi/asm/msr-index.h"'
+diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
+index 2935405ad22f..b9d48e8e1eb4 100644
+--- a/virt/kvm/arm/vgic-v2.c
++++ b/virt/kvm/arm/vgic-v2.c
+@@ -72,6 +72,8 @@ static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
+ {
+ 	if (!(lr_desc.state & LR_STATE_MASK))
+ 		vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr);
++	else
++		vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr &= ~(1ULL << lr);
+ }
+ 
+ static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
+@@ -84,6 +86,11 @@ static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu)
+ 	return vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
+ }
+ 
++static void vgic_v2_clear_eisr(struct kvm_vcpu *vcpu)
++{
++	vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr = 0;
++}
++
+ static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
+ {
+ 	u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr;
+@@ -148,6 +155,7 @@ static const struct vgic_ops vgic_v2_ops = {
+ 	.sync_lr_elrsr		= vgic_v2_sync_lr_elrsr,
+ 	.get_elrsr		= vgic_v2_get_elrsr,
+ 	.get_eisr		= vgic_v2_get_eisr,
++	.clear_eisr		= vgic_v2_clear_eisr,
+ 	.get_interrupt_status	= vgic_v2_get_interrupt_status,
+ 	.enable_underflow	= vgic_v2_enable_underflow,
+ 	.disable_underflow	= vgic_v2_disable_underflow,
+diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
+index 1c2c8eef0599..58b8af00ee4c 100644
+--- a/virt/kvm/arm/vgic-v3.c
++++ b/virt/kvm/arm/vgic-v3.c
+@@ -86,6 +86,8 @@ static void vgic_v3_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
+ {
+ 	if (!(lr_desc.state & LR_STATE_MASK))
+ 		vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr);
++	else
++		vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr &= ~(1U << lr);
+ }
+ 
+ static u64 vgic_v3_get_elrsr(const struct kvm_vcpu *vcpu)
+@@ -98,6 +100,11 @@ static u64 vgic_v3_get_eisr(const struct kvm_vcpu *vcpu)
+ 	return vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr;
+ }
+ 
++static void vgic_v3_clear_eisr(struct kvm_vcpu *vcpu)
++{
++	vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr = 0;
++}
++
+ static u32 vgic_v3_get_interrupt_status(const struct kvm_vcpu *vcpu)
+ {
+ 	u32 misr = vcpu->arch.vgic_cpu.vgic_v3.vgic_misr;
+@@ -162,6 +169,7 @@ static const struct vgic_ops vgic_v3_ops = {
+ 	.sync_lr_elrsr		= vgic_v3_sync_lr_elrsr,
+ 	.get_elrsr		= vgic_v3_get_elrsr,
+ 	.get_eisr		= vgic_v3_get_eisr,
++	.clear_eisr		= vgic_v3_clear_eisr,
+ 	.get_interrupt_status	= vgic_v3_get_interrupt_status,
+ 	.enable_underflow	= vgic_v3_enable_underflow,
+ 	.disable_underflow	= vgic_v3_disable_underflow,
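
Both vgic hunks above fix the same one-sided bookkeeping: the ELRSR
("empty list register status") bitmap only ever had bits set, never
cleared, so a recycled list register could still be reported as empty.
The corrected invariant, sketched with plain integers (LR_STATE_MASK
and the bit layout are simplified stand-ins, not the real GIC
encoding):

#include <assert.h>
#include <stdint.h>

#define LR_STATE_MASK 0x3u  /* simplified pending/active state bits */

/* Keep the "this LR is empty" bitmap in sync with the list register's
 * state: set the bit when it holds nothing, clear it when it holds a
 * live interrupt. */
static void sync_lr_elrsr(uint64_t *elrsr, int lr, unsigned int state)
{
	if (!(state & LR_STATE_MASK))
		*elrsr |= (1ULL << lr);
	else
		*elrsr &= ~(1ULL << lr);
}

int main(void)
{
	uint64_t elrsr = 0;

	sync_lr_elrsr(&elrsr, 3, 0);    /* empty: bit set */
	assert(elrsr & (1ULL << 3));
	sync_lr_elrsr(&elrsr, 3, 0x1);  /* in use: bit cleared */
	assert(!(elrsr & (1ULL << 3)));
	return 0;
}
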
+diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
+index 03affc7bf453..57a16f4836d0 100644
+--- a/virt/kvm/arm/vgic.c
++++ b/virt/kvm/arm/vgic.c
+@@ -1219,6 +1219,11 @@ static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
+ 	return vgic_ops->get_eisr(vcpu);
+ }
+ 
++static inline void vgic_clear_eisr(struct kvm_vcpu *vcpu)
++{
++	vgic_ops->clear_eisr(vcpu);
++}
++
+ static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
+ {
+ 	return vgic_ops->get_interrupt_status(vcpu);
+@@ -1258,6 +1263,7 @@ static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
+ 	vgic_set_lr(vcpu, lr_nr, vlr);
+ 	clear_bit(lr_nr, vgic_cpu->lr_used);
+ 	vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
++	vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
+ }
+ 
+ /*
+@@ -1313,6 +1319,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
+ 			BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
+ 			vlr.state |= LR_STATE_PENDING;
+ 			vgic_set_lr(vcpu, lr, vlr);
++			vgic_sync_lr_elrsr(vcpu, lr, vlr);
+ 			return true;
+ 		}
+ 	}
+@@ -1334,6 +1341,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
+ 		vlr.state |= LR_EOI_INT;
+ 
+ 	vgic_set_lr(vcpu, lr, vlr);
++	vgic_sync_lr_elrsr(vcpu, lr, vlr);
+ 
+ 	return true;
+ }
+@@ -1502,6 +1510,14 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
+ 	if (status & INT_STATUS_UNDERFLOW)
+ 		vgic_disable_underflow(vcpu);
+ 
++	/*
++	 * In the next iterations of the vcpu loop, if we sync the vgic state
++	 * after flushing it, but before entering the guest (this happens for
++	 * pending signals and vmid rollovers), then make sure we don't pick
++	 * up any old maintenance interrupts here.
++	 */
++	vgic_clear_eisr(vcpu);
++
+ 	return level_pending;
+ }
+ 
+@@ -1706,6 +1722,9 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
+ 			goto out;
+ 	}
+ 
++	if (irq_num >= kvm->arch.vgic.nr_irqs)
++		return -EINVAL;
++
+ 	vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level);
+ 	if (vcpu_id >= 0) {
+ 		/* kick the specified vcpu */
+@@ -1809,7 +1828,7 @@ static int vgic_init(struct kvm *kvm)
+ 
+ 	nr_cpus = dist->nr_cpus = atomic_read(&kvm->online_vcpus);
+ 	if (!nr_cpus)		/* No vcpus? Can't be good... */
+-		return -EINVAL;
++		return -ENODEV;
+ 
+ 	/*
+ 	 * If nobody configured the number of interrupts, use the
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 0fba701bc518..0edccc8f6ffb 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1608,8 +1608,8 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ 	ghc->generation = slots->generation;
+ 	ghc->len = len;
+ 	ghc->memslot = gfn_to_memslot(kvm, start_gfn);
+-	ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
+-	if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
++	ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL);
++	if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) {
+ 		ghc->hva += offset;
+ 	} else {
+ 		/*



* [gentoo-commits] proj/linux-patches:3.19 commit in: /
@ 2015-05-11 19:15 Mike Pagano
  0 siblings, 0 replies; 19+ messages in thread
From: Mike Pagano @ 2015-05-11 19:15 UTC (permalink / raw
  To: gentoo-commits

commit:     ae029c4cde3493b928e501e846e93cc067b4a845
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon May 11 19:15:25 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon May 11 19:15:25 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ae029c4c

Linux patch 3.19.8

---
 0000_README             |   4 +
 1007_linux-3.19.8.patch | 211 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 215 insertions(+)

diff --git a/0000_README b/0000_README
index 7728485..36753e4 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,10 @@ Patch:  1006_linux-3.19.7.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.19.7
 
+Patch:  1007_linux-3.19.8.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.19.8
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1007_linux-3.19.8.patch b/1007_linux-3.19.8.patch
new file mode 100644
index 0000000..a053bf4
--- /dev/null
+++ b/1007_linux-3.19.8.patch
@@ -0,0 +1,211 @@
+diff --git a/Makefile b/Makefile
+index 69952c1404b2..110782bba50d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 19
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = Diseased Newt
+ 
+diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
+index 0b7c3e8840ba..0283a5713d6c 100644
+--- a/drivers/clk/at91/clk-usb.c
++++ b/drivers/clk/at91/clk-usb.c
+@@ -58,8 +58,6 @@ static unsigned long at91sam9x5_clk_usb_recalc_rate(struct clk_hw *hw,
+ 
+ static long at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw,
+ 					      unsigned long rate,
+-					      unsigned long min_rate,
+-					      unsigned long max_rate,
+ 					      unsigned long *best_parent_rate,
+ 					      struct clk_hw **best_parent_hw)
+ {
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+index c132d9030729..0589a849d2a5 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+@@ -489,7 +489,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
+ 		memoffset = (mtype * (edc_size * 1024 * 1024));
+ 	else {
+ 		mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
+-						      MA_EXT_MEMORY1_BAR_A));
++						      MA_EXT_MEMORY0_BAR_A));
+ 		memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+index 2617c9d68d9b..c998c4d8c7e9 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -1467,6 +1467,7 @@ static void mlx4_en_service_task(struct work_struct *work)
+ 		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
+ 			mlx4_en_ptp_overflow_check(mdev);
+ 
++		mlx4_en_recover_from_oom(priv);
+ 		queue_delayed_work(mdev->workqueue, &priv->service_task,
+ 				   SERVICE_TASK_DELAY);
+ 	}
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+index a0474eb94aa3..10d353356b88 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -240,6 +240,12 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
+ 	return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp);
+ }
+ 
++static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
++{
++	BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size);
++	return ring->prod == ring->cons;
++}
++
+ static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
+ {
+ 	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
+@@ -311,8 +317,7 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
+ 	       ring->cons, ring->prod);
+ 
+ 	/* Unmap and free Rx buffers */
+-	BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
+-	while (ring->cons != ring->prod) {
++	while (!mlx4_en_is_ring_empty(ring)) {
+ 		index = ring->cons & ring->size_mask;
+ 		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
+ 		mlx4_en_free_rx_desc(priv, ring, index);
+@@ -487,6 +492,23 @@ err_allocator:
+ 	return err;
+ }
+ 
++/* We recover from out of memory by scheduling our napi poll
++ * function (mlx4_en_process_cq), which tries to allocate
++ * all missing RX buffers (call to mlx4_en_refill_rx_buffers).
++ */
++void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
++{
++	int ring;
++
++	if (!priv->port_up)
++		return;
++
++	for (ring = 0; ring < priv->rx_ring_num; ring++) {
++		if (mlx4_en_is_ring_empty(priv->rx_ring[ring]))
++			napi_reschedule(&priv->rx_cq[ring]->napi);
++	}
++}
++
+ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
+ 			     struct mlx4_en_rx_ring **pring,
+ 			     u32 size, u16 stride)
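
The mlx4_en_recover_from_oom() helper added above is easy to model: an
RX ring that drained because buffer allocations failed will never
refill on its own, so the periodic service task scans for empty rings
and re-arms their napi poll. A userspace model with stand-in ring and
napi types (not the real mlx4 structures):

#include <stdbool.h>
#include <stdio.h>

struct ring {
	unsigned int prod;  /* producer index */
	unsigned int cons;  /* consumer index */
};

static bool ring_empty(const struct ring *r)
{
	return r->prod == r->cons;  /* no buffers left to hand out */
}

static void napi_reschedule_stub(int i)
{
	printf("rescheduling napi poll for ring %d\n", i);
}

static void recover_from_oom(struct ring *rings, int n, bool port_up)
{
	if (!port_up)
		return;
	for (int i = 0; i < n; i++)
		if (ring_empty(&rings[i]))
			napi_reschedule_stub(i);
}

int main(void)
{
	struct ring rings[2] = { { 5, 5 }, { 8, 3 } };

	recover_from_oom(rings, 2, true);  /* only ring 0 is re-armed */
	return 0;
}
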
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+index e3357bf523df..18db895daa2e 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+@@ -143,8 +143,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
+ 	ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
+ 	ring->queue_index = queue_index;
+ 
+-	if (queue_index < priv->num_tx_rings_p_up && cpu_online(queue_index))
+-		cpumask_set_cpu(queue_index, &ring->affinity_mask);
++	if (queue_index < priv->num_tx_rings_p_up)
++		cpumask_set_cpu_local_first(queue_index,
++					    priv->mdev->dev->numa_node,
++					    &ring->affinity_mask);
+ 
+ 	*pring = ring;
+ 	return 0;
+@@ -213,7 +215,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
+ 
+ 	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
+ 			       &ring->qp, &ring->qp_state);
+-	if (!user_prio && cpu_online(ring->queue_index))
++	if (!cpumask_empty(&ring->affinity_mask))
+ 		netif_set_xps_queue(priv->dev, &ring->affinity_mask,
+ 				    ring->queue_index);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+index 8805ef1a4c75..6cc49c1549b0 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+@@ -788,6 +788,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
+ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
+ 				struct mlx4_en_tx_ring *ring);
+ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
++void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv);
+ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
+ 			   struct mlx4_en_rx_ring **pring,
+ 			   u32 size, u16 stride, int node);
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index a64e7a207d2b..0c5796eadae1 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -357,8 +357,8 @@ select_insn:
+ 	ALU64_MOD_X:
+ 		if (unlikely(SRC == 0))
+ 			return 0;
+-		tmp = DST;
+-		DST = do_div(tmp, SRC);
++		div64_u64_rem(DST, SRC, &tmp);
++		DST = tmp;
+ 		CONT;
+ 	ALU_MOD_X:
+ 		if (unlikely(SRC == 0))
+@@ -367,8 +367,8 @@ select_insn:
+ 		DST = do_div(tmp, (u32) SRC);
+ 		CONT;
+ 	ALU64_MOD_K:
+-		tmp = DST;
+-		DST = do_div(tmp, IMM);
++		div64_u64_rem(DST, IMM, &tmp);
++		DST = tmp;
+ 		CONT;
+ 	ALU_MOD_K:
+ 		tmp = (u32) DST;
+@@ -377,7 +377,7 @@ select_insn:
+ 	ALU64_DIV_X:
+ 		if (unlikely(SRC == 0))
+ 			return 0;
+-		do_div(DST, SRC);
++		DST = div64_u64(DST, SRC);
+ 		CONT;
+ 	ALU_DIV_X:
+ 		if (unlikely(SRC == 0))
+@@ -387,7 +387,7 @@ select_insn:
+ 		DST = (u32) tmp;
+ 		CONT;
+ 	ALU64_DIV_K:
+-		do_div(DST, IMM);
++		DST = div64_u64(DST, IMM);
+ 		CONT;
+ 	ALU_DIV_K:
+ 		tmp = (u32) DST;
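
The eBPF interpreter fix above corrects a real truncation bug:
do_div() takes a 32-bit divisor, so a 64-bit SRC or IMM operand was
silently narrowed before the division, while div64_u64() divides with
full 64-bit operands. A userspace illustration, reproducing the old
behavior with a plain cast:

#include <stdint.h>
#include <stdio.h>

/* What the old ALU64_DIV/MOD paths effectively computed: the divisor
 * was narrowed to 32 bits before dividing. */
static uint64_t div_truncated(uint64_t dst, uint64_t src)
{
	return dst / (uint32_t)src;
}

/* What div64_u64() provides: a full 64-by-64 division. */
static uint64_t div_full(uint64_t dst, uint64_t src)
{
	return dst / src;
}

int main(void)
{
	uint64_t dst = 1ULL << 40;
	uint64_t src = (1ULL << 32) + 2;  /* does not fit in 32 bits */

	printf("truncated: %llu\n",
	       (unsigned long long)div_truncated(dst, src));
	printf("full     : %llu\n",
	       (unsigned long long)div_full(dst, src));
	return 0;
}
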
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 0ae28f517a9b..c0db43d2e1a6 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -158,6 +158,7 @@ void ping_unhash(struct sock *sk)
+ 	if (sk_hashed(sk)) {
+ 		write_lock_bh(&ping_table.lock);
+ 		hlist_nulls_del(&sk->sk_nulls_node);
++		sk_nulls_node_init(&sk->sk_nulls_node);
+ 		sock_put(sk);
+ 		isk->inet_num = 0;
+ 		isk->inet_sport = 0;
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 52e1f2bf0ca2..ac40410b94d0 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -963,10 +963,7 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
+ 	if (dst_metric_locked(dst, RTAX_MTU))
+ 		return;
+ 
+-	if (dst->dev->mtu < mtu)
+-		return;
+-
+-	if (rt->rt_pmtu && rt->rt_pmtu < mtu)
++	if (ipv4_mtu(dst) < mtu)
+ 		return;
+ 
+ 	if (mtu < ip_rt_min_pmtu)



