From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:3.18 commit in: /
Date: Wed, 18 Jan 2017 21:38:30 +0000 (UTC)
Message-ID: <1484775500.54ad828df23ad0b32ae21e574ae1fe38fd441918.mpagano@gentoo>

commit:     54ad828df23ad0b32ae21e574ae1fe38fd441918
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jan 18 21:38:20 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jan 18 21:38:20 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=54ad828d

Linux patches 3.18.46 and 3.18.47
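
These are the upstream 3.18.y stable point releases. Each numbered patch in this
repository is incremental: 1045 takes a 3.18.45 tree to 3.18.46 and 1046 takes
3.18.46 to 3.18.47, as the Makefile SUBLEVEL hunks below confirm. A minimal
manual sketch of applying them in numeric order (the gentoo-sources ebuild
normally handles this automatically; the source path is only an example):

  # apply the new incremental patches from the kernel source root
  cd /usr/src/linux-3.18.45
  patch -p1 < 1045_linux-3.18.46.patch   # 3.18.45 -> 3.18.46
  patch -p1 < 1046_linux-3.18.47.patch   # 3.18.46 -> 3.18.47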

 0000_README              |    8 +
 1045_linux-3.18.46.patch |  728 ++++++++++++++++++
 1046_linux-3.18.47.patch | 1831 ++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 2567 insertions(+)

diff --git a/0000_README b/0000_README
index 7eda87f..4efb409 100644
--- a/0000_README
+++ b/0000_README
@@ -223,6 +223,14 @@ Patch:  1044_linux-3.18.45.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.18.45
 
+Patch:  1045_linux-3.18.46.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.18.46
+
+Patch:  1046_linux-3.18.47.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.18.47
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1045_linux-3.18.46.patch b/1045_linux-3.18.46.patch
new file mode 100644
index 0000000..d6886bb
--- /dev/null
+++ b/1045_linux-3.18.46.patch
@@ -0,0 +1,728 @@
+diff --git a/Documentation/arm/CCN.txt b/Documentation/arm/CCN.txt
+index 0632b3aad83e..715776f06df6 100644
+--- a/Documentation/arm/CCN.txt
++++ b/Documentation/arm/CCN.txt
+@@ -38,7 +38,7 @@ Example of perf tool use:
+ / # perf list | grep ccn
+   ccn/cycles/                                        [Kernel PMU event]
+ <...>
+-  ccn/xp_valid_flit/                                 [Kernel PMU event]
++  ccn/xp_valid_flit,xp=?,port=?,vc=?,dir=?/          [Kernel PMU event]
+ <...>
+ 
+ / # perf stat -C 0 -e ccn/cycles/,ccn/xp_valid_flit,xp=1,port=0,vc=1,dir=1/ \
+diff --git a/Makefile b/Makefile
+index de29f33d03a9..e73fd98d4cfc 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 18
+-SUBLEVEL = 45
++SUBLEVEL = 46
+ EXTRAVERSION =
+ NAME = Diseased Newt
+ 
+diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
+index 85738b200023..260308cd9b83 100644
+--- a/arch/arm/include/asm/dma-mapping.h
++++ b/arch/arm/include/asm/dma-mapping.h
+@@ -117,7 +117,7 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
+ /* The ARM override for dma_max_pfn() */
+ static inline unsigned long dma_max_pfn(struct device *dev)
+ {
+-	return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask);
++	return dma_to_pfn(dev, *dev->dma_mask);
+ }
+ #define dma_max_pfn(dev) dma_max_pfn(dev)
+ 
+diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
+index cd791948b286..7e459b7ee708 100644
+--- a/arch/x86/include/asm/tlbflush.h
++++ b/arch/x86/include/asm/tlbflush.h
+@@ -32,7 +32,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
+ /* Initialize cr4 shadow for this CPU. */
+ static inline void cr4_init_shadow(void)
+ {
+-	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
++	this_cpu_write(cpu_tlbstate.cr4, __read_cr4_safe());
+ }
+ 
+ /* Set in this cpu's CR4. */
+diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
+index f52e033557c9..43653ba4d784 100644
+--- a/arch/x86/purgatory/Makefile
++++ b/arch/x86/purgatory/Makefile
+@@ -12,6 +12,7 @@ targets += purgatory.ro
+ 
+ KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -MD -Os -mcmodel=large
+ KBUILD_CFLAGS += -m$(BITS)
++KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
+ 
+ $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
+ 		$(call if_changed,ld)
+diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
+index 27fd0dacad5f..4d523cfe51ce 100644
+--- a/drivers/bus/arm-ccn.c
++++ b/drivers/bus/arm-ccn.c
+@@ -183,6 +183,7 @@ struct arm_ccn {
+ 	struct arm_ccn_component *xp;
+ 
+ 	struct arm_ccn_dt dt;
++	int mn_id;
+ };
+ 
+ 
+@@ -322,6 +323,7 @@ struct arm_ccn_pmu_event {
+ static ssize_t arm_ccn_pmu_event_show(struct device *dev,
+ 		struct device_attribute *attr, char *buf)
+ {
++	struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
+ 	struct arm_ccn_pmu_event *event = container_of(attr,
+ 			struct arm_ccn_pmu_event, attr);
+ 	ssize_t res;
+@@ -336,6 +338,26 @@ static ssize_t arm_ccn_pmu_event_show(struct device *dev,
+ 	if (event->mask)
+ 		res += snprintf(buf + res, PAGE_SIZE - res, ",mask=0x%x",
+ 				event->mask);
++
++	/* Arguments required by an event */
++	switch (event->type) {
++	case CCN_TYPE_CYCLES:
++		break;
++	case CCN_TYPE_XP:
++		res += snprintf(buf + res, PAGE_SIZE - res,
++				",xp=?,port=?,vc=?,dir=?");
++		if (event->event == CCN_EVENT_WATCHPOINT)
++			res += snprintf(buf + res, PAGE_SIZE - res,
++					",cmp_l=?,cmp_h=?,mask=?");
++		break;
++	case CCN_TYPE_MN:
++		res += snprintf(buf + res, PAGE_SIZE - res, ",node=%d", ccn->mn_id);
++		break;
++	default:
++		res += snprintf(buf + res, PAGE_SIZE - res, ",node=?");
++		break;
++	}
++
+ 	res += snprintf(buf + res, PAGE_SIZE - res, "\n");
+ 
+ 	return res;
+@@ -360,9 +382,9 @@ static umode_t arm_ccn_pmu_events_is_visible(struct kobject *kobj,
+ }
+ 
+ static struct arm_ccn_pmu_event arm_ccn_pmu_events[] = {
+-	CCN_EVENT_MN(eobarrier, "dir=0,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE),
+-	CCN_EVENT_MN(ecbarrier, "dir=0,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE),
+-	CCN_EVENT_MN(dvmop, "dir=0,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE),
++	CCN_EVENT_MN(eobarrier, "dir=1,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE),
++	CCN_EVENT_MN(ecbarrier, "dir=1,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE),
++	CCN_EVENT_MN(dvmop, "dir=1,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE),
+ 	CCN_EVENT_HNI(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY),
+ 	CCN_EVENT_HNI(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY),
+ 	CCN_EVENT_HNI(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY),
+@@ -649,6 +671,12 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
+ 
+ 	/* Validate node/xp vs topology */
+ 	switch (type) {
++	case CCN_TYPE_MN:
++		if (node_xp != ccn->mn_id) {
++			dev_warn(ccn->dev, "Invalid MN ID %d!\n", node_xp);
++			return -EINVAL;
++		}
++		break;
+ 	case CCN_TYPE_XP:
+ 		if (node_xp >= ccn->num_xps) {
+ 			dev_warn(ccn->dev, "Invalid XP ID %d!\n", node_xp);
+@@ -1214,6 +1242,8 @@ static int arm_ccn_init_nodes(struct arm_ccn *ccn, int region,
+ 
+ 	switch (type) {
+ 	case CCN_TYPE_MN:
++		ccn->mn_id = id;
++		return 0;
+ 	case CCN_TYPE_DT:
+ 		return 0;
+ 	case CCN_TYPE_XP:
+diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
+index cd0554f68316..4ff8c334e7c8 100644
+--- a/drivers/gpu/drm/msm/msm_gem_submit.c
++++ b/drivers/gpu/drm/msm/msm_gem_submit.c
+@@ -55,6 +55,14 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
+ 	return submit;
+ }
+ 
++static inline unsigned long __must_check
++copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
++{
++	if (access_ok(VERIFY_READ, from, n))
++		return __copy_from_user_inatomic(to, from, n);
++	return -EFAULT;
++}
++
+ static int submit_lookup_objects(struct msm_gem_submit *submit,
+ 		struct drm_msm_gem_submit *args, struct drm_file *file)
+ {
+@@ -62,6 +70,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
+ 	int ret = 0;
+ 
+ 	spin_lock(&file->table_lock);
++	pagefault_disable();
+ 
+ 	for (i = 0; i < args->nr_bos; i++) {
+ 		struct drm_msm_gem_submit_bo submit_bo;
+@@ -70,10 +79,15 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
+ 		void __user *userptr =
+ 			to_user_ptr(args->bos + (i * sizeof(submit_bo)));
+ 
+-		ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
+-		if (ret) {
+-			ret = -EFAULT;
+-			goto out_unlock;
++		ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo));
++		if (unlikely(ret)) {
++			pagefault_enable();
++			spin_unlock(&file->table_lock);
++			ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
++			if (ret)
++				goto out;
++			spin_lock(&file->table_lock);
++			pagefault_disable();
+ 		}
+ 
+ 		if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
+@@ -113,9 +127,12 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
+ 	}
+ 
+ out_unlock:
+-	submit->nr_bos = i;
++	pagefault_enable();
+ 	spin_unlock(&file->table_lock);
+ 
++out:
++	submit->nr_bos = i;
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
+index b922c8efcf40..0ba96f9e1aa5 100644
+--- a/drivers/mtd/nand/davinci_nand.c
++++ b/drivers/mtd/nand/davinci_nand.c
+@@ -241,6 +241,9 @@ static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
+ 	unsigned long flags;
+ 	u32 val;
+ 
++	/* Reset ECC hardware */
++	davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
++
+ 	spin_lock_irqsave(&davinci_nand_lock, flags);
+ 
+ 	/* Start 4-bit ECC calculation for read/write */
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index bcefb375d232..031b6877acf9 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -21,6 +21,7 @@
+ #include <linux/slab.h>
+ #include <linux/netdevice.h>
+ #include <linux/if_arp.h>
++#include <linux/workqueue.h>
+ #include <linux/can.h>
+ #include <linux/can/dev.h>
+ #include <linux/can/skb.h>
+@@ -391,9 +392,8 @@ EXPORT_SYMBOL_GPL(can_free_echo_skb);
+ /*
+  * CAN device restart for bus-off recovery
+  */
+-static void can_restart(unsigned long data)
++static void can_restart(struct net_device *dev)
+ {
+-	struct net_device *dev = (struct net_device *)data;
+ 	struct can_priv *priv = netdev_priv(dev);
+ 	struct net_device_stats *stats = &dev->stats;
+ 	struct sk_buff *skb;
+@@ -433,6 +433,14 @@ restart:
+ 		netdev_err(dev, "Error %d during restart", err);
+ }
+ 
++static void can_restart_work(struct work_struct *work)
++{
++	struct delayed_work *dwork = to_delayed_work(work);
++	struct can_priv *priv = container_of(dwork, struct can_priv, restart_work);
++
++	can_restart(priv->dev);
++}
++
+ int can_restart_now(struct net_device *dev)
+ {
+ 	struct can_priv *priv = netdev_priv(dev);
+@@ -446,8 +454,8 @@ int can_restart_now(struct net_device *dev)
+ 	if (priv->state != CAN_STATE_BUS_OFF)
+ 		return -EBUSY;
+ 
+-	/* Runs as soon as possible in the timer context */
+-	mod_timer(&priv->restart_timer, jiffies);
++	cancel_delayed_work_sync(&priv->restart_work);
++	can_restart(dev);
+ 
+ 	return 0;
+ }
+@@ -469,8 +477,8 @@ void can_bus_off(struct net_device *dev)
+ 	priv->can_stats.bus_off++;
+ 
+ 	if (priv->restart_ms)
+-		mod_timer(&priv->restart_timer,
+-			  jiffies + (priv->restart_ms * HZ) / 1000);
++		schedule_delayed_work(&priv->restart_work,
++				      msecs_to_jiffies(priv->restart_ms));
+ }
+ EXPORT_SYMBOL_GPL(can_bus_off);
+ 
+@@ -577,6 +585,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
+ 		return NULL;
+ 
+ 	priv = netdev_priv(dev);
++	priv->dev = dev;
+ 
+ 	if (echo_skb_max) {
+ 		priv->echo_skb_max = echo_skb_max;
+@@ -586,7 +595,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
+ 
+ 	priv->state = CAN_STATE_STOPPED;
+ 
+-	init_timer(&priv->restart_timer);
++	INIT_DELAYED_WORK(&priv->restart_work, can_restart_work);
+ 
+ 	return dev;
+ }
+@@ -667,8 +676,6 @@ int open_candev(struct net_device *dev)
+ 	if (!netif_carrier_ok(dev))
+ 		netif_carrier_on(dev);
+ 
+-	setup_timer(&priv->restart_timer, can_restart, (unsigned long)dev);
+-
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(open_candev);
+@@ -683,7 +690,7 @@ void close_candev(struct net_device *dev)
+ {
+ 	struct can_priv *priv = netdev_priv(dev);
+ 
+-	del_timer_sync(&priv->restart_timer);
++	cancel_delayed_work_sync(&priv->restart_work);
+ 	can_flush_echo_skb(dev);
+ }
+ EXPORT_SYMBOL_GPL(close_candev);
+diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
+index 7bded8e3f116..ce6b16991068 100644
+--- a/drivers/scsi/arcmsr/arcmsr_hba.c
++++ b/drivers/scsi/arcmsr/arcmsr_hba.c
+@@ -2305,7 +2305,8 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
+ 	}
+ 	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
+ 		unsigned char *ver_addr;
+-		int32_t user_len, cnt2end;
++		uint32_t user_len;
++		int32_t cnt2end;
+ 		uint8_t *pQbuffer, *ptmpuserbuffer;
+ 		ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
+ 		if (!ver_addr) {
+@@ -2314,6 +2315,11 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
+ 		}
+ 		ptmpuserbuffer = ver_addr;
+ 		user_len = pcmdmessagefld->cmdmessage.Length;
++		if (user_len > ARCMSR_API_DATA_BUFLEN) {
++			retvalue = ARCMSR_MESSAGE_FAIL;
++			kfree(ver_addr);
++			goto message_out;
++		}
+ 		memcpy(ptmpuserbuffer,
+ 			pcmdmessagefld->messagedatabuffer, user_len);
+ 		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
+diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
+index a49914de4b95..4cf75e01c498 100644
+--- a/drivers/scsi/megaraid/megaraid_sas.h
++++ b/drivers/scsi/megaraid/megaraid_sas.h
+@@ -1823,7 +1823,7 @@ struct megasas_instance_template {
+ };
+ 
+ #define MEGASAS_IS_LOGICAL(scp)						\
+-	(scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1
++	((scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
+ 
+ #define MEGASAS_DEV_INDEX(inst, scp)					\
+ 	((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + 	\
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index dbab798f5caf..015e21edd6bc 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -1693,9 +1693,10 @@ int fuse_flush_times(struct inode *inode, struct fuse_file *ff)
+  * vmtruncate() doesn't allow for this case, so do the rlimit checking
+  * and the actual truncation by hand.
+  */
+-int fuse_do_setattr(struct inode *inode, struct iattr *attr,
++int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
+ 		    struct file *file)
+ {
++	struct inode *inode = d_inode(dentry);
+ 	struct fuse_conn *fc = get_fuse_conn(inode);
+ 	struct fuse_inode *fi = get_fuse_inode(inode);
+ 	struct fuse_req *req;
+@@ -1815,9 +1816,9 @@ static int fuse_setattr(struct dentry *entry, struct iattr *attr)
+ 		return -EACCES;
+ 
+ 	if (attr->ia_valid & ATTR_FILE)
+-		return fuse_do_setattr(inode, attr, attr->ia_file);
++		return fuse_do_setattr(entry, attr, attr->ia_file);
+ 	else
+-		return fuse_do_setattr(inode, attr, NULL);
++		return fuse_do_setattr(entry, attr, NULL);
+ }
+ 
+ static int fuse_getattr(struct vfsmount *mnt, struct dentry *entry,
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 8e2c5ccc09b1..52717269a5ac 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -2879,7 +2879,7 @@ static void fuse_do_truncate(struct file *file)
+ 	attr.ia_file = file;
+ 	attr.ia_valid |= ATTR_FILE;
+ 
+-	fuse_do_setattr(inode, &attr, file);
++	fuse_do_setattr(file->f_path.dentry, &attr, file);
+ }
+ 
+ static inline loff_t fuse_round_up(loff_t off)
+diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
+index e8e47a6ab518..300619ba8591 100644
+--- a/fs/fuse/fuse_i.h
++++ b/fs/fuse/fuse_i.h
+@@ -894,7 +894,7 @@ bool fuse_write_update_size(struct inode *inode, loff_t pos);
+ int fuse_flush_times(struct inode *inode, struct fuse_file *ff);
+ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc);
+ 
+-int fuse_do_setattr(struct inode *inode, struct iattr *attr,
++int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
+ 		    struct file *file);
+ 
+ #endif /* _FS_FUSE_I_H */
+diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
+index b8680738f588..0ce7ce3e48cb 100644
+--- a/fs/nfsd/nfsproc.c
++++ b/fs/nfsd/nfsproc.c
+@@ -59,13 +59,61 @@ static __be32
+ nfsd_proc_setattr(struct svc_rqst *rqstp, struct nfsd_sattrargs *argp,
+ 					  struct nfsd_attrstat  *resp)
+ {
++	struct iattr *iap = &argp->attrs;
++	struct svc_fh *fhp;
+ 	__be32 nfserr;
++
+ 	dprintk("nfsd: SETATTR  %s, valid=%x, size=%ld\n",
+ 		SVCFH_fmt(&argp->fh),
+ 		argp->attrs.ia_valid, (long) argp->attrs.ia_size);
+ 
+-	fh_copy(&resp->fh, &argp->fh);
+-	nfserr = nfsd_setattr(rqstp, &resp->fh, &argp->attrs,0, (time_t)0);
++	fhp = fh_copy(&resp->fh, &argp->fh);
++
++	/*
++	 * NFSv2 does not differentiate between "set-[ac]time-to-now"
++	 * which only requires access, and "set-[ac]time-to-X" which
++	 * requires ownership.
++	 * So if it looks like it might be "set both to the same time which
++	 * is close to now", and if inode_change_ok fails, then we
++	 * convert to "set to now" instead of "set to explicit time"
++	 *
++	 * We only call inode_change_ok as the last test as technically
++	 * it is not an interface that we should be using.
++	 */
++#define BOTH_TIME_SET (ATTR_ATIME_SET | ATTR_MTIME_SET)
++#define	MAX_TOUCH_TIME_ERROR (30*60)
++	if ((iap->ia_valid & BOTH_TIME_SET) == BOTH_TIME_SET &&
++	    iap->ia_mtime.tv_sec == iap->ia_atime.tv_sec) {
++		/*
++		 * Looks probable.
++		 *
++		 * Now just make sure time is in the right ballpark.
++		 * Solaris, at least, doesn't seem to care what the time
++		 * request is.  We require it be within 30 minutes of now.
++		 */
++		time_t delta = iap->ia_atime.tv_sec - get_seconds();
++		struct inode *inode;
++
++		nfserr = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP);
++		if (nfserr)
++			goto done;
++		inode = d_inode(fhp->fh_dentry);
++
++		if (delta < 0)
++			delta = -delta;
++		if (delta < MAX_TOUCH_TIME_ERROR &&
++		    inode_change_ok(inode, iap) != 0) {
++			/*
++			 * Turn off ATTR_[AM]TIME_SET but leave ATTR_[AM]TIME.
++			 * This will cause notify_change to set these times
++			 * to "now"
++			 */
++			iap->ia_valid &= ~BOTH_TIME_SET;
++		}
++	}
++
++	nfserr = nfsd_setattr(rqstp, fhp, iap, 0, (time_t)0);
++done:
+ 	return nfsd_return_attrs(nfserr, resp);
+ }
+ 
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 989129e2d6ea..35cbf570495f 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -301,42 +301,6 @@ commit_metadata(struct svc_fh *fhp)
+ static void
+ nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap)
+ {
+-	/*
+-	 * NFSv2 does not differentiate between "set-[ac]time-to-now"
+-	 * which only requires access, and "set-[ac]time-to-X" which
+-	 * requires ownership.
+-	 * So if it looks like it might be "set both to the same time which
+-	 * is close to now", and if inode_change_ok fails, then we
+-	 * convert to "set to now" instead of "set to explicit time"
+-	 *
+-	 * We only call inode_change_ok as the last test as technically
+-	 * it is not an interface that we should be using.
+-	 */
+-#define BOTH_TIME_SET (ATTR_ATIME_SET | ATTR_MTIME_SET)
+-#define	MAX_TOUCH_TIME_ERROR (30*60)
+-	if ((iap->ia_valid & BOTH_TIME_SET) == BOTH_TIME_SET &&
+-	    iap->ia_mtime.tv_sec == iap->ia_atime.tv_sec) {
+-		/*
+-		 * Looks probable.
+-		 *
+-		 * Now just make sure time is in the right ballpark.
+-		 * Solaris, at least, doesn't seem to care what the time
+-		 * request is.  We require it be within 30 minutes of now.
+-		 */
+-		time_t delta = iap->ia_atime.tv_sec - get_seconds();
+-		if (delta < 0)
+-			delta = -delta;
+-		if (delta < MAX_TOUCH_TIME_ERROR &&
+-		    inode_change_ok(inode, iap) != 0) {
+-			/*
+-			 * Turn off ATTR_[AM]TIME_SET but leave ATTR_[AM]TIME.
+-			 * This will cause notify_change to set these times
+-			 * to "now"
+-			 */
+-			iap->ia_valid &= ~BOTH_TIME_SET;
+-		}
+-	}
+-
+ 	/* sanitize the mode change */
+ 	if (iap->ia_valid & ATTR_MODE) {
+ 		iap->ia_mode &= S_IALLUGO;
+diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
+index 71e37afd7290..49093889362c 100644
+--- a/include/linux/can/dev.h
++++ b/include/linux/can/dev.h
+@@ -31,6 +31,7 @@ enum can_mode {
+  * CAN common private data
+  */
+ struct can_priv {
++	struct net_device *dev;
+ 	struct can_device_stats can_stats;
+ 
+ 	struct can_bittiming bittiming, data_bittiming;
+@@ -46,7 +47,7 @@ struct can_priv {
+ 	u32 ctrlmode_static;	/* static enabled options for driver/hardware */
+ 
+ 	int restart_ms;
+-	struct timer_list restart_timer;
++	struct delayed_work restart_work;
+ 
+ 	int (*do_set_bittiming)(struct net_device *dev);
+ 	int (*do_set_data_bittiming)(struct net_device *dev);
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 4062b4f0d121..7219b8f38cef 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1431,6 +1431,8 @@ static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unli
+ {
+ 	if (sk->sk_send_head == skb_unlinked)
+ 		sk->sk_send_head = NULL;
++	if (tcp_sk(sk)->highest_sack == skb_unlinked)
++		tcp_sk(sk)->highest_sack = NULL;
+ }
+ 
+ static inline void tcp_init_send_head(struct sock *sk)
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index 56dc7c1b6300..fe33cb70d89d 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -323,8 +323,7 @@ static struct file_system_type cpuset_fs_type = {
+ /*
+  * Return in pmask the portion of a cpusets's cpus_allowed that
+  * are online.  If none are online, walk up the cpuset hierarchy
+- * until we find one that does have some online cpus.  The top
+- * cpuset always has some cpus online.
++ * until we find one that does have some online cpus.
+  *
+  * One way or another, we guarantee to return some non-empty subset
+  * of cpu_online_mask.
+@@ -333,8 +332,20 @@ static struct file_system_type cpuset_fs_type = {
+  */
+ static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
+ {
+-	while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask))
++	while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) {
+ 		cs = parent_cs(cs);
++		if (unlikely(!cs)) {
++			/*
++			 * The top cpuset doesn't have any online cpu as a
++			 * consequence of a race between cpuset_hotplug_work
++			 * and cpu hotplug notifier.  But we know the top
++			 * cpuset's effective_cpus is on its way to to be
++			 * identical to cpu_online_mask.
++			 */
++			cpumask_copy(pmask, cpu_online_mask);
++			return;
++		}
++	}
+ 	cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);
+ }
+ 
+diff --git a/mm/ksm.c b/mm/ksm.c
+index a0ed043a1096..effb699de487 100644
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -283,7 +283,8 @@ static inline struct rmap_item *alloc_rmap_item(void)
+ {
+ 	struct rmap_item *rmap_item;
+ 
+-	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
++	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
++						__GFP_NORETRY | __GFP_NOWARN);
+ 	if (rmap_item)
+ 		ksm_rmap_items++;
+ 	return rmap_item;
+diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
+index 8bbbb5ec468c..bcb3160fefb4 100644
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -334,16 +334,19 @@ static int rfcomm_sock_create(struct net *net, struct socket *sock,
+ 
+ static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+ {
+-	struct sockaddr_rc *sa = (struct sockaddr_rc *) addr;
++	struct sockaddr_rc sa;
+ 	struct sock *sk = sock->sk;
+-	int chan = sa->rc_channel;
+-	int err = 0;
+-
+-	BT_DBG("sk %p %pMR", sk, &sa->rc_bdaddr);
++	int len, err = 0;
+ 
+ 	if (!addr || addr->sa_family != AF_BLUETOOTH)
+ 		return -EINVAL;
+ 
++	memset(&sa, 0, sizeof(sa));
++	len = min_t(unsigned int, sizeof(sa), addr_len);
++	memcpy(&sa, addr, len);
++
++	BT_DBG("sk %p %pMR", sk, &sa.rc_bdaddr);
++
+ 	lock_sock(sk);
+ 
+ 	if (sk->sk_state != BT_OPEN) {
+@@ -358,12 +361,13 @@ static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr
+ 
+ 	write_lock(&rfcomm_sk_list.lock);
+ 
+-	if (chan && __rfcomm_get_listen_sock_by_addr(chan, &sa->rc_bdaddr)) {
++	if (sa.rc_channel &&
++	    __rfcomm_get_listen_sock_by_addr(sa.rc_channel, &sa.rc_bdaddr)) {
+ 		err = -EADDRINUSE;
+ 	} else {
+ 		/* Save source address */
+-		bacpy(&rfcomm_pi(sk)->src, &sa->rc_bdaddr);
+-		rfcomm_pi(sk)->channel = chan;
++		bacpy(&rfcomm_pi(sk)->src, &sa.rc_bdaddr);
++		rfcomm_pi(sk)->channel = sa.rc_channel;
+ 		sk->sk_state = BT_BOUND;
+ 	}
+ 
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index e73b3fb1a144..abffc1519e4d 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -3281,19 +3281,25 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+ 
+ 		if (optlen != sizeof(val))
+ 			return -EINVAL;
+-		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
+-			return -EBUSY;
+ 		if (copy_from_user(&val, optval, sizeof(val)))
+ 			return -EFAULT;
+ 		switch (val) {
+ 		case TPACKET_V1:
+ 		case TPACKET_V2:
+ 		case TPACKET_V3:
+-			po->tp_version = val;
+-			return 0;
++			break;
+ 		default:
+ 			return -EINVAL;
+ 		}
++		lock_sock(sk);
++		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
++			ret = -EBUSY;
++		} else {
++			po->tp_version = val;
++			ret = 0;
++		}
++		release_sock(sk);
++		return ret;
+ 	}
+ 	case PACKET_RESERVE:
+ 	{
+@@ -3756,6 +3762,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ 	/* Added to avoid minimal code churn */
+ 	struct tpacket_req *req = &req_u->req;
+ 
++	lock_sock(sk);
+ 	/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
+ 	if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
+ 		WARN(1, "Tx-ring is not supported.\n");
+@@ -3837,7 +3844,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ 			goto out;
+ 	}
+ 
+-	lock_sock(sk);
+ 
+ 	/* Detach socket from network */
+ 	spin_lock(&po->bind_lock);
+@@ -3886,11 +3892,11 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ 		if (!tx_ring)
+ 			prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
+ 	}
+-	release_sock(sk);
+ 
+ 	if (pg_vec)
+ 		free_pg_vec(pg_vec, order, req->tp_block_nr);
+ out:
++	release_sock(sk);
+ 	return err;
+ }
+ 
+diff --git a/scripts/gcc-x86_64-has-stack-protector.sh b/scripts/gcc-x86_64-has-stack-protector.sh
+index 973e8c141567..17867e723a51 100755
+--- a/scripts/gcc-x86_64-has-stack-protector.sh
++++ b/scripts/gcc-x86_64-has-stack-protector.sh
+@@ -1,6 +1,6 @@
+ #!/bin/sh
+ 
+-echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
++echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
+ if [ "$?" -eq "0" ] ; then
+ 	echo y
+ else

diff --git a/1046_linux-3.18.47.patch b/1046_linux-3.18.47.patch
new file mode 100644
index 0000000..287e7b6
--- /dev/null
+++ b/1046_linux-3.18.47.patch
@@ -0,0 +1,1831 @@
+diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
+index 702bb2557db8..20e6898d02c9 100644
+--- a/Documentation/virtual/kvm/api.txt
++++ b/Documentation/virtual/kvm/api.txt
+@@ -1919,6 +1919,7 @@ registers, find a list below:
+   PPC   | KVM_REG_PPC_TM_VSCR           | 32
+   PPC   | KVM_REG_PPC_TM_DSCR           | 64
+   PPC   | KVM_REG_PPC_TM_TAR            | 64
++  PPC   | KVM_REG_PPC_TM_XER            | 64
+         |                               |
+   MIPS  | KVM_REG_MIPS_R0               | 64
+           ...
+diff --git a/Makefile b/Makefile
+index e73fd98d4cfc..9132f73528b3 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 18
+-SUBLEVEL = 46
++SUBLEVEL = 47
+ EXTRAVERSION =
+ NAME = Diseased Newt
+ 
+diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
+index 0e15f011f9c8..76e2ec624e88 100644
+--- a/arch/arm/xen/enlighten.c
++++ b/arch/arm/xen/enlighten.c
+@@ -295,8 +295,7 @@ static int __init xen_guest_init(void)
+ 	 * for secondary CPUs as they are brought up.
+ 	 * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
+ 	 */
+-	xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
+-			                       sizeof(struct vcpu_info));
++	xen_vcpu_info = alloc_percpu(struct vcpu_info);
+ 	if (xen_vcpu_info == NULL)
+ 		return -ENOMEM;
+ 
+diff --git a/arch/powerpc/boot/ps3-head.S b/arch/powerpc/boot/ps3-head.S
+index b6fcbaf5027b..3dc44b05fb97 100644
+--- a/arch/powerpc/boot/ps3-head.S
++++ b/arch/powerpc/boot/ps3-head.S
+@@ -57,11 +57,6 @@ __system_reset_overlay:
+ 	bctr
+ 
+ 1:
+-	/* Save the value at addr zero for a null pointer write check later. */
+-
+-	li	r4, 0
+-	lwz	r3, 0(r4)
+-
+ 	/* Primary delays then goes to _zimage_start in wrapper. */
+ 
+ 	or	31, 31, 31 /* db16cyc */
+diff --git a/arch/powerpc/boot/ps3.c b/arch/powerpc/boot/ps3.c
+index 4ec2d86d3c50..a05558a7e51a 100644
+--- a/arch/powerpc/boot/ps3.c
++++ b/arch/powerpc/boot/ps3.c
+@@ -119,13 +119,12 @@ void ps3_copy_vectors(void)
+ 	flush_cache((void *)0x100, 512);
+ }
+ 
+-void platform_init(unsigned long null_check)
++void platform_init(void)
+ {
+ 	const u32 heapsize = 0x1000000 - (u32)_end; /* 16MiB */
+ 	void *chosen;
+ 	unsigned long ft_addr;
+ 	u64 rm_size;
+-	unsigned long val;
+ 
+ 	console_ops.write = ps3_console_write;
+ 	platform_ops.exit = ps3_exit;
+@@ -153,11 +152,6 @@ void platform_init(unsigned long null_check)
+ 
+ 	printf(" flat tree at 0x%lx\n\r", ft_addr);
+ 
+-	val = *(unsigned long *)0;
+-
+-	if (val != null_check)
+-		printf("null check failed: %lx != %lx\n\r", val, null_check);
+-
+ 	((kernel_entry_t)0)(ft_addr, 0, NULL);
+ 
+ 	ps3_exit();
+diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
+index 047855619cc4..9bb51b9e03c6 100644
+--- a/arch/powerpc/include/asm/kvm_host.h
++++ b/arch/powerpc/include/asm/kvm_host.h
+@@ -537,6 +537,7 @@ struct kvm_vcpu_arch {
+ 	u64 tfiar;
+ 
+ 	u32 cr_tm;
++	u64 xer_tm;
+ 	u64 lr_tm;
+ 	u64 ctr_tm;
+ 	u64 amr_tm;
+diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
+index ab4d4732c492..720b71a636c8 100644
+--- a/arch/powerpc/include/uapi/asm/kvm.h
++++ b/arch/powerpc/include/uapi/asm/kvm.h
+@@ -587,6 +587,7 @@ struct kvm_get_htab_header {
+ #define KVM_REG_PPC_TM_VSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67)
+ #define KVM_REG_PPC_TM_DSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68)
+ #define KVM_REG_PPC_TM_TAR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69)
++#define KVM_REG_PPC_TM_XER	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x6a)
+ 
+ /* PPC64 eXternal Interrupt Controller Specification */
+ #define KVM_DEV_XICS_GRP_SOURCES	1	/* 64-bit source attributes */
+diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
+index 9d7dede2847c..a892c651ac2d 100644
+--- a/arch/powerpc/kernel/asm-offsets.c
++++ b/arch/powerpc/kernel/asm-offsets.c
+@@ -571,6 +571,7 @@ int main(void)
+ 	DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr));
+ 	DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm));
+ 	DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm));
++	DEFINE(VCPU_XER_TM, offsetof(struct kvm_vcpu, arch.xer_tm));
+ 	DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm));
+ 	DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm));
+ 	DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm));
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index e63587d30b70..ed86c263a103 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -1097,6 +1097,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
+ 	case KVM_REG_PPC_TM_CR:
+ 		*val = get_reg_val(id, vcpu->arch.cr_tm);
+ 		break;
++	case KVM_REG_PPC_TM_XER:
++		*val = get_reg_val(id, vcpu->arch.xer_tm);
++		break;
+ 	case KVM_REG_PPC_TM_LR:
+ 		*val = get_reg_val(id, vcpu->arch.lr_tm);
+ 		break;
+@@ -1304,6 +1307,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
+ 	case KVM_REG_PPC_TM_CR:
+ 		vcpu->arch.cr_tm = set_reg_val(id, *val);
+ 		break;
++	case KVM_REG_PPC_TM_XER:
++		vcpu->arch.xer_tm = set_reg_val(id, *val);
++		break;
+ 	case KVM_REG_PPC_TM_LR:
+ 		vcpu->arch.lr_tm = set_reg_val(id, *val);
+ 		break;
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index 549d28fdc291..791ad037749c 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -2385,11 +2385,13 @@ kvmppc_save_tm:
+ 	mfctr	r7
+ 	mfspr	r8, SPRN_AMR
+ 	mfspr	r10, SPRN_TAR
++	mfxer	r11
+ 	std	r5, VCPU_LR_TM(r9)
+ 	stw	r6, VCPU_CR_TM(r9)
+ 	std	r7, VCPU_CTR_TM(r9)
+ 	std	r8, VCPU_AMR_TM(r9)
+ 	std	r10, VCPU_TAR_TM(r9)
++	std	r11, VCPU_XER_TM(r9)
+ 
+ 	/* Restore r12 as trap number. */
+ 	lwz	r12, VCPU_TRAP(r9)
+@@ -2482,11 +2484,13 @@ kvmppc_restore_tm:
+ 	ld	r7, VCPU_CTR_TM(r4)
+ 	ld	r8, VCPU_AMR_TM(r4)
+ 	ld	r9, VCPU_TAR_TM(r4)
++	ld	r10, VCPU_XER_TM(r4)
+ 	mtlr	r5
+ 	mtcr	r6
+ 	mtctr	r7
+ 	mtspr	SPRN_AMR, r8
+ 	mtspr	SPRN_TAR, r9
++	mtxer	r10
+ 
+ 	/*
+ 	 * Load up PPR and DSCR values but don't put them in the actual SPRs
+diff --git a/block/bsg.c b/block/bsg.c
+index 276e869e686c..fc607692cfe4 100644
+--- a/block/bsg.c
++++ b/block/bsg.c
+@@ -677,6 +677,9 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+ 
+ 	dprintk("%s: write %Zd bytes\n", bd->name, count);
+ 
++	if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
++		return -EINVAL;
++
+ 	bsg_set_block(bd, file);
+ 
+ 	bytes_written = 0;
+diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
+index a80ea853701d..50297a23ac6e 100644
+--- a/drivers/crypto/caam/caamalg.c
++++ b/drivers/crypto/caam/caamalg.c
+@@ -597,7 +597,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
+ 
+ 	/* Will read cryptlen */
+ 	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+-	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
++	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
++			     FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
++	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+ 
+ 	/* Write ICV */
+ 	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
+diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
+index fd5c5f3370f6..e53dbc90fcb6 100644
+--- a/drivers/gpu/drm/ast/ast_main.c
++++ b/drivers/gpu/drm/ast/ast_main.c
+@@ -223,7 +223,8 @@ static int ast_get_dram_info(struct drm_device *dev)
+ 	ast_write32(ast, 0x10000, 0xfc600309);
+ 
+ 	do {
+-		;
++		if (pci_channel_offline(dev->pdev))
++			return -EIO;
+ 	} while (ast_read32(ast, 0x10000) != 0x01);
+ 	data = ast_read32(ast, 0x10004);
+ 
+@@ -429,7 +430,9 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
+ 	ast_detect_chip(dev, &need_post);
+ 
+ 	if (ast->chip != AST1180) {
+-		ast_get_dram_info(dev);
++		ret = ast_get_dram_info(dev);
++		if (ret)
++			goto out_free;
+ 		ast->vram_size = ast_get_vram_info(dev);
+ 		DRM_INFO("dram %d %d %d %08x\n", ast->mclk, ast->dram_type, ast->dram_bus_width, ast->vram_size);
+ 	}
+diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
+index 6ec3a905fdd2..bdba0c11f989 100644
+--- a/drivers/gpu/drm/gma500/psb_drv.c
++++ b/drivers/gpu/drm/gma500/psb_drv.c
+@@ -464,6 +464,9 @@ static const struct file_operations psb_gem_fops = {
+ 	.open = drm_open,
+ 	.release = drm_release,
+ 	.unlocked_ioctl = psb_unlocked_ioctl,
++#ifdef CONFIG_COMPAT
++	.compat_ioctl = drm_compat_ioctl,
++#endif
+ 	.mmap = drm_gem_mmap,
+ 	.poll = drm_poll,
+ 	.read = drm_read,
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index 1a75d78a2708..e684508d5188 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -2972,6 +2972,7 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
+ 		    (rdev->pdev->revision == 0x80) ||
+ 		    (rdev->pdev->revision == 0x81) ||
+ 		    (rdev->pdev->revision == 0x83) ||
++		    (rdev->pdev->revision == 0x87) ||
+ 		    (rdev->pdev->device == 0x6604) ||
+ 		    (rdev->pdev->device == 0x6605)) {
+ 			max_sclk = 75000;
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+index ffb83b5f7e80..6391ed0fe449 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+@@ -523,8 +523,11 @@ void ipoib_mcast_join_task(struct work_struct *work)
+ 	if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
+ 		return;
+ 
+-	if (ib_query_port(priv->ca, priv->port, &port_attr) ||
+-	    port_attr.state != IB_PORT_ACTIVE) {
++	if (ib_query_port(priv->ca, priv->port, &port_attr)) {
++		ipoib_dbg(priv, "ib_query_port() failed\n");
++		return;
++	}
++	if (port_attr.state != IB_PORT_ACTIVE) {
+ 		ipoib_dbg(priv, "port state is not ACTIVE (state = %d) suspending join task\n",
+ 			  port_attr.state);
+ 		return;
+diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
+index cab87f5ce6d3..e4b337bf6132 100644
+--- a/drivers/input/misc/drv260x.c
++++ b/drivers/input/misc/drv260x.c
+@@ -597,7 +597,6 @@ static int drv260x_probe(struct i2c_client *client,
+ 	}
+ 
+ 	haptics->input_dev->name = "drv260x:haptics";
+-	haptics->input_dev->dev.parent = client->dev.parent;
+ 	haptics->input_dev->close = drv260x_close;
+ 	input_set_drvdata(haptics->input_dev, haptics);
+ 	input_set_capability(haptics->input_dev, EV_FF, FF_RUMBLE);
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index b1c2bef46f8d..9c4836f5fea7 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -1450,12 +1450,15 @@ static int crypt_set_key(struct crypt_config *cc, char *key)
+ 	if (!cc->key_size && strcmp(key, "-"))
+ 		goto out;
+ 
++	/* clear the flag since following operations may invalidate previously valid key */
++	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
++
+ 	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
+ 		goto out;
+ 
+-	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
+-
+ 	r = crypt_setkey_allcpus(cc);
++	if (!r)
++		set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
+ 
+ out:
+ 	/* Hex key string not needed after here, so wipe it. */
+diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
+index 199c9ccd1f5d..023e9d8e5c83 100644
+--- a/drivers/md/persistent-data/dm-space-map-metadata.c
++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
+@@ -764,17 +764,15 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
+ 	memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
+ 
+ 	r = sm_ll_new_metadata(&smm->ll, tm);
++	if (!r) {
++		if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS)
++			nr_blocks = DM_SM_METADATA_MAX_BLOCKS;
++		r = sm_ll_extend(&smm->ll, nr_blocks);
++	}
++	memcpy(&smm->sm, &ops, sizeof(smm->sm));
+ 	if (r)
+ 		return r;
+ 
+-	if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS)
+-		nr_blocks = DM_SM_METADATA_MAX_BLOCKS;
+-	r = sm_ll_extend(&smm->ll, nr_blocks);
+-	if (r)
+-		return r;
+-
+-	memcpy(&smm->sm, &ops, sizeof(smm->sm));
+-
+ 	/*
+ 	 * Now we need to update the newly created data structures with the
+ 	 * allocated blocks that they were built from.
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index cdd8770de1c2..48096d09fef3 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -6203,6 +6203,15 @@ static int run(struct mddev *mddev)
+ 			stripe = (stripe | (stripe-1)) + 1;
+ 		mddev->queue->limits.discard_alignment = stripe;
+ 		mddev->queue->limits.discard_granularity = stripe;
++
++		/*
++		 * We use 16-bit counter of active stripes in bi_phys_segments
++		 * (minus one for over-loaded initialization)
++		 */
++		blk_queue_max_hw_sectors(mddev->queue, 0xfffe * STRIPE_SECTORS);
++		blk_queue_max_discard_sectors(mddev->queue,
++					      0xfffe * STRIPE_SECTORS);
++
+ 		/*
+ 		 * unaligned part of discard request will be ignored, so can't
+ 		 * guarantee discard_zeroes_data
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 388046e71e80..fd033cda0d53 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -1988,7 +1988,27 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
+ 			ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
+ 			sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+ 
++			sdhci_do_reset(host, SDHCI_RESET_CMD);
++			sdhci_do_reset(host, SDHCI_RESET_DATA);
++
+ 			err = -EIO;
++
++			if (cmd.opcode != MMC_SEND_TUNING_BLOCK_HS200)
++				goto out;
++
++			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
++			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
++
++			spin_unlock_irqrestore(&host->lock, flags);
++
++			memset(&cmd, 0, sizeof(cmd));
++			cmd.opcode = MMC_STOP_TRANSMISSION;
++			cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
++			cmd.busy_timeout = 50;
++			mmc_wait_for_cmd(mmc, &cmd, 0);
++
++			spin_lock_irqsave(&host->lock, flags);
++
+ 			goto out;
+ 		}
+ 
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 94ff20e9a8a0..33d83b0d1318 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1987,6 +1987,10 @@ bool pci_dev_run_wake(struct pci_dev *dev)
+ 	if (!dev->pme_support)
+ 		return false;
+ 
++	/* PME-capable in principle, but not from the intended sleep state */
++	if (!pci_pme_capable(dev, pci_target_state(dev)))
++		return false;
++
+ 	while (bus->parent) {
+ 		struct pci_dev *bridge = bus->self;
+ 
+diff --git a/drivers/regulator/stw481x-vmmc.c b/drivers/regulator/stw481x-vmmc.c
+index a7e152696a02..f140b42956a4 100644
+--- a/drivers/regulator/stw481x-vmmc.c
++++ b/drivers/regulator/stw481x-vmmc.c
+@@ -47,7 +47,8 @@ static struct regulator_desc vmmc_regulator = {
+ 	.volt_table = stw481x_vmmc_voltages,
+ 	.enable_time = 200, /* FIXME: look this up */
+ 	.enable_reg = STW_CONF1,
+-	.enable_mask = STW_CONF1_PDN_VMMC,
++	.enable_mask = STW_CONF1_PDN_VMMC | STW_CONF1_MMC_LS_STATUS,
++	.enable_val = STW_CONF1_PDN_VMMC,
+ 	.vsel_reg = STW_CONF1,
+ 	.vsel_mask = STW_CONF1_VMMC_MASK,
+ };
+diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
+index 9bb48d70957c..4d20f7298cb8 100644
+--- a/drivers/s390/char/vmlogrdr.c
++++ b/drivers/s390/char/vmlogrdr.c
+@@ -872,7 +872,7 @@ static int __init vmlogrdr_init(void)
+ 		goto cleanup;
+ 
+ 	for (i=0; i < MAXMINOR; ++i ) {
+-		sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL);
++		sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ 		if (!sys_ser[i].buffer) {
+ 			rc = -ENOMEM;
+ 			break;
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+index 554395634592..e37b5e82e068 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+@@ -2508,6 +2508,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
+ 		printk("megaraid_sas: pending commands remain after waiting, "
+ 		       "will reset adapter scsi%d.\n",
+ 		       instance->host->host_no);
++		*convert = 1;
+ 		retval = 1;
+ 	}
+ out:
+diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
+index f4cb7b3e9e23..9bfc6835ff2f 100644
+--- a/drivers/scsi/scsi_sysfs.c
++++ b/drivers/scsi/scsi_sysfs.c
+@@ -1027,10 +1027,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
+ 	struct request_queue *rq = sdev->request_queue;
+ 	struct scsi_target *starget = sdev->sdev_target;
+ 
+-	error = scsi_device_set_state(sdev, SDEV_RUNNING);
+-	if (error)
+-		return error;
+-
+ 	error = scsi_target_add(starget);
+ 	if (error)
+ 		return error;
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index bc09f1d196c9..0becdb6e1ba0 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -592,6 +592,9 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
+ 	sg_io_hdr_t *hp;
+ 	unsigned char cmnd[SG_MAX_CDB_SIZE];
+ 
++	if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
++		return -EINVAL;
++
+ 	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
+ 		return -ENXIO;
+ 	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
+diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
+index 0f28c08fcb3c..77b551da5728 100644
+--- a/drivers/ssb/pci.c
++++ b/drivers/ssb/pci.c
+@@ -909,6 +909,7 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
+ 			if (err) {
+ 				ssb_warn("WARNING: Using fallback SPROM failed (err %d)\n",
+ 					 err);
++				goto out_free;
+ 			} else {
+ 				ssb_dbg("Using SPROM revision %d provided by platform\n",
+ 					sprom->revision);
+diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
+index 8400f2fa76bd..04f7a3bd3ddc 100644
+--- a/drivers/staging/comedi/drivers/ni_mio_common.c
++++ b/drivers/staging/comedi/drivers/ni_mio_common.c
+@@ -2105,7 +2105,7 @@ static int ni_ai_insn_read(struct comedi_device *dev,
+ 			   unsigned int *data)
+ {
+ 	struct ni_private *devpriv = dev->private;
+-	unsigned int mask = (s->maxdata + 1) >> 1;
++	unsigned int mask = s->maxdata;
+ 	int i, n;
+ 	unsigned signbits;
+ 	unsigned int d;
+diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
+index 9a1b314f6482..90e55b469c1e 100644
+--- a/drivers/target/target_core_user.c
++++ b/drivers/target/target_core_user.c
+@@ -587,8 +587,6 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
+ 	target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION);
+ 	cmd->se_cmd = NULL;
+ 
+-	kmem_cache_free(tcmu_cmd_cache, cmd);
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
+index 1967bee4f076..9035fbc5e98d 100644
+--- a/drivers/thermal/thermal_hwmon.c
++++ b/drivers/thermal/thermal_hwmon.c
+@@ -98,7 +98,7 @@ temp_crit_show(struct device *dev, struct device_attribute *attr, char *buf)
+ 	long temperature;
+ 	int ret;
+ 
+-	ret = tz->ops->get_trip_temp(tz, 0, &temperature);
++	ret = tz->ops->get_crit_temp(tz, &temperature);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index f88ee8f3373d..e2f63f5d30cb 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1727,6 +1727,7 @@ static const struct usb_device_id acm_ids[] = {
+ 	{ USB_DEVICE(0x20df, 0x0001), /* Simtec Electronics Entropy Key */
+ 	.driver_info = QUIRK_CONTROL_LINE_STATE, },
+ 	{ USB_DEVICE(0x2184, 0x001c) },	/* GW Instek AFG-2225 */
++	{ USB_DEVICE(0x2184, 0x0036) },	/* GW Instek AFG-125 */
+ 	{ USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
+ 	},
+ 	/* Motorola H24 HSPA module: */
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 464542a046c7..1a3df0f8b891 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -101,6 +101,8 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
+ 
+ static void hub_release(struct kref *kref);
+ static int usb_reset_and_verify_device(struct usb_device *udev);
++static void hub_usb3_port_prepare_disable(struct usb_hub *hub,
++					  struct usb_port *port_dev);
+ 
+ static inline char *portspeed(struct usb_hub *hub, int portstatus)
+ {
+@@ -882,82 +884,28 @@ static int hub_set_port_link_state(struct usb_hub *hub, int port1,
+ }
+ 
+ /*
+- * If USB 3.0 ports are placed into the Disabled state, they will no longer
+- * detect any device connects or disconnects.  This is generally not what the
+- * USB core wants, since it expects a disabled port to produce a port status
+- * change event when a new device connects.
+- *
+- * Instead, set the link state to Disabled, wait for the link to settle into
+- * that state, clear any change bits, and then put the port into the RxDetect
+- * state.
++ * USB-3 does not have a similar link state as USB-2 that will avoid negotiating
++ * a connection with a plugged-in cable but will signal the host when the cable
++ * is unplugged. Disable remote wake and set link state to U3 for USB-3 devices
+  */
+-static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
+-{
+-	int ret;
+-	int total_time;
+-	u16 portchange, portstatus;
+-
+-	if (!hub_is_superspeed(hub->hdev))
+-		return -EINVAL;
+-
+-	ret = hub_port_status(hub, port1, &portstatus, &portchange);
+-	if (ret < 0)
+-		return ret;
+-
+-	/*
+-	 * USB controller Advanced Micro Devices, Inc. [AMD] FCH USB XHCI
+-	 * Controller [1022:7814] will have spurious result making the following
+-	 * usb 3.0 device hotplugging route to the 2.0 root hub and recognized
+-	 * as high-speed device if we set the usb 3.0 port link state to
+-	 * Disabled. Since it's already in USB_SS_PORT_LS_RX_DETECT state, we
+-	 * check the state here to avoid the bug.
+-	 */
+-	if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
+-				USB_SS_PORT_LS_RX_DETECT) {
+-		dev_dbg(&hub->ports[port1 - 1]->dev,
+-			 "Not disabling port; link state is RxDetect\n");
+-		return ret;
+-	}
+-
+-	ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
+-	if (ret)
+-		return ret;
+-
+-	/* Wait for the link to enter the disabled state. */
+-	for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) {
+-		ret = hub_port_status(hub, port1, &portstatus, &portchange);
+-		if (ret < 0)
+-			return ret;
+-
+-		if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
+-				USB_SS_PORT_LS_SS_DISABLED)
+-			break;
+-		if (total_time >= HUB_DEBOUNCE_TIMEOUT)
+-			break;
+-		msleep(HUB_DEBOUNCE_STEP);
+-	}
+-	if (total_time >= HUB_DEBOUNCE_TIMEOUT)
+-		dev_warn(&hub->ports[port1 - 1]->dev,
+-				"Could not disable after %d ms\n", total_time);
+-
+-	return hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_RX_DETECT);
+-}
+-
+ static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
+ {
+ 	struct usb_port *port_dev = hub->ports[port1 - 1];
+ 	struct usb_device *hdev = hub->hdev;
+ 	int ret = 0;
+ 
+-	if (port_dev->child && set_state)
+-		usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED);
+ 	if (!hub->error) {
+-		if (hub_is_superspeed(hub->hdev))
+-			ret = hub_usb3_port_disable(hub, port1);
+-		else
++		if (hub_is_superspeed(hub->hdev)) {
++			hub_usb3_port_prepare_disable(hub, port_dev);
++			ret = hub_set_port_link_state(hub, port_dev->portnum,
++						      USB_SS_PORT_LS_U3);
++		} else {
+ 			ret = usb_clear_port_feature(hdev, port1,
+ 					USB_PORT_FEAT_ENABLE);
++		}
+ 	}
++	if (port_dev->child && set_state)
++		usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED);
+ 	if (ret && ret != -ENODEV)
+ 		dev_err(&port_dev->dev, "cannot disable (err = %d)\n", ret);
+ 	return ret;
+@@ -4041,6 +3989,26 @@ void usb_unlocked_enable_lpm(struct usb_device *udev)
+ }
+ EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm);
+ 
++/* usb3 devices use U3 for disabled, make sure remote wakeup is disabled */
++static void hub_usb3_port_prepare_disable(struct usb_hub *hub,
++					  struct usb_port *port_dev)
++{
++	struct usb_device *udev = port_dev->child;
++	int ret;
++
++	if (udev && udev->port_is_suspended && udev->do_remote_wakeup) {
++		ret = hub_set_port_link_state(hub, port_dev->portnum,
++					      USB_SS_PORT_LS_U0);
++		if (!ret) {
++			msleep(USB_RESUME_TIMEOUT);
++			ret = usb_disable_remote_wakeup(udev);
++		}
++		if (ret)
++			dev_warn(&udev->dev,
++				 "Port disable: can't disable remote wake\n");
++		udev->do_remote_wakeup = 0;
++	}
++}
+ 
+ #else	/* CONFIG_PM */
+ 
+@@ -4048,6 +4016,9 @@ EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm);
+ #define hub_resume		NULL
+ #define hub_reset_resume	NULL
+ 
++static inline void hub_usb3_port_prepare_disable(struct usb_hub *hub,
++						 struct usb_port *port_dev) { }
++
+ int usb_disable_lpm(struct usb_device *udev)
+ {
+ 	return 0;
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index f6a51fddd5b5..7b2ba34cf5e7 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -143,7 +143,7 @@ int config_ep_by_speed(struct usb_gadget *g,
+ 
+ ep_found:
+ 	/* commit results */
+-	_ep->maxpacket = usb_endpoint_maxp(chosen_desc);
++	_ep->maxpacket = usb_endpoint_maxp(chosen_desc) & 0x7ff;
+ 	_ep->desc = chosen_desc;
+ 	_ep->comp_desc = NULL;
+ 	_ep->maxburst = 0;
+diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
+index 940304c33224..02260cfdedb1 100644
+--- a/drivers/usb/host/uhci-pci.c
++++ b/drivers/usb/host/uhci-pci.c
+@@ -129,6 +129,10 @@ static int uhci_pci_init(struct usb_hcd *hcd)
+ 	if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_HP)
+ 		uhci->wait_for_hp = 1;
+ 
++	/* Intel controllers use non-PME wakeup signalling */
++	if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_INTEL)
++		device_set_run_wake(uhci_dev(uhci), 1);
++
+ 	/* Set up pointers to PCI-specific functions */
+ 	uhci->reset_hc = uhci_pci_reset_hc;
+ 	uhci->check_and_reset_hc = uhci_pci_check_and_reset_hc;
+diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
+index e020ad28a00c..53c90131764d 100644
+--- a/drivers/usb/serial/kl5kusb105.c
++++ b/drivers/usb/serial/kl5kusb105.c
+@@ -296,7 +296,7 @@ static int  klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
+ 	rc = usb_serial_generic_open(tty, port);
+ 	if (rc) {
+ 		retval = rc;
+-		goto exit;
++		goto err_free_cfg;
+ 	}
+ 
+ 	rc = usb_control_msg(port->serial->dev,
+@@ -315,17 +315,32 @@ static int  klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
+ 		dev_dbg(&port->dev, "%s - enabled reading\n", __func__);
+ 
+ 	rc = klsi_105_get_line_state(port, &line_state);
+-	if (rc >= 0) {
+-		spin_lock_irqsave(&priv->lock, flags);
+-		priv->line_state = line_state;
+-		spin_unlock_irqrestore(&priv->lock, flags);
+-		dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__, line_state);
+-		retval = 0;
+-	} else
++	if (rc < 0) {
+ 		retval = rc;
++		goto err_disable_read;
++	}
++
++	spin_lock_irqsave(&priv->lock, flags);
++	priv->line_state = line_state;
++	spin_unlock_irqrestore(&priv->lock, flags);
++	dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__,
++			line_state);
++
++	return 0;
+ 
+-exit:
++err_disable_read:
++	usb_control_msg(port->serial->dev,
++			     usb_sndctrlpipe(port->serial->dev, 0),
++			     KL5KUSB105A_SIO_CONFIGURE,
++			     USB_TYPE_VENDOR | USB_DIR_OUT,
++			     KL5KUSB105A_SIO_CONFIGURE_READ_OFF,
++			     0, /* index */
++			     NULL, 0,
++			     KLSI_TIMEOUT);
++	usb_serial_generic_close(port);
++err_free_cfg:
+ 	kfree(cfg);
++
+ 	return retval;
+ }
+ 
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 3814b7bd7d9f..295a00a9c257 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -270,6 +270,8 @@ static void option_instat_callback(struct urb *urb);
+ #define TELIT_PRODUCT_CC864_SINGLE		0x1006
+ #define TELIT_PRODUCT_DE910_DUAL		0x1010
+ #define TELIT_PRODUCT_UE910_V2			0x1012
++#define TELIT_PRODUCT_LE922_USBCFG1		0x1040
++#define TELIT_PRODUCT_LE922_USBCFG2		0x1041
+ #define TELIT_PRODUCT_LE922_USBCFG0		0x1042
+ #define TELIT_PRODUCT_LE922_USBCFG3		0x1043
+ #define TELIT_PRODUCT_LE922_USBCFG5		0x1045
+@@ -1223,6 +1225,10 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
+ 		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
++	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
++		.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
++	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG2),
++		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
+ 		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
+@@ -1999,6 +2005,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
++	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) },			/* D-Link DWM-158 */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff),			/* D-Link DWM-221 B1 */
+ 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index 1d9c9f3754f8..543dcf972120 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -694,7 +694,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
+ 		return true;	 /* already a holder */
+ 	else if (bdev->bd_holder != NULL)
+ 		return false; 	 /* held by someone else */
+-	else if (bdev->bd_contains == bdev)
++	else if (whole == bdev)
+ 		return true;  	 /* is a whole device which isn't held */
+ 
+ 	else if (whole->bd_holder == bd_may_claim)
+@@ -1727,6 +1727,7 @@ void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
+ 	spin_lock(&inode_sb_list_lock);
+ 	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
+ 		struct address_space *mapping = inode->i_mapping;
++		struct block_device *bdev;
+ 
+ 		spin_lock(&inode->i_lock);
+ 		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
+@@ -1747,8 +1748,12 @@ void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
+ 		 */
+ 		iput(old_inode);
+ 		old_inode = inode;
++		bdev = I_BDEV(inode);
+ 
+-		func(I_BDEV(inode), arg);
++		mutex_lock(&bdev->bd_mutex);
++		if (bdev->bd_openers)
++			func(bdev, arg);
++		mutex_unlock(&bdev->bd_mutex);
+ 
+ 		spin_lock(&inode_sb_list_lock);
+ 	}
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 6f5fe1104ec6..001b338abe17 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -1782,12 +1782,11 @@ static noinline int find_dir_range(struct btrfs_root *root,
+ next:
+ 	/* check the next slot in the tree to see if it is a valid item */
+ 	nritems = btrfs_header_nritems(path->nodes[0]);
++	path->slots[0]++;
+ 	if (path->slots[0] >= nritems) {
+ 		ret = btrfs_next_leaf(root, path);
+ 		if (ret)
+ 			goto out;
+-	} else {
+-		path->slots[0]++;
+ 	}
+ 
+ 	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 02a33e529904..2d7026888b53 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -615,6 +615,8 @@ struct TCP_Server_Info {
+ #ifdef CONFIG_CIFS_SMB2
+ 	unsigned int	max_read;
+ 	unsigned int	max_write;
++	struct delayed_work reconnect; /* reconnect workqueue job */
++	struct mutex reconnect_mutex; /* prevent simultaneous reconnects */
+ #endif /* CONFIG_CIFS_SMB2 */
+ };
+ 
+@@ -814,6 +816,7 @@ cap_unix(struct cifs_ses *ses)
+ struct cifs_tcon {
+ 	struct list_head tcon_list;
+ 	int tc_count;
++	struct list_head rlist; /* reconnect list */
+ 	struct list_head openFileList;
+ 	struct cifs_ses *ses;	/* pointer to session associated with */
+ 	char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */
+diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
+index 5b868060eab8..26f19549ef35 100644
+--- a/fs/cifs/cifsproto.h
++++ b/fs/cifs/cifsproto.h
+@@ -207,6 +207,9 @@ extern void cifs_add_pending_open_locked(struct cifs_fid *fid,
+ 					 struct tcon_link *tlink,
+ 					 struct cifs_pending_open *open);
+ extern void cifs_del_pending_open(struct cifs_pending_open *open);
++extern void cifs_put_tcp_session(struct TCP_Server_Info *server,
++				 int from_reconnect);
++extern void cifs_put_tcon(struct cifs_tcon *tcon);
+ 
+ #if IS_ENABLED(CONFIG_CIFS_DFS_UPCALL)
+ extern void cifs_dfs_release_automount_timer(void);
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 5093710e7660..8770f4de457b 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -52,6 +52,9 @@
+ #include "nterr.h"
+ #include "rfc1002pdu.h"
+ #include "fscache.h"
++#ifdef CONFIG_CIFS_SMB2
++#include "smb2proto.h"
++#endif
+ 
+ #define CIFS_PORT 445
+ #define RFC1001_PORT 139
+@@ -2066,8 +2069,8 @@ cifs_find_tcp_session(struct smb_vol *vol)
+ 	return NULL;
+ }
+ 
+-static void
+-cifs_put_tcp_session(struct TCP_Server_Info *server)
++void
++cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
+ {
+ 	struct task_struct *task;
+ 
+@@ -2084,6 +2087,19 @@ cifs_put_tcp_session(struct TCP_Server_Info *server)
+ 
+ 	cancel_delayed_work_sync(&server->echo);
+ 
++#ifdef CONFIG_CIFS_SMB2
++	if (from_reconnect)
++		/*
++		 * Avoid deadlock here: reconnect work calls
++		 * cifs_put_tcp_session() at its end. Need to be sure
++		 * that reconnect work does nothing with server pointer after
++		 * that step.
++		 */
++		cancel_delayed_work(&server->reconnect);
++	else
++		cancel_delayed_work_sync(&server->reconnect);
++#endif
++
+ 	spin_lock(&GlobalMid_Lock);
+ 	server->tcpStatus = CifsExiting;
+ 	spin_unlock(&GlobalMid_Lock);
+@@ -2148,6 +2164,10 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
+ 	INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
+ 	INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
+ 	INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
++#ifdef CONFIG_CIFS_SMB2
++	INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server);
++	mutex_init(&tcp_ses->reconnect_mutex);
++#endif
+ 	memcpy(&tcp_ses->srcaddr, &volume_info->srcaddr,
+ 	       sizeof(tcp_ses->srcaddr));
+ 	memcpy(&tcp_ses->dstaddr, &volume_info->dstaddr,
+@@ -2300,7 +2320,7 @@ cifs_put_smb_ses(struct cifs_ses *ses)
+ 	spin_unlock(&cifs_tcp_ses_lock);
+ 
+ 	sesInfoFree(ses);
+-	cifs_put_tcp_session(server);
++	cifs_put_tcp_session(server, 0);
+ }
+ 
+ #ifdef CONFIG_KEYS
+@@ -2473,7 +2493,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
+ 		mutex_unlock(&ses->session_mutex);
+ 
+ 		/* existing SMB ses has a server reference already */
+-		cifs_put_tcp_session(server);
++		cifs_put_tcp_session(server, 0);
+ 		free_xid(xid);
+ 		return ses;
+ 	}
+@@ -2563,7 +2583,7 @@ cifs_find_tcon(struct cifs_ses *ses, const char *unc)
+ 	return NULL;
+ }
+ 
+-static void
++void
+ cifs_put_tcon(struct cifs_tcon *tcon)
+ {
+ 	unsigned int xid;
+@@ -3671,7 +3691,7 @@ mount_fail_check:
+ 		else if (ses)
+ 			cifs_put_smb_ses(ses);
+ 		else
+-			cifs_put_tcp_session(server);
++			cifs_put_tcp_session(server, 0);
+ 		bdi_destroy(&cifs_sb->bdi);
+ 	}
+ 
+@@ -3976,7 +3996,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
+ 	ses = cifs_get_smb_ses(master_tcon->ses->server, vol_info);
+ 	if (IS_ERR(ses)) {
+ 		tcon = (struct cifs_tcon *)ses;
+-		cifs_put_tcp_session(master_tcon->ses->server);
++		cifs_put_tcp_session(master_tcon->ses->server, 0);
+ 		goto out;
+ 	}
+ 
+diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
+index 45992944e238..b87b07504947 100644
+--- a/fs/cifs/smb2file.c
++++ b/fs/cifs/smb2file.c
+@@ -241,7 +241,7 @@ smb2_push_mandatory_locks(struct cifsFileInfo *cfile)
+ 	 * and check it for zero before using.
+ 	 */
+ 	max_buf = tlink_tcon(cfile->tlink)->ses->server->maxBuf;
+-	if (!max_buf) {
++	if (max_buf < sizeof(struct smb2_lock_element)) {
+ 		free_xid(xid);
+ 		return -EINVAL;
+ 	}
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index d759ecdfa9d6..c39773959a49 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -264,7 +264,7 @@ out:
+ 	case SMB2_CHANGE_NOTIFY:
+ 	case SMB2_QUERY_INFO:
+ 	case SMB2_SET_INFO:
+-		return -EAGAIN;
++		rc = -EAGAIN;
+ 	}
+ 	unload_nls(nls_codepage);
+ 	return rc;
+@@ -1612,6 +1612,54 @@ smb2_echo_callback(struct mid_q_entry *mid)
+ 	add_credits(server, credits_received, CIFS_ECHO_OP);
+ }
+ 
++void smb2_reconnect_server(struct work_struct *work)
++{
++	struct TCP_Server_Info *server = container_of(work,
++					struct TCP_Server_Info, reconnect.work);
++	struct cifs_ses *ses;
++	struct cifs_tcon *tcon, *tcon2;
++	struct list_head tmp_list;
++	int tcon_exist = false;
++
++	/* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
++	mutex_lock(&server->reconnect_mutex);
++
++	INIT_LIST_HEAD(&tmp_list);
++	cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
++
++	spin_lock(&cifs_tcp_ses_lock);
++	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
++		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
++			if (tcon->need_reconnect) {
++				tcon->tc_count++;
++				list_add_tail(&tcon->rlist, &tmp_list);
++				tcon_exist = true;
++			}
++		}
++	}
++	/*
++	 * Get the reference to server struct to be sure that the last call of
++	 * cifs_put_tcon() in the loop below won't release the server pointer.
++	 */
++	if (tcon_exist)
++		server->srv_count++;
++
++	spin_unlock(&cifs_tcp_ses_lock);
++
++	list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
++		smb2_reconnect(SMB2_ECHO, tcon);
++		list_del_init(&tcon->rlist);
++		cifs_put_tcon(tcon);
++	}
++
++	cifs_dbg(FYI, "Reconnecting tcons finished\n");
++	mutex_unlock(&server->reconnect_mutex);
++
++	/* now we can safely release srv struct */
++	if (tcon_exist)
++		cifs_put_tcp_session(server, 1);
++}
++
+ int
+ SMB2_echo(struct TCP_Server_Info *server)
+ {
+@@ -1624,32 +1672,11 @@ SMB2_echo(struct TCP_Server_Info *server)
+ 	cifs_dbg(FYI, "In echo request\n");
+ 
+ 	if (server->tcpStatus == CifsNeedNegotiate) {
+-		struct list_head *tmp, *tmp2;
+-		struct cifs_ses *ses;
+-		struct cifs_tcon *tcon;
+-
+-		cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
+-		spin_lock(&cifs_tcp_ses_lock);
+-		list_for_each(tmp, &server->smb_ses_list) {
+-			ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
+-			list_for_each(tmp2, &ses->tcon_list) {
+-				tcon = list_entry(tmp2, struct cifs_tcon,
+-						  tcon_list);
+-				/* add check for persistent handle reconnect */
+-				if (tcon && tcon->need_reconnect) {
+-					spin_unlock(&cifs_tcp_ses_lock);
+-					rc = smb2_reconnect(SMB2_ECHO, tcon);
+-					spin_lock(&cifs_tcp_ses_lock);
+-				}
+-			}
+-		}
+-		spin_unlock(&cifs_tcp_ses_lock);
++		/* No need to send echo on newly established connections */
++		queue_delayed_work(cifsiod_wq, &server->reconnect, 0);
++		return rc;
+ 	}
+ 
+-	/* if no session, renegotiate failed above */
+-	if (server->tcpStatus == CifsNeedNegotiate)
+-		return -EIO;
+-
+ 	rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
+ 	if (rc)
+ 		return rc;
+diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
+index 9bc59f9c12fb..0a406ae78129 100644
+--- a/fs/cifs/smb2proto.h
++++ b/fs/cifs/smb2proto.h
+@@ -95,6 +95,7 @@ extern int smb2_open_file(const unsigned int xid,
+ extern int smb2_unlock_range(struct cifsFileInfo *cfile,
+ 			     struct file_lock *flock, const unsigned int xid);
+ extern int smb2_push_mandatory_locks(struct cifsFileInfo *cfile);
++extern void smb2_reconnect_server(struct work_struct *work);
+ 
+ /*
+  * SMB2 Worker functions - most of protocol specific implementation details
+diff --git a/fs/exec.c b/fs/exec.c
+index b7a5f46181b0..fe9ec45685a5 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -19,7 +19,7 @@
+  * current->executable is only used by the procfs.  This allows a dispatch
+  * table to check for several different types  of binary formats.  We keep
+  * trying until we recognize the file or we run out of supported binary
+- * formats. 
++ * formats.
+  */
+ 
+ #include <linux/slab.h>
+@@ -1083,6 +1083,13 @@ int flush_old_exec(struct linux_binprm * bprm)
+ 	flush_thread();
+ 	current->personality &= ~bprm->per_clear;
+ 
++	/*
++	 * We have to apply CLOEXEC before we change whether the process is
++	 * dumpable (in setup_new_exec) to avoid a race with a process in userspace
++	 * trying to access the should-be-closed file descriptors of a process
++	 * undergoing exec(2).
++	 */
++	do_close_on_exec(current->files);
+ 	return 0;
+ 
+ out:
+@@ -1132,7 +1139,6 @@ void setup_new_exec(struct linux_binprm * bprm)
+ 	   group */
+ 	current->self_exec_id++;
+ 	flush_signal_handlers(current, 0);
+-	do_close_on_exec(current->files);
+ }
+ EXPORT_SYMBOL(setup_new_exec);
+ 
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 6ce71e4e82e5..bd997b3d6a6f 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -233,6 +233,7 @@ struct ext4_io_submit {
+ #define	EXT4_MAX_BLOCK_SIZE		65536
+ #define EXT4_MIN_BLOCK_LOG_SIZE		10
+ #define EXT4_MAX_BLOCK_LOG_SIZE		16
++#define EXT4_MAX_CLUSTER_LOG_SIZE	30
+ #ifdef __KERNEL__
+ # define EXT4_BLOCK_SIZE(s)		((s)->s_blocksize)
+ #else
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index 3ea62695abce..3968b164df6e 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -334,8 +334,10 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode,
+ 
+ 	len -= EXT4_MIN_INLINE_DATA_SIZE;
+ 	value = kzalloc(len, GFP_NOFS);
+-	if (!value)
++	if (!value) {
++		error = -ENOMEM;
+ 		goto out;
++	}
+ 
+ 	error = ext4_xattr_ibody_get(inode, i.name_index, i.name,
+ 				     value, len);
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index dee06cd428eb..da268894e7c7 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -668,7 +668,7 @@ static void ext4_mb_mark_free_simple(struct super_block *sb,
+ 	ext4_grpblk_t min;
+ 	ext4_grpblk_t max;
+ 	ext4_grpblk_t chunk;
+-	unsigned short border;
++	unsigned int border;
+ 
+ 	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));
+ 
+@@ -2254,7 +2254,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
+ 	struct ext4_group_info *grinfo;
+ 	struct sg {
+ 		struct ext4_group_info info;
+-		ext4_grpblk_t counters[16];
++		ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
+ 	} sg;
+ 
+ 	group--;
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 60964b7d7ffe..a5200023e604 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3325,10 +3325,15 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp,
+ 			ext4_set_bit(s++, buf);
+ 			count++;
+ 		}
+-		for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) {
+-			ext4_set_bit(EXT4_B2C(sbi, s++), buf);
+-			count++;
++		j = ext4_bg_num_gdb(sb, grp);
++		if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
++			ext4_error(sb, "Invalid number of block group "
++				   "descriptor blocks: %d", j);
++			j = EXT4_BLOCKS_PER_GROUP(sb) - s;
+ 		}
++		count += j;
++		for (; j > 0; j--)
++			ext4_set_bit(EXT4_B2C(sbi, s++), buf);
+ 	}
+ 	if (!count)
+ 		return 0;
+@@ -3690,7 +3695,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ 	if (blocksize < EXT4_MIN_BLOCK_SIZE ||
+ 	    blocksize > EXT4_MAX_BLOCK_SIZE) {
+ 		ext4_msg(sb, KERN_ERR,
+-		       "Unsupported filesystem blocksize %d", blocksize);
++		       "Unsupported filesystem blocksize %d (%d log_block_size)",
++			 blocksize, le32_to_cpu(es->s_log_block_size));
++		goto failed_mount;
++	}
++	if (le32_to_cpu(es->s_log_block_size) >
++	    (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
++		ext4_msg(sb, KERN_ERR,
++			 "Invalid log block size: %u",
++			 le32_to_cpu(es->s_log_block_size));
+ 		goto failed_mount;
+ 	}
+ 
+@@ -3766,12 +3779,16 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ 
+ 	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
+ 	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
+-	if (EXT4_INODE_SIZE(sb) == 0 || EXT4_INODES_PER_GROUP(sb) == 0)
+-		goto cantfind_ext4;
+ 
+ 	sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
+ 	if (sbi->s_inodes_per_block == 0)
+ 		goto cantfind_ext4;
++	if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
++	    sbi->s_inodes_per_group > blocksize * 8) {
++		ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
++			 sbi->s_blocks_per_group);
++		goto failed_mount;
++	}
+ 	sbi->s_itb_per_group = sbi->s_inodes_per_group /
+ 					sbi->s_inodes_per_block;
+ 	sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
+@@ -3812,6 +3829,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ 				 "block size (%d)", clustersize, blocksize);
+ 			goto failed_mount;
+ 		}
++		if (le32_to_cpu(es->s_log_cluster_size) >
++		    (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
++			ext4_msg(sb, KERN_ERR,
++				 "Invalid log cluster size: %u",
++				 le32_to_cpu(es->s_log_cluster_size));
++			goto failed_mount;
++		}
+ 		sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
+ 			le32_to_cpu(es->s_log_block_size);
+ 		sbi->s_clusters_per_group =
+@@ -3848,13 +3872,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ 	}
+ 	sbi->s_cluster_ratio = clustersize / blocksize;
+ 
+-	if (sbi->s_inodes_per_group > blocksize * 8) {
+-		ext4_msg(sb, KERN_ERR,
+-		       "#inodes per group too big: %lu",
+-		       sbi->s_inodes_per_group);
+-		goto failed_mount;
+-	}
+-
+ 	/* Do we have standard group size of clustersize * 8 blocks ? */
+ 	if (sbi->s_blocks_per_group == clustersize << 3)
+ 		set_opt2(sb, STD_GROUP_SIZE);
+diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
+index 0a91ab813a9e..8e53d80c181a 100644
+--- a/fs/f2fs/debug.c
++++ b/fs/f2fs/debug.c
+@@ -295,6 +295,7 @@ static int stat_open(struct inode *inode, struct file *file)
+ }
+ 
+ static const struct file_operations stat_fops = {
++	.owner = THIS_MODULE,
+ 	.open = stat_open,
+ 	.read = seq_read,
+ 	.llseek = seq_lseek,
+diff --git a/fs/nfs/file.c b/fs/nfs/file.c
+index 2ab6f00dba5b..c85da0fc4593 100644
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -410,7 +410,7 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
+ 	 */
+ 	if (!PageUptodate(page)) {
+ 		unsigned pglen = nfs_page_length(page);
+-		unsigned end = offset + len;
++		unsigned end = offset + copied;
+ 
+ 		if (pglen == 0) {
+ 			zero_user_segments(page, 0, offset,
+diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
+index 5942b9fa5681..315f96c88091 100644
+--- a/fs/xfs/xfs_log_recover.c
++++ b/fs/xfs/xfs_log_recover.c
+@@ -3861,6 +3861,7 @@ xlog_recover_clear_agi_bucket(
+ 	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
+ 	offset = offsetof(xfs_agi_t, agi_unlinked) +
+ 		 (sizeof(xfs_agino_t) * bucket);
++	xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
+ 	xfs_trans_log_buf(tp, agibp, offset,
+ 			  (offset + sizeof(xfs_agino_t) - 1));
+ 
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 70fde9c5c61d..49ac10f99da0 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1888,22 +1888,17 @@ struct napi_gro_cb {
+ 	/* Number of segments aggregated. */
+ 	u16	count;
+ 
+-	/* This is non-zero if the packet may be of the same flow. */
+-	u8	same_flow;
+-
+-	/* Free the skb? */
+-	u8	free;
+-#define NAPI_GRO_FREE		  1
+-#define NAPI_GRO_FREE_STOLEN_HEAD 2
+-
+ 	/* jiffies when first packet was created/queued */
+ 	unsigned long age;
+ 
+ 	/* Used in ipv6_gro_receive() and foo-over-udp */
+ 	u16	proto;
+ 
+-	/* Used in udp_gro_receive */
+-	u8	udp_mark:1;
++	/* This is non-zero if the packet may be of the same flow. */
++	u8	same_flow:1;
++
++	/* Used in tunnel GRO receive */
++	u8	encap_mark:1;
+ 
+ 	/* GRO checksum is valid */
+ 	u8	csum_valid:1;
+@@ -1911,9 +1906,16 @@ struct napi_gro_cb {
+ 	/* Number of checksums via CHECKSUM_UNNECESSARY */
+ 	u8	csum_cnt:3;
+ 
++	/* Free the skb? */
++	u8	free:2;
++#define NAPI_GRO_FREE		  1
++#define NAPI_GRO_FREE_STOLEN_HEAD 2
++
+ 	/* Used in foo-over-udp, set in udp[46]_gro_receive */
+ 	u8	is_ipv6:1;
+ 
++	/* 7 bit hole */
++
+ 	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
+ 	__wsum	csum;
+ 
+diff --git a/include/net/ip.h b/include/net/ip.h
+index d00ebdf14ca4..7f0cd17668ae 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -233,6 +233,8 @@ static inline int inet_is_local_reserved_port(struct net *net, int port)
+ }
+ #endif
+ 
++__be32 inet_current_timestamp(void);
++
+ /* From inetpeer.c */
+ extern int inet_peer_threshold;
+ extern int inet_peer_minttl;
+diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
+index ce55906b54a0..2ecac2032a3a 100644
+--- a/include/rdma/ib_addr.h
++++ b/include/rdma/ib_addr.h
+@@ -184,10 +184,12 @@ static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr,
+ 
+ 	dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
+ 	if (dev) {
+-		ip4 = (struct in_device *)dev->ip_ptr;
+-		if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address)
++		ip4 = in_dev_get(dev);
++		if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address) {
+ 			ipv6_addr_set_v4mapped(ip4->ifa_list->ifa_address,
+ 					       (struct in6_addr *)gid);
++			in_dev_put(ip4);
++		}
+ 		dev_put(dev);
+ 	}
+ }
+diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
+index 2964333687dc..a88178c9ca5f 100644
+--- a/kernel/trace/trace_functions_graph.c
++++ b/kernel/trace/trace_functions_graph.c
+@@ -877,6 +877,10 @@ print_graph_entry_leaf(struct trace_iterator *iter,
+ 
+ 		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
+ 
++		/* If a graph tracer ignored set_graph_notrace */
++		if (call->depth < -1)
++			call->depth += FTRACE_NOTRACE_DEPTH;
++
+ 		/*
+ 		 * Comments display at + 1 to depth. Since
+ 		 * this is a leaf function, keep the comments
+@@ -885,7 +889,8 @@ print_graph_entry_leaf(struct trace_iterator *iter,
+ 		cpu_data->depth = call->depth - 1;
+ 
+ 		/* No need to keep this function around for this depth */
+-		if (call->depth < FTRACE_RETFUNC_DEPTH)
++		if (call->depth < FTRACE_RETFUNC_DEPTH &&
++		    !WARN_ON_ONCE(call->depth < 0))
+ 			cpu_data->enter_funcs[call->depth] = 0;
+ 	}
+ 
+@@ -922,11 +927,16 @@ print_graph_entry_nested(struct trace_iterator *iter,
+ 		struct fgraph_cpu_data *cpu_data;
+ 		int cpu = iter->cpu;
+ 
++		/* If a graph tracer ignored set_graph_notrace */
++		if (call->depth < -1)
++			call->depth += FTRACE_NOTRACE_DEPTH;
++
+ 		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
+ 		cpu_data->depth = call->depth;
+ 
+ 		/* Save this function pointer to see if the exit matches */
+-		if (call->depth < FTRACE_RETFUNC_DEPTH)
++		if (call->depth < FTRACE_RETFUNC_DEPTH &&
++		    !WARN_ON_ONCE(call->depth < 0))
+ 			cpu_data->enter_funcs[call->depth] = call->func;
+ 	}
+ 
+@@ -1179,7 +1189,8 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
+ 		 */
+ 		cpu_data->depth = trace->depth - 1;
+ 
+-		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
++		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
++		    !WARN_ON_ONCE(trace->depth < 0)) {
+ 			if (cpu_data->enter_funcs[trace->depth] != trace->func)
+ 				func_match = 0;
+ 			cpu_data->enter_funcs[trace->depth] = 0;
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index 1519051603ff..0861598c2465 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -1979,6 +1979,19 @@ static int process_connect(struct ceph_connection *con)
+ 
+ 	dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
+ 
++	if (con->auth_reply_buf) {
++		/*
++		 * Any connection that defines ->get_authorizer()
++		 * should also define ->verify_authorizer_reply().
++		 * See get_connect_authorizer().
++		 */
++		ret = con->ops->verify_authorizer_reply(con, 0);
++		if (ret < 0) {
++			con->error_msg = "bad authorize reply";
++			return ret;
++		}
++	}
++
+ 	switch (con->in_reply.tag) {
+ 	case CEPH_MSGR_TAG_FEATURES:
+ 		pr_err("%s%lld %s feature set mismatch,"
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 93612b2e3bbf..2a24197f3a45 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4020,7 +4020,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
+ 		NAPI_GRO_CB(skb)->same_flow = 0;
+ 		NAPI_GRO_CB(skb)->flush = 0;
+ 		NAPI_GRO_CB(skb)->free = 0;
+-		NAPI_GRO_CB(skb)->udp_mark = 0;
++		NAPI_GRO_CB(skb)->encap_mark = 0;
+ 
+ 		/* Setup for GRO checksum validation */
+ 		switch (skb->ip_summed) {
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 6cf020ea4f46..1a621e1f1864 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -1391,6 +1391,45 @@ out:
+ 	return pp;
+ }
+ 
++static struct sk_buff **ipip_gro_receive(struct sk_buff **head,
++					 struct sk_buff *skb)
++{
++	if (NAPI_GRO_CB(skb)->encap_mark) {
++		NAPI_GRO_CB(skb)->flush = 1;
++		return NULL;
++	}
++
++	NAPI_GRO_CB(skb)->encap_mark = 1;
++
++	return inet_gro_receive(head, skb);
++}
++
++#define SECONDS_PER_DAY	86400
++
++/* inet_current_timestamp - Return IP network timestamp
++ *
++ * Return milliseconds since midnight in network byte order.
++ */
++__be32 inet_current_timestamp(void)
++{
++	u32 secs;
++	u32 msecs;
++	struct timespec64 ts;
++
++	ktime_get_real_ts64(&ts);
++
++	/* Get secs since midnight. */
++	(void)div_u64_rem(ts.tv_sec, SECONDS_PER_DAY, &secs);
++	/* Convert to msecs. */
++	msecs = secs * MSEC_PER_SEC;
++	/* Convert nsec to msec. */
++	msecs += (u32)ts.tv_nsec / NSEC_PER_MSEC;
++
++	/* Convert to network byte order. */
++	return htons(msecs);
++}
++EXPORT_SYMBOL(inet_current_timestamp);
++
+ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+ {
+ 	if (sk->sk_family == AF_INET)
+@@ -1433,6 +1472,13 @@ out_unlock:
+ 	return err;
+ }
+ 
++static int ipip_gro_complete(struct sk_buff *skb, int nhoff)
++{
++	skb->encapsulation = 1;
++	skb_shinfo(skb)->gso_type |= SKB_GSO_IPIP;
++	return inet_gro_complete(skb, nhoff);
++}
++
+ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
+ 			 unsigned short type, unsigned char protocol,
+ 			 struct net *net)
+@@ -1649,8 +1695,8 @@ static struct packet_offload ip_packet_offload __read_mostly = {
+ static const struct net_offload ipip_offload = {
+ 	.callbacks = {
+ 		.gso_segment	= inet_gso_segment,
+-		.gro_receive	= inet_gro_receive,
+-		.gro_complete	= inet_gro_complete,
++		.gro_receive	= ipip_gro_receive,
++		.gro_complete	= ipip_gro_complete,
+ 	},
+ };
+ 
+diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
+index 8ce8e82d1abb..f7ae11d47e89 100644
+--- a/net/ipv4/fou.c
++++ b/net/ipv4/fou.c
+@@ -113,6 +113,14 @@ static struct sk_buff **fou_gro_receive(struct sk_buff **head,
+ 	u8 proto = NAPI_GRO_CB(skb)->proto;
+ 	const struct net_offload **offloads;
+ 
++	/* We can clear the encap_mark for FOU as we are essentially doing
++	 * one of two possible things.  We are either adding an L4 tunnel
++	 * header to the outer L3 tunnel header, or we are are simply
++	 * treating the GRE tunnel header as though it is a UDP protocol
++	 * specific header such as VXLAN or GENEVE.
++	 */
++	NAPI_GRO_CB(skb)->encap_mark = 0;
++
+ 	rcu_read_lock();
+ 	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
+ 	ops = rcu_dereference(offloads[proto]);
+@@ -217,6 +225,14 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
+ 
+ 	skb_gro_pull(skb, guehlen);
+ 
++	/* We can clear the encap_mark for GUE as we are essentially doing
++	 * one of two possible things.  We are either adding an L4 tunnel
++	 * header to the outer L3 tunnel header, or we are are simply
++	 * treating the GRE tunnel header as though it is a UDP protocol
++	 * specific header such as VXLAN or GENEVE.
++	 */
++	NAPI_GRO_CB(skb)->encap_mark = 0;
++
+ 	/* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
+ 	skb_gro_postpull_rcsum(skb, guehdr, guehlen);
+ 
+diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
+index abc50b41bc39..cc7b08232e92 100644
+--- a/net/ipv4/gre_offload.c
++++ b/net/ipv4/gre_offload.c
+@@ -128,6 +128,11 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
+ 	struct packet_offload *ptype;
+ 	__be16 type;
+ 
++	if (NAPI_GRO_CB(skb)->encap_mark)
++		goto out;
++
++	NAPI_GRO_CB(skb)->encap_mark = 1;
++
+ 	off = skb_gro_offset(skb);
+ 	hlen = off + sizeof(*greh);
+ 	greh = skb_gro_header_fast(skb, off);
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index 5882f584910e..2d6f89070373 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -902,7 +902,6 @@ static void icmp_echo(struct sk_buff *skb)
+  */
+ static void icmp_timestamp(struct sk_buff *skb)
+ {
+-	struct timespec tv;
+ 	struct icmp_bxm icmp_param;
+ 	/*
+ 	 *	Too short.
+@@ -913,9 +912,7 @@ static void icmp_timestamp(struct sk_buff *skb)
+ 	/*
+ 	 *	Fill in the current time as ms since midnight UT:
+ 	 */
+-	getnstimeofday(&tv);
+-	icmp_param.data.times[1] = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC +
+-					 tv.tv_nsec / NSEC_PER_MSEC);
++	icmp_param.data.times[1] = inet_current_timestamp();
+ 	icmp_param.data.times[2] = icmp_param.data.times[1];
+ 	if (skb_copy_bits(skb, 0, &icmp_param.data.times[0], 4))
+ 		BUG();
+diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
+index 5b3d91be2db0..0e4632e93809 100644
+--- a/net/ipv4/ip_options.c
++++ b/net/ipv4/ip_options.c
+@@ -58,10 +58,9 @@ void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
+ 		if (opt->ts_needaddr)
+ 			ip_rt_get_source(iph+opt->ts+iph[opt->ts+2]-9, skb, rt);
+ 		if (opt->ts_needtime) {
+-			struct timespec tv;
+ 			__be32 midtime;
+-			getnstimeofday(&tv);
+-			midtime = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC);
++
++			midtime = inet_current_timestamp();
+ 			memcpy(iph+opt->ts+iph[opt->ts+2]-5, &midtime, 4);
+ 		}
+ 		return;
+@@ -415,11 +414,10 @@ int ip_options_compile(struct net *net,
+ 					break;
+ 				}
+ 				if (timeptr) {
+-					struct timespec tv;
+-					u32  midtime;
+-					getnstimeofday(&tv);
+-					midtime = (tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC;
+-					put_unaligned_be32(midtime, timeptr);
++					__be32 midtime;
++
++					midtime = inet_current_timestamp();
++					memcpy(timeptr, &midtime, 4);
+ 					opt->is_changed = 1;
+ 				}
+ 			} else if ((optptr[3]&0xF) != IPOPT_TS_PRESPEC) {
+diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
+index 6480cea7aa53..e6d05ae1f22e 100644
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -266,14 +266,14 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
+ 	unsigned int off = skb_gro_offset(skb);
+ 	int flush = 1;
+ 
+-	if (NAPI_GRO_CB(skb)->udp_mark ||
++	if (NAPI_GRO_CB(skb)->encap_mark ||
+ 	    (skb->ip_summed != CHECKSUM_PARTIAL &&
+ 	     NAPI_GRO_CB(skb)->csum_cnt == 0 &&
+ 	     !NAPI_GRO_CB(skb)->csum_valid))
+ 		goto out;
+ 
+-	/* mark that this skb passed once through the udp gro layer */
+-	NAPI_GRO_CB(skb)->udp_mark = 1;
++	/* mark that this skb passed once through the tunnel gro layer */
++	NAPI_GRO_CB(skb)->encap_mark = 1;
+ 
+ 	rcu_read_lock();
+ 	uo_priv = rcu_dereference(udp_offload_base);
+diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
+index 01e12d0d8fcc..f291813c7c0c 100644
+--- a/net/ipv6/ip6_offload.c
++++ b/net/ipv6/ip6_offload.c
+@@ -258,6 +258,19 @@ out:
+ 	return pp;
+ }
+ 
++static struct sk_buff **sit_gro_receive(struct sk_buff **head,
++					struct sk_buff *skb)
++{
++	if (NAPI_GRO_CB(skb)->encap_mark) {
++		NAPI_GRO_CB(skb)->flush = 1;
++		return NULL;
++	}
++
++	NAPI_GRO_CB(skb)->encap_mark = 1;
++
++	return ipv6_gro_receive(head, skb);
++}
++
+ static int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
+ {
+ 	const struct net_offload *ops;
+diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
+index 53ed8d3f8897..81e29f02d28e 100644
+--- a/net/sunrpc/auth_gss/auth_gss.c
++++ b/net/sunrpc/auth_gss/auth_gss.c
+@@ -539,9 +539,13 @@ gss_setup_upcall(struct gss_auth *gss_auth, struct rpc_cred *cred)
+ 		return gss_new;
+ 	gss_msg = gss_add_msg(gss_new);
+ 	if (gss_msg == gss_new) {
+-		int res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg);
++		int res;
++		atomic_inc(&gss_msg->count);
++		res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg);
+ 		if (res) {
+ 			gss_unhash_msg(gss_new);
++			atomic_dec(&gss_msg->count);
++			gss_release_msg(gss_new);
+ 			gss_msg = ERR_PTR(res);
+ 		}
+ 	} else
+@@ -834,6 +838,7 @@ gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
+ 			warn_gssd();
+ 		gss_release_msg(gss_msg);
+ 	}
++	gss_release_msg(gss_msg);
+ }
+ 
+ static void gss_pipe_dentry_destroy(struct dentry *dir,
+diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c
+index 8275f0e55106..4b2f44c20caf 100644
+--- a/scripts/kconfig/nconf.gui.c
++++ b/scripts/kconfig/nconf.gui.c
+@@ -364,12 +364,14 @@ int dialog_inputbox(WINDOW *main_window,
+ 	WINDOW *prompt_win;
+ 	WINDOW *form_win;
+ 	PANEL *panel;
+-	int i, x, y;
++	int i, x, y, lines, columns, win_lines, win_cols;
+ 	int res = -1;
+ 	int cursor_position = strlen(init);
+ 	int cursor_form_win;
+ 	char *result = *resultp;
+ 
++	getmaxyx(stdscr, lines, columns);
++
+ 	if (strlen(init)+1 > *result_len) {
+ 		*result_len = strlen(init)+1;
+ 		*resultp = result = realloc(result, *result_len);
+@@ -386,14 +388,19 @@ int dialog_inputbox(WINDOW *main_window,
+ 	if (title)
+ 		prompt_width = max(prompt_width, strlen(title));
+ 
++	win_lines = min(prompt_lines+6, lines-2);
++	win_cols = min(prompt_width+7, columns-2);
++	prompt_lines = max(win_lines-6, 0);
++	prompt_width = max(win_cols-7, 0);
++
+ 	/* place dialog in middle of screen */
+-	y = (getmaxy(stdscr)-(prompt_lines+4))/2;
+-	x = (getmaxx(stdscr)-(prompt_width+4))/2;
++	y = (lines-win_lines)/2;
++	x = (columns-win_cols)/2;
+ 
+ 	strncpy(result, init, *result_len);
+ 
+ 	/* create the windows */
+-	win = newwin(prompt_lines+6, prompt_width+7, y, x);
++	win = newwin(win_lines, win_cols, y, x);
+ 	prompt_win = derwin(win, prompt_lines+1, prompt_width, 2, 2);
+ 	form_win = derwin(win, 1, prompt_width, prompt_lines+3, 2);
+ 	keypad(form_win, TRUE);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index ebb8feff0a84..0e4908b6163d 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5712,6 +5712,9 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ 		{0x12, 0x90a60180},
+ 		{0x14, 0x90170120},
+ 		{0x21, 0x02211030}),
++	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
++		{0x1b, 0x01011020},
++		{0x21, 0x02211010}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 		ALC256_STANDARD_PINS,
+ 		{0x13, 0x40000000}),
+diff --git a/sound/usb/hiface/pcm.c b/sound/usb/hiface/pcm.c
+index 2c44139b4041..33db205dd12b 100644
+--- a/sound/usb/hiface/pcm.c
++++ b/sound/usb/hiface/pcm.c
+@@ -445,6 +445,8 @@ static int hiface_pcm_prepare(struct snd_pcm_substream *alsa_sub)
+ 
+ 	mutex_lock(&rt->stream_mutex);
+ 
++	hiface_pcm_stream_stop(rt);
++
+ 	sub->dma_off = 0;
+ 	sub->period_off = 0;
+ 
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 0d7f1ced1d3b..1ceeb9e875a4 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -916,9 +916,10 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
+ 	case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */
+ 	case USB_ID(0x046d, 0x08ca): /* Logitech Quickcam Fusion */
+ 	case USB_ID(0x046d, 0x0991):
++	case USB_ID(0x046d, 0x09a2): /* QuickCam Communicate Deluxe/S7500 */
+ 	/* Most audio usb devices lie about volume resolution.
+ 	 * Most Logitech webcams have res = 384.
+-	 * Proboly there is some logitech magic behind this number --fishor
++	 * Probably there is some logitech magic behind this number --fishor
+ 	 */
+ 		if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
+ 			usb_audio_info(chip,

