From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:3.12 commit in: /
Date: Wed,  9 Mar 2016 13:50:27 +0000 (UTC)
Message-ID: <1457531419.db757024a135f0fa6729f44fbb65df6be9985e64.mpagano@gentoo>

commit:     db757024a135f0fa6729f44fbb65df6be9985e64
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Mar  9 13:50:19 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Mar  9 13:50:19 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=db757024

Linux patch 3.12.56

 0000_README              |    4 +
 1055_linux-3.12.56.patch | 4241 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4245 insertions(+)

diff --git a/0000_README b/0000_README
index 1c7c3c7..3d5d7bf 100644
--- a/0000_README
+++ b/0000_README
@@ -262,6 +262,10 @@ Patch:  1054_linux-3.12.55.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.55
 
+Patch:  1055_linux-3.12.56.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.56
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1055_linux-3.12.56.patch b/1055_linux-3.12.56.patch
new file mode 100644
index 0000000..840d33f
--- /dev/null
+++ b/1055_linux-3.12.56.patch
@@ -0,0 +1,4241 @@
+diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
+index 7d8dc93fe2eb..3e5b1b5466e5 100644
+--- a/Documentation/networking/ip-sysctl.txt
++++ b/Documentation/networking/ip-sysctl.txt
+@@ -1156,6 +1156,14 @@ accept_ra_defrtr - BOOLEAN
+ 	Functional default: enabled if accept_ra is enabled.
+ 			    disabled if accept_ra is disabled.
+ 
++accept_ra_min_hop_limit - INTEGER
++	Minimum hop limit value accepted in Router Advertisements.
++
++	Router Advertisements carrying a hop limit lower than this
++	value are ignored.
++
++	Default: 1
++
+ accept_ra_pinfo - BOOLEAN
+ 	Learn Prefix Information in Router Advertisement.
+ 
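
The new knob is per-interface, alongside the other accept_ra_* settings. A
rough userspace sketch of driving it follows; the sysctl path follows the
usual ipv6 conf layout, and the interface name "eth0" and the value 32 are
assumptions made purely for illustration:

    #include <stdio.h>

    int main(void)
    {
            /* Ignore Router Advertisements whose hop limit is below 32.
             * Interface name and threshold are example values only. */
            const char *path =
                "/proc/sys/net/ipv6/conf/eth0/accept_ra_min_hop_limit";
            FILE *f = fopen(path, "w");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            fprintf(f, "32\n");
            return fclose(f) ? 1 : 0;
    }
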
+diff --git a/Makefile b/Makefile
+index 417164d9cd46..34049410c565 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 55
++SUBLEVEL = 56
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arm/boot/dts/wm8650.dtsi b/arch/arm/boot/dts/wm8650.dtsi
+index 7525982262ac..2897c1ac47d8 100644
+--- a/arch/arm/boot/dts/wm8650.dtsi
++++ b/arch/arm/boot/dts/wm8650.dtsi
+@@ -187,6 +187,15 @@
+ 			interrupts = <43>;
+ 		};
+ 
++		sdhc@d800a000 {
++			compatible = "wm,wm8505-sdhc";
++			reg = <0xd800a000 0x400>;
++			interrupts = <20>, <21>;
++			clocks = <&clksdhc>;
++			bus-width = <4>;
++			sdon-inverted;
++		};
++
+ 		fb: fb@d8050800 {
+ 			compatible = "wm,wm8505-fb";
+ 			reg = <0xd8050800 0x200>;
+diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
+index d188c591f2d6..608f9390396e 100644
+--- a/arch/sparc/kernel/sys_sparc_64.c
++++ b/arch/sparc/kernel/sys_sparc_64.c
+@@ -411,7 +411,7 @@ out:
+ 
+ SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
+ {
+-	int ret;
++	long ret;
+ 
+ 	if (personality(current->personality) == PER_LINUX32 &&
+ 	    personality(personality) == PER_LINUX)
+diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
+index 337518c5042a..b412c62486f0 100644
+--- a/arch/um/os-Linux/start_up.c
++++ b/arch/um/os-Linux/start_up.c
+@@ -95,6 +95,8 @@ static int start_ptraced_child(void)
+ {
+ 	int pid, n, status;
+ 
++	fflush(stdout);
++
+ 	pid = fork();
+ 	if (pid == 0)
+ 		ptrace_child();
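
The fflush(stdout) matters because fork() duplicates the parent's stdio
buffers, so anything printed but not yet flushed before the fork is written
once by each process. UML's start_up.c is host userspace code, so the bug is
exactly the classic one; a minimal standalone demonstration:

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>
    #include <sys/wait.h>

    int main(void)
    {
            /* With stdout redirected to a file, stdio is fully buffered,
             * so this text still sits in the buffer when fork() copies it. */
            printf("buffered once, ");
            /* fflush(stdout); here would remove the duplication. */
            if (fork() == 0)
                    exit(0);        /* child's exit() flushes its copy */
            wait(NULL);
            return 0;               /* parent's flush writes it again */
    }

Run it with stdout redirected to a file (./demo > log) to force full
buffering; the text appears twice, and only once with the fflush() enabled.
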
+diff --git a/block/partitions/mac.c b/block/partitions/mac.c
+index 76d8ba6379a9..bd5b91465230 100644
+--- a/block/partitions/mac.c
++++ b/block/partitions/mac.c
+@@ -32,7 +32,7 @@ int mac_partition(struct parsed_partitions *state)
+ 	Sector sect;
+ 	unsigned char *data;
+ 	int slot, blocks_in_map;
+-	unsigned secsize;
++	unsigned secsize, datasize, partoffset;
+ #ifdef CONFIG_PPC_PMAC
+ 	int found_root = 0;
+ 	int found_root_goodness = 0;
+@@ -50,10 +50,14 @@ int mac_partition(struct parsed_partitions *state)
+ 	}
+ 	secsize = be16_to_cpu(md->block_size);
+ 	put_dev_sector(sect);
+-	data = read_part_sector(state, secsize/512, &sect);
++	datasize = round_down(secsize, 512);
++	data = read_part_sector(state, datasize / 512, &sect);
+ 	if (!data)
+ 		return -1;
+-	part = (struct mac_partition *) (data + secsize%512);
++	partoffset = secsize % 512;
++	if (partoffset + sizeof(*part) > datasize)
++		return -1;
++	part = (struct mac_partition *) (data + partoffset);
+ 	if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) {
+ 		put_dev_sector(sect);
+ 		return 0;		/* not a MacOS disk */
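
Two things guard the pointer math here: the device-reported block size is
rounded down to a whole 512-byte sector before choosing which sector to read,
and the partition record is rejected outright if it would extend past the
data actually read. Reduced to its essentials (a sketch, not the kernel
code):

    #include <stddef.h>
    #include <stdint.h>

    #define SECTOR_SIZE 512

    struct mac_part_rec { uint16_t signature; /* ... */ };

    /* Return a validated record pointer inside buf (buf_len bytes
     * actually read), or NULL if a device-supplied secsize would
     * make us read out of bounds. */
    static const struct mac_part_rec *
    part_at(const uint8_t *buf, size_t buf_len, size_t secsize)
    {
            size_t offset = secsize % SECTOR_SIZE;

            if (offset + sizeof(struct mac_part_rec) > buf_len)
                    return NULL;
            return (const struct mac_part_rec *)(buf + offset);
    }

    int main(void)
    {
            uint8_t buf[SECTOR_SIZE] = { 0 };

            /* secsize 1026 puts the record at offset 2: fits, accepted */
            return part_at(buf, sizeof(buf), 1026) ? 0 : 1;
    }
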
+diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
+index 136803c47cdb..96e5ed188636 100644
+--- a/drivers/ata/libata-sff.c
++++ b/drivers/ata/libata-sff.c
+@@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
+ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
+ {
+ 	struct ata_port *ap = qc->ap;
+-	unsigned long flags;
+ 
+ 	if (ap->ops->error_handler) {
+ 		if (in_wq) {
+-			spin_lock_irqsave(ap->lock, flags);
+-
+ 			/* EH might have kicked in while host lock is
+ 			 * released.
+ 			 */
+@@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
+ 				} else
+ 					ata_port_freeze(ap);
+ 			}
+-
+-			spin_unlock_irqrestore(ap->lock, flags);
+ 		} else {
+ 			if (likely(!(qc->err_mask & AC_ERR_HSM)))
+ 				ata_qc_complete(qc);
+@@ -1024,10 +1019,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
+ 		}
+ 	} else {
+ 		if (in_wq) {
+-			spin_lock_irqsave(ap->lock, flags);
+ 			ata_sff_irq_on(ap);
+ 			ata_qc_complete(qc);
+-			spin_unlock_irqrestore(ap->lock, flags);
+ 		} else
+ 			ata_qc_complete(qc);
+ 	}
+@@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
+ {
+ 	struct ata_link *link = qc->dev->link;
+ 	struct ata_eh_info *ehi = &link->eh_info;
+-	unsigned long flags = 0;
+ 	int poll_next;
+ 
++	lockdep_assert_held(ap->lock);
++
+ 	WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
+ 
+ 	/* Make sure ata_sff_qc_issue() does not throw things
+@@ -1112,14 +1106,6 @@ fsm_start:
+ 			}
+ 		}
+ 
+-		/* Send the CDB (atapi) or the first data block (ata pio out).
+-		 * During the state transition, interrupt handler shouldn't
+-		 * be invoked before the data transfer is complete and
+-		 * hsm_task_state is changed. Hence, the following locking.
+-		 */
+-		if (in_wq)
+-			spin_lock_irqsave(ap->lock, flags);
+-
+ 		if (qc->tf.protocol == ATA_PROT_PIO) {
+ 			/* PIO data out protocol.
+ 			 * send first data block.
+@@ -1135,9 +1121,6 @@ fsm_start:
+ 			/* send CDB */
+ 			atapi_send_cdb(ap, qc);
+ 
+-		if (in_wq)
+-			spin_unlock_irqrestore(ap->lock, flags);
+-
+ 		/* if polling, ata_sff_pio_task() handles the rest.
+ 		 * otherwise, interrupt handler takes over from here.
+ 		 */
+@@ -1361,12 +1344,14 @@ static void ata_sff_pio_task(struct work_struct *work)
+ 	u8 status;
+ 	int poll_next;
+ 
++	spin_lock_irq(ap->lock);
++
+ 	BUG_ON(ap->sff_pio_task_link == NULL);
+ 	/* qc can be NULL if timeout occurred */
+ 	qc = ata_qc_from_tag(ap, link->active_tag);
+ 	if (!qc) {
+ 		ap->sff_pio_task_link = NULL;
+-		return;
++		goto out_unlock;
+ 	}
+ 
+ fsm_start:
+@@ -1381,11 +1366,14 @@ fsm_start:
+ 	 */
+ 	status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
+ 	if (status & ATA_BUSY) {
++		spin_unlock_irq(ap->lock);
+ 		ata_msleep(ap, 2);
++		spin_lock_irq(ap->lock);
++
+ 		status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
+ 		if (status & ATA_BUSY) {
+ 			ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
+-			return;
++			goto out_unlock;
+ 		}
+ 	}
+ 
+@@ -1402,6 +1390,8 @@ fsm_start:
+ 	 */
+ 	if (poll_next)
+ 		goto fsm_start;
++out_unlock:
++	spin_unlock_irq(ap->lock);
+ }
+ 
+ /**
+diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
+index b7695e804635..fa94fba8fa21 100644
+--- a/drivers/ata/sata_sil.c
++++ b/drivers/ata/sata_sil.c
+@@ -631,6 +631,9 @@ static void sil_dev_config(struct ata_device *dev)
+ 	unsigned int n, quirks = 0;
+ 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
+ 
++	/* This controller doesn't support trim */
++	dev->horkage |= ATA_HORKAGE_NOTRIM;
++
+ 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
+ 
+ 	for (n = 0; sil_blacklist[n].product; n++)
+diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
+index 64f553f04fa4..5874ebf9dced 100644
+--- a/drivers/clocksource/vt8500_timer.c
++++ b/drivers/clocksource/vt8500_timer.c
+@@ -50,6 +50,8 @@
+ 
+ #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
+ 
++#define MIN_OSCR_DELTA		16
++
+ static void __iomem *regbase;
+ 
+ static cycle_t vt8500_timer_read(struct clocksource *cs)
+@@ -80,7 +82,7 @@ static int vt8500_timer_set_next_event(unsigned long cycles,
+ 		cpu_relax();
+ 	writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL);
+ 
+-	if ((signed)(alarm - clocksource.read(&clocksource)) <= 16)
++	if ((signed)(alarm - clocksource.read(&clocksource)) <= MIN_OSCR_DELTA)
+ 		return -ETIME;
+ 
+ 	writel(1, regbase + TIMER_IER_VAL);
+@@ -162,7 +164,7 @@ static void __init vt8500_timer_init(struct device_node *np)
+ 		pr_err("%s: setup_irq failed for %s\n", __func__,
+ 							clockevent.name);
+ 	clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ,
+-					4, 0xf0000000);
++					MIN_OSCR_DELTA * 2, 0xf0000000);
+ }
+ 
+ CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init);
+diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
+index c128aab076ab..fe083015a354 100644
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -180,7 +180,7 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
+ 
+ /*----------------------------------------------------------------------*/
+ 
+-static inline unsigned int dwc_fast_fls(unsigned long long v)
++static inline unsigned int dwc_fast_ffs(unsigned long long v)
+ {
+ 	/*
+ 	 * We can be a lot more clever here, but this should take care
+@@ -744,7 +744,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ 			   dw->data_width[dwc->dst_master]);
+ 
+ 	src_width = dst_width = min_t(unsigned int, data_width,
+-				      dwc_fast_fls(src | dest | len));
++				      dwc_fast_ffs(src | dest | len));
+ 
+ 	ctllo = DWC_DEFAULT_CTLLO(chan)
+ 			| DWC_CTLL_DST_WIDTH(dst_width)
+@@ -823,7 +823,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ 
+ 	switch (direction) {
+ 	case DMA_MEM_TO_DEV:
+-		reg_width = __fls(sconfig->dst_addr_width);
++		reg_width = __ffs(sconfig->dst_addr_width);
+ 		reg = sconfig->dst_addr;
+ 		ctllo = (DWC_DEFAULT_CTLLO(chan)
+ 				| DWC_CTLL_DST_WIDTH(reg_width)
+@@ -843,7 +843,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ 			len = sg_dma_len(sg);
+ 
+ 			mem_width = min_t(unsigned int,
+-					  data_width, dwc_fast_fls(mem | len));
++					  data_width, dwc_fast_ffs(mem | len));
+ 
+ slave_sg_todev_fill_desc:
+ 			desc = dwc_desc_get(dwc);
+@@ -883,7 +883,7 @@ slave_sg_todev_fill_desc:
+ 		}
+ 		break;
+ 	case DMA_DEV_TO_MEM:
+-		reg_width = __fls(sconfig->src_addr_width);
++		reg_width = __ffs(sconfig->src_addr_width);
+ 		reg = sconfig->src_addr;
+ 		ctllo = (DWC_DEFAULT_CTLLO(chan)
+ 				| DWC_CTLL_SRC_WIDTH(reg_width)
+@@ -903,7 +903,7 @@ slave_sg_todev_fill_desc:
+ 			len = sg_dma_len(sg);
+ 
+ 			mem_width = min_t(unsigned int,
+-					  data_width, dwc_fast_fls(mem | len));
++					  data_width, dwc_fast_ffs(mem | len));
+ 
+ slave_sg_fromdev_fill_desc:
+ 			desc = dwc_desc_get(dwc);
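
The renames are the point of the hunk: the usable transfer width is bounded
by the *lowest* set bit of src | dest | len (their common alignment), which
is __ffs(), not the highest bit that __fls() reports. On a buffer at 0x1004,
for instance, __fls() sees bit 12 and would sanction accesses far wider than
the 4-byte alignment allows. A userspace illustration using GCC builtins in
place of the kernel helpers (64-bit build assumed):

    #include <stdio.h>

    /* __ffs(x): index of lowest set bit; __fls(x): index of highest. */
    #define my_ffs(x) ((unsigned)__builtin_ctzl(x))
    #define my_fls(x) ((unsigned)(63 - __builtin_clzl(x)))

    int main(void)
    {
            unsigned long src = 0x1004, dst = 0x2008, len = 0x100;
            unsigned long v = src | dst | len;

            /* The common alignment permits 1 << 2 = 4-byte accesses... */
            printf("max safe width: %lu bytes\n", 1UL << my_ffs(v));
            /* ...while the highest set bit wrongly suggests 8192 bytes. */
            printf("fls would claim: %lu bytes\n", 1UL << my_fls(v));
            return 0;
    }
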
+diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
+index 211021dfec73..46ef63d05584 100644
+--- a/drivers/edac/edac_device.c
++++ b/drivers/edac/edac_device.c
+@@ -435,13 +435,10 @@ void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
+  */
+ void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
+ {
+-	int status;
++	edac_dev->op_state = OP_OFFLINE;
+ 
+-	status = cancel_delayed_work(&edac_dev->work);
+-	if (status == 0) {
+-		/* workq instance might be running, wait for it */
+-		flush_workqueue(edac_workqueue);
+-	}
++	cancel_delayed_work_sync(&edac_dev->work);
++	flush_workqueue(edac_workqueue);
+ }
+ 
+ /*
+diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
+index a9d98cdd11f4..f1f298b3ff16 100644
+--- a/drivers/edac/edac_mc.c
++++ b/drivers/edac/edac_mc.c
+@@ -584,18 +584,10 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec,
+  */
+ static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
+ {
+-	int status;
+-
+-	if (mci->op_state != OP_RUNNING_POLL)
+-		return;
+-
+-	status = cancel_delayed_work(&mci->work);
+-	if (status == 0) {
+-		edac_dbg(0, "not canceled, flush the queue\n");
++	mci->op_state = OP_OFFLINE;
+ 
+-		/* workq instance might be running, wait for it */
+-		flush_workqueue(edac_workqueue);
+-	}
++	cancel_delayed_work_sync(&mci->work);
++	flush_workqueue(edac_workqueue);
+ }
+ 
+ /*
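
Both EDAC teardown paths get the same treatment: mark the instance
OP_OFFLINE first, then use cancel_delayed_work_sync() so a work item that is
already running (and that re-arms itself while polling) is waited for rather
than left racing with the free. A userspace analogue of that ordering, with
a pthread standing in for the self-rearming work item (build with -pthread):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Stand-in for a self-rearming delayed work item: it keeps
     * "requeueing" itself until op_state goes offline. */
    static atomic_int op_state = 1;  /* 1 = RUNNING_POLL, 0 = OFFLINE */

    static void *poller(void *arg)
    {
            (void)arg;
            while (atomic_load(&op_state))  /* the "requeue" check */
                    usleep(1000);           /* poll hardware ... */
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, poller, NULL);
            usleep(10000);

            /* Teardown, in the same order as the patch: flip the state
             * first, then wait synchronously for the worker, mirroring
             * op_state = OP_OFFLINE; cancel_delayed_work_sync(); */
            atomic_store(&op_state, 0);
            pthread_join(t, NULL);
            puts("poller fully stopped; now safe to free its resources");
            return 0;
    }
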
+diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
+index 66f2ccfa5665..e5fde4382552 100644
+--- a/drivers/edac/edac_mc_sysfs.c
++++ b/drivers/edac/edac_mc_sysfs.c
+@@ -973,21 +973,26 @@ nomem:
+  */
+ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
+ {
++	char *name;
+ 	int i, err;
+ 
+ 	/*
+ 	 * The memory controller needs its own bus, in order to avoid
+ 	 * namespace conflicts at /sys/bus/edac.
+ 	 */
+-	mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
+-	if (!mci->bus->name)
++	name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
++	if (!name)
+ 		return -ENOMEM;
+ 
++	mci->bus->name = name;
++
+ 	edac_dbg(0, "creating bus %s\n", mci->bus->name);
+ 
+ 	err = bus_register(mci->bus);
+-	if (err < 0)
++	if (err < 0) {
++		kfree(name);
+ 		return err;
++	}
+ 
+ 	/* get the /sys/devices/system/edac subsys reference */
+ 	mci->dev.type = &mci_attr_type;
+@@ -1071,7 +1076,8 @@ fail:
+ fail2:
+ 	device_unregister(&mci->dev);
+ 	bus_unregister(mci->bus);
+-	kfree(mci->bus->name);
++	kfree(name);
++
+ 	return err;
+ }
+ 
+@@ -1102,10 +1108,12 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
+ 
+ void edac_unregister_sysfs(struct mem_ctl_info *mci)
+ {
++	const char *name = mci->bus->name;
++
+ 	edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
+ 	device_unregister(&mci->dev);
+ 	bus_unregister(mci->bus);
+-	kfree(mci->bus->name);
++	kfree(name);
+ }
+ 
+ static void mc_attr_release(struct device *dev)
+diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
+index dd370f92ace3..e1e6d3653e03 100644
+--- a/drivers/edac/edac_pci.c
++++ b/drivers/edac/edac_pci.c
+@@ -274,13 +274,12 @@ static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci,
+  */
+ static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci)
+ {
+-	int status;
+-
+ 	edac_dbg(0, "\n");
+ 
+-	status = cancel_delayed_work(&pci->work);
+-	if (status == 0)
+-		flush_workqueue(edac_workqueue);
++	pci->op_state = OP_OFFLINE;
++
++	cancel_delayed_work_sync(&pci->work);
++	flush_workqueue(edac_workqueue);
+ }
+ 
+ /*
+diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
+index 8492b68e873c..df2fa469e37c 100644
+--- a/drivers/gpu/drm/ast/ast_drv.h
++++ b/drivers/gpu/drm/ast/ast_drv.h
+@@ -296,6 +296,7 @@ int ast_framebuffer_init(struct drm_device *dev,
+ int ast_fbdev_init(struct drm_device *dev);
+ void ast_fbdev_fini(struct drm_device *dev);
+ void ast_fbdev_set_suspend(struct drm_device *dev, int state);
++void ast_fbdev_set_base(struct ast_private *ast, unsigned long gpu_addr);
+ 
+ struct ast_bo {
+ 	struct ttm_buffer_object bo;
+diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
+index a28640f47c27..b55b6b1c9fe2 100644
+--- a/drivers/gpu/drm/ast/ast_fb.c
++++ b/drivers/gpu/drm/ast/ast_fb.c
+@@ -367,3 +367,10 @@ void ast_fbdev_set_suspend(struct drm_device *dev, int state)
+ 
+ 	fb_set_suspend(ast->fbdev->helper.fbdev, state);
+ }
++
++void ast_fbdev_set_base(struct ast_private *ast, unsigned long gpu_addr)
++{
++	ast->fbdev->helper.fbdev->fix.smem_start =
++		ast->fbdev->helper.fbdev->apertures->ranges[0].base + gpu_addr;
++	ast->fbdev->helper.fbdev->fix.smem_len = ast->vram_size - gpu_addr;
++}
+diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
+index d57a38d1ca69..48f7ad1497c2 100644
+--- a/drivers/gpu/drm/ast/ast_main.c
++++ b/drivers/gpu/drm/ast/ast_main.c
+@@ -359,6 +359,7 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
+ 	dev->mode_config.min_height = 0;
+ 	dev->mode_config.preferred_depth = 24;
+ 	dev->mode_config.prefer_shadow = 1;
++	dev->mode_config.fb_base = pci_resource_start(ast->dev->pdev, 0);
+ 
+ 	if (ast->chip == AST2100 ||
+ 	    ast->chip == AST2200 ||
+diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
+index e8f6418b6dec..f3a54ad77e3f 100644
+--- a/drivers/gpu/drm/ast/ast_mode.c
++++ b/drivers/gpu/drm/ast/ast_mode.c
+@@ -509,6 +509,8 @@ static int ast_crtc_do_set_base(struct drm_crtc *crtc,
+ 		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ 		if (ret)
+ 			DRM_ERROR("failed to kmap fbcon\n");
++		else
++			ast_fbdev_set_base(ast, gpu_addr);
+ 	}
+ 	ast_bo_unreserve(bo);
+ 
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 37a9d3c89feb..af46a33d8715 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -8200,11 +8200,21 @@ connected_sink_compute_bpp(struct intel_connector * connector,
+ 		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
+ 	}
+ 
+-	/* Clamp bpp to 8 on screens without EDID 1.4 */
+-	if (connector->base.display_info.bpc == 0 && bpp > 24) {
+-		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
+-			      bpp);
+-		pipe_config->pipe_bpp = 24;
++	/* Clamp bpp to default limit on screens without EDID 1.4 */
++	if (connector->base.display_info.bpc == 0) {
++		int type = connector->base.connector_type;
++		int clamp_bpp = 24;
++
++		/* Fall back to 18 bpp when DP sink capability is unknown. */
++		if (type == DRM_MODE_CONNECTOR_DisplayPort ||
++		    type == DRM_MODE_CONNECTOR_eDP)
++			clamp_bpp = 18;
++
++		if (bpp > clamp_bpp) {
++			DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
++				      bpp, clamp_bpp);
++			pipe_config->pipe_bpp = clamp_bpp;
++		}
+ 	}
+ }
+ 
+diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
+index 729debf83fa3..94008582b5e0 100644
+--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
++++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
+@@ -168,7 +168,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
+ 		       cmd->command_size))
+ 		return -EFAULT;
+ 
+-	reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL);
++	reloc_info = kmalloc_array(cmd->relocs_num,
++				   sizeof(struct qxl_reloc_info), GFP_KERNEL);
+ 	if (!reloc_info)
+ 		return -ENOMEM;
+ 
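
kmalloc_array() checks the n * size multiplication for overflow before
allocating; with the open-coded multiply, a crafted relocs_num from the
ioctl could wrap and yield an undersized buffer. The same guard is easy to
express in plain C (sketch):

    #include <stdint.h>
    #include <stdlib.h>

    /* Overflow-checked array allocation in the spirit of
     * kmalloc_array()/calloc(): refuse n * size if it would wrap,
     * instead of returning a buffer smaller than the caller thinks. */
    static void *alloc_array(size_t n, size_t size)
    {
            if (size && n > SIZE_MAX / size)
                    return NULL;
            return malloc(n * size);
    }

    int main(void)
    {
            /* A count large enough to wrap the multiplication is
             * rejected rather than silently truncated. */
            void *p = alloc_array(SIZE_MAX / 2, 16);

            free(p);        /* p is NULL here; free(NULL) is a no-op */
            return 0;
    }

This is the same reasoning behind preferring calloc() over malloc(n * size)
for attacker-influenced counts in userspace.
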
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index 0f538a442abf..1c71ff82f302 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -453,7 +453,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
+ 	}
+ 
+ 	/* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
+-	if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
++	if (((dev->pdev->device == 0x9802) ||
++	     (dev->pdev->device == 0x9805) ||
++	     (dev->pdev->device == 0x9806)) &&
+ 	    (dev->pdev->subsystem_vendor == 0x1734) &&
+ 	    (dev->pdev->subsystem_device == 0x11bd)) {
+ 		if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
+@@ -464,14 +466,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
+ 		}
+ 	}
+ 
+-	/* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */
+-	if ((dev->pdev->device == 0x9805) &&
+-	    (dev->pdev->subsystem_vendor == 0x1734) &&
+-	    (dev->pdev->subsystem_device == 0x11bd)) {
+-		if (*connector_type == DRM_MODE_CONNECTOR_VGA)
+-			return false;
+-	}
+-
+ 	return true;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
+index 10fc97749a81..0526eca2402c 100644
+--- a/drivers/gpu/drm/radeon/radeon_pm.c
++++ b/drivers/gpu/drm/radeon/radeon_pm.c
+@@ -896,8 +896,6 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
+ 
+ 	/* update display watermarks based on new power state */
+ 	radeon_bandwidth_update(rdev);
+-	/* update displays */
+-	radeon_dpm_display_configuration_changed(rdev);
+ 
+ 	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
+ 	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
+@@ -917,6 +915,9 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
+ 
+ 	radeon_dpm_post_set_power_state(rdev);
+ 
++	/* update displays */
++	radeon_dpm_display_configuration_changed(rdev);
++
+ 	if (rdev->asic->dpm.force_performance_level) {
+ 		if (rdev->pm.dpm.thermal_active)
+ 			/* force low perf level for thermal */
+diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
+index f0bac68254b7..bb166849aa6e 100644
+--- a/drivers/gpu/drm/radeon/radeon_sa.c
++++ b/drivers/gpu/drm/radeon/radeon_sa.c
+@@ -349,8 +349,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
+ 			/* see if we can skip over some allocations */
+ 		} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
+ 
++		for (i = 0; i < RADEON_NUM_RINGS; ++i)
++			radeon_fence_ref(fences[i]);
++
+ 		spin_unlock(&sa_manager->wq.lock);
+ 		r = radeon_fence_wait_any(rdev, fences, false);
++		for (i = 0; i < RADEON_NUM_RINGS; ++i)
++			radeon_fence_unref(&fences[i]);
+ 		spin_lock(&sa_manager->wq.lock);
+ 		/* if we have nothing to wait for block */
+ 		if (r == -ENOENT && block) {
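
The added ref/unref pair is what makes dropping the spinlock safe:
radeon_fence_wait_any() can sleep, and without a reference another thread
could free a fence that the local fences[] array still points at. It is the
usual "take a reference under the lock, sleep, drop it" pattern; a tiny
refcount sketch:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct fence { atomic_int refcount; /* ... */ };

    static struct fence *fence_ref(struct fence *f)
    {
            if (f)
                    atomic_fetch_add(&f->refcount, 1);
            return f;
    }

    static void fence_unref(struct fence **f)
    {
            if (*f && atomic_fetch_sub(&(*f)->refcount, 1) == 1)
                    free(*f);       /* dropped the last reference */
            *f = NULL;
    }

    /* Shape of the fixed path (locking elided): take refs on all
     * fences while still holding the lock, unlock, sleep in the
     * wait, then drop the refs. */
    int main(void)
    {
            struct fence *f = calloc(1, sizeof(*f));
            struct fence *waiter;

            if (!f)
                    return 1;
            atomic_store(&f->refcount, 1);  /* owner's reference */
            waiter = fence_ref(f);          /* taken "under the lock" */
            fence_unref(&f);                /* owner may drop its ref... */
            fence_unref(&waiter);           /* ...object lives until here */
            return 0;
    }
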
+diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
+index 02d3c3820803..f5931e5f44fd 100644
+--- a/drivers/gpu/drm/radeon/radeon_ttm.c
++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
+@@ -621,7 +621,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
+ 						       0, PAGE_SIZE,
+ 						       PCI_DMA_BIDIRECTIONAL);
+ 		if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
+-			while (--i) {
++			while (i--) {
+ 				pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
+ 					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ 				gtt->ttm.dma_address[i] = 0;
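
The one-character change fixes the unwind path: when mapping page i fails,
pages 0..i-1 must be unmapped. `while (--i)` never visits index 0 (and, with
an unsigned counter, underflows if the very first mapping fails), whereas
`while (i--)` visits exactly the indices that were mapped. Side by side:

    #include <stdio.h>

    int main(void)
    {
            int i = 3;      /* mapping of index 3 failed */
            int j;

            printf("while (--i):");
            j = i;
            while (--j)
                    printf(" %d", j);       /* 2 1: misses index 0 */

            printf("\nwhile (i--):");
            j = i;
            while (j--)
                    printf(" %d", j);       /* 2 1 0: correct unwind */
            printf("\n");
            return 0;
    }
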
+diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
+index 890cf1710253..7eda43c4b3ec 100644
+--- a/drivers/gpu/drm/radeon/rv770_dpm.c
++++ b/drivers/gpu/drm/radeon/rv770_dpm.c
+@@ -1415,7 +1415,7 @@ int rv770_resume_smc(struct radeon_device *rdev)
+ int rv770_set_sw_state(struct radeon_device *rdev)
+ {
+ 	if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToSwState) != PPSMC_Result_OK)
+-		return -EINVAL;
++		DRM_ERROR("rv770_set_sw_state failed\n");
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+index 59cd2baf6dc0..5214d65ebe65 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -25,6 +25,7 @@
+  *
+  **************************************************************************/
+ #include <linux/module.h>
++#include <linux/console.h>
+ 
+ #include <drm/drmP.h>
+ #include "vmwgfx_drv.h"
+@@ -1175,6 +1176,12 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ static int __init vmwgfx_init(void)
+ {
+ 	int ret;
++
++#ifdef CONFIG_VGA_CONSOLE
++	if (vgacon_text_force())
++		return -EINVAL;
++#endif
++
+ 	ret = drm_pci_init(&driver, &vmw_pci_driver);
+ 	if (ret)
+ 		DRM_ERROR("Failed initializing DRM.\n");
+diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
+index af0259708358..bbb554d586d4 100644
+--- a/drivers/gpu/vga/vgaarb.c
++++ b/drivers/gpu/vga/vgaarb.c
+@@ -392,8 +392,10 @@ int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
+ 		set_current_state(interruptible ?
+ 				  TASK_INTERRUPTIBLE :
+ 				  TASK_UNINTERRUPTIBLE);
+-		if (signal_pending(current)) {
+-			rc = -EINTR;
++		if (interruptible && signal_pending(current)) {
++			__set_current_state(TASK_RUNNING);
++			remove_wait_queue(&vga_wait_queue, &wait);
++			rc = -ERESTARTSYS;
+ 			break;
+ 		}
+ 		schedule();
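
Two fixes in one hunk: a task that asked for an *uninterruptible* wait must
not abort on a pending signal, and an interruptible waiter that does abort
has to restore TASK_RUNNING, leave the wait queue, and return -ERESTARTSYS
so the syscall can be transparently restarted instead of surfacing a
spurious -EINTR. The userspace mirror of that restart contract is the
familiar EINTR retry loop:

    #include <errno.h>
    #include <unistd.h>

    /* A well-behaved caller restarts a syscall interrupted by a
     * signal rather than treating it as a hard failure; in-kernel,
     * -ERESTARTSYS makes that restart automatic and invisible. */
    static ssize_t read_retry(int fd, void *buf, size_t len)
    {
            ssize_t n;

            do {
                    n = read(fd, buf, len);
            } while (n < 0 && errno == EINTR);
            return n;
    }

    int main(void)
    {
            char buf[64];

            return read_retry(0, buf, sizeof(buf)) < 0 ? 1 : 0;
    }
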
+diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
+index 126516414c11..44223f5d92d8 100644
+--- a/drivers/hwmon/ads1015.c
++++ b/drivers/hwmon/ads1015.c
+@@ -126,7 +126,7 @@ static int ads1015_reg_to_mv(struct i2c_client *client, unsigned int channel,
+ 	struct ads1015_data *data = i2c_get_clientdata(client);
+ 	unsigned int pga = data->channel_data[channel].pga;
+ 	int fullscale = fullscale_table[pga];
+-	const unsigned mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
++	const int mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
+ 
+ 	return DIV_ROUND_CLOSEST(reg * fullscale, mask);
+ }
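
The type change matters because reg can be negative (the ADS1115 returns
signed samples): with an unsigned divisor, C's usual arithmetic conversions
turn the negative product into a huge unsigned value before the division.
The kernel path goes through DIV_ROUND_CLOSEST(), but plain division already
shows the promotion bug:

    #include <stdio.h>

    int main(void)
    {
            int reg = -100, fullscale = 2048;
            const unsigned umask = 0x7fff;  /* old, buggy type */
            const int smask = 0x7fff;       /* patched type */

            /* The negative product is converted to unsigned before
             * dividing, yielding 131069 instead of -6: */
            printf("unsigned mask: %d\n", (int)(reg * fullscale / umask));
            printf("signed mask:   %d\n", reg * fullscale / smask);
            return 0;
    }
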
+diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
+index cb78b1e9bcd9..f504ba73e5dc 100644
+--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
++++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
+@@ -149,7 +149,7 @@ static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_en
+ 	error = l2t_send(tdev, skb, l2e);
+ 	if (error < 0)
+ 		kfree_skb(skb);
+-	return error;
++	return error < 0 ? error : 0;
+ }
+ 
+ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
+@@ -165,7 +165,7 @@ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
+ 	error = cxgb3_ofld_send(tdev, skb);
+ 	if (error < 0)
+ 		kfree_skb(skb);
+-	return error;
++	return error < 0 ? error : 0;
+ }
+ 
+ static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
+diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
+index dabb697b1c2a..48ba1c3e945a 100644
+--- a/drivers/infiniband/hw/qib/qib_verbs_mcast.c
++++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
+@@ -286,15 +286,13 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ 	struct qib_ibdev *dev = to_idev(ibqp->device);
+ 	struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
+ 	struct qib_mcast *mcast = NULL;
+-	struct qib_mcast_qp *p, *tmp;
++	struct qib_mcast_qp *p, *tmp, *delp = NULL;
+ 	struct rb_node *n;
+ 	int last = 0;
+ 	int ret;
+ 
+-	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
+-		ret = -EINVAL;
+-		goto bail;
+-	}
++	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
++		return -EINVAL;
+ 
+ 	spin_lock_irq(&ibp->lock);
+ 
+@@ -303,8 +301,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ 	while (1) {
+ 		if (n == NULL) {
+ 			spin_unlock_irq(&ibp->lock);
+-			ret = -EINVAL;
+-			goto bail;
++			return -EINVAL;
+ 		}
+ 
+ 		mcast = rb_entry(n, struct qib_mcast, rb_node);
+@@ -328,6 +325,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ 		 */
+ 		list_del_rcu(&p->list);
+ 		mcast->n_attached--;
++		delp = p;
+ 
+ 		/* If this was the last attached QP, remove the GID too. */
+ 		if (list_empty(&mcast->qp_list)) {
+@@ -338,15 +336,16 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ 	}
+ 
+ 	spin_unlock_irq(&ibp->lock);
++	/* QP not attached */
++	if (!delp)
++		return -EINVAL;
++	/*
++	 * Wait for any list walkers to finish before freeing the
++	 * list element.
++	 */
++	wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
++	qib_mcast_qp_free(delp);
+ 
+-	if (p) {
+-		/*
+-		 * Wait for any list walkers to finish before freeing the
+-		 * list element.
+-		 */
+-		wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
+-		qib_mcast_qp_free(p);
+-	}
+ 	if (last) {
+ 		atomic_dec(&mcast->refcount);
+ 		wait_event(mcast->wait, !atomic_read(&mcast->refcount));
+@@ -355,11 +354,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ 		dev->n_mcast_grps_allocated--;
+ 		spin_unlock_irq(&dev->n_mcast_grps_lock);
+ 	}
+-
+-	ret = 0;
+-
+-bail:
+-	return ret;
++	return 0;
+ }
+ 
+ int qib_mcast_tree_empty(struct qib_ibport *ibp)
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index f5004c5c4b96..70d396ee69e2 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -841,8 +841,11 @@ void bch_cached_dev_run(struct cached_dev *dc)
+ 	buf[SB_LABEL_SIZE] = '\0';
+ 	env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);
+ 
+-	if (atomic_xchg(&dc->running, 1))
++	if (atomic_xchg(&dc->running, 1)) {
++		kfree(env[1]);
++		kfree(env[2]);
+ 		return;
++	}
+ 
+ 	if (!d->c &&
+ 	    BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
+@@ -2040,8 +2043,10 @@ static int __init bcache_init(void)
+ 	closure_debug_init();
+ 
+ 	bcache_major = register_blkdev(0, "bcache");
+-	if (bcache_major < 0)
++	if (bcache_major < 0) {
++		unregister_reboot_notifier(&reboot);
+ 		return bcache_major;
++	}
+ 
+ 	if (!(bcache_wq = create_workqueue("bcache")) ||
+ 	    !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
+diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
+index 0b2536247cf5..84e27708ad97 100644
+--- a/drivers/md/dm-exception-store.h
++++ b/drivers/md/dm-exception-store.h
+@@ -70,7 +70,7 @@ struct dm_exception_store_type {
+ 	 * Update the metadata with this exception.
+ 	 */
+ 	void (*commit_exception) (struct dm_exception_store *store,
+-				  struct dm_exception *e,
++				  struct dm_exception *e, int valid,
+ 				  void (*callback) (void *, int success),
+ 				  void *callback_context);
+ 
+diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
+index 2d2b1b7588d7..8f6d3ea55401 100644
+--- a/drivers/md/dm-snap-persistent.c
++++ b/drivers/md/dm-snap-persistent.c
+@@ -646,7 +646,7 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
+ }
+ 
+ static void persistent_commit_exception(struct dm_exception_store *store,
+-					struct dm_exception *e,
++					struct dm_exception *e, int valid,
+ 					void (*callback) (void *, int success),
+ 					void *callback_context)
+ {
+@@ -655,6 +655,9 @@ static void persistent_commit_exception(struct dm_exception_store *store,
+ 	struct core_exception ce;
+ 	struct commit_callback *cb;
+ 
++	if (!valid)
++		ps->valid = 0;
++
+ 	ce.old_chunk = e->old_chunk;
+ 	ce.new_chunk = e->new_chunk;
+ 	write_exception(ps, ps->current_committed++, &ce);
+diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c
+index 1ce9a2586e41..31439d53cf7e 100644
+--- a/drivers/md/dm-snap-transient.c
++++ b/drivers/md/dm-snap-transient.c
+@@ -52,12 +52,12 @@ static int transient_prepare_exception(struct dm_exception_store *store,
+ }
+ 
+ static void transient_commit_exception(struct dm_exception_store *store,
+-				       struct dm_exception *e,
++				       struct dm_exception *e, int valid,
+ 				       void (*callback) (void *, int success),
+ 				       void *callback_context)
+ {
+ 	/* Just succeed */
+-	callback(callback_context, 1);
++	callback(callback_context, valid);
+ }
+ 
+ static void transient_usage(struct dm_exception_store *store,
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index d892a05c84f4..dbd0f00f7395 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -1388,8 +1388,9 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
+ 	dm_table_event(s->ti->table);
+ }
+ 
+-static void pending_complete(struct dm_snap_pending_exception *pe, int success)
++static void pending_complete(void *context, int success)
+ {
++	struct dm_snap_pending_exception *pe = context;
+ 	struct dm_exception *e;
+ 	struct dm_snapshot *s = pe->snap;
+ 	struct bio *origin_bios = NULL;
+@@ -1459,24 +1460,13 @@ out:
+ 	free_pending_exception(pe);
+ }
+ 
+-static void commit_callback(void *context, int success)
+-{
+-	struct dm_snap_pending_exception *pe = context;
+-
+-	pending_complete(pe, success);
+-}
+-
+ static void complete_exception(struct dm_snap_pending_exception *pe)
+ {
+ 	struct dm_snapshot *s = pe->snap;
+ 
+-	if (unlikely(pe->copy_error))
+-		pending_complete(pe, 0);
+-
+-	else
+-		/* Update the metadata if we are persistent */
+-		s->store->type->commit_exception(s->store, &pe->e,
+-						 commit_callback, pe);
++	/* Update the metadata if we are persistent */
++	s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
++					 pending_complete, pe);
+ }
+ 
+ /*
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index 7e3da70ed646..f74821c6ec5f 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -1205,6 +1205,12 @@ static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
+ 	dm_block_t held_root;
+ 
+ 	/*
++	 * We commit to ensure the btree roots which we increment in a
++	 * moment are up to date.
++	 */
++	__commit_transaction(pmd);
++
++	/*
+ 	 * Copy the superblock.
+ 	 */
+ 	dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION);
+diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
+index 28662bd600e0..67eae74a5525 100644
+--- a/drivers/md/persistent-data/dm-btree.c
++++ b/drivers/md/persistent-data/dm-btree.c
+@@ -250,6 +250,16 @@ static void pop_frame(struct del_stack *s)
+ 	dm_tm_unlock(s->tm, f->b);
+ }
+ 
++static void unlock_all_frames(struct del_stack *s)
++{
++	struct frame *f;
++
++	while (unprocessed_frames(s)) {
++		f = s->spine + s->top--;
++		dm_tm_unlock(s->tm, f->b);
++	}
++}
++
+ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
+ {
+ 	int r;
+@@ -306,9 +316,13 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
+ 			pop_frame(s);
+ 		}
+ 	}
+-
+ out:
++	if (r) {
++		/* cleanup all frames of del_stack */
++		unlock_all_frames(s);
++	}
+ 	kfree(s);
++
+ 	return r;
+ }
+ EXPORT_SYMBOL_GPL(dm_btree_del);
+diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
+index f6dea401232c..8a8f06bcde60 100644
+--- a/drivers/md/persistent-data/dm-space-map-metadata.c
++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
+@@ -136,7 +136,7 @@ static int brb_push(struct bop_ring_buffer *brb,
+ 	return 0;
+ }
+ 
+-static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result)
++static int brb_peek(struct bop_ring_buffer *brb, struct block_op *result)
+ {
+ 	struct block_op *bop;
+ 
+@@ -147,6 +147,14 @@ static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result)
+ 	result->type = bop->type;
+ 	result->block = bop->block;
+ 
++	return 0;
++}
++
++static int brb_pop(struct bop_ring_buffer *brb)
++{
++	if (brb_empty(brb))
++		return -ENODATA;
++
+ 	brb->begin = brb_next(brb, brb->begin);
+ 
+ 	return 0;
+@@ -211,7 +219,7 @@ static int apply_bops(struct sm_metadata *smm)
+ 	while (!brb_empty(&smm->uncommitted)) {
+ 		struct block_op bop;
+ 
+-		r = brb_pop(&smm->uncommitted, &bop);
++		r = brb_peek(&smm->uncommitted, &bop);
+ 		if (r) {
+ 			DMERR("bug in bop ring buffer");
+ 			break;
+@@ -220,6 +228,8 @@ static int apply_bops(struct sm_metadata *smm)
+ 		r = commit_bop(smm, &bop);
+ 		if (r)
+ 			break;
++
++		brb_pop(&smm->uncommitted);
+ 	}
+ 
+ 	return r;
+@@ -681,7 +691,6 @@ static struct dm_space_map bootstrap_ops = {
+ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
+ {
+ 	int r, i;
+-	enum allocation_event ev;
+ 	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+ 	dm_block_t old_len = smm->ll.nr_blocks;
+ 
+@@ -703,11 +712,12 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
+ 	 * allocate any new blocks.
+ 	 */
+ 	do {
+-		for (i = old_len; !r && i < smm->begin; i++) {
+-			r = sm_ll_inc(&smm->ll, i, &ev);
+-			if (r)
+-				goto out;
+-		}
++		for (i = old_len; !r && i < smm->begin; i++)
++			r = add_bop(smm, BOP_INC, i);
++
++		if (r)
++			goto out;
++
+ 		old_len = smm->begin;
+ 
+ 		r = apply_bops(smm);
+@@ -752,7 +762,6 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
+ {
+ 	int r;
+ 	dm_block_t i;
+-	enum allocation_event ev;
+ 	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+ 
+ 	smm->begin = superblock + 1;
+@@ -778,7 +787,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
+ 	 * allocated blocks that they were built from.
+ 	 */
+ 	for (i = superblock; !r && i < smm->begin; i++)
+-		r = sm_ll_inc(&smm->ll, i, &ev);
++		r = add_bop(smm, BOP_INC, i);
+ 
+ 	if (r)
+ 		return r;
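
Splitting the old brb_pop() into brb_peek() plus brb_pop() changes the
consumption semantics: a block op is now removed from the ring only after
commit_bop() succeeds, so a failed commit no longer silently discards the
operation. Peek-then-pop is the generic shape of "don't lose work on
failure" queue consumption; a compact standalone version:

    #include <stdio.h>

    #define RB_SIZE 8

    struct ring {
            int items[RB_SIZE];
            unsigned begin, end;    /* begin == end means empty */
    };

    static int rb_push(struct ring *rb, int v)
    {
            if (rb->end - rb->begin == RB_SIZE)
                    return -1;
            rb->items[rb->end++ % RB_SIZE] = v;
            return 0;
    }

    static int rb_peek(const struct ring *rb, int *out)
    {
            if (rb->begin == rb->end)
                    return -1;      /* -ENODATA */
            *out = rb->items[rb->begin % RB_SIZE];
            return 0;
    }

    static void rb_pop(struct ring *rb)
    {
            if (rb->begin != rb->end)
                    rb->begin++;
    }

    static int commit(int item)
    {
            printf("committed %d\n", item);
            return 0;               /* nonzero would mean failure */
    }

    int main(void)
    {
            struct ring rb = { .begin = 0, .end = 0 };
            int item;

            rb_push(&rb, 1);
            rb_push(&rb, 2);
            while (rb_peek(&rb, &item) == 0) {
                    if (commit(item))
                            break;  /* failed: item stays queued */
                    rb_pop(&rb);    /* success: now discard it */
            }
            return 0;
    }
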
+diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
+index 1f925e856974..46a984291b7d 100644
+--- a/drivers/media/dvb-core/dvb_frontend.c
++++ b/drivers/media/dvb-core/dvb_frontend.c
+@@ -2195,9 +2195,9 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
+ 		dev_dbg(fe->dvb->device, "%s: current delivery system on cache: %d, V3 type: %d\n",
+ 				 __func__, c->delivery_system, fe->ops.info.type);
+ 
+-		/* Force the CAN_INVERSION_AUTO bit on. If the frontend doesn't
+-		 * do it, it is done for it. */
+-		info->caps |= FE_CAN_INVERSION_AUTO;
++		/* Set CAN_INVERSION_AUTO bit on in other than oneshot mode */
++		if (!(fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT))
++			info->caps |= FE_CAN_INVERSION_AUTO;
+ 		err = 0;
+ 		break;
+ 	}
+diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c
+index a2631be7ffac..08e0f0dd8728 100644
+--- a/drivers/media/dvb-frontends/tda1004x.c
++++ b/drivers/media/dvb-frontends/tda1004x.c
+@@ -903,9 +903,18 @@ static int tda1004x_get_fe(struct dvb_frontend *fe)
+ {
+ 	struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache;
+ 	struct tda1004x_state* state = fe->demodulator_priv;
++	int status;
+ 
+ 	dprintk("%s\n", __func__);
+ 
++	status = tda1004x_read_byte(state, TDA1004X_STATUS_CD);
++	if (status == -1)
++		return -EIO;
++
++	/* Only update the properties cache if device is locked */
++	if (!(status & 8))
++		return 0;
++
+ 	// inversion status
+ 	fe_params->inversion = INVERSION_OFF;
+ 	if (tda1004x_read_byte(state, TDA1004X_CONFC1) & 0x20)
+diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c
+index 03a33c46ca2c..9a9ad6ba56dd 100644
+--- a/drivers/media/usb/gspca/ov534.c
++++ b/drivers/media/usb/gspca/ov534.c
+@@ -1489,8 +1489,13 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
+ 	struct v4l2_fract *tpf = &cp->timeperframe;
+ 	struct sd *sd = (struct sd *) gspca_dev;
+ 
+-	/* Set requested framerate */
+-	sd->frame_rate = tpf->denominator / tpf->numerator;
++	if (tpf->numerator == 0 || tpf->denominator == 0)
++		/* Set default framerate */
++		sd->frame_rate = 30;
++	else
++		/* Set requested framerate */
++		sd->frame_rate = tpf->denominator / tpf->numerator;
++
+ 	if (gspca_dev->streaming)
+ 		set_frame_rate(gspca_dev);
+ 
+diff --git a/drivers/media/usb/gspca/topro.c b/drivers/media/usb/gspca/topro.c
+index 4cb511ccc5f6..22ea6aefd22f 100644
+--- a/drivers/media/usb/gspca/topro.c
++++ b/drivers/media/usb/gspca/topro.c
+@@ -4791,7 +4791,11 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
+ 	struct v4l2_fract *tpf = &cp->timeperframe;
+ 	int fr, i;
+ 
+-	sd->framerate = tpf->denominator / tpf->numerator;
++	if (tpf->numerator == 0 || tpf->denominator == 0)
++		sd->framerate = 30;
++	else
++		sd->framerate = tpf->denominator / tpf->numerator;
++
+ 	if (gspca_dev->streaming)
+ 		setframerate(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure));
+ 
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index 88554c22265c..30076b4f3fee 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -62,8 +62,7 @@ MODULE_ALIAS("mmc:block");
+ #define MMC_SANITIZE_REQ_TIMEOUT 240000
+ #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
+ 
+-#define mmc_req_rel_wr(req)	(((req->cmd_flags & REQ_FUA) || \
+-				  (req->cmd_flags & REQ_META)) && \
++#define mmc_req_rel_wr(req)	((req->cmd_flags & REQ_FUA) && \
+ 				  (rq_data_dir(req) == WRITE))
+ #define PACKED_CMD_VER	0x01
+ #define PACKED_CMD_WR	0x02
+@@ -1328,13 +1327,9 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
+ 
+ 	/*
+ 	 * Reliable writes are used to implement Forced Unit Access and
+-	 * REQ_META accesses, and are supported only on MMCs.
+-	 *
+-	 * XXX: this really needs a good explanation of why REQ_META
+-	 * is treated special.
++	 * are supported only on MMCs.
+ 	 */
+-	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
+-			  (req->cmd_flags & REQ_META)) &&
++	bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
+ 		(rq_data_dir(req) == WRITE) &&
+ 		(md->flags & MMC_BLK_REL_WR);
+ 
+diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
+index c3785edc0e92..3755f4a43622 100644
+--- a/drivers/mmc/host/mmci.c
++++ b/drivers/mmc/host/mmci.c
+@@ -1827,7 +1827,7 @@ static struct amba_id mmci_ids[] = {
+ 	{
+ 		.id     = 0x00280180,
+ 		.mask   = 0x00ffffff,
+-		.data	= &variant_u300,
++		.data	= &variant_nomadik,
+ 	},
+ 	{
+ 		.id     = 0x00480180,
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index bd2538d84f5d..4aa4d2d18933 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -2650,7 +2650,7 @@ static int sdhci_runtime_pm_put(struct sdhci_host *host)
+ 
+ static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
+ {
+-	if (host->runtime_suspended || host->bus_on)
++	if (host->bus_on)
+ 		return;
+ 	host->bus_on = true;
+ 	pm_runtime_get_noresume(host->mmc->parent);
+@@ -2658,7 +2658,7 @@ static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
+ 
+ static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
+ {
+-	if (host->runtime_suspended || !host->bus_on)
++	if (!host->bus_on)
+ 		return;
+ 	host->bus_on = false;
+ 	pm_runtime_put_noidle(host->mmc->parent);
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index b3892b0d2e61..5dcac318e317 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -259,6 +259,8 @@ struct bond_parm_tbl ad_select_tbl[] = {
+ 
+ static int bond_init(struct net_device *bond_dev);
+ static void bond_uninit(struct net_device *bond_dev);
++static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
++				  int mod);
+ 
+ /*---------------------------- General routines -----------------------------*/
+ 
+@@ -2435,6 +2437,7 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
+ 		 struct slave *slave)
+ {
+ 	struct arphdr *arp = (struct arphdr *)skb->data;
++	struct slave *curr_active_slave, *curr_arp_slave;
+ 	unsigned char *arp_ptr;
+ 	__be32 sip, tip;
+ 	int alen;
+@@ -2479,25 +2482,42 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
+ 		 bond->params.arp_validate, slave_do_arp_validate(bond, slave),
+ 		 &sip, &tip);
+ 
+-	/*
+-	 * Backup slaves won't see the ARP reply, but do come through
+-	 * here for each ARP probe (so we swap the sip/tip to validate
+-	 * the probe).  In a "redundant switch, common router" type of
+-	 * configuration, the ARP probe will (hopefully) travel from
+-	 * the active, through one switch, the router, then the other
+-	 * switch before reaching the backup.
++	curr_active_slave = rcu_dereference(bond->curr_active_slave);
++	curr_arp_slave = rcu_dereference(bond->current_arp_slave);
++
++	/* We 'trust' the received ARP enough to validate it if:
++	 *
++	 * (a) the slave receiving the ARP is active (which includes the
++	 * current ARP slave, if any), or
+ 	 *
+-	 * We 'trust' the arp requests if there is an active slave and
+-	 * it received valid arp reply(s) after it became active. This
+-	 * is done to avoid endless looping when we can't reach the
++	 * (b) the receiving slave isn't active, but there is a currently
++	 * active slave and it received valid arp reply(s) after it became
++	 * the currently active slave, or
++	 *
++	 * (c) there is an ARP slave that sent an ARP during the prior ARP
++	 * interval, and we receive an ARP reply on any slave.  We accept
++	 * these because switch FDB update delays may deliver the ARP
++	 * reply to a slave other than the sender of the ARP request.
++	 *
++	 * Note: for (b), backup slaves are receiving the broadcast ARP
++	 * request, not a reply.  This request passes from the sending
++	 * slave through the L2 switch(es) to the receiving slave.  Since
++	 * this is checking the request, sip/tip are swapped for
++	 * validation.
++	 *
++	 * This is done to avoid endless looping when we can't reach the
+ 	 * arp_ip_target and fool ourselves with our own arp requests.
+ 	 */
+ 	if (bond_is_active_slave(slave))
+ 		bond_validate_arp(bond, slave, sip, tip);
+-	else if (bond->curr_active_slave &&
+-		 time_after(slave_last_rx(bond, bond->curr_active_slave),
+-			    bond->curr_active_slave->jiffies))
++	else if (curr_active_slave &&
++		 time_after(slave_last_rx(bond, curr_active_slave),
++			    curr_active_slave->jiffies))
+ 		bond_validate_arp(bond, slave, tip, sip);
++	else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
++		 bond_time_in_interval(bond,
++				       dev_trans_start(curr_arp_slave->dev), 1))
++		bond_validate_arp(bond, slave, sip, tip);
+ 
+ out_unlock:
+ 	read_unlock(&bond->lock);
+diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
+index 5f9a7ad9b964..d921416295ce 100644
+--- a/drivers/net/can/usb/ems_usb.c
++++ b/drivers/net/can/usb/ems_usb.c
+@@ -118,6 +118,9 @@ MODULE_LICENSE("GPL v2");
+  */
+ #define EMS_USB_ARM7_CLOCK 8000000
+ 
++#define CPC_TX_QUEUE_TRIGGER_LOW	25
++#define CPC_TX_QUEUE_TRIGGER_HIGH	35
++
+ /*
+  * CAN-Message representation in a CPC_MSG. Message object type is
+  * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or
+@@ -279,6 +282,11 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
+ 	switch (urb->status) {
+ 	case 0:
+ 		dev->free_slots = dev->intr_in_buffer[1];
++		if (dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH) {
++			if (netif_queue_stopped(netdev)) {
++				netif_wake_queue(netdev);
++			}
++		}
+ 		break;
+ 
+ 	case -ECONNRESET: /* unlink */
+@@ -530,8 +538,6 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
+ 	/* Release context */
+ 	context->echo_index = MAX_TX_URBS;
+ 
+-	if (netif_queue_stopped(netdev))
+-		netif_wake_queue(netdev);
+ }
+ 
+ /*
+@@ -591,7 +597,7 @@ static int ems_usb_start(struct ems_usb *dev)
+ 	int err, i;
+ 
+ 	dev->intr_in_buffer[0] = 0;
+-	dev->free_slots = 15; /* initial size */
++	dev->free_slots = 50; /* initial size */
+ 
+ 	for (i = 0; i < MAX_RX_URBS; i++) {
+ 		struct urb *urb = NULL;
+@@ -841,7 +847,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
+ 
+ 		/* Slow down tx path */
+ 		if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
+-		    dev->free_slots < 5) {
++		    dev->free_slots < CPC_TX_QUEUE_TRIGGER_LOW) {
+ 			netif_stop_queue(netdev);
+ 		}
+ 	}
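
The two trigger levels implement hysteresis: the queue is stopped once fewer
than 25 slots remain and restarted only after the device itself reports more
than 35 free, which is also why the old wake from the URB-completion path is
removed; the device's buffer fill level, not the URB count, is the real
constraint. The general shape (sketch):

    #include <stdbool.h>
    #include <stdio.h>

    #define TRIGGER_LOW     25
    #define TRIGGER_HIGH    35

    static bool queue_stopped;

    /* Transmit path: stop early, before the device is truly full. */
    static void on_transmit(int free_slots)
    {
            if (free_slots < TRIGGER_LOW)
                    queue_stopped = true;
    }

    /* Device feedback path: restart only once real headroom returns.
     * The gap between the thresholds prevents stop/wake flapping. */
    static void on_status_update(int free_slots)
    {
            if (queue_stopped && free_slots > TRIGGER_HIGH)
                    queue_stopped = false;
    }

    int main(void)
    {
            on_transmit(20);        /* below LOW: stop */
            on_status_update(30);   /* inside the gap: stay stopped */
            on_status_update(40);   /* above HIGH: wake */
            printf("queue %s\n", queue_stopped ? "stopped" : "running");
            return 0;
    }
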
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+index 930ced0bcc8b..ce534b2bbd95 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+@@ -2371,10 +2371,13 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
+ 				 AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \
+ 				 AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR)
+ 
+-#define HW_PRTY_ASSERT_SET_3 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
+-		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
+-		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
+-		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
++#define HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD \
++		(AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
++		 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
++		 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY)
++
++#define HW_PRTY_ASSERT_SET_3 (HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD | \
++			      AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
+ 
+ #define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \
+ 			      AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+index 237a5611d3f6..1e912b16c487 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+@@ -4613,9 +4613,7 @@ static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
+ 				res |= true;
+ 				break;
+ 			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
+-				if (print)
+-					_print_next_block((*par_num)++,
+-							  "MCP SCPAD");
++				(*par_num)++;
+ 				/* clear latched SCPAD PARITY from MCP */
+ 				REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
+ 				       1UL << 10);
+@@ -4677,6 +4675,7 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
+ 	    (sig[3] & HW_PRTY_ASSERT_SET_3) ||
+ 	    (sig[4] & HW_PRTY_ASSERT_SET_4)) {
+ 		int par_num = 0;
++
+ 		DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n"
+ 				 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
+ 			  sig[0] & HW_PRTY_ASSERT_SET_0,
+@@ -4684,9 +4683,18 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
+ 			  sig[2] & HW_PRTY_ASSERT_SET_2,
+ 			  sig[3] & HW_PRTY_ASSERT_SET_3,
+ 			  sig[4] & HW_PRTY_ASSERT_SET_4);
+-		if (print)
+-			netdev_err(bp->dev,
+-				   "Parity errors detected in blocks: ");
++		if (print) {
++			if (((sig[0] & HW_PRTY_ASSERT_SET_0) ||
++			     (sig[1] & HW_PRTY_ASSERT_SET_1) ||
++			     (sig[2] & HW_PRTY_ASSERT_SET_2) ||
++			     (sig[4] & HW_PRTY_ASSERT_SET_4)) ||
++			     (sig[3] & HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD)) {
++				netdev_err(bp->dev,
++					   "Parity errors detected in blocks: ");
++			} else {
++				print = false;
++			}
++		}
+ 		res |= bnx2x_check_blocks_with_parity0(bp,
+ 			sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
+ 		res |= bnx2x_check_blocks_with_parity1(bp,
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index fe601e264f94..ea86c17541b3 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -7809,6 +7809,14 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
+ 	return ret;
+ }
+ 
++static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
++{
++	/* Check if we will never have enough descriptors,
++	 * as gso_segs can be more than current ring size
++	 */
++	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
++}
++
+ static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
+ 
+ /* Use GSO to workaround a rare TSO bug that may be triggered when the
+@@ -7910,8 +7918,11 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		 * vlan encapsulated.
+ 		 */
+ 		if (skb->protocol == htons(ETH_P_8021Q) ||
+-		    skb->protocol == htons(ETH_P_8021AD))
+-			return tg3_tso_bug(tp, skb);
++		    skb->protocol == htons(ETH_P_8021AD)) {
++			if (tg3_tso_bug_gso_check(tnapi, skb))
++				return tg3_tso_bug(tp, skb);
++			goto drop;
++		}
+ 
+ 		if (!skb_is_gso_v6(skb)) {
+ 			iph->check = 0;
+@@ -7919,8 +7930,11 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		}
+ 
+ 		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
+-		    tg3_flag(tp, TSO_BUG))
+-			return tg3_tso_bug(tp, skb);
++		    tg3_flag(tp, TSO_BUG)) {
++			if (tg3_tso_bug_gso_check(tnapi, skb))
++				return tg3_tso_bug(tp, skb);
++			goto drop;
++		}
+ 
+ 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
+ 			       TXD_FLAG_CPU_POST_DMA);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+index fd6441071319..9180c7e72c65 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+@@ -111,6 +111,24 @@ void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
+ 	hwts->hwtstamp = ns_to_ktime(nsec);
+ }
+ 
++#define MLX4_EN_WRAP_AROUND_SEC	10ULL
++
++/* This function calculates the max shift that enables the user range
++ * of MLX4_EN_WRAP_AROUND_SEC values in the cycles register.
++ */
++static u32 freq_to_shift(u16 freq)
++{
++	u32 freq_khz = freq * 1000;
++	u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
++	u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
++		max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1;
++	/* calculate max possible multiplier in order to fit in 64bit */
++	u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
++
++	/* This comes from the reverse of clocksource_khz2mult */
++	return ilog2(div_u64(max_mul * freq_khz, 1000000));
++}
++
+ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
+ {
+ 	struct mlx4_dev *dev = mdev->dev;
+@@ -119,12 +137,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
+ 	memset(&mdev->cycles, 0, sizeof(mdev->cycles));
+ 	mdev->cycles.read = mlx4_en_read_clock;
+ 	mdev->cycles.mask = CLOCKSOURCE_MASK(48);
+-	/* Using shift to make calculation more accurate. Since current HW
+-	 * clock frequency is 427 MHz, and cycles are given using a 48 bits
+-	 * register, the biggest shift when calculating using u64, is 14
+-	 * (max_cycles * multiplier < 2^64)
+-	 */
+-	mdev->cycles.shift = 14;
++	mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock);
+ 	mdev->cycles.mult =
+ 		clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
+ 
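
Replacing the hard-coded shift of 14 with freq_to_shift() ties the
clocksource precision to the actual hca_core_clock: only about ten seconds'
worth of cycles (rather than the full 48-bit counter) must stay below 2^64
when multiplied by mult, which permits a much larger shift. The same
arithmetic in plain C, with the kernel helpers (div_u64, ilog2,
roundup_pow_of_two) open-coded:

    #include <stdint.h>
    #include <stdio.h>

    #define WRAP_AROUND_SEC 10ULL

    static uint32_t freq_to_shift(uint16_t freq_mhz)
    {
            uint64_t freq_khz = (uint64_t)freq_mhz * 1000;
            uint64_t max_val_cycles = freq_khz * 1000 * WRAP_AROUND_SEC;
            uint64_t rounded = max_val_cycles;
            uint64_t max_mul;

            /* round up to the next value of the form 2^n - 1 */
            rounded |= rounded >> 1;  rounded |= rounded >> 2;
            rounded |= rounded >> 4;  rounded |= rounded >> 8;
            rounded |= rounded >> 16; rounded |= rounded >> 32;

            /* largest multiplier keeping cycles * mult below 2^64 */
            max_mul = UINT64_MAX / rounded;

            /* reverse of clocksource_khz2mult(); log2 of highest bit */
            return 63 - (uint32_t)__builtin_clzll(max_mul * freq_khz
                                                  / 1000000);
    }

    int main(void)
    {
            /* The original 427 MHz clock now gets shift 30, far more
             * precise than the old fixed 14, which had to leave room
             * for the full 48-bit counter range. */
            printf("shift(427 MHz) = %u\n", freq_to_shift(427));
            return 0;
    }
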
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
+index 331791467a22..85dcd178e5ed 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
+@@ -174,11 +174,11 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
+ 			   be64_to_cpu(mlx4_en_stats->MCAST_novlan);
+ 	stats->collisions = 0;
+ 	stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
+-	stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
++	stats->rx_over_errors = 0;
+ 	stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
+ 	stats->rx_frame_errors = 0;
+ 	stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
+-	stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
++	stats->rx_missed_errors = 0;
+ 	stats->tx_aborted_errors = 0;
+ 	stats->tx_carrier_errors = 0;
+ 	stats->tx_fifo_errors = 0;
+diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
+index d2907a6e3dab..be37c042cd69 100644
+--- a/drivers/net/phy/dp83640.c
++++ b/drivers/net/phy/dp83640.c
+@@ -698,6 +698,11 @@ static void decode_rxts(struct dp83640_private *dp83640,
+ {
+ 	struct rxts *rxts;
+ 	unsigned long flags;
++	u8 overflow;
++
++	overflow = (phy_rxts->ns_hi >> 14) & 0x3;
++	if (overflow)
++		pr_debug("rx timestamp queue overflow, count %d\n", overflow);
+ 
+ 	spin_lock_irqsave(&dp83640->rx_lock, flags);
+ 
+@@ -721,6 +726,7 @@ static void decode_txts(struct dp83640_private *dp83640,
+ 	struct skb_shared_hwtstamps shhwtstamps;
+ 	struct sk_buff *skb;
+ 	u64 ns;
++	u8 overflow;
+ 
+ 	/* We must already have the skb that triggered this. */
+ 
+@@ -730,6 +736,17 @@ static void decode_txts(struct dp83640_private *dp83640,
+ 		pr_debug("have timestamp but tx_queue empty\n");
+ 		return;
+ 	}
++
++	overflow = (phy_txts->ns_hi >> 14) & 0x3;
++	if (overflow) {
++		pr_debug("tx timestamp queue overflow, count %d\n", overflow);
++		while (skb) {
++			skb_complete_tx_timestamp(skb, NULL);
++			skb = skb_dequeue(&dp83640->tx_queue);
++		}
++		return;
++	}
++
+ 	ns = phy2txts(phy_txts);
+ 	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ 	shhwtstamps.hwtstamp = ns_to_ktime(ns);
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index 1cfd4e841854..ec982788e6db 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -392,6 +392,8 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
+ 
+ 		if (!__pppoe_xmit(sk_pppox(relay_po), skb))
+ 			goto abort_put;
++
++		sock_put(sk_pppox(relay_po));
+ 	} else {
+ 		if (sock_queue_rcv_skb(sk, skb))
+ 			goto abort_kfree;
+diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
+index 0710214df2bf..bb1ab1ffbc8b 100644
+--- a/drivers/net/ppp/pptp.c
++++ b/drivers/net/ppp/pptp.c
+@@ -131,24 +131,27 @@ static int lookup_chan_dst(u16 call_id, __be32 d_addr)
+ 	return i < MAX_CALLID;
+ }
+ 
+-static int add_chan(struct pppox_sock *sock)
++static int add_chan(struct pppox_sock *sock,
++		    struct pptp_addr *sa)
+ {
+ 	static int call_id;
+ 
+ 	spin_lock(&chan_lock);
+-	if (!sock->proto.pptp.src_addr.call_id)	{
++	if (!sa->call_id)	{
+ 		call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1);
+ 		if (call_id == MAX_CALLID) {
+ 			call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1);
+ 			if (call_id == MAX_CALLID)
+ 				goto out_err;
+ 		}
+-		sock->proto.pptp.src_addr.call_id = call_id;
+-	} else if (test_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap))
++		sa->call_id = call_id;
++	} else if (test_bit(sa->call_id, callid_bitmap)) {
+ 		goto out_err;
++	}
+ 
+-	set_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
+-	rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], sock);
++	sock->proto.pptp.src_addr = *sa;
++	set_bit(sa->call_id, callid_bitmap);
++	rcu_assign_pointer(callid_sock[sa->call_id], sock);
+ 	spin_unlock(&chan_lock);
+ 
+ 	return 0;
+@@ -417,7 +420,6 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
+ 	struct sock *sk = sock->sk;
+ 	struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
+ 	struct pppox_sock *po = pppox_sk(sk);
+-	struct pptp_opt *opt = &po->proto.pptp;
+ 	int error = 0;
+ 
+ 	if (sockaddr_len < sizeof(struct sockaddr_pppox))
+@@ -425,10 +427,22 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
+ 
+ 	lock_sock(sk);
+ 
+-	opt->src_addr = sp->sa_addr.pptp;
+-	if (add_chan(po))
++	if (sk->sk_state & PPPOX_DEAD) {
++		error = -EALREADY;
++		goto out;
++	}
++
++	if (sk->sk_state & PPPOX_BOUND) {
+ 		error = -EBUSY;
++		goto out;
++	}
++
++	if (add_chan(po, &sp->sa_addr.pptp))
++		error = -EBUSY;
++	else
++		sk->sk_state |= PPPOX_BOUND;
+ 
++out:
+ 	release_sock(sk);
+ 	return error;
+ }
+@@ -499,7 +513,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ 	}
+ 
+ 	opt->dst_addr = sp->sa_addr.pptp;
+-	sk->sk_state = PPPOX_CONNECTED;
++	sk->sk_state |= PPPOX_CONNECTED;
+ 
+  end:
+ 	release_sock(sk);
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 9356aa5f2033..0eb410b637de 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -522,6 +522,7 @@ static const struct usb_device_id products[] = {
+ 
+ 	/* 3. Combined interface devices matching on interface number */
+ 	{QMI_FIXED_INTF(0x0408, 0xea42, 4)},	/* Yota / Megafon M100-1 */
++	{QMI_FIXED_INTF(0x05c6, 0x6001, 3)},	/* 4G LTE usb-modem U901 */
+ 	{QMI_FIXED_INTF(0x05c6, 0x7000, 0)},
+ 	{QMI_FIXED_INTF(0x05c6, 0x7001, 1)},
+ 	{QMI_FIXED_INTF(0x05c6, 0x7002, 1)},
+diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
+index 0bf82a20a0fb..48d21e0edd56 100644
+--- a/drivers/pci/pcie/aer/aerdrv.c
++++ b/drivers/pci/pcie/aer/aerdrv.c
+@@ -262,7 +262,6 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
+ 	rpc->rpd = dev;
+ 	INIT_WORK(&rpc->dpc_handler, aer_isr);
+ 	mutex_init(&rpc->rpc_mutex);
+-	init_waitqueue_head(&rpc->wait_release);
+ 
+ 	/* Use PCIe bus function to store rpc into PCIe device */
+ 	set_service_data(dev, rpc);
+@@ -285,8 +284,7 @@ static void aer_remove(struct pcie_device *dev)
+ 		if (rpc->isr)
+ 			free_irq(dev->irq, dev);
+ 
+-		wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx);
+-
++		flush_work(&rpc->dpc_handler);
+ 		aer_disable_rootport(rpc);
+ 		kfree(rpc);
+ 		set_service_data(dev, NULL);
+diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
+index 84420b7c9456..945c939a86c5 100644
+--- a/drivers/pci/pcie/aer/aerdrv.h
++++ b/drivers/pci/pcie/aer/aerdrv.h
+@@ -72,7 +72,6 @@ struct aer_rpc {
+ 					 * recovery on the same
+ 					 * root port hierarchy
+ 					 */
+-	wait_queue_head_t wait_release;
+ };
+ 
+ struct aer_broadcast_data {
+diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
+index 85ca36f2136d..28d4c0a0d31a 100644
+--- a/drivers/pci/pcie/aer/aerdrv_core.c
++++ b/drivers/pci/pcie/aer/aerdrv_core.c
+@@ -785,8 +785,6 @@ void aer_isr(struct work_struct *work)
+ 	while (get_e_source(rpc, &e_src))
+ 		aer_isr_one_error(p_device, &e_src);
+ 	mutex_unlock(&rpc->rpc_mutex);
+-
+-	wake_up(&rpc->wait_release);
+ }
+ 
+ /**
+diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
+index eae7cd9fde7b..facd18c2ed46 100644
+--- a/drivers/pci/xen-pcifront.c
++++ b/drivers/pci/xen-pcifront.c
+@@ -52,7 +52,7 @@ struct pcifront_device {
+ };
+ 
+ struct pcifront_sd {
+-	int domain;
++	struct pci_sysdata sd;
+ 	struct pcifront_device *pdev;
+ };
+ 
+@@ -66,7 +66,9 @@ static inline void pcifront_init_sd(struct pcifront_sd *sd,
+ 				    unsigned int domain, unsigned int bus,
+ 				    struct pcifront_device *pdev)
+ {
+-	sd->domain = domain;
++	/* Because we do not expose that information via XenBus. */
++	sd->sd.node = first_online_node;
++	sd->sd.domain = domain;
+ 	sd->pdev = pdev;
+ }
+ 
+@@ -464,8 +466,8 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
+ 	dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
+ 		 domain, bus);
+ 
+-	bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
+-	sd = kmalloc(sizeof(*sd), GFP_KERNEL);
++	bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL);
++	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
+ 	if (!bus_entry || !sd) {
+ 		err = -ENOMEM;
+ 		goto err_out;
+diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c
+index 3bed2f55cf7d..3ccadf631d45 100644
+--- a/drivers/power/wm831x_power.c
++++ b/drivers/power/wm831x_power.c
+@@ -567,7 +567,7 @@ static int wm831x_power_probe(struct platform_device *pdev)
+ 
+ 	irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO"));
+ 	ret = request_threaded_irq(irq, NULL, wm831x_syslo_irq,
+-				   IRQF_TRIGGER_RISING, "System power low",
++				   IRQF_TRIGGER_RISING | IRQF_ONESHOT, "System power low",
+ 				   power);
+ 	if (ret != 0) {
+ 		dev_err(&pdev->dev, "Failed to request SYSLO IRQ %d: %d\n",
+@@ -577,7 +577,7 @@ static int wm831x_power_probe(struct platform_device *pdev)
+ 
+ 	irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "PWR SRC"));
+ 	ret = request_threaded_irq(irq, NULL, wm831x_pwr_src_irq,
+-				   IRQF_TRIGGER_RISING, "Power source",
++				   IRQF_TRIGGER_RISING | IRQF_ONESHOT, "Power source",
+ 				   power);
+ 	if (ret != 0) {
+ 		dev_err(&pdev->dev, "Failed to request PWR SRC IRQ %d: %d\n",
+@@ -590,7 +590,7 @@ static int wm831x_power_probe(struct platform_device *pdev)
+ 				 platform_get_irq_byname(pdev,
+ 							 wm831x_bat_irqs[i]));
+ 		ret = request_threaded_irq(irq, NULL, wm831x_bat_irq,
+-					   IRQF_TRIGGER_RISING,
++					   IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ 					   wm831x_bat_irqs[i],
+ 					   power);
+ 		if (ret != 0) {
+diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
+index a2597e683e79..6a64e86e8ccd 100644
+--- a/drivers/s390/block/dasd_alias.c
++++ b/drivers/s390/block/dasd_alias.c
+@@ -264,8 +264,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
+ 		spin_unlock_irqrestore(&lcu->lock, flags);
+ 		cancel_work_sync(&lcu->suc_data.worker);
+ 		spin_lock_irqsave(&lcu->lock, flags);
+-		if (device == lcu->suc_data.device)
++		if (device == lcu->suc_data.device) {
++			dasd_put_device(device);
+ 			lcu->suc_data.device = NULL;
++		}
+ 	}
+ 	was_pending = 0;
+ 	if (device == lcu->ruac_data.device) {
+@@ -273,8 +275,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
+ 		was_pending = 1;
+ 		cancel_delayed_work_sync(&lcu->ruac_data.dwork);
+ 		spin_lock_irqsave(&lcu->lock, flags);
+-		if (device == lcu->ruac_data.device)
++		if (device == lcu->ruac_data.device) {
++			dasd_put_device(device);
+ 			lcu->ruac_data.device = NULL;
++		}
+ 	}
+ 	private->lcu = NULL;
+ 	spin_unlock_irqrestore(&lcu->lock, flags);
+@@ -549,8 +553,10 @@ static void lcu_update_work(struct work_struct *work)
+ 	if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
+ 		DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
+ 			    " alias data in lcu (rc = %d), retry later", rc);
+-		schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
++		if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
++			dasd_put_device(device);
+ 	} else {
++		dasd_put_device(device);
+ 		lcu->ruac_data.device = NULL;
+ 		lcu->flags &= ~UPDATE_PENDING;
+ 	}
+@@ -593,8 +599,10 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
+ 	 */
+ 	if (!usedev)
+ 		return -EINVAL;
++	dasd_get_device(usedev);
+ 	lcu->ruac_data.device = usedev;
+-	schedule_delayed_work(&lcu->ruac_data.dwork, 0);
++	if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
++		dasd_put_device(usedev);
+ 	return 0;
+ }
+ 
+@@ -722,7 +730,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
+ 	ASCEBC((char *) &cqr->magic, 4);
+ 	ccw = cqr->cpaddr;
+ 	ccw->cmd_code = DASD_ECKD_CCW_RSCK;
+-	ccw->flags = 0 ;
++	ccw->flags = CCW_FLAG_SLI;
+ 	ccw->count = 16;
+ 	ccw->cda = (__u32)(addr_t) cqr->data;
+ 	((char *)cqr->data)[0] = reason;
+@@ -926,6 +934,7 @@ static void summary_unit_check_handling_work(struct work_struct *work)
+ 	/* 3. read new alias configuration */
+ 	_schedule_lcu_update(lcu, device);
+ 	lcu->suc_data.device = NULL;
++	dasd_put_device(device);
+ 	spin_unlock_irqrestore(&lcu->lock, flags);
+ }
+ 
+@@ -985,6 +994,8 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
+ 	}
+ 	lcu->suc_data.reason = reason;
+ 	lcu->suc_data.device = device;
++	dasd_get_device(device);
+ 	spin_unlock(&lcu->lock);
+-	schedule_work(&lcu->suc_data.worker);
++	if (!schedule_work(&lcu->suc_data.worker))
++		dasd_put_device(device);
+ };
+diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
+index f2bb2f09bff1..deb1ed816c49 100644
+--- a/drivers/scsi/megaraid/megaraid_sas.h
++++ b/drivers/scsi/megaraid/megaraid_sas.h
+@@ -334,6 +334,8 @@ enum MR_EVT_ARGS {
+ 	MR_EVT_ARGS_GENERIC,
+ };
+ 
++
++#define SGE_BUFFER_SIZE	4096
+ /*
+  * define constants for device list query options
+  */
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 6da7e62b13fb..6811a9b37053 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -3819,7 +3819,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
+ 		}
+ 	}
+ 	instance->max_sectors_per_req = instance->max_num_sge *
+-						PAGE_SIZE / 512;
++						SGE_BUFFER_SIZE / 512;
+ 	if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
+ 		instance->max_sectors_per_req = tmp_sectors;
+ 
+@@ -5284,6 +5284,9 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
+ 	int i;
+ 	int error = 0;
+ 	compat_uptr_t ptr;
++	unsigned long local_raw_ptr;
++	u32 local_sense_off;
++	u32 local_sense_len;
+ 
+ 	if (clear_user(ioc, sizeof(*ioc)))
+ 		return -EFAULT;
+@@ -5301,9 +5304,15 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
+ 	 * sense_len is not null, so prepare the 64bit value under
+ 	 * the same condition.
+ 	 */
+-	if (ioc->sense_len) {
++	if (get_user(local_raw_ptr, ioc->frame.raw) ||
++		get_user(local_sense_off, &ioc->sense_off) ||
++		get_user(local_sense_len, &ioc->sense_len))
++		return -EFAULT;
++
++
++	if (local_sense_len) {
+ 		void __user **sense_ioc_ptr =
+-			(void __user **)(ioc->frame.raw + ioc->sense_off);
++			(void __user **)((u8*)local_raw_ptr + local_sense_off);
+ 		compat_uptr_t *sense_cioc_ptr =
+ 			(compat_uptr_t *)(cioc->frame.raw + cioc->sense_off);
+ 		if (get_user(ptr, sense_cioc_ptr) ||
+diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
+index eba183c428cf..3643bbf5456d 100644
+--- a/drivers/scsi/ses.c
++++ b/drivers/scsi/ses.c
+@@ -70,6 +70,7 @@ static int ses_probe(struct device *dev)
+ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
+ 			 void *buf, int bufflen)
+ {
++	int ret;
+ 	unsigned char cmd[] = {
+ 		RECEIVE_DIAGNOSTIC,
+ 		1,		/* Set PCV bit */
+@@ -78,9 +79,26 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
+ 		bufflen & 0xff,
+ 		0
+ 	};
++	unsigned char recv_page_code;
+ 
+-	return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
++	ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
+ 				NULL, SES_TIMEOUT, SES_RETRIES, NULL);
++	if (unlikely(ret))
++		return ret;
++
++	recv_page_code = ((unsigned char *)buf)[0];
++
++	if (likely(recv_page_code == page_code))
++		return ret;
++
++	/* successful diagnostic but wrong page code.  This happens to some
++	 * USB devices; just print a message and pretend there was an error */
++
++	sdev_printk(KERN_ERR, sdev,
++		    "Wrong diagnostic page; asked for %d got %u\n",
++		    page_code, recv_page_code);
++
++	return -EINVAL;
+ }
+ 
+ static int ses_send_diag(struct scsi_device *sdev, int page_code,
+@@ -436,7 +454,15 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
+ 			if (desc_ptr)
+ 				desc_ptr += len;
+ 
+-			if (addl_desc_ptr)
++			if (addl_desc_ptr &&
++			    /* only find additional descriptions for specific devices */
++			    (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
++			     type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE ||
++			     type_ptr[0] == ENCLOSURE_COMPONENT_SAS_EXPANDER ||
++			     /* these elements are optional */
++			     type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_TARGET_PORT ||
++			     type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT ||
++			     type_ptr[0] == ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS))
+ 				addl_desc_ptr += addl_desc_ptr[1] + 2;
+ 
+ 		}
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 3bb6646bb406..f9da66fa850b 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -1610,8 +1610,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
+ 	vm_srb->win8_extension.time_out_value = 60;
+ 
+ 	vm_srb->win8_extension.srb_flags |=
+-		(SRB_FLAGS_QUEUE_ACTION_ENABLE |
+-		SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
++		SRB_FLAGS_DISABLE_SYNCH_TRANSFER;
+ 
+ 	/* Build the SRB */
+ 	switch (scmnd->sc_data_direction) {
+diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
+index 75f126538a72..401fc7097935 100644
+--- a/drivers/target/target_core_sbc.c
++++ b/drivers/target/target_core_sbc.c
+@@ -298,7 +298,8 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
+ 	return 0;
+ }
+ 
+-static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success)
++static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
++					   int *post_ret)
+ {
+ 	unsigned char *buf, *addr;
+ 	struct scatterlist *sg;
+@@ -362,7 +363,8 @@ sbc_execute_rw(struct se_cmd *cmd)
+ 			       cmd->data_direction);
+ }
+ 
+-static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
++static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
++					     int *post_ret)
+ {
+ 	struct se_device *dev = cmd->se_dev;
+ 
+@@ -372,8 +374,10 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
+ 	 * sent to the backend driver.
+ 	 */
+ 	spin_lock_irq(&cmd->t_state_lock);
+-	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
++	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) {
+ 		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
++		*post_ret = 1;
++	}
+ 	spin_unlock_irq(&cmd->t_state_lock);
+ 
+ 	/*
+@@ -385,7 +389,8 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
+ 	return TCM_NO_SENSE;
+ }
+ 
+-static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success)
++static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
++						 int *post_ret)
+ {
+ 	struct se_device *dev = cmd->se_dev;
+ 	struct scatterlist *write_sg = NULL, *sg;
+@@ -481,11 +486,11 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
+ 
+ 		if (block_size < PAGE_SIZE) {
+ 			sg_set_page(&write_sg[i], m.page, block_size,
+-				    block_size);
++				    m.piter.sg->offset + block_size);
+ 		} else {
+ 			sg_miter_next(&m);
+ 			sg_set_page(&write_sg[i], m.page, block_size,
+-				    0);
++				    m.piter.sg->offset);
+ 		}
+ 		len -= block_size;
+ 		i++;
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index b52bf3cad494..b335709f050f 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -1570,7 +1570,7 @@ bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
+ void transport_generic_request_failure(struct se_cmd *cmd,
+ 		sense_reason_t sense_reason)
+ {
+-	int ret = 0;
++	int ret = 0, post_ret = 0;
+ 
+ 	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
+ 		" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
+@@ -1593,7 +1593,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
+ 	 */
+ 	if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
+ 	     cmd->transport_complete_callback)
+-		cmd->transport_complete_callback(cmd, false);
++		cmd->transport_complete_callback(cmd, false, &post_ret);
+ 
+ 	switch (sense_reason) {
+ 	case TCM_NON_EXISTENT_LUN:
+@@ -1941,11 +1941,13 @@ static void target_complete_ok_work(struct work_struct *work)
+ 	 */
+ 	if (cmd->transport_complete_callback) {
+ 		sense_reason_t rc;
++		bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
++		bool zero_dl = !(cmd->data_length);
++		int post_ret = 0;
+ 
+-		rc = cmd->transport_complete_callback(cmd, true);
+-		if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
+-			if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
+-			    !cmd->data_length)
++		rc = cmd->transport_complete_callback(cmd, true, &post_ret);
++		if (!rc && !post_ret) {
++			if (caw && zero_dl)
+ 				goto queue_rsp;
+ 
+ 			return;
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 0822bf1ed2e5..c0ed832d8ad5 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1720,6 +1720,11 @@ static const struct usb_device_id acm_ids[] = {
+ 	},
+ #endif
+ 
++	/* Samsung phone in firmware update mode */
++	{ USB_DEVICE(0x04e8, 0x685d),
++	.driver_info = IGNORE_DEVICE,
++	},
++
+ 	/* Exclude Infineon Flash Loader utility */
+ 	{ USB_DEVICE(0x058b, 0x0041),
+ 	.driver_info = IGNORE_DEVICE,
+diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
+index 2ed1695ff5ad..cce32e91fd9e 100644
+--- a/drivers/usb/musb/musb_cppi41.c
++++ b/drivers/usb/musb/musb_cppi41.c
+@@ -9,9 +9,9 @@
+ 
+ #define RNDIS_REG(x) (0x80 + ((x - 1) * 4))
+ 
+-#define EP_MODE_AUTOREG_NONE		0
+-#define EP_MODE_AUTOREG_ALL_NEOP	1
+-#define EP_MODE_AUTOREG_ALWAYS		3
++#define EP_MODE_AUTOREQ_NONE		0
++#define EP_MODE_AUTOREQ_ALL_NEOP	1
++#define EP_MODE_AUTOREQ_ALWAYS		3
+ 
+ #define EP_MODE_DMA_TRANSPARENT		0
+ #define EP_MODE_DMA_RNDIS		1
+@@ -376,19 +376,19 @@ static bool cppi41_configure_channel(struct dma_channel *channel,
+ 
+ 			/* auto req */
+ 			cppi41_set_autoreq_mode(cppi41_channel,
+-					EP_MODE_AUTOREG_ALL_NEOP);
++					EP_MODE_AUTOREQ_ALL_NEOP);
+ 		} else {
+ 			musb_writel(musb->ctrl_base,
+ 					RNDIS_REG(cppi41_channel->port_num), 0);
+ 			cppi41_set_dma_mode(cppi41_channel,
+ 					EP_MODE_DMA_TRANSPARENT);
+ 			cppi41_set_autoreq_mode(cppi41_channel,
+-					EP_MODE_AUTOREG_NONE);
++					EP_MODE_AUTOREQ_NONE);
+ 		}
+ 	} else {
+ 		/* fallback mode */
+ 		cppi41_set_dma_mode(cppi41_channel, EP_MODE_DMA_TRANSPARENT);
+-		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREG_NONE);
++		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
+ 		len = min_t(u32, packet_sz, len);
+ 	}
+ 	cppi41_channel->prog_len = len;
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index c61684e69174..f288f3c1f5e2 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -162,6 +162,8 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
+ 	{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+ 	{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
++	{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
++	{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
+ 	{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+ 	{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
+ 	{ USB_DEVICE(0x1BA4, 0x0002) },	/* Silicon Labs 358x factory default */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 81f6a572f016..9bab34cf01d4 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -315,6 +315,7 @@ static void option_instat_callback(struct urb *urb);
+ #define TOSHIBA_PRODUCT_G450			0x0d45
+ 
+ #define ALINK_VENDOR_ID				0x1e0e
++#define SIMCOM_PRODUCT_SIM7100E			0x9001 /* Yes, ALINK_VENDOR_ID */
+ #define ALINK_PRODUCT_PH300			0x9100
+ #define ALINK_PRODUCT_3GU			0x9200
+ 
+@@ -615,6 +616,10 @@ static const struct option_blacklist_info zte_1255_blacklist = {
+ 	.reserved = BIT(3) | BIT(4),
+ };
+ 
++static const struct option_blacklist_info simcom_sim7100e_blacklist = {
++	.reserved = BIT(5) | BIT(6),
++};
++
+ static const struct option_blacklist_info telit_le910_blacklist = {
+ 	.sendsetup = BIT(0),
+ 	.reserved = BIT(1) | BIT(2),
+@@ -1130,6 +1135,8 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
+ 	{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
++	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+@@ -1645,6 +1652,8 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
+ 	{ USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
++	  .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist },
+ 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
+ 	  .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
+ 	},
+diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
+index ee59b74768d9..beaa7cc4e857 100644
+--- a/drivers/virtio/virtio.c
++++ b/drivers/virtio/virtio.c
+@@ -238,6 +238,7 @@ static int virtio_init(void)
+ static void __exit virtio_exit(void)
+ {
+ 	bus_unregister(&virtio_bus);
++	ida_destroy(&virtio_index_ida);
+ }
+ core_initcall(virtio_init);
+ module_exit(virtio_exit);
+diff --git a/fs/bio.c b/fs/bio.c
+index e7fb3f82f5f5..6405b44000cb 100644
+--- a/fs/bio.c
++++ b/fs/bio.c
+@@ -1051,15 +1051,19 @@ int bio_uncopy_user(struct bio *bio)
+ 	if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
+ 		/*
+ 		 * if we're in a workqueue, the request is orphaned, so
+-		 * don't copy into a random user address space, just free.
++		 * don't copy into a random user address space, just free
++		 * and return -EINTR so user space doesn't expect any data.
+ 		 */
+ 		if (current->mm)
+ 			ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
+ 					     bmd->nr_sgvecs, bio_data_dir(bio) == READ,
+ 					     0, bmd->is_our_pages);
+-		else if (bmd->is_our_pages)
+-			bio_for_each_segment_all(bvec, bio, i)
+-				__free_page(bvec->bv_page);
++		else {
++			ret = -EINTR;
++			if (bmd->is_our_pages)
++				bio_for_each_segment_all(bvec, bio, i)
++					__free_page(bvec->bv_page);
++		}
+ 	}
+ 	bio_free_map_data(bmd);
+ 	bio_put(bio);
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 3ec1cb0808c3..2622ec8a76f0 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -2340,6 +2340,7 @@ int open_ctree(struct super_block *sb,
+ 	bh = btrfs_read_dev_super(fs_devices->latest_bdev);
+ 	if (!bh) {
+ 		err = -EINVAL;
++		brelse(bh);
+ 		goto fail_alloc;
+ 	}
+ 
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 264be61a3f40..89b5868ccfc7 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -7343,15 +7343,28 @@ int btrfs_readpage(struct file *file, struct page *page)
+ static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
+ {
+ 	struct extent_io_tree *tree;
+-
++	struct inode *inode = page->mapping->host;
++	int ret;
+ 
+ 	if (current->flags & PF_MEMALLOC) {
+ 		redirty_page_for_writepage(wbc, page);
+ 		unlock_page(page);
+ 		return 0;
+ 	}
++
++	/*
++	 * If we are under memory pressure we will call this directly from the
++	 * VM; we need to make sure we have the inode referenced for the ordered
++	 * extent.  If not, just return as if we didn't do anything.
++	 */
++	if (!igrab(inode)) {
++		redirty_page_for_writepage(wbc, page);
++		return AOP_WRITEPAGE_ACTIVATE;
++	}
+ 	tree = &BTRFS_I(page->mapping->host)->io_tree;
+-	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
++	ret = extent_write_full_page(tree, page, btrfs_get_extent, wbc);
++	btrfs_add_delayed_iput(inode);
++	return ret;
+ }
+ 
+ static int btrfs_writepages(struct address_space *mapping,
+@@ -8404,9 +8417,11 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
+ 	/*
+ 	 * 2 items for inode item and ref
+ 	 * 2 items for dir items
++	 * 1 item for updating parent inode item
++	 * 1 item for the inline extent item
+ 	 * 1 item for xattr if selinux is on
+ 	 */
+-	trans = btrfs_start_transaction(root, 5);
++	trans = btrfs_start_transaction(root, 7);
+ 	if (IS_ERR(trans))
+ 		return PTR_ERR(trans);
+ 
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index 76736b57de5e..82892b18a744 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -1335,7 +1335,21 @@ static int read_symlink(struct btrfs_root *root,
+ 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ 	if (ret < 0)
+ 		goto out;
+-	BUG_ON(ret);
++	if (ret) {
++		/*
++		 * An empty symlink inode. Can happen in rare error paths when
++		 * creating a symlink (transaction committed before the inode
++		 * eviction handler removed the symlink inode items and a crash
++		 * happened in between, or the subvol was snapshotted in between).
++		 * Print an informative message to dmesg/syslog so that the user
++		 * can delete the symlink.
++		 */
++		btrfs_err(root->fs_info,
++			  "Found empty symlink inode %llu at root %llu",
++			  ino, root->root_key.objectid);
++		ret = -EIO;
++		goto out;
++	}
+ 
+ 	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ 			struct btrfs_file_extent_item);
+diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
+index 25437280a207..04091cd05095 100644
+--- a/fs/hostfs/hostfs_kern.c
++++ b/fs/hostfs/hostfs_kern.c
+@@ -726,15 +726,13 @@ static int hostfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
+ 
+ 	init_special_inode(inode, mode, dev);
+ 	err = do_mknod(name, mode, MAJOR(dev), MINOR(dev));
+-	if (!err)
++	if (err)
+ 		goto out_free;
+ 
+ 	err = read_name(inode, name);
+ 	__putname(name);
+ 	if (err)
+ 		goto out_put;
+-	if (err)
+-		goto out_put;
+ 
+ 	d_instantiate(dentry, inode);
+ 	return 0;
+diff --git a/fs/lockd/host.c b/fs/lockd/host.c
+index 969d589c848d..b5f3c3ab0d5f 100644
+--- a/fs/lockd/host.c
++++ b/fs/lockd/host.c
+@@ -116,7 +116,7 @@ static struct nlm_host *nlm_alloc_host(struct nlm_lookup_host_info *ni,
+ 		atomic_inc(&nsm->sm_count);
+ 	else {
+ 		host = NULL;
+-		nsm = nsm_get_handle(ni->sap, ni->salen,
++		nsm = nsm_get_handle(ni->net, ni->sap, ni->salen,
+ 					ni->hostname, ni->hostname_len);
+ 		if (unlikely(nsm == NULL)) {
+ 			dprintk("lockd: %s failed; no nsm handle\n",
+@@ -534,17 +534,18 @@ static struct nlm_host *next_host_state(struct hlist_head *cache,
+ 
+ /**
+  * nlm_host_rebooted - Release all resources held by rebooted host
++ * @net:  network namespace
+  * @info: pointer to decoded results of NLM_SM_NOTIFY call
+  *
+  * We were notified that the specified host has rebooted.  Release
+  * all resources held by that peer.
+  */
+-void nlm_host_rebooted(const struct nlm_reboot *info)
++void nlm_host_rebooted(const struct net *net, const struct nlm_reboot *info)
+ {
+ 	struct nsm_handle *nsm;
+ 	struct nlm_host	*host;
+ 
+-	nsm = nsm_reboot_lookup(info);
++	nsm = nsm_reboot_lookup(net, info);
+ 	if (unlikely(nsm == NULL))
+ 		return;
+ 
+diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
+index 6ae664b489af..13fac49aff7f 100644
+--- a/fs/lockd/mon.c
++++ b/fs/lockd/mon.c
+@@ -51,7 +51,6 @@ struct nsm_res {
+ };
+ 
+ static const struct rpc_program	nsm_program;
+-static				LIST_HEAD(nsm_handles);
+ static				DEFINE_SPINLOCK(nsm_lock);
+ 
+ /*
+@@ -259,33 +258,35 @@ void nsm_unmonitor(const struct nlm_host *host)
+ 	}
+ }
+ 
+-static struct nsm_handle *nsm_lookup_hostname(const char *hostname,
+-					      const size_t len)
++static struct nsm_handle *nsm_lookup_hostname(const struct list_head *nsm_handles,
++					const char *hostname, const size_t len)
+ {
+ 	struct nsm_handle *nsm;
+ 
+-	list_for_each_entry(nsm, &nsm_handles, sm_link)
++	list_for_each_entry(nsm, nsm_handles, sm_link)
+ 		if (strlen(nsm->sm_name) == len &&
+ 		    memcmp(nsm->sm_name, hostname, len) == 0)
+ 			return nsm;
+ 	return NULL;
+ }
+ 
+-static struct nsm_handle *nsm_lookup_addr(const struct sockaddr *sap)
++static struct nsm_handle *nsm_lookup_addr(const struct list_head *nsm_handles,
++					const struct sockaddr *sap)
+ {
+ 	struct nsm_handle *nsm;
+ 
+-	list_for_each_entry(nsm, &nsm_handles, sm_link)
++	list_for_each_entry(nsm, nsm_handles, sm_link)
+ 		if (rpc_cmp_addr(nsm_addr(nsm), sap))
+ 			return nsm;
+ 	return NULL;
+ }
+ 
+-static struct nsm_handle *nsm_lookup_priv(const struct nsm_private *priv)
++static struct nsm_handle *nsm_lookup_priv(const struct list_head *nsm_handles,
++					const struct nsm_private *priv)
+ {
+ 	struct nsm_handle *nsm;
+ 
+-	list_for_each_entry(nsm, &nsm_handles, sm_link)
++	list_for_each_entry(nsm, nsm_handles, sm_link)
+ 		if (memcmp(nsm->sm_priv.data, priv->data,
+ 					sizeof(priv->data)) == 0)
+ 			return nsm;
+@@ -350,6 +351,7 @@ static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap,
+ 
+ /**
+  * nsm_get_handle - Find or create a cached nsm_handle
++ * @net: network namespace
+  * @sap: pointer to socket address of handle to find
+  * @salen: length of socket address
+  * @hostname: pointer to C string containing hostname to find
+@@ -362,11 +364,13 @@ static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap,
+  * @hostname cannot be found in the handle cache.  Returns NULL if
+  * an error occurs.
+  */
+-struct nsm_handle *nsm_get_handle(const struct sockaddr *sap,
++struct nsm_handle *nsm_get_handle(const struct net *net,
++				  const struct sockaddr *sap,
+ 				  const size_t salen, const char *hostname,
+ 				  const size_t hostname_len)
+ {
+ 	struct nsm_handle *cached, *new = NULL;
++	struct lockd_net *ln = net_generic(net, lockd_net_id);
+ 
+ 	if (hostname && memchr(hostname, '/', hostname_len) != NULL) {
+ 		if (printk_ratelimit()) {
+@@ -381,9 +385,10 @@ retry:
+ 	spin_lock(&nsm_lock);
+ 
+ 	if (nsm_use_hostnames && hostname != NULL)
+-		cached = nsm_lookup_hostname(hostname, hostname_len);
++		cached = nsm_lookup_hostname(&ln->nsm_handles,
++					hostname, hostname_len);
+ 	else
+-		cached = nsm_lookup_addr(sap);
++		cached = nsm_lookup_addr(&ln->nsm_handles, sap);
+ 
+ 	if (cached != NULL) {
+ 		atomic_inc(&cached->sm_count);
+@@ -397,7 +402,7 @@ retry:
+ 	}
+ 
+ 	if (new != NULL) {
+-		list_add(&new->sm_link, &nsm_handles);
++		list_add(&new->sm_link, &ln->nsm_handles);
+ 		spin_unlock(&nsm_lock);
+ 		dprintk("lockd: created nsm_handle for %s (%s)\n",
+ 				new->sm_name, new->sm_addrbuf);
+@@ -414,19 +419,22 @@ retry:
+ 
+ /**
+  * nsm_reboot_lookup - match NLMPROC_SM_NOTIFY arguments to an nsm_handle
++ * @net:  network namespace
+  * @info: pointer to NLMPROC_SM_NOTIFY arguments
+  *
+  * Returns a matching nsm_handle if found in the nsm cache. The returned
+  * nsm_handle's reference count is bumped. Otherwise returns NULL if some
+  * error occurred.
+  */
+-struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info)
++struct nsm_handle *nsm_reboot_lookup(const struct net *net,
++				const struct nlm_reboot *info)
+ {
+ 	struct nsm_handle *cached;
++	struct lockd_net *ln = net_generic(net, lockd_net_id);
+ 
+ 	spin_lock(&nsm_lock);
+ 
+-	cached = nsm_lookup_priv(&info->priv);
++	cached = nsm_lookup_priv(&ln->nsm_handles, &info->priv);
+ 	if (unlikely(cached == NULL)) {
+ 		spin_unlock(&nsm_lock);
+ 		dprintk("lockd: never saw rebooted peer '%.*s' before\n",
+diff --git a/fs/lockd/netns.h b/fs/lockd/netns.h
+index 5010b55628b4..414da99744e9 100644
+--- a/fs/lockd/netns.h
++++ b/fs/lockd/netns.h
+@@ -16,6 +16,7 @@ struct lockd_net {
+ 	spinlock_t nsm_clnt_lock;
+ 	unsigned int nsm_users;
+ 	struct rpc_clnt *nsm_clnt;
++	struct list_head nsm_handles;
+ };
+ 
+ extern int lockd_net_id;
+diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
+index 59a53f664005..bb1ad4df024d 100644
+--- a/fs/lockd/svc.c
++++ b/fs/lockd/svc.c
+@@ -583,6 +583,7 @@ static int lockd_init_net(struct net *net)
+ 	INIT_DELAYED_WORK(&ln->grace_period_end, grace_ender);
+ 	INIT_LIST_HEAD(&ln->grace_list);
+ 	spin_lock_init(&ln->nsm_clnt_lock);
++	INIT_LIST_HEAD(&ln->nsm_handles);
+ 	return 0;
+ }
+ 
+diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
+index b147d1ae71fd..09c576f26c7b 100644
+--- a/fs/lockd/svc4proc.c
++++ b/fs/lockd/svc4proc.c
+@@ -421,7 +421,7 @@ nlm4svc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
+ 		return rpc_system_err;
+ 	}
+ 
+-	nlm_host_rebooted(argp);
++	nlm_host_rebooted(SVC_NET(rqstp), argp);
+ 	return rpc_success;
+ }
+ 
+diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
+index 21171f0c6477..fb26b9f522e7 100644
+--- a/fs/lockd/svcproc.c
++++ b/fs/lockd/svcproc.c
+@@ -464,7 +464,7 @@ nlmsvc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
+ 		return rpc_system_err;
+ 	}
+ 
+-	nlm_host_rebooted(argp);
++	nlm_host_rebooted(SVC_NET(rqstp), argp);
+ 	return rpc_success;
+ }
+ 
+diff --git a/fs/locks.c b/fs/locks.c
+index 0d2b5febc627..c8779651ccc7 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -1971,7 +1971,6 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
+ 		goto out;
+ 	}
+ 
+-again:
+ 	error = flock_to_posix_lock(filp, file_lock, &flock);
+ 	if (error)
+ 		goto out;
+@@ -2002,19 +2001,22 @@ again:
+ 	 * Attempt to detect a close/fcntl race and recover by
+ 	 * releasing the lock that was just acquired.
+ 	 */
+-	/*
+-	 * we need that spin_lock here - it prevents reordering between
+-	 * update of inode->i_flock and check for it done in close().
+-	 * rcu_read_lock() wouldn't do.
+-	 */
+-	spin_lock(&current->files->file_lock);
+-	f = fcheck(fd);
+-	spin_unlock(&current->files->file_lock);
+-	if (!error && f != filp && flock.l_type != F_UNLCK) {
+-		flock.l_type = F_UNLCK;
+-		goto again;
++	if (!error && file_lock->fl_type != F_UNLCK) {
++		/*
++		 * We need that spin_lock here - it prevents reordering between
++		 * update of inode->i_flock and check for it done in
++		 * close(). rcu_read_lock() wouldn't do.
++		 */
++		spin_lock(&current->files->file_lock);
++		f = fcheck(fd);
++		spin_unlock(&current->files->file_lock);
++		if (f != filp) {
++			file_lock->fl_type = F_UNLCK;
++			error = do_lock_file_wait(filp, cmd, file_lock);
++			WARN_ON_ONCE(error);
++			error = -EBADF;
++		}
+ 	}
+-
+ out:
+ 	locks_free_lock(file_lock);
+ 	return error;
+@@ -2089,7 +2091,6 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
+ 		goto out;
+ 	}
+ 
+-again:
+ 	error = flock64_to_posix_lock(filp, file_lock, &flock);
+ 	if (error)
+ 		goto out;
+@@ -2120,14 +2121,22 @@ again:
+ 	 * Attempt to detect a close/fcntl race and recover by
+ 	 * releasing the lock that was just acquired.
+ 	 */
+-	spin_lock(&current->files->file_lock);
+-	f = fcheck(fd);
+-	spin_unlock(&current->files->file_lock);
+-	if (!error && f != filp && flock.l_type != F_UNLCK) {
+-		flock.l_type = F_UNLCK;
+-		goto again;
++	if (!error && file_lock->fl_type != F_UNLCK) {
++		/*
++		 * We need that spin_lock here - it prevents reordering between
++		 * update of inode->i_flock and check for it done in
++		 * close(). rcu_read_lock() wouldn't do.
++		 */
++		spin_lock(&current->files->file_lock);
++		f = fcheck(fd);
++		spin_unlock(&current->files->file_lock);
++		if (f != filp) {
++			file_lock->fl_type = F_UNLCK;
++			error = do_lock_file_wait(filp, cmd, file_lock);
++			WARN_ON_ONCE(error);
++			error = -EBADF;
++		}
+ 	}
+-
+ out:
+ 	locks_free_lock(file_lock);
+ 	return error;
+diff --git a/fs/namei.c b/fs/namei.c
+index d1c0b91b4534..b1b1781faca1 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -3116,6 +3116,10 @@ opened:
+ 			goto exit_fput;
+ 	}
+ out:
++	if (unlikely(error > 0)) {
++		WARN_ON(1);
++		error = -EINVAL;
++	}
+ 	if (got_write)
+ 		mnt_drop_write(nd->path.mnt);
+ 	path_put(&save_parent);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index aa62c7308a1b..ae85a71e5045 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -2179,9 +2179,9 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
+ 		dentry = d_add_unique(dentry, igrab(state->inode));
+ 		if (dentry == NULL) {
+ 			dentry = opendata->dentry;
+-		} else if (dentry != ctx->dentry) {
++		} else {
+ 			dput(ctx->dentry);
+-			ctx->dentry = dget(dentry);
++			ctx->dentry = dentry;
+ 		}
+ 		nfs_set_verifier(dentry,
+ 				nfs_save_change_attribute(opendata->dir->d_inode));
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 52c9b880697e..fbe7e2f90a3c 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1436,7 +1436,7 @@ restart:
+ 					spin_unlock(&state->state_lock);
+ 				}
+ 				nfs4_put_open_state(state);
+-				clear_bit(NFS4CLNT_RECLAIM_NOGRACE,
++				clear_bit(NFS_STATE_RECLAIM_NOGRACE,
+ 					&state->flags);
+ 				spin_lock(&sp->so_lock);
+ 				goto restart;
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index d20f37d1c6e7..4fe8b1082cf3 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -172,7 +172,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
+ 	if (!priv->task)
+ 		return ERR_PTR(-ESRCH);
+ 
+-	mm = mm_access(priv->task, PTRACE_MODE_READ);
++	mm = mm_access(priv->task, PTRACE_MODE_READ_FSCREDS);
+ 	if (!mm || IS_ERR(mm))
+ 		return mm;
+ 	down_read(&mm->mmap_sem);
+@@ -1186,7 +1186,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
+ 	if (!pm.buffer)
+ 		goto out_task;
+ 
+-	mm = mm_access(task, PTRACE_MODE_READ);
++	mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
+ 	ret = PTR_ERR(mm);
+ 	if (!mm || IS_ERR(mm))
+ 		goto out_free;
+diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
+index 56123a6f462e..123c19890b14 100644
+--- a/fs/proc/task_nommu.c
++++ b/fs/proc/task_nommu.c
+@@ -223,7 +223,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
+ 	if (!priv->task)
+ 		return ERR_PTR(-ESRCH);
+ 
+-	mm = mm_access(priv->task, PTRACE_MODE_READ);
++	mm = mm_access(priv->task, PTRACE_MODE_READ_FSCREDS);
+ 	if (!mm || IS_ERR(mm)) {
+ 		put_task_struct(priv->task);
+ 		priv->task = NULL;
+diff --git a/fs/splice.c b/fs/splice.c
+index c915e215a50e..76cbc01df6a4 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -949,6 +949,7 @@ ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd,
+ 
+ 	splice_from_pipe_begin(sd);
+ 	do {
++		cond_resched();
+ 		ret = splice_from_pipe_next(pipe, sd);
+ 		if (ret > 0)
+ 			ret = splice_from_pipe_feed(pipe, sd, actor);
+@@ -1175,7 +1176,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
+ 	long ret, bytes;
+ 	umode_t i_mode;
+ 	size_t len;
+-	int i, flags;
++	int i, flags, more;
+ 
+ 	/*
+ 	 * We require the input being a regular file, as we don't want to
+@@ -1218,6 +1219,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
+ 	 * Don't block on output, we have to drain the direct pipe.
+ 	 */
+ 	sd->flags &= ~SPLICE_F_NONBLOCK;
++	more = sd->flags & SPLICE_F_MORE;
+ 
+ 	while (len) {
+ 		size_t read_len;
+@@ -1231,6 +1233,15 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
+ 		sd->total_len = read_len;
+ 
+ 		/*
++		 * If more data is pending, set SPLICE_F_MORE.
++		 * If this is the last data and SPLICE_F_MORE was not set
++		 * initially, clear it.
++		 */
++		if (read_len < len)
++			sd->flags |= SPLICE_F_MORE;
++		else if (!more)
++			sd->flags &= ~SPLICE_F_MORE;
++		/*
+ 		 * NOTE: nonblocking mode only applies to the input. We
+ 		 * must not do the output in nonblocking mode as then we
+ 		 * could get stuck data in the internal pipe:
+diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h
+index 9a33c5f7e126..f6c229e2bffa 100644
+--- a/include/linux/enclosure.h
++++ b/include/linux/enclosure.h
+@@ -29,7 +29,11 @@
+ /* A few generic types ... taken from ses-2 */
+ enum enclosure_component_type {
+ 	ENCLOSURE_COMPONENT_DEVICE = 0x01,
++	ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS = 0x07,
++	ENCLOSURE_COMPONENT_SCSI_TARGET_PORT = 0x14,
++	ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT = 0x15,
+ 	ENCLOSURE_COMPONENT_ARRAY_DEVICE = 0x17,
++	ENCLOSURE_COMPONENT_SAS_EXPANDER = 0x18,
+ };
+ 
+ /* ses-2 common element status */
+diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
+index 88c0cf0079ad..167c976554fa 100644
+--- a/include/linux/ipv6.h
++++ b/include/linux/ipv6.h
+@@ -30,6 +30,7 @@ struct ipv6_devconf {
+ #endif
+ 	__s32		max_addresses;
+ 	__s32		accept_ra_defrtr;
++	__s32		accept_ra_min_hop_limit;
+ 	__s32		accept_ra_pinfo;
+ #ifdef CONFIG_IPV6_ROUTER_PREF
+ 	__s32		accept_ra_rtr_pref;
+diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
+index dcaad79f54ed..0adf073f13b3 100644
+--- a/include/linux/lockd/lockd.h
++++ b/include/linux/lockd/lockd.h
+@@ -236,7 +236,8 @@ void		  nlm_rebind_host(struct nlm_host *);
+ struct nlm_host * nlm_get_host(struct nlm_host *);
+ void		  nlm_shutdown_hosts(void);
+ void		  nlm_shutdown_hosts_net(struct net *net);
+-void		  nlm_host_rebooted(const struct nlm_reboot *);
++void		  nlm_host_rebooted(const struct net *net,
++					const struct nlm_reboot *);
+ 
+ /*
+  * Host monitoring
+@@ -244,11 +245,13 @@ void		  nlm_host_rebooted(const struct nlm_reboot *);
+ int		  nsm_monitor(const struct nlm_host *host);
+ void		  nsm_unmonitor(const struct nlm_host *host);
+ 
+-struct nsm_handle *nsm_get_handle(const struct sockaddr *sap,
++struct nsm_handle *nsm_get_handle(const struct net *net,
++					const struct sockaddr *sap,
+ 					const size_t salen,
+ 					const char *hostname,
+ 					const size_t hostname_len);
+-struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info);
++struct nsm_handle *nsm_reboot_lookup(const struct net *net,
++					const struct nlm_reboot *info);
+ void		  nsm_release(struct nsm_handle *nsm);
+ 
+ /*
+diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
+index f4bf1b593327..1c532adcedc5 100644
+--- a/include/linux/nfs_fs.h
++++ b/include/linux/nfs_fs.h
+@@ -604,9 +604,7 @@ static inline void nfs3_forget_cached_acls(struct inode *inode)
+ 
+ static inline loff_t nfs_size_to_loff_t(__u64 size)
+ {
+-	if (size > (__u64) OFFSET_MAX - 1)
+-		return OFFSET_MAX - 1;
+-	return (loff_t) size;
++	return min_t(u64, size, OFFSET_MAX);
+ }
+ 
+ static inline ino_t
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 16e753a9922a..e492ab7aadbf 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -149,6 +149,7 @@ struct sk_buff;
+ #else
+ #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
+ #endif
++extern int sysctl_max_skb_frags;
+ 
+ typedef struct skb_frag_struct skb_frag_t;
+ 
+diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
+index 0ecc46e7af3d..c1248996006f 100644
+--- a/include/linux/tracepoint.h
++++ b/include/linux/tracepoint.h
+@@ -14,8 +14,11 @@
+  * See the file COPYING for more details.
+  */
+ 
++#include <linux/smp.h>
+ #include <linux/errno.h>
+ #include <linux/types.h>
++#include <linux/percpu.h>
++#include <linux/cpumask.h>
+ #include <linux/rcupdate.h>
+ #include <linux/static_key.h>
+ 
+@@ -126,6 +129,9 @@ static inline void tracepoint_synchronize_unregister(void)
+ 		void *it_func;						\
+ 		void *__data;						\
+ 									\
++		if (!cpu_online(raw_smp_processor_id()))		\
++			return;						\
++									\
+ 		if (!(cond))						\
+ 			return;						\
+ 		prercu;							\
+diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
+index cbf2be37c91a..4dae9d5268ca 100644
+--- a/include/net/ip_fib.h
++++ b/include/net/ip_fib.h
+@@ -59,6 +59,7 @@ struct fib_nh_exception {
+ 	struct rtable __rcu		*fnhe_rth_input;
+ 	struct rtable __rcu		*fnhe_rth_output;
+ 	unsigned long			fnhe_stamp;
++	struct rcu_head			rcu;
+ };
+ 
+ struct fnhe_hash_bucket {
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index 085e6bedf393..5c5700bd1345 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -457,7 +457,7 @@ struct se_cmd {
+ 	sense_reason_t		(*execute_cmd)(struct se_cmd *);
+ 	sense_reason_t		(*execute_rw)(struct se_cmd *, struct scatterlist *,
+ 					      u32, enum dma_data_direction);
+-	sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool);
++	sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *);
+ 
+ 	unsigned char		*t_task_cdb;
+ 	unsigned char		__t_task_cdb[TCM_MAX_COMMAND_SIZE];
+diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
+index 593b0e32d956..25955206757a 100644
+--- a/include/uapi/linux/ipv6.h
++++ b/include/uapi/linux/ipv6.h
+@@ -163,6 +163,8 @@ enum {
+ 	DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL,
+ 	DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL,
+ 	DEVCONF_SUPPRESS_FRAG_NDISC,
++	DEVCONF_USE_OIF_ADDRS_ONLY,
++	DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT,
+ 	DEVCONF_MAX
+ };
+ 
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 75a976a8ed58..504bb4b0d226 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -1230,6 +1230,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
+ 	if (!desc)
+ 		return NULL;
+ 
++	chip_bus_lock(desc);
+ 	raw_spin_lock_irqsave(&desc->lock, flags);
+ 
+ 	/*
+@@ -1243,7 +1244,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
+ 		if (!action) {
+ 			WARN(1, "Trying to free already-free IRQ %d\n", irq);
+ 			raw_spin_unlock_irqrestore(&desc->lock, flags);
+-
++			chip_bus_sync_unlock(desc);
+ 			return NULL;
+ 		}
+ 
+@@ -1266,6 +1267,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
+ #endif
+ 
+ 	raw_spin_unlock_irqrestore(&desc->lock, flags);
++	chip_bus_sync_unlock(desc);
+ 
+ 	unregister_handler_proc(irq, action);
+ 
+@@ -1339,9 +1341,7 @@ void free_irq(unsigned int irq, void *dev_id)
+ 		desc->affinity_notify = NULL;
+ #endif
+ 
+-	chip_bus_lock(desc);
+ 	kfree(__free_irq(irq, dev_id));
+-	chip_bus_sync_unlock(desc);
+ }
+ EXPORT_SYMBOL(free_irq);
+ 
+diff --git a/kernel/resource.c b/kernel/resource.c
+index 3f285dce9347..449282e48bb1 100644
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -961,9 +961,10 @@ struct resource * __request_region(struct resource *parent,
+ 		if (!conflict)
+ 			break;
+ 		if (conflict != parent) {
+-			parent = conflict;
+-			if (!(conflict->flags & IORESOURCE_BUSY))
++			if (!(conflict->flags & IORESOURCE_BUSY)) {
++				parent = conflict;
+ 				continue;
++			}
+ 		}
+ 		if (conflict->flags & flags & IORESOURCE_MUXED) {
+ 			add_wait_queue(&muxed_resource_wait, &wait);
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 0bcdceaca6e2..3800316d7424 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -941,6 +941,13 @@ inline int task_curr(const struct task_struct *p)
+ 	return cpu_curr(task_cpu(p)) == p;
+ }
+ 
++/*
++ * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock;
++ * use the balance_callback list if you want balancing.
++ *
++ * This means any call to check_class_changed() must be followed by a call to
++ * balance_callback().
++ */
+ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
+ 				       const struct sched_class *prev_class,
+ 				       int oldprio)
+@@ -1325,8 +1332,12 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
+ 
+ 	p->state = TASK_RUNNING;
+ #ifdef CONFIG_SMP
+-	if (p->sched_class->task_woken)
++	if (p->sched_class->task_woken) {
++		/*
++		 * XXX can drop rq->lock; most likely ok.
++		 */
+ 		p->sched_class->task_woken(rq, p);
++	}
+ 
+ 	if (rq->idle_stamp) {
+ 		u64 delta = rq_clock(rq) - rq->idle_stamp;
+@@ -1579,7 +1590,6 @@ out:
+  */
+ int wake_up_process(struct task_struct *p)
+ {
+-	WARN_ON(task_is_stopped_or_traced(p));
+ 	return try_to_wake_up(p, TASK_NORMAL, 0);
+ }
+ EXPORT_SYMBOL(wake_up_process);
+@@ -1911,18 +1921,30 @@ static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
+ }
+ 
+ /* rq->lock is NOT held, but preemption is disabled */
+-static inline void post_schedule(struct rq *rq)
++static void __balance_callback(struct rq *rq)
+ {
+-	if (rq->post_schedule) {
+-		unsigned long flags;
++	struct callback_head *head, *next;
++	void (*func)(struct rq *rq);
++	unsigned long flags;
+ 
+-		raw_spin_lock_irqsave(&rq->lock, flags);
+-		if (rq->curr->sched_class->post_schedule)
+-			rq->curr->sched_class->post_schedule(rq);
+-		raw_spin_unlock_irqrestore(&rq->lock, flags);
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	head = rq->balance_callback;
++	rq->balance_callback = NULL;
++	while (head) {
++		func = (void (*)(struct rq *))head->func;
++		next = head->next;
++		head->next = NULL;
++		head = next;
+ 
+-		rq->post_schedule = 0;
++		func(rq);
+ 	}
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++}
++
++static inline void balance_callback(struct rq *rq)
++{
++	if (unlikely(rq->balance_callback))
++		__balance_callback(rq);
+ }
+ 
+ #else
+@@ -1931,7 +1953,7 @@ static inline void pre_schedule(struct rq *rq, struct task_struct *p)
+ {
+ }
+ 
+-static inline void post_schedule(struct rq *rq)
++static inline void balance_callback(struct rq *rq)
+ {
+ }
+ 
+@@ -1952,7 +1974,7 @@ asmlinkage void schedule_tail(struct task_struct *prev)
+ 	 * FIXME: do we need to worry about rq being invalidated by the
+ 	 * task_switch?
+ 	 */
+-	post_schedule(rq);
++	balance_callback(rq);
+ 
+ #ifdef __ARCH_WANT_UNLOCKED_CTXSW
+ 	/* In this case, finish_task_switch does not reenable preemption */
+@@ -2449,7 +2471,7 @@ need_resched:
+ 	} else
+ 		raw_spin_unlock_irq(&rq->lock);
+ 
+-	post_schedule(rq);
++	balance_callback(rq);
+ 
+ 	sched_preempt_enable_no_resched();
+ 	if (need_resched())
+@@ -3075,7 +3097,11 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
+ 
+ 	check_class_changed(rq, p, prev_class, oldprio);
+ out_unlock:
++	preempt_disable(); /* prevent rq from going away on us */
+ 	__task_rq_unlock(rq);
++
++	balance_callback(rq);
++	preempt_enable();
+ }
+ #endif
+ void set_user_nice(struct task_struct *p, long nice)
+@@ -3430,10 +3456,17 @@ recheck:
+ 		enqueue_task(rq, p, 0);
+ 
+ 	check_class_changed(rq, p, prev_class, oldprio);
++	preempt_disable(); /* prevent rq from going away on us */
+ 	task_rq_unlock(rq, p, &flags);
+ 
+ 	rt_mutex_adjust_pi(p);
+ 
++	/*
++	 * Run balance callbacks after we've adjusted the PI chain.
++	 */
++	balance_callback(rq);
++	preempt_enable();
++
+ 	return 0;
+ }
+ 
+@@ -5020,11 +5053,11 @@ static int init_rootdomain(struct root_domain *rd)
+ {
+ 	memset(rd, 0, sizeof(*rd));
+ 
+-	if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
++	if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
+ 		goto out;
+-	if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
++	if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
+ 		goto free_span;
+-	if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
++	if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
+ 		goto free_online;
+ 
+ 	if (cpupri_init(&rd->cpupri) != 0)
+@@ -6516,7 +6549,7 @@ void __init sched_init(void)
+ 		rq->sd = NULL;
+ 		rq->rd = NULL;
+ 		rq->cpu_power = SCHED_POWER_SCALE;
+-		rq->post_schedule = 0;
++		rq->balance_callback = NULL;
+ 		rq->active_balance = 0;
+ 		rq->next_balance = jiffies;
+ 		rq->push_cpu = 0;
+diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
+index d8da01008d39..ecc371e86da1 100644
+--- a/kernel/sched/idle_task.c
++++ b/kernel/sched/idle_task.c
+@@ -19,11 +19,6 @@ static void pre_schedule_idle(struct rq *rq, struct task_struct *prev)
+ 	idle_exit_fair(rq);
+ 	rq_last_tick_reset(rq);
+ }
+-
+-static void post_schedule_idle(struct rq *rq)
+-{
+-	idle_enter_fair(rq);
+-}
+ #endif /* CONFIG_SMP */
+ /*
+  * Idle tasks are unconditionally rescheduled:
+@@ -37,8 +32,7 @@ static struct task_struct *pick_next_task_idle(struct rq *rq)
+ {
+ 	schedstat_inc(rq, sched_goidle);
+ #ifdef CONFIG_SMP
+-	/* Trigger the post schedule to do an idle_enter for CFS */
+-	rq->post_schedule = 1;
++	idle_enter_fair(rq);
+ #endif
+ 	return rq->idle;
+ }
+@@ -102,7 +96,6 @@ const struct sched_class idle_sched_class = {
+ #ifdef CONFIG_SMP
+ 	.select_task_rq		= select_task_rq_idle,
+ 	.pre_schedule		= pre_schedule_idle,
+-	.post_schedule		= post_schedule_idle,
+ #endif
+ 
+ 	.set_curr_task          = set_curr_task_idle,
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index e849d4070c7f..10edf9d2a8b7 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -315,6 +315,25 @@ static inline int has_pushable_tasks(struct rq *rq)
+ 	return !plist_head_empty(&rq->rt.pushable_tasks);
+ }
+ 
++static DEFINE_PER_CPU(struct callback_head, rt_push_head);
++static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
++
++static void push_rt_tasks(struct rq *);
++static void pull_rt_task(struct rq *);
++
++static inline void queue_push_tasks(struct rq *rq)
++{
++	if (!has_pushable_tasks(rq))
++		return;
++
++	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
++}
++
++static inline void queue_pull_task(struct rq *rq)
++{
++	queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
++}
++
+ static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
+ {
+ 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
+@@ -359,6 +378,9 @@ void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+ {
+ }
+ 
++static inline void queue_push_tasks(struct rq *rq)
++{
++}
+ #endif /* CONFIG_SMP */
+ 
+ static inline int on_rt_rq(struct sched_rt_entity *rt_se)
+@@ -1344,11 +1366,7 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
+ 		dequeue_pushable_task(rq, p);
+ 
+ #ifdef CONFIG_SMP
+-	/*
+-	 * We detect this state here so that we can avoid taking the RQ
+-	 * lock again later if there is no need to push
+-	 */
+-	rq->post_schedule = has_pushable_tasks(rq);
++	queue_push_tasks(rq);
+ #endif
+ 
+ 	return p;
+@@ -1636,14 +1654,15 @@ static void push_rt_tasks(struct rq *rq)
+ 		;
+ }
+ 
+-static int pull_rt_task(struct rq *this_rq)
++static void pull_rt_task(struct rq *this_rq)
+ {
+-	int this_cpu = this_rq->cpu, ret = 0, cpu;
++	int this_cpu = this_rq->cpu, cpu;
++	bool resched = false;
+ 	struct task_struct *p;
+ 	struct rq *src_rq;
+ 
+ 	if (likely(!rt_overloaded(this_rq)))
+-		return 0;
++		return;
+ 
+ 	/*
+ 	 * Match the barrier from rt_set_overloaded; this guarantees that if we
+@@ -1700,7 +1719,7 @@ static int pull_rt_task(struct rq *this_rq)
+ 			if (p->prio < src_rq->curr->prio)
+ 				goto skip;
+ 
+-			ret = 1;
++			resched = true;
+ 
+ 			deactivate_task(src_rq, p, 0);
+ 			set_task_cpu(p, this_cpu);
+@@ -1716,7 +1735,8 @@ skip:
+ 		double_unlock_balance(this_rq, src_rq);
+ 	}
+ 
+-	return ret;
++	if (resched)
++		resched_task(this_rq->curr);
+ }
+ 
+ static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
+@@ -1726,11 +1746,6 @@ static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
+ 		pull_rt_task(rq);
+ }
+ 
+-static void post_schedule_rt(struct rq *rq)
+-{
+-	push_rt_tasks(rq);
+-}
+-
+ /*
+  * If we are not running and we are not going to reschedule soon, we should
+  * try to push tasks away now
+@@ -1824,8 +1839,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
+ 	if (!p->on_rq || rq->rt.rt_nr_running)
+ 		return;
+ 
+-	if (pull_rt_task(rq))
+-		resched_task(rq->curr);
++	queue_pull_task(rq);
+ }
+ 
+ void init_sched_rt_class(void)
+@@ -1846,8 +1860,6 @@ void init_sched_rt_class(void)
+  */
+ static void switched_to_rt(struct rq *rq, struct task_struct *p)
+ {
+-	int check_resched = 1;
+-
+ 	/*
+ 	 * If we are already running, then there's nothing
+ 	 * that needs to be done. But if we are not running
+@@ -1857,13 +1869,12 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
+ 	 */
+ 	if (p->on_rq && rq->curr != p) {
+ #ifdef CONFIG_SMP
+-		if (rq->rt.overloaded && push_rt_task(rq) &&
+-		    /* Don't resched if we changed runqueues */
+-		    rq != task_rq(p))
+-			check_resched = 0;
+-#endif /* CONFIG_SMP */
+-		if (check_resched && p->prio < rq->curr->prio)
++		if (rq->rt.overloaded)
++			queue_push_tasks(rq);
++#else
++		if (p->prio < rq->curr->prio)
+ 			resched_task(rq->curr);
++#endif /* CONFIG_SMP */
+ 	}
+ }
+ 
+@@ -1884,14 +1895,13 @@ prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
+ 		 * may need to pull tasks to this runqueue.
+ 		 */
+ 		if (oldprio < p->prio)
+-			pull_rt_task(rq);
++			queue_pull_task(rq);
++
+ 		/*
+ 		 * If there's a higher priority task waiting to run
+-		 * then reschedule. Note, the above pull_rt_task
+-		 * can release the rq lock and p could migrate.
+-		 * Only reschedule if p is still on the same runqueue.
++		 * then reschedule.
+ 		 */
+-		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
++		if (p->prio > rq->rt.highest_prio.curr)
+ 			resched_task(p);
+ #else
+ 		/* For UP simply resched on drop of prio */
+@@ -2003,7 +2013,6 @@ const struct sched_class rt_sched_class = {
+ 	.rq_online              = rq_online_rt,
+ 	.rq_offline             = rq_offline_rt,
+ 	.pre_schedule		= pre_schedule_rt,
+-	.post_schedule		= post_schedule_rt,
+ 	.task_woken		= task_woken_rt,
+ 	.switched_from		= switched_from_rt,
+ #endif
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 1a1cdc3783ed..e09e3e0466f7 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -460,9 +460,10 @@ struct rq {
+ 
+ 	unsigned long cpu_power;
+ 
++	struct callback_head *balance_callback;
++
+ 	unsigned char idle_balance;
+ 	/* For active balancing */
+-	int post_schedule;
+ 	int active_balance;
+ 	int push_cpu;
+ 	struct cpu_stop_work active_balance_work;
+@@ -554,6 +555,21 @@ static inline u64 rq_clock_task(struct rq *rq)
+ 
+ #ifdef CONFIG_SMP
+ 
++static inline void
++queue_balance_callback(struct rq *rq,
++		       struct callback_head *head,
++		       void (*func)(struct rq *rq))
++{
++	lockdep_assert_held(&rq->lock);
++
++	if (unlikely(head->next))
++		return;
++
++	head->func = (void (*)(struct callback_head *))func;
++	head->next = rq->balance_callback;
++	rq->balance_callback = head;
++}
++
+ #define rcu_dereference_check_sched_domain(p) \
+ 	rcu_dereference_check((p), \
+ 			      lockdep_is_held(&sched_domains_mutex))
+@@ -981,7 +997,6 @@ struct sched_class {
+ 	void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
+ 
+ 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
+-	void (*post_schedule) (struct rq *this_rq);
+ 	void (*task_waking) (struct task_struct *task);
+ 	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
+ 
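
The two hunks above replace the old post_schedule hook with a generic balance-callback list. Below is a minimal userspace sketch of the pattern with the kernel-specific pieces (locking, per-CPU storage) stripped out: callbacks are queued at most once onto a singly linked list hanging off the runqueue and drained later in LIFO order (the core scheduler side that does the draining is outside this excerpt). The names mirror the patch; everything else is illustrative only.

#include <stdio.h>

struct callback_head {
	struct callback_head *next;
	void (*func)(struct callback_head *head);
};

struct rq {
	struct callback_head *balance_callback;
};

static void queue_balance_callback(struct rq *rq, struct callback_head *head,
				   void (*func)(struct callback_head *))
{
	if (head->next)		/* non-NULL next means "already queued" */
		return;
	head->func = func;
	head->next = rq->balance_callback;
	rq->balance_callback = head;
}

static void run_balance_callbacks(struct rq *rq)
{
	struct callback_head *head = rq->balance_callback;

	rq->balance_callback = NULL;
	while (head) {
		struct callback_head *next = head->next;

		head->next = NULL;	/* re-arm for the next queueing */
		head->func(head);
		head = next;
	}
}

static void push_cb(struct callback_head *head) { printf("push\n"); }
static void pull_cb(struct callback_head *head) { printf("pull\n"); }

int main(void)
{
	struct rq rq = { 0 };
	struct callback_head push = { 0 }, pull = { 0 };

	queue_balance_callback(&rq, &push, push_cb);
	queue_balance_callback(&rq, &pull, pull_cb);
	run_balance_callbacks(&rq);	/* prints "pull" then "push" */
	return 0;
}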
+diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
+index ce033c7aa2e8..9cff0ab82b63 100644
+--- a/kernel/time/posix-clock.c
++++ b/kernel/time/posix-clock.c
+@@ -69,10 +69,10 @@ static ssize_t posix_clock_read(struct file *fp, char __user *buf,
+ static unsigned int posix_clock_poll(struct file *fp, poll_table *wait)
+ {
+ 	struct posix_clock *clk = get_posix_clock(fp);
+-	int result = 0;
++	unsigned int result = 0;
+ 
+ 	if (!clk)
+-		return -ENODEV;
++		return POLLERR;
+ 
+ 	if (clk->ops.poll)
+ 		result = clk->ops.poll(clk, fp, wait);
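
A poll method returns an unsigned event mask, so the old -ENODEV was being reinterpreted as a mask with almost every bit set, signalling bogus events to the caller; POLLERR reports the dead clock precisely. A tiny standalone illustration of the difference:

#include <poll.h>
#include <stdio.h>

int main(void)
{
	unsigned int bogus = (unsigned int)-19;	/* -ENODEV squeezed into a mask */

	/* the negative errno "mask" claims readability and an error at once */
	printf("bogus %#x: POLLIN=%d POLLERR=%d\n",
	       bogus, !!(bogus & POLLIN), !!(bogus & POLLERR));
	printf("clean %#x: just POLLERR\n", (unsigned int)POLLERR);
	return 0;
}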
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index d6b35d3a232c..321ee4205160 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -1933,12 +1933,6 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
+ 		goto again;
+ }
+ 
+-static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+-{
+-	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
+-	cpu_buffer->reader_page->read = 0;
+-}
+-
+ static void rb_inc_iter(struct ring_buffer_iter *iter)
+ {
+ 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+@@ -3576,7 +3570,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+ 
+ 	/* Finally update the reader page to the new head */
+ 	cpu_buffer->reader_page = reader;
+-	rb_reset_reader_page(cpu_buffer);
++	cpu_buffer->reader_page->read = 0;
+ 
+ 	if (overwrite != cpu_buffer->last_overrun) {
+ 		cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
+@@ -3586,6 +3580,10 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+ 	goto again;
+ 
+  out:
++	/* Update the read_stamp on the first event */
++	if (reader && reader->read == 0)
++		cpu_buffer->read_stamp = reader->page->time_stamp;
++
+ 	arch_spin_unlock(&cpu_buffer->lock);
+ 	local_irq_restore(flags);
+ 
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index be15da87b390..9514ee1791a7 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -604,7 +604,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
+ 		 * The ftrace subsystem is for showing formats only.
+ 		 * They can not be enabled or disabled via the event files.
+ 		 */
+-		if (call->class && call->class->reg)
++		if (call->class && call->class->reg &&
++		    !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
+ 			return file;
+ 	}
+ 
+diff --git a/lib/devres.c b/lib/devres.c
+index 823533138fa0..20afaf181b27 100644
+--- a/lib/devres.c
++++ b/lib/devres.c
+@@ -423,7 +423,7 @@ void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
+ 	if (!iomap)
+ 		return;
+ 
+-	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
++	for (i = 0; i < PCIM_IOMAP_MAX; i++) {
+ 		if (!(mask & (1 << i)))
+ 			continue;
+ 
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index 057017bd3b42..469f3138d0f6 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -2280,7 +2280,7 @@ static int read_partial_message(struct ceph_connection *con)
+ 		con->in_base_pos = -front_len - middle_len - data_len -
+ 			sizeof(m->footer);
+ 		con->in_tag = CEPH_MSGR_TAG_READY;
+-		return 0;
++		return 1;
+ 	} else if ((s64)seq - (s64)con->in_seq > 1) {
+ 		pr_err("read_partial_message bad seq %lld expected %lld\n",
+ 		       seq, con->in_seq + 1);
+@@ -2313,7 +2313,7 @@ static int read_partial_message(struct ceph_connection *con)
+ 				sizeof(m->footer);
+ 			con->in_tag = CEPH_MSGR_TAG_READY;
+ 			con->in_seq++;
+-			return 0;
++			return 1;
+ 		}
+ 
+ 		BUG_ON(!con->in_msg);
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 56cdf3bb1e7f..7df6f539a402 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -76,6 +76,8 @@
+ 
+ struct kmem_cache *skbuff_head_cache __read_mostly;
+ static struct kmem_cache *skbuff_fclone_cache __read_mostly;
++int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
++EXPORT_SYMBOL(sysctl_max_skb_frags);
+ 
+ /**
+  *	skb_panic - private function for out-of-line support
+diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
+index f3413ae3d973..d7962397d90f 100644
+--- a/net/core/sysctl_net_core.c
++++ b/net/core/sysctl_net_core.c
+@@ -27,6 +27,7 @@ static int one = 1;
+ static int ushort_max = USHRT_MAX;
+ static int min_sndbuf = SOCK_MIN_SNDBUF;
+ static int min_rcvbuf = SOCK_MIN_RCVBUF;
++static int max_skb_frags = MAX_SKB_FRAGS;
+ 
+ #ifdef CONFIG_RPS
+ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
+@@ -362,6 +363,15 @@ static struct ctl_table net_core_table[] = {
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_dointvec
+ 	},
++	{
++		.procname	= "max_skb_frags",
++		.data		= &sysctl_max_skb_frags,
++		.maxlen		= sizeof(int),
++		.mode		= 0644,
++		.proc_handler	= proc_dointvec_minmax,
++		.extra1		= &one,
++		.extra2		= &max_skb_frags,
++	},
+ 	{ }
+ };
+ 
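Together with the skbuff.c hunk above, this exposes the new limit as /proc/sys/net/core/max_skb_frags, clamped by the extra1/extra2 bounds to the range 1..MAX_SKB_FRAGS. A minimal sketch of querying it from userspace; the file only exists on kernels that carry this patch:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/core/max_skb_frags", "r");
	int frags;

	if (!f) {
		perror("max_skb_frags");	/* kernel predates this sysctl */
		return 1;
	}
	if (fscanf(f, "%d", &frags) == 1)
		printf("max_skb_frags = %d\n", frags);
	fclose(f);
	return 0;
}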
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index f4b34d8f92fe..68447109000f 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -1785,7 +1785,7 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
+ 	if (err < 0)
+ 		goto errout;
+ 
+-	err = EINVAL;
++	err = -EINVAL;
+ 	if (!tb[NETCONFA_IFINDEX])
+ 		goto errout;
+ 
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index f6603142cb33..9e4f832aaf13 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -200,6 +200,8 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
+ 		switch (cmsg->cmsg_type) {
+ 		case IP_RETOPTS:
+ 			err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
++
++			/* Our caller is responsible for freeing ipc->opt */
+ 			err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
+ 					     err < 40 ? err : 40);
+ 			if (err)
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 54012b8c0ef9..716dff49d0b9 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -740,8 +740,10 @@ int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ 
+ 	if (msg->msg_controllen) {
+ 		err = ip_cmsg_send(sock_net(sk), msg, &ipc);
+-		if (err)
++		if (unlikely(err)) {
++			kfree(ipc.opt);
+ 			return err;
++		}
+ 		if (ipc.opt)
+ 			free = 1;
+ 	}
+diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
+index 6183d36c038b..ed96b2320e5f 100644
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -523,8 +523,10 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ 
+ 	if (msg->msg_controllen) {
+ 		err = ip_cmsg_send(sock_net(sk), msg, &ipc);
+-		if (err)
++		if (unlikely(err)) {
++			kfree(ipc.opt);
+ 			goto out;
++		}
+ 		if (ipc.opt)
+ 			free = 1;
+ 	}
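
The ping and raw hunks above (and the udp hunk further below) plug the same leak: ip_cmsg_send() may have allocated ipc.opt by the time it fails, and the comment added in ip_sockglue.c pins down that the caller owns the cleanup. A hypothetical userspace sketch of that ownership contract (parse_opts() and do_send() are invented names):

#include <stdlib.h>
#include <string.h>

/* may allocate *opt and still fail afterwards, like ip_cmsg_send() */
static int parse_opts(const char *src, char **opt)
{
	*opt = strdup(src);
	if (!*opt)
		return -1;
	return src[0] ? 0 : -1;	/* failure can follow the allocation */
}

static int do_send(const char *src)
{
	char *opt = NULL;

	if (parse_opts(src, &opt) != 0) {
		free(opt);	/* the caller frees on error, as in the patch */
		return -1;
	}
	/* ... transmit using opt ... */
	free(opt);
	return 0;
}

int main(void)
{
	do_send("ra");	/* success path */
	do_send("");	/* failure path, no leak */
	return 0;
}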
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 54874e4767de..ae001e8e81b9 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -128,6 +128,7 @@ static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
+ static int ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
+ static int ip_rt_min_advmss __read_mostly	= 256;
+ 
++static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
+ /*
+  *	Interface to generic destination cache.
+  */
+@@ -772,7 +773,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
+ 				struct fib_nh *nh = &FIB_RES_NH(res);
+ 
+ 				update_or_create_fnhe(nh, fl4->daddr, new_gw,
+-						      0, 0);
++						0, jiffies + ip_rt_gc_timeout);
+ 			}
+ 			if (kill_route)
+ 				rt->dst.obsolete = DST_OBSOLETE_KILL;
+@@ -1533,6 +1534,36 @@ static void ip_handle_martian_source(struct net_device *dev,
+ #endif
+ }
+ 
++static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
++{
++	struct fnhe_hash_bucket *hash;
++	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
++	u32 hval = fnhe_hashfun(daddr);
++
++	spin_lock_bh(&fnhe_lock);
++
++	hash = rcu_dereference_protected(nh->nh_exceptions,
++					 lockdep_is_held(&fnhe_lock));
++	hash += hval;
++
++	fnhe_p = &hash->chain;
++	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
++	while (fnhe) {
++		if (fnhe->fnhe_daddr == daddr) {
++			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
++				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
++			fnhe_flush_routes(fnhe);
++			kfree_rcu(fnhe, rcu);
++			break;
++		}
++		fnhe_p = &fnhe->fnhe_next;
++		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
++						 lockdep_is_held(&fnhe_lock));
++	}
++
++	spin_unlock_bh(&fnhe_lock);
++}
++
+ /* called in rcu_read_lock() section */
+ static int __mkroute_input(struct sk_buff *skb,
+ 			   const struct fib_result *res,
+@@ -1587,11 +1618,20 @@ static int __mkroute_input(struct sk_buff *skb,
+ 
+ 	fnhe = find_exception(&FIB_RES_NH(*res), daddr);
+ 	if (do_cache) {
+-		if (fnhe != NULL)
++		if (fnhe) {
+ 			rth = rcu_dereference(fnhe->fnhe_rth_input);
+-		else
+-			rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
++			if (rth && rth->dst.expires &&
++			    time_after(jiffies, rth->dst.expires)) {
++				ip_del_fnhe(&FIB_RES_NH(*res), daddr);
++				fnhe = NULL;
++			} else {
++				goto rt_cache;
++			}
++		}
++
++		rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
+ 
++rt_cache:
+ 		if (rt_cache_valid(rth)) {
+ 			skb_dst_set_noref(skb, &rth->dst);
+ 			goto out;
+@@ -1937,19 +1977,29 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
+ 		struct fib_nh *nh = &FIB_RES_NH(*res);
+ 
+ 		fnhe = find_exception(nh, fl4->daddr);
+-		if (fnhe)
++		if (fnhe) {
+ 			prth = &fnhe->fnhe_rth_output;
+-		else {
+-			if (unlikely(fl4->flowi4_flags &
+-				     FLOWI_FLAG_KNOWN_NH &&
+-				     !(nh->nh_gw &&
+-				       nh->nh_scope == RT_SCOPE_LINK))) {
+-				do_cache = false;
+-				goto add;
++			rth = rcu_dereference(*prth);
++			if (rth && rth->dst.expires &&
++			    time_after(jiffies, rth->dst.expires)) {
++				ip_del_fnhe(nh, fl4->daddr);
++				fnhe = NULL;
++			} else {
++				goto rt_cache;
+ 			}
+-			prth = __this_cpu_ptr(nh->nh_pcpu_rth_output);
+ 		}
++
++		if (unlikely(fl4->flowi4_flags &
++			     FLOWI_FLAG_KNOWN_NH &&
++			     !(nh->nh_gw &&
++			       nh->nh_scope == RT_SCOPE_LINK))) {
++			do_cache = false;
++			goto add;
++		}
++		prth = __this_cpu_ptr(nh->nh_pcpu_rth_output);
+ 		rth = rcu_dereference(*prth);
++
++rt_cache:
+ 		if (rt_cache_valid(rth)) {
+ 			dst_hold(&rth->dst);
+ 			return rth;
+@@ -2501,7 +2551,6 @@ void ip_rt_multicast_event(struct in_device *in_dev)
+ }
+ 
+ #ifdef CONFIG_SYSCTL
+-static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
+ static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
+ static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
+ static int ip_rt_gc_elasticity __read_mostly	= 8;
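
Both __mkroute_input() and __mkroute_output() now run the same staleness test before trusting a cached exception route: a non-zero dst.expires that lies in the past sends the entry to ip_del_fnhe() and the lookup falls back to the regular per-CPU cache. A one-function distillation of that check, open-coding the kernel's wraparound-safe time_after() (the helper name is hypothetical):

#include <stdbool.h>

static bool exception_expired(unsigned long now, unsigned long expires)
{
	/* time_after(now, expires): signed difference survives wraparound */
	return expires != 0 && (long)(now - expires) > 0;
}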
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index a880ccc10f61..392d3259f9ad 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -886,7 +886,7 @@ new_segment:
+ 
+ 		i = skb_shinfo(skb)->nr_frags;
+ 		can_coalesce = skb_can_coalesce(skb, i, page, offset);
+-		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
++		if (!can_coalesce && i >= sysctl_max_skb_frags) {
+ 			tcp_mark_push(tp, skb);
+ 			goto new_segment;
+ 		}
+@@ -1169,7 +1169,7 @@ new_segment:
+ 
+ 				if (!skb_can_coalesce(skb, i, pfrag->page,
+ 						      pfrag->offset)) {
+-					if (i == MAX_SKB_FRAGS || !sg) {
++					if (i == sysctl_max_skb_frags || !sg) {
+ 						tcp_mark_push(tp, skb);
+ 						goto new_segment;
+ 					}
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 09451a2cbd6a..6184d17c9126 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -710,7 +710,8 @@ release_sk1:
+    outside socket context is ugly, certainly. What can I do?
+  */
+ 
+-static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
++static void tcp_v4_send_ack(struct net *net,
++			    struct sk_buff *skb, u32 seq, u32 ack,
+ 			    u32 win, u32 tsval, u32 tsecr, int oif,
+ 			    struct tcp_md5sig_key *key,
+ 			    int reply_flags, u8 tos)
+@@ -725,7 +726,6 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
+ 			];
+ 	} rep;
+ 	struct ip_reply_arg arg;
+-	struct net *net = dev_net(skb_dst(skb)->dev);
+ 
+ 	memset(&rep.th, 0, sizeof(struct tcphdr));
+ 	memset(&arg, 0, sizeof(arg));
+@@ -786,7 +786,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
+ 	struct inet_timewait_sock *tw = inet_twsk(sk);
+ 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
+ 
+-	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
++	tcp_v4_send_ack(sock_net(sk), skb,
++			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+ 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
+ 			tcp_time_stamp + tcptw->tw_ts_offset,
+ 			tcptw->tw_ts_recent,
+@@ -805,8 +806,10 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+ 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
+ 	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
+ 	 */
+-	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
+-			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
++	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
++					     tcp_sk(sk)->snd_nxt;
++
++	tcp_v4_send_ack(sock_net(sk), skb, seq,
+ 			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
+ 			tcp_time_stamp,
+ 			req->ts_recent,
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index f8e304667108..f904b644a40c 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -910,8 +910,10 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ 
+ 	if (msg->msg_controllen) {
+ 		err = ip_cmsg_send(sock_net(sk), msg, &ipc);
+-		if (err)
++		if (unlikely(err)) {
++			kfree(ipc.opt);
+ 			return err;
++		}
+ 		if (ipc.opt)
+ 			free = 1;
+ 		connected = 0;
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 38540a3ed92f..bbf35875e4ef 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -192,6 +192,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
+ #endif
+ 	.max_addresses		= IPV6_MAX_ADDRESSES,
+ 	.accept_ra_defrtr	= 1,
++	.accept_ra_min_hop_limit= 1,
+ 	.accept_ra_pinfo	= 1,
+ #ifdef CONFIG_IPV6_ROUTER_PREF
+ 	.accept_ra_rtr_pref	= 1,
+@@ -230,6 +231,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
+ #endif
+ 	.max_addresses		= IPV6_MAX_ADDRESSES,
+ 	.accept_ra_defrtr	= 1,
++	.accept_ra_min_hop_limit= 1,
+ 	.accept_ra_pinfo	= 1,
+ #ifdef CONFIG_IPV6_ROUTER_PREF
+ 	.accept_ra_rtr_pref	= 1,
+@@ -526,7 +528,7 @@ static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
+ 	if (err < 0)
+ 		goto errout;
+ 
+-	err = EINVAL;
++	err = -EINVAL;
+ 	if (!tb[NETCONFA_IFINDEX])
+ 		goto errout;
+ 
+@@ -4150,6 +4152,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
+ #endif
+ 	array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
+ 	array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
++	array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] = cnf->accept_ra_min_hop_limit;
+ 	array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
+ #ifdef CONFIG_IPV6_ROUTER_PREF
+ 	array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
+@@ -4908,6 +4911,13 @@ static struct addrconf_sysctl_table
+ 			.proc_handler	= proc_dointvec,
+ 		},
+ 		{
++			.procname	= "accept_ra_min_hop_limit",
++			.data		= &ipv6_devconf.accept_ra_min_hop_limit,
++			.maxlen		= sizeof(int),
++			.mode		= 0644,
++			.proc_handler	= proc_dointvec,
++		},
++		{
+ 			.procname	= "accept_ra_pinfo",
+ 			.data		= &ipv6_devconf.accept_ra_pinfo,
+ 			.maxlen		= sizeof(int),
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index e24fa8c01dd2..fcfa2885df0e 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -163,6 +163,9 @@ ipv4_connected:
+ 	fl6.fl6_dport = inet->inet_dport;
+ 	fl6.fl6_sport = inet->inet_sport;
+ 
++	if (!fl6.flowi6_oif)
++		fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
++
+ 	if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST))
+ 		fl6.flowi6_oif = np->mcast_oif;
+ 
+diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
+index f0ccdb787100..d14c74b2dfa3 100644
+--- a/net/ipv6/ip6_flowlabel.c
++++ b/net/ipv6/ip6_flowlabel.c
+@@ -527,12 +527,13 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
+ 	case IPV6_FL_A_PUT:
+ 		spin_lock_bh(&ip6_sk_fl_lock);
+ 		for (sflp = &np->ipv6_fl_list;
+-		     (sfl = rcu_dereference(*sflp))!=NULL;
++		     (sfl = rcu_dereference_protected(*sflp,
++						      lockdep_is_held(&ip6_sk_fl_lock))) != NULL;
+ 		     sflp = &sfl->next) {
+ 			if (sfl->fl->label == freq.flr_label) {
+ 				if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
+ 					np->flow_label &= ~IPV6_FLOWLABEL_MASK;
+-				*sflp = rcu_dereference(sfl->next);
++				*sflp = sfl->next;
+ 				spin_unlock_bh(&ip6_sk_fl_lock);
+ 				fl_release(sfl->fl);
+ 				kfree_rcu(sfl, rcu);
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index fda5d95e39f4..e34a6b3520c6 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -1190,18 +1190,16 @@ static void ndisc_router_discovery(struct sk_buff *skb)
+ 
+ 	if (rt)
+ 		rt6_set_expires(rt, jiffies + (HZ * lifetime));
+-	if (ra_msg->icmph.icmp6_hop_limit) {
+-		/* Only set hop_limit on the interface if it is higher than
+-		 * the current hop_limit.
+-		 */
+-		if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) {
++	if (in6_dev->cnf.accept_ra_min_hop_limit < 256 &&
++	    ra_msg->icmph.icmp6_hop_limit) {
++		if (in6_dev->cnf.accept_ra_min_hop_limit <= ra_msg->icmph.icmp6_hop_limit) {
+ 			in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
++			if (rt)
++				dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
++					       ra_msg->icmph.icmp6_hop_limit);
+ 		} else {
+-			ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n");
++			ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than minimum\n");
+ 		}
+-		if (rt)
+-			dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
+-				       ra_msg->icmph.icmp6_hop_limit);
+ 	}
+ 
+ skip_defrtr:
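
Condensed, the new ndisc rule reads: apply an advertised hop limit only if it is non-zero and no smaller than accept_ra_min_hop_limit; the old "only ever raise hop_limit" heuristic is gone. Setting the sysctl to 256 or more disables RA-supplied hop limits outright, since the 8-bit field can never reach that value. The predicate, extracted into a hypothetical helper:

static int ra_hop_limit_accepted(int min_hop_limit, unsigned char advertised)
{
	return min_hop_limit < 256 && advertised != 0 &&
	       min_hop_limit <= advertised;
}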
+diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
+index 1465363a452b..bb3969a40b8e 100644
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -697,6 +697,9 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
+ 	if (!addr || addr->sa_family != AF_IUCV)
+ 		return -EINVAL;
+ 
++	if (addr_len < sizeof(struct sockaddr_iucv))
++		return -EINVAL;
++
+ 	lock_sock(sk);
+ 	if (sk->sk_state != IUCV_OPEN) {
+ 		err = -EBADFD;
+diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
+index 89aacfd2756d..9ba6d8c7c793 100644
+--- a/net/mac80211/mesh_pathtbl.c
++++ b/net/mac80211/mesh_pathtbl.c
+@@ -747,10 +747,8 @@ void mesh_plink_broken(struct sta_info *sta)
+ static void mesh_path_node_reclaim(struct rcu_head *rp)
+ {
+ 	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
+-	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
+ 
+ 	del_timer_sync(&node->mpath->timer);
+-	atomic_dec(&sdata->u.mesh.mpaths);
+ 	kfree(node->mpath);
+ 	kfree(node);
+ }
+@@ -758,8 +756,9 @@ static void mesh_path_node_reclaim(struct rcu_head *rp)
+ /* needs to be called with the corresponding hashwlock taken */
+ static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
+ {
+-	struct mesh_path *mpath;
+-	mpath = node->mpath;
++	struct mesh_path *mpath = node->mpath;
++	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
++
+ 	spin_lock(&mpath->state_lock);
+ 	mpath->flags |= MESH_PATH_RESOLVING;
+ 	if (mpath->is_gate)
+@@ -767,6 +766,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
+ 	hlist_del_rcu(&node->list);
+ 	call_rcu(&node->rcu, mesh_path_node_reclaim);
+ 	spin_unlock(&mpath->state_lock);
++	atomic_dec(&sdata->u.mesh.mpaths);
+ 	atomic_dec(&tbl->entries);
+ }
+ 
+diff --git a/net/rfkill/core.c b/net/rfkill/core.c
+index 1bacc1079942..918c5ebd239e 100644
+--- a/net/rfkill/core.c
++++ b/net/rfkill/core.c
+@@ -51,7 +51,6 @@
+ struct rfkill {
+ 	spinlock_t		lock;
+ 
+-	const char		*name;
+ 	enum rfkill_type	type;
+ 
+ 	unsigned long		state;
+@@ -75,6 +74,7 @@ struct rfkill {
+ 	struct delayed_work	poll_work;
+ 	struct work_struct	uevent_work;
+ 	struct work_struct	sync_work;
++	char			name[];
+ };
+ #define to_rfkill(d)	container_of(d, struct rfkill, dev)
+ 
+@@ -863,14 +863,14 @@ struct rfkill * __must_check rfkill_alloc(const char *name,
+ 	if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
+ 		return NULL;
+ 
+-	rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
++	rfkill = kzalloc(sizeof(*rfkill) + strlen(name) + 1, GFP_KERNEL);
+ 	if (!rfkill)
+ 		return NULL;
+ 
+ 	spin_lock_init(&rfkill->lock);
+ 	INIT_LIST_HEAD(&rfkill->node);
+ 	rfkill->type = type;
+-	rfkill->name = name;
++	strcpy(rfkill->name, name);
+ 	rfkill->ops = ops;
+ 	rfkill->data = ops_data;
+ 
+@@ -1080,17 +1080,6 @@ static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
+ 	return res;
+ }
+ 
+-static bool rfkill_readable(struct rfkill_data *data)
+-{
+-	bool r;
+-
+-	mutex_lock(&data->mtx);
+-	r = !list_empty(&data->events);
+-	mutex_unlock(&data->mtx);
+-
+-	return r;
+-}
+-
+ static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
+ 			       size_t count, loff_t *pos)
+ {
+@@ -1107,8 +1096,11 @@ static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
+ 			goto out;
+ 		}
+ 		mutex_unlock(&data->mtx);
++		/* since we re-check and it just compares pointers,
++		 * using !list_empty() without locking isn't a problem
++		 */
+ 		ret = wait_event_interruptible(data->read_wait,
+-					       rfkill_readable(data));
++					       !list_empty(&data->events));
+ 		mutex_lock(&data->mtx);
+ 
+ 		if (ret)
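
The rfkill allocation change swaps a borrowed const char *name for a copy embedded in the object itself via a flexible array member, so the name's lifetime can no longer diverge from the rfkill's. The same diff also drops rfkill_readable(): as the added comment notes, list_empty() only compares pointers and the condition is re-checked anyway, so the unlocked wait_event_interruptible() condition is safe. A userspace sketch of the flexible-array pattern with invented names:

#include <stdlib.h>
#include <string.h>

struct item {
	int type;
	char name[];	/* flexible array member, storage allocated inline */
};

static struct item *item_alloc(int type, const char *name)
{
	/* one allocation sized for the header plus the string plus its NUL */
	struct item *it = calloc(1, sizeof(*it) + strlen(name) + 1);

	if (!it)
		return NULL;
	it->type = type;
	strcpy(it->name, name);	/* safe: the space above was sized for it */
	return it;
}

int main(void)
{
	struct item *it = item_alloc(1, "phy0");

	free(it);	/* the name goes away with the object, no separate free */
	return 0;
}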
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index 599757e0c23a..d8689fc37fcb 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -61,6 +61,8 @@
+ #include <net/inet_common.h>
+ #include <net/inet_ecn.h>
+ 
++#define MAX_SCTP_PORT_HASH_ENTRIES (64 * 1024)
++
+ /* Global data structures. */
+ struct sctp_globals sctp_globals __read_mostly;
+ 
+@@ -1333,6 +1335,8 @@ static __init int sctp_init(void)
+ 	unsigned long limit;
+ 	int max_share;
+ 	int order;
++	int num_entries;
++	int max_entry_order;
+ 
+ 	BUILD_BUG_ON(sizeof(struct sctp_ulpevent) >
+ 		     sizeof(((struct sk_buff *) 0)->cb));
+@@ -1386,14 +1390,24 @@ static __init int sctp_init(void)
+ 
+ 	/* Size and allocate the association hash table.
+ 	 * The methodology is similar to that of the tcp hash tables.
++	 * Though not identical.  Start by getting a goal size
+ 	 */
+ 	if (totalram_pages >= (128 * 1024))
+ 		goal = totalram_pages >> (22 - PAGE_SHIFT);
+ 	else
+ 		goal = totalram_pages >> (24 - PAGE_SHIFT);
+ 
+-	for (order = 0; (1UL << order) < goal; order++)
+-		;
++	/* Then compute the page order for said goal */
++	order = get_order(goal);
++
++	/* Now compute the required page order for the maximum sized table we
++	 * want to create
++	 */
++	max_entry_order = get_order(MAX_SCTP_PORT_HASH_ENTRIES *
++				    sizeof(struct sctp_bind_hashbucket));
++
++	/* Limit the page order by that maximum hash table size */
++	order = min(order, max_entry_order);
+ 
+ 	do {
+ 		sctp_assoc_hashsize = (1UL << order) * PAGE_SIZE /
+@@ -1427,27 +1441,42 @@ static __init int sctp_init(void)
+ 		INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain);
+ 	}
+ 
+-	/* Allocate and initialize the SCTP port hash table.  */
++	/* Allocate and initialize the SCTP port hash table.
++	 * Note that order is initialized to start at the max sized
++	 * table we want to support.  If we can't get that many pages
++	 * reduce the order and try again
++	 */
+ 	do {
+-		sctp_port_hashsize = (1UL << order) * PAGE_SIZE /
+-					sizeof(struct sctp_bind_hashbucket);
+-		if ((sctp_port_hashsize > (64 * 1024)) && order > 0)
+-			continue;
+ 		sctp_port_hashtable = (struct sctp_bind_hashbucket *)
+ 			__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order);
+ 	} while (!sctp_port_hashtable && --order > 0);
++
+ 	if (!sctp_port_hashtable) {
+ 		pr_err("Failed bind hash alloc\n");
+ 		status = -ENOMEM;
+ 		goto err_bhash_alloc;
+ 	}
++
++	/* Now compute the number of entries that will fit in the
++	 * port hash space we allocated
++	 */
++	num_entries = (1UL << order) * PAGE_SIZE /
++		      sizeof(struct sctp_bind_hashbucket);
++
++	/* And finish by rounding it down to the nearest power of two
++	 * this wastes some memory of course, but it's needed because
++	 * the hash function operates based on the assumption
++	 * that the number of entries is a power of two
++	 */
++	sctp_port_hashsize = rounddown_pow_of_two(num_entries);
++
+ 	for (i = 0; i < sctp_port_hashsize; i++) {
+ 		spin_lock_init(&sctp_port_hashtable[i].lock);
+ 		INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
+ 	}
+ 
+-	pr_info("Hash tables configured (established %d bind %d)\n",
+-		sctp_assoc_hashsize, sctp_port_hashsize);
++	pr_info("Hash tables configured (established %d bind %d/%d)\n",
++		sctp_assoc_hashsize, sctp_port_hashsize, num_entries);
+ 
+ 	sctp_sysctl_register();
+ 
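The reworked sizing logic allocates the largest table it can get (capped at MAX_SCTP_PORT_HASH_ENTRIES) and then rounds the resulting entry count down to a power of two, because the hash function assumes power-of-two masking. An open-coded userspace equivalent of the kernel's rounddown_pow_of_two():

#include <stdio.h>

static unsigned long rounddown_pow_of_two(unsigned long n)
{
	while (n & (n - 1))
		n &= n - 1;	/* clear the lowest set bit until one remains */
	return n;
}

int main(void)
{
	printf("%lu\n", rounddown_pow_of_two(3000));	/* prints 2048 */
	return 0;
}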
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 9c47fbc5de0c..ead3a8adca08 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -5369,6 +5369,7 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
+ 	struct sctp_hmac_algo_param *hmacs;
+ 	__u16 data_len = 0;
+ 	u32 num_idents;
++	int i;
+ 
+ 	if (!ep->auth_enable)
+ 		return -EACCES;
+@@ -5386,8 +5387,12 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
+ 		return -EFAULT;
+ 	if (put_user(num_idents, &p->shmac_num_idents))
+ 		return -EFAULT;
+-	if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
+-		return -EFAULT;
++	for (i = 0; i < num_idents; i++) {
++		__u16 hmacid = ntohs(hmacs->hmac_ids[i]);
++
++		if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16)))
++			return -EFAULT;
++	}
+ 	return 0;
+ }
+ 
+@@ -6420,6 +6425,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
+ 			/* Minimally, validate the sinfo_flags. */
+ 			if (cmsgs->info->sinfo_flags &
+ 			    ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
++			      SCTP_SACK_IMMEDIATELY |
+ 			      SCTP_ABORT | SCTP_EOF))
+ 				return -EINVAL;
+ 			break;
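
The first hunk above replaces a raw copy_to_user() of the HMAC identifier array with a per-entry ntohs() conversion, so userspace receives host-order values on little-endian machines instead of byte-swapped ones. A userspace sketch of the same element-wise conversion (the function name is invented):

#include <arpa/inet.h>
#include <stdint.h>

static void copy_hmac_ids(const uint16_t *wire, uint16_t *out, int n)
{
	int i;

	for (i = 0; i < n; i++)
		out[i] = ntohs(wire[i]);	/* network to host order, one id at a time */
}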
+diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
+index 8a6e3b0d25d4..f3e2b7d8f325 100644
+--- a/net/sunrpc/cache.c
++++ b/net/sunrpc/cache.c
+@@ -1232,7 +1232,7 @@ int qword_get(char **bpp, char *dest, int bufsize)
+ 	if (bp[0] == '\\' && bp[1] == 'x') {
+ 		/* HEX STRING */
+ 		bp += 2;
+-		while (len < bufsize) {
++		while (len < bufsize - 1) {
+ 			int h, l;
+ 
+ 			h = hex_to_bin(bp[0]);
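
The qword_get() bound is an off-by-one fix: the hex decode loop fills dest[len++] and a NUL terminator is written afterwards, so the loop must stop one byte early. A standalone sketch of the corrected loop shape (decode_hex() is an invented stand-in):

#include <ctype.h>

static int hexval(char c)
{
	return isdigit((unsigned char)c) ? c - '0'
					 : tolower((unsigned char)c) - 'a' + 10;
}

static int decode_hex(const char *src, char *dest, int bufsize)
{
	int len = 0;

	while (isxdigit((unsigned char)src[0]) &&
	       isxdigit((unsigned char)src[1]) &&
	       len < bufsize - 1) {	/* leave room for the NUL below */
		dest[len++] = (char)(hexval(src[0]) << 4 | hexval(src[1]));
		src += 2;
	}
	dest[len] = '\0';		/* always in bounds thanks to the -1 */
	return len;
}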
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 31b88dcb0f01..c5536b7d8ce4 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1700,7 +1700,12 @@ restart_locked:
+ 			goto out_unlock;
+ 	}
+ 
+-	if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
++	/* other == sk && unix_peer(other) != sk if
++	 * - unix_peer(sk) == NULL, destination address bound to sk
++	 * - unix_peer(sk) == sk by time of get but disconnected before lock
++	 */
++	if (other != sk &&
++	    unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
+ 		if (timeo) {
+ 			timeo = unix_wait_for_peer(other, timeo);
+ 
+@@ -2131,6 +2136,7 @@ again:
+ 
+ 			if (signal_pending(current)) {
+ 				err = sock_intr_errno(timeo);
++				scm_destroy(siocb->scm);
+ 				goto out;
+ 			}
+ 
+diff --git a/net/unix/diag.c b/net/unix/diag.c
+index 86fa0f3b2caf..27dd3dcb7739 100644
+--- a/net/unix/diag.c
++++ b/net/unix/diag.c
+@@ -219,7 +219,7 @@ done:
+ 	return skb->len;
+ }
+ 
+-static struct sock *unix_lookup_by_ino(int ino)
++static struct sock *unix_lookup_by_ino(unsigned int ino)
+ {
+ 	int i;
+ 	struct sock *sk;
+diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
+index 9c22317778eb..ee625e3a56ba 100644
+--- a/scripts/recordmcount.c
++++ b/scripts/recordmcount.c
+@@ -189,6 +189,20 @@ static void *mmap_file(char const *fname)
+ 		addr = umalloc(sb.st_size);
+ 		uread(fd_map, addr, sb.st_size);
+ 	}
++	if (sb.st_nlink != 1) {
++		/* file is hard-linked, break the hard link */
++		close(fd_map);
++		if (unlink(fname) < 0) {
++			perror(fname);
++			fail_file();
++		}
++		fd_map = open(fname, O_RDWR | O_CREAT, sb.st_mode);
++		if (fd_map < 0) {
++			perror(fname);
++			fail_file();
++		}
++		uwrite(fd_map, addr, sb.st_size);
++	}
+ 	return addr;
+ }
+ 
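recordmcount edits object files in place, so a hard-linked input (ccache, for instance, can hard-link its cache entries) would be silently modified through its other names; the fix breaks the link and rewrites a private copy. A userspace sketch of the detect-and-break step (the helper name is invented; the real tool then writes the saved contents back):

#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

static int break_hard_link(const char *fname)
{
	struct stat sb;
	int fd;

	if (stat(fname, &sb) < 0)
		return -1;
	if (sb.st_nlink == 1)
		return 0;			/* sole link: editing in place is safe */
	if (unlink(fname) < 0)			/* detach from the other names */
		return -1;
	fd = open(fname, O_RDWR | O_CREAT, sb.st_mode);
	if (fd < 0)
		return -1;
	close(fd);	/* the caller reopens and rewrites the saved contents */
	return 0;
}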
+diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
+index 8a39dda7a325..0ae1bc4f16fa 100644
+--- a/virt/kvm/async_pf.c
++++ b/virt/kvm/async_pf.c
+@@ -161,7 +161,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
+ 	 * do alloc nowait since if we are going to sleep anyway we
+ 	 * may as well sleep faulting in page
+ 	 */
+-	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT);
++	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
+ 	if (!work)
+ 		return 0;
+ 

