From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:3.14 commit in: /
Date: Wed, 9 Jul 2014 23:09:15 +0000 (UTC)
Message-ID: <1404947350.6d6d7bd9432a24acf0f69e3aae34bc211267fc63.mpagano@gentoo>
commit: 6d6d7bd9432a24acf0f69e3aae34bc211267fc63
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jul 9 23:09:10 2014 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jul 9 23:09:10 2014 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=6d6d7bd9
Linux patch 3.14.12
---
0000_README | 4 +
1011_linux-3.14.12.patch | 3170 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 3174 insertions(+)
diff --git a/0000_README b/0000_README
index c719e74..d87d066 100644
--- a/0000_README
+++ b/0000_README
@@ -86,6 +86,10 @@ Patch: 1010_linux-3.14.11.patch
From: http://www.kernel.org
Desc: Linux 3.14.11
+Patch: 1011_linux-3.14.12.patch
+From: http://www.kernel.org
+Desc: Linux 3.14.12
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1011_linux-3.14.12.patch b/1011_linux-3.14.12.patch
new file mode 100644
index 0000000..9a5c593
--- /dev/null
+++ b/1011_linux-3.14.12.patch
@@ -0,0 +1,3170 @@
+diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
+index d614a9b6a280..b602784b289f 100644
+--- a/Documentation/sysctl/vm.txt
++++ b/Documentation/sysctl/vm.txt
+@@ -681,7 +681,8 @@ The batch value of each per cpu pagelist is also updated as a result. It is
+ set to pcp->high/4. The upper limit of batch is (PAGE_SHIFT * 8)
+
+ The initial value is zero. Kernel does not use this value at boot time to set
+-the high water marks for each per cpu page list.
++the high water marks for each per cpu page list. If the user writes '0' to this
++sysctl, it will revert to this default behavior.
+
+ ==============================================================
+
+diff --git a/Makefile b/Makefile
+index f1bbec5ece95..13d8f323ae43 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 14
+-SUBLEVEL = 11
++SUBLEVEL = 12
+ EXTRAVERSION =
+ NAME = Remembering Coco
+
+diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
+index 48094b58c88f..b12a4f9fc9d0 100644
+--- a/arch/arm/mach-omap2/mux.c
++++ b/arch/arm/mach-omap2/mux.c
+@@ -183,8 +183,10 @@ static int __init _omap_mux_get_by_name(struct omap_mux_partition *partition,
+ m0_entry = mux->muxnames[0];
+
+ /* First check for full name in mode0.muxmode format */
+- if (mode0_len && strncmp(muxname, m0_entry, mode0_len))
+- continue;
++ if (mode0_len)
++ if (strncmp(muxname, m0_entry, mode0_len) ||
++ (strlen(m0_entry) != mode0_len))
++ continue;
+
+ /* Then check for muxmode only */
+ for (i = 0; i < OMAP_MUX_NR_MODES; i++) {
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index 39ac630d83de..a8e4bdbbb4b8 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -275,7 +275,6 @@ el1_sp_pc:
+ * Stack or PC alignment exception handling
+ */
+ mrs x0, far_el1
+- mov x1, x25
+ mov x2, sp
+ b do_sp_pc_abort
+ el1_undef:
+diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
+index e4193e3adc7f..0d64089d28b5 100644
+--- a/arch/arm64/mm/flush.c
++++ b/arch/arm64/mm/flush.c
+@@ -79,7 +79,8 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
+ return;
+
+ if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
+- __flush_dcache_area(page_address(page), PAGE_SIZE);
++ __flush_dcache_area(page_address(page),
++ PAGE_SIZE << compound_order(page));
+ __flush_icache_all();
+ } else if (icache_is_aivivt()) {
+ __flush_icache_all();
+diff --git a/arch/unicore32/mm/alignment.c b/arch/unicore32/mm/alignment.c
+index de7dc5fdd58b..24e836023e6c 100644
+--- a/arch/unicore32/mm/alignment.c
++++ b/arch/unicore32/mm/alignment.c
+@@ -21,6 +21,7 @@
+ #include <linux/sched.h>
+ #include <linux/uaccess.h>
+
++#include <asm/pgtable.h>
+ #include <asm/tlbflush.h>
+ #include <asm/unaligned.h>
+
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index fdf83afbb7d9..3092300a07cd 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -99,7 +99,7 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
+ #define KVM_REFILL_PAGES 25
+ #define KVM_MAX_CPUID_ENTRIES 80
+ #define KVM_NR_FIXED_MTRR_REGION 88
+-#define KVM_NR_VAR_MTRR 8
++#define KVM_NR_VAR_MTRR 10
+
+ #define ASYNC_PF_PER_VCPU 64
+
+@@ -462,7 +462,7 @@ struct kvm_vcpu_arch {
+ bool nmi_injected; /* Trying to inject an NMI this entry */
+
+ struct mtrr_state_type mtrr_state;
+- u32 pat;
++ u64 pat;
+
+ int switch_db_regs;
+ unsigned long db[KVM_NR_DB_REGS];
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index ee0c3b554a38..8fbd1a772272 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1866,7 +1866,7 @@ static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+ if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
+ break;
+ gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
+- if (kvm_write_guest(kvm, data,
++ if (kvm_write_guest(kvm, gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT,
+ &tsc_ref, sizeof(tsc_ref)))
+ return 1;
+ mark_page_dirty(kvm, gfn);
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index 4e491d9b5292..dd0dd2d4ceca 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -80,7 +80,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
+ blkg->q = q;
+ INIT_LIST_HEAD(&blkg->q_node);
+ blkg->blkcg = blkcg;
+- blkg->refcnt = 1;
++ atomic_set(&blkg->refcnt, 1);
+
+ /* root blkg uses @q->root_rl, init rl only for !root blkgs */
+ if (blkcg != &blkcg_root) {
+@@ -399,11 +399,8 @@ void __blkg_release_rcu(struct rcu_head *rcu_head)
+
+ /* release the blkcg and parent blkg refs this blkg has been holding */
+ css_put(&blkg->blkcg->css);
+- if (blkg->parent) {
+- spin_lock_irq(blkg->q->queue_lock);
++ if (blkg->parent)
+ blkg_put(blkg->parent);
+- spin_unlock_irq(blkg->q->queue_lock);
+- }
+
+ blkg_free(blkg);
+ }
+diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
+index 86154eab9523..c135f5660702 100644
+--- a/block/blk-cgroup.h
++++ b/block/blk-cgroup.h
+@@ -18,6 +18,7 @@
+ #include <linux/seq_file.h>
+ #include <linux/radix-tree.h>
+ #include <linux/blkdev.h>
++#include <linux/atomic.h>
+
+ /* Max limits for throttle policy */
+ #define THROTL_IOPS_MAX UINT_MAX
+@@ -104,7 +105,7 @@ struct blkcg_gq {
+ struct request_list rl;
+
+ /* reference count */
+- int refcnt;
++ atomic_t refcnt;
+
+ /* is this blkg online? protected by both blkcg and q locks */
+ bool online;
+@@ -253,13 +254,12 @@ static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
+ * blkg_get - get a blkg reference
+ * @blkg: blkg to get
+ *
+- * The caller should be holding queue_lock and an existing reference.
++ * The caller should be holding an existing reference.
+ */
+ static inline void blkg_get(struct blkcg_gq *blkg)
+ {
+- lockdep_assert_held(blkg->q->queue_lock);
+- WARN_ON_ONCE(!blkg->refcnt);
+- blkg->refcnt++;
++ WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
++ atomic_inc(&blkg->refcnt);
+ }
+
+ void __blkg_release_rcu(struct rcu_head *rcu);
+@@ -267,14 +267,11 @@ void __blkg_release_rcu(struct rcu_head *rcu);
+ /**
+ * blkg_put - put a blkg reference
+ * @blkg: blkg to put
+- *
+- * The caller should be holding queue_lock.
+ */
+ static inline void blkg_put(struct blkcg_gq *blkg)
+ {
+- lockdep_assert_held(blkg->q->queue_lock);
+- WARN_ON_ONCE(blkg->refcnt <= 0);
+- if (!--blkg->refcnt)
++ WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
++ if (atomic_dec_and_test(&blkg->refcnt))
+ call_rcu(&blkg->rcu_head, __blkg_release_rcu);
+ }
+
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 7f2d09fbb10b..7296c7f074bd 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -1366,6 +1366,14 @@ static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
+ return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
+ }
+
++static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
++{
++ struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
++
++ return obj_request->img_offset <
++ round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
++}
++
+ static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
+ {
+ dout("%s: obj %p (was %d)\n", __func__, obj_request,
+@@ -1382,6 +1390,13 @@ static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
+ kref_put(&obj_request->kref, rbd_obj_request_destroy);
+ }
+
++static void rbd_img_request_get(struct rbd_img_request *img_request)
++{
++ dout("%s: img %p (was %d)\n", __func__, img_request,
++ atomic_read(&img_request->kref.refcount));
++ kref_get(&img_request->kref);
++}
++
+ static bool img_request_child_test(struct rbd_img_request *img_request);
+ static void rbd_parent_request_destroy(struct kref *kref);
+ static void rbd_img_request_destroy(struct kref *kref);
+@@ -2128,6 +2143,7 @@ static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
+ img_request->next_completion = which;
+ out:
+ spin_unlock_irq(&img_request->completion_lock);
++ rbd_img_request_put(img_request);
+
+ if (!more)
+ rbd_img_request_complete(img_request);
+@@ -2225,6 +2241,7 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
+ goto out_partial;
+ obj_request->osd_req = osd_req;
+ obj_request->callback = rbd_img_obj_callback;
++ rbd_img_request_get(img_request);
+
+ osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
+ 0, 0);
+@@ -2647,7 +2664,7 @@ static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
+ */
+ if (!img_request_write_test(img_request) ||
+ !img_request_layered_test(img_request) ||
+- rbd_dev->parent_overlap <= obj_request->img_offset ||
++ !obj_request_overlaps_parent(obj_request) ||
+ ((known = obj_request_known_test(obj_request)) &&
+ obj_request_exists_test(obj_request))) {
+
+diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
+index 00f878a04d3f..0996a3a39855 100644
+--- a/drivers/clk/qcom/clk-rcg2.c
++++ b/drivers/clk/qcom/clk-rcg2.c
+@@ -55,7 +55,7 @@ static int clk_rcg2_is_enabled(struct clk_hw *hw)
+ if (ret)
+ return ret;
+
+- return (cmd & CMD_ROOT_OFF) != 0;
++ return (cmd & CMD_ROOT_OFF) == 0;
+ }
+
+ static u8 clk_rcg2_get_parent(struct clk_hw *hw)
+diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
+index c95774514b81..6a0ae77d1939 100644
+--- a/drivers/clk/qcom/mmcc-msm8974.c
++++ b/drivers/clk/qcom/mmcc-msm8974.c
+@@ -169,6 +169,7 @@ static struct clk_pll mmpll0 = {
+ .config_reg = 0x0014,
+ .mode_reg = 0x0000,
+ .status_reg = 0x001c,
++ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll0",
+ .parent_names = (const char *[]){ "xo" },
+@@ -192,9 +193,10 @@ static struct clk_pll mmpll1 = {
+ .l_reg = 0x0044,
+ .m_reg = 0x0048,
+ .n_reg = 0x004c,
+- .config_reg = 0x0054,
++ .config_reg = 0x0050,
+ .mode_reg = 0x0040,
+ .status_reg = 0x005c,
++ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll1",
+ .parent_names = (const char *[]){ "xo" },
+@@ -218,7 +220,7 @@ static struct clk_pll mmpll2 = {
+ .l_reg = 0x4104,
+ .m_reg = 0x4108,
+ .n_reg = 0x410c,
+- .config_reg = 0x4114,
++ .config_reg = 0x4110,
+ .mode_reg = 0x4100,
+ .status_reg = 0x411c,
+ .clkr.hw.init = &(struct clk_init_data){
+@@ -233,9 +235,10 @@ static struct clk_pll mmpll3 = {
+ .l_reg = 0x0084,
+ .m_reg = 0x0088,
+ .n_reg = 0x008c,
+- .config_reg = 0x0094,
++ .config_reg = 0x0090,
+ .mode_reg = 0x0080,
+ .status_reg = 0x009c,
++ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll3",
+ .parent_names = (const char *[]){ "xo" },
+@@ -2318,7 +2321,7 @@ static const struct pll_config mmpll1_config = {
+ .vco_val = 0x0,
+ .vco_mask = 0x3 << 20,
+ .pre_div_val = 0x0,
+- .pre_div_mask = 0x3 << 12,
++ .pre_div_mask = 0x7 << 12,
+ .post_div_val = 0x0,
+ .post_div_mask = 0x3 << 8,
+ .mn_ena_mask = BIT(24),
+@@ -2332,7 +2335,7 @@ static struct pll_config mmpll3_config = {
+ .vco_val = 0x0,
+ .vco_mask = 0x3 << 20,
+ .pre_div_val = 0x0,
+- .pre_div_mask = 0x3 << 12,
++ .pre_div_mask = 0x7 << 12,
+ .post_div_val = 0x0,
+ .post_div_mask = 0x3 << 8,
+ .mn_ena_mask = BIT(24),
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index de9ef4a1986d..6d98c37c87ad 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -200,10 +200,7 @@ static signed int pid_calc(struct _pid *pid, int32_t busy)
+ pid->last_err = fp_error;
+
+ result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
+- if (result >= 0)
+- result = result + (1 << (FRAC_BITS-1));
+- else
+- result = result - (1 << (FRAC_BITS-1));
++ result = result + (1 << (FRAC_BITS-1));
+ return (signed int)fp_toint(result);
+ }
+
+diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
+index 19a4f0535b63..fd98bec78816 100644
+--- a/drivers/gpu/drm/i915/intel_panel.c
++++ b/drivers/gpu/drm/i915/intel_panel.c
+@@ -736,9 +736,6 @@ static void i965_enable_backlight(struct intel_connector *connector)
+ ctl = freq << 16;
+ I915_WRITE(BLC_PWM_CTL, ctl);
+
+- /* XXX: combine this into above write? */
+- intel_panel_actually_set_backlight(connector, panel->backlight.level);
+-
+ ctl2 = BLM_PIPE(pipe);
+ if (panel->backlight.combination_mode)
+ ctl2 |= BLM_COMBINATION_MODE;
+@@ -747,6 +744,8 @@ static void i965_enable_backlight(struct intel_connector *connector)
+ I915_WRITE(BLC_PWM_CTL2, ctl2);
+ POSTING_READ(BLC_PWM_CTL2);
+ I915_WRITE(BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
++
++ intel_panel_actually_set_backlight(connector, panel->backlight.level);
+ }
+
+ static void vlv_enable_backlight(struct intel_connector *connector)
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index bd1b00344dac..8e7245151b96 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -5138,10 +5138,25 @@ bool intel_display_power_enabled_sw(struct drm_device *dev,
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_power_domains *power_domains;
++ struct i915_power_well *power_well;
++ bool is_enabled;
++ int i;
++
++ if (dev_priv->pm.suspended)
++ return false;
+
+ power_domains = &dev_priv->power_domains;
++ is_enabled = true;
++ for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
++ if (power_well->always_on)
++ continue;
+
+- return power_domains->domain_use_count[domain];
++ if (!power_well->count) {
++ is_enabled = false;
++ break;
++ }
++ }
++ return is_enabled;
+ }
+
+ bool intel_display_power_enabled(struct drm_device *dev,
+diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
+index cbc2feeda59d..1890264cf115 100644
+--- a/drivers/gpu/drm/i915/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/intel_sdvo.c
+@@ -1383,7 +1383,9 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
+ >> SDVO_PORT_MULTIPLY_SHIFT) + 1;
+ }
+
+- dotclock = pipe_config->port_clock / pipe_config->pixel_multiplier;
++ dotclock = pipe_config->port_clock;
++ if (pipe_config->pixel_multiplier)
++ dotclock /= pipe_config->pixel_multiplier;
+
+ if (HAS_PCH_SPLIT(dev))
+ ironlake_check_encoder_dotclock(pipe_config, dotclock);
+diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
+index 7cf8b1348632..6fdf6e385bf7 100644
+--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
++++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
+@@ -51,6 +51,14 @@ nv04_disp_scanoutpos(struct nouveau_object *object, u32 mthd,
+ args->htotal = nv_rd32(priv, 0x680824 + (head * 0x2000)) & 0xffff;
+ args->hblanke = args->htotal - 1;
+
++ /*
++ * If output is vga instead of digital then vtotal/htotal is invalid
++ * so we have to give up and trigger the timestamping fallback in the
++ * drm core.
++ */
++ if (!args->vtotal || !args->htotal)
++ return -ENOTSUPP;
++
+ args->time[0] = ktime_to_ns(ktime_get());
+ line = nv_rd32(priv, 0x600868 + (head * 0x2000));
+ args->time[1] = ktime_to_ns(ktime_get());
+diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c
+index a86bd3352bf8..e69bfde37aef 100644
+--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c
++++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c
+@@ -1318,10 +1318,12 @@ nv108_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
+ mmio_list(0x408010, 0x80000000, 0, 0);
+ mmio_list(0x419004, 0x00000000, 8, 1);
+ mmio_list(0x419008, 0x00000000, 0, 0);
++ mmio_list(0x4064cc, 0x80000000, 0, 0);
+ mmio_list(0x408004, 0x00000000, 8, 0);
+ mmio_list(0x408008, 0x80000030, 0, 0);
+ mmio_list(0x418808, 0x00000000, 8, 0);
+ mmio_list(0x41880c, 0x80000030, 0, 0);
++ mmio_list(0x4064c8, 0x00c20200, 0, 0);
+ mmio_list(0x418810, 0x80000000, 12, 2);
+ mmio_list(0x419848, 0x10000000, 12, 2);
+
+diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+index e8822a934c48..90d8bf8ce0dc 100644
+--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
++++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+@@ -26,6 +26,7 @@
+
+ const struct nouveau_mc_intr
+ nv50_mc_intr[] = {
++ { 0x04000000, NVDEV_ENGINE_DISP }, /* DISP before FIFO, so pageflip-timestamping works! */
+ { 0x00000001, NVDEV_ENGINE_MPEG },
+ { 0x00000100, NVDEV_ENGINE_FIFO },
+ { 0x00001000, NVDEV_ENGINE_GR },
+@@ -34,7 +35,6 @@ nv50_mc_intr[] = {
+ { 0x00020000, NVDEV_ENGINE_VP }, /* NV84- */
+ { 0x00100000, NVDEV_SUBDEV_TIMER },
+ { 0x00200000, NVDEV_SUBDEV_GPIO },
+- { 0x04000000, NVDEV_ENGINE_DISP },
+ { 0x10000000, NVDEV_SUBDEV_BUS },
+ { 0x80000000, NVDEV_ENGINE_SW },
+ { 0x0002d101, NVDEV_SUBDEV_FB },
+diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+index f8a6f18e2d34..95b3d35388a8 100644
+--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
++++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+@@ -26,6 +26,7 @@
+
+ static const struct nouveau_mc_intr
+ nv98_mc_intr[] = {
++ { 0x04000000, NVDEV_ENGINE_DISP }, /* DISP first, so pageflip timestamps work */
+ { 0x00000001, NVDEV_ENGINE_PPP },
+ { 0x00000100, NVDEV_ENGINE_FIFO },
+ { 0x00001000, NVDEV_ENGINE_GR },
+@@ -37,7 +38,6 @@ nv98_mc_intr[] = {
+ { 0x00100000, NVDEV_SUBDEV_TIMER },
+ { 0x00200000, NVDEV_SUBDEV_GPIO },
+ { 0x00400000, NVDEV_ENGINE_COPY0 }, /* NVA3- */
+- { 0x04000000, NVDEV_ENGINE_DISP },
+ { 0x10000000, NVDEV_SUBDEV_BUS },
+ { 0x80000000, NVDEV_ENGINE_SW },
+ { 0x0042d101, NVDEV_SUBDEV_FB },
+diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+index 34472d317097..ac7f99a15fa7 100644
+--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
++++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+@@ -26,6 +26,7 @@
+
+ const struct nouveau_mc_intr
+ nvc0_mc_intr[] = {
++ { 0x04000000, NVDEV_ENGINE_DISP }, /* DISP first, so pageflip timestamps work. */
+ { 0x00000001, NVDEV_ENGINE_PPP },
+ { 0x00000020, NVDEV_ENGINE_COPY0 },
+ { 0x00000040, NVDEV_ENGINE_COPY1 },
+@@ -40,7 +41,6 @@ nvc0_mc_intr[] = {
+ { 0x00200000, NVDEV_SUBDEV_GPIO },
+ { 0x01000000, NVDEV_SUBDEV_PWR },
+ { 0x02000000, NVDEV_SUBDEV_LTCG },
+- { 0x04000000, NVDEV_ENGINE_DISP },
+ { 0x08000000, NVDEV_SUBDEV_FB },
+ { 0x10000000, NVDEV_SUBDEV_BUS },
+ { 0x40000000, NVDEV_SUBDEV_IBUS },
+diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
+index 5524a3705224..6601690ab29c 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_display.c
++++ b/drivers/gpu/drm/nouveau/nouveau_display.c
+@@ -734,6 +734,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ fb->bits_per_pixel, fb->pitches[0], crtc->x, crtc->y,
+ new_bo->bo.offset };
+
++ /* Keep vblanks on during flip, for the target crtc of this flip */
++ drm_vblank_get(dev, nouveau_crtc(crtc)->index);
++
+ /* Emit a page flip */
+ if (nv_device(drm->device)->card_type >= NV_50) {
+ ret = nv50_display_flip_next(crtc, fb, chan, swap_interval);
+@@ -777,6 +780,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ return 0;
+
+ fail_unreserve:
++ drm_vblank_put(dev, nouveau_crtc(crtc)->index);
+ ttm_bo_unreserve(&old_bo->bo);
+ fail_unpin:
+ mutex_unlock(&chan->cli->mutex);
+@@ -796,6 +800,7 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
+ struct drm_device *dev = drm->dev;
+ struct nouveau_page_flip_state *s;
+ unsigned long flags;
++ int crtcid = -1;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+@@ -806,8 +811,16 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
+ }
+
+ s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
+- if (s->event)
+- drm_send_vblank_event(dev, s->crtc, s->event);
++ if (s->event) {
++ /* Vblank timestamps/counts are only correct on >= NV-50 */
++ if (nv_device(drm->device)->card_type >= NV_50)
++ crtcid = s->crtc;
++
++ drm_send_vblank_event(dev, crtcid, s->event);
++ }
++
++ /* Give up ownership of vblank for page-flipped crtc */
++ drm_vblank_put(dev, s->crtc);
+
+ list_del(&s->head);
+ if (ps)
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index 41a9a9cb271a..0cca5f24196a 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -862,14 +862,16 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
+ args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
+ if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
+ args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC;
+- switch (bpc) {
+- case 8:
+- default:
+- args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
+- break;
+- case 10:
+- args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
+- break;
++ if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
++ switch (bpc) {
++ case 8:
++ default:
++ args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
++ break;
++ case 10:
++ args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
++ break;
++ }
+ }
+ args.v5.ucTransmitterID = encoder_id;
+ args.v5.ucEncoderMode = encoder_mode;
+@@ -884,20 +886,22 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
+ args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */
+ if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
+ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC;
+- switch (bpc) {
+- case 8:
+- default:
+- args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
+- break;
+- case 10:
+- args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP;
+- break;
+- case 12:
+- args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP;
+- break;
+- case 16:
+- args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
+- break;
++ if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
++ switch (bpc) {
++ case 8:
++ default:
++ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
++ break;
++ case 10:
++ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP;
++ break;
++ case 12:
++ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP;
++ break;
++ case 16:
++ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
++ break;
++ }
+ }
+ args.v6.ucTransmitterID = encoder_id;
+ args.v6.ucEncoderMode = encoder_mode;
+diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
+index 4ad7643fce5f..5727dbdeda7f 100644
+--- a/drivers/gpu/drm/radeon/atombios_dp.c
++++ b/drivers/gpu/drm/radeon/atombios_dp.c
+@@ -386,6 +386,19 @@ static int dp_get_max_dp_pix_clock(int link_rate,
+
+ /***** radeon specific DP functions *****/
+
++static int radeon_dp_get_max_link_rate(struct drm_connector *connector,
++ u8 dpcd[DP_DPCD_SIZE])
++{
++ int max_link_rate;
++
++ if (radeon_connector_is_dp12_capable(connector))
++ max_link_rate = min(drm_dp_max_link_rate(dpcd), 540000);
++ else
++ max_link_rate = min(drm_dp_max_link_rate(dpcd), 270000);
++
++ return max_link_rate;
++}
++
+ /* First get the min lane# when low rate is used according to pixel clock
+ * (prefer low rate), second check max lane# supported by DP panel,
+ * if the max lane# < low rate lane# then use max lane# instead.
+@@ -395,7 +408,7 @@ static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
+ int pix_clock)
+ {
+ int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
+- int max_link_rate = drm_dp_max_link_rate(dpcd);
++ int max_link_rate = radeon_dp_get_max_link_rate(connector, dpcd);
+ int max_lane_num = drm_dp_max_lane_count(dpcd);
+ int lane_num;
+ int max_dp_pix_clock;
+@@ -433,7 +446,7 @@ static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
+ return 540000;
+ }
+
+- return drm_dp_max_link_rate(dpcd);
++ return radeon_dp_get_max_link_rate(connector, dpcd);
+ }
+
+ static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index 607dc14d195e..ccca8b224d18 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -1898,8 +1898,11 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
+ args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
+ else
+ args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
+- } else
++ } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
++ args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
++ } else {
+ args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
++ }
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
+index d1b2c71b192a..a0a9f974cdd7 100644
+--- a/drivers/gpu/drm/radeon/cikd.h
++++ b/drivers/gpu/drm/radeon/cikd.h
+@@ -1745,12 +1745,12 @@
+ #define EOP_TC_WB_ACTION_EN (1 << 15) /* L2 */
+ #define EOP_TCL1_ACTION_EN (1 << 16)
+ #define EOP_TC_ACTION_EN (1 << 17) /* L2 */
++#define EOP_TCL2_VOLATILE (1 << 24)
+ #define EOP_CACHE_POLICY(x) ((x) << 25)
+ /* 0 - LRU
+ * 1 - Stream
+ * 2 - Bypass
+ */
+-#define EOP_TCL2_VOLATILE (1 << 27)
+ #define DATA_SEL(x) ((x) << 29)
+ /* 0 - discard
+ * 1 - send low 32bit data
+diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
+index cf783fc0ef21..cf777193c85f 100644
+--- a/drivers/gpu/drm/radeon/cypress_dpm.c
++++ b/drivers/gpu/drm/radeon/cypress_dpm.c
+@@ -1551,7 +1551,7 @@ int cypress_populate_smc_voltage_tables(struct radeon_device *rdev,
+
+ table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDCI] = 0;
+ table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDCI] =
+- cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
++ cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
+ }
+
+ return 0;
+diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
+index ca814276b075..03f08a3cf864 100644
+--- a/drivers/gpu/drm/radeon/ni_dpm.c
++++ b/drivers/gpu/drm/radeon/ni_dpm.c
+@@ -1315,7 +1315,7 @@ static void ni_populate_smc_voltage_tables(struct radeon_device *rdev,
+
+ table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] = 0;
+ table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] =
+- cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
++ cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
+ }
+ }
+
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index 82d4f865546e..4d36b9e86275 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -1402,7 +1402,7 @@ bool radeon_connector_is_dp12_capable(struct drm_connector *connector)
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (ASIC_IS_DCE5(rdev) &&
+- (rdev->clock.dp_extclk >= 53900) &&
++ (rdev->clock.default_dispclk >= 53900) &&
+ radeon_connector_encoder_is_hbr2(connector)) {
+ return true;
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
+index 7993aec86acc..7f2d6c0d11c1 100644
+--- a/drivers/gpu/drm/radeon/radeon_cs.c
++++ b/drivers/gpu/drm/radeon/radeon_cs.c
+@@ -97,6 +97,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
+ uint32_t domain = r->write_domain ?
+ r->write_domain : r->read_domains;
+
++ if (domain & RADEON_GEM_DOMAIN_CPU) {
++ DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
++ "for command submission\n");
++ return -EINVAL;
++ }
++
+ p->relocs[i].lobj.domain = domain;
+ if (domain == RADEON_GEM_DOMAIN_VRAM)
+ domain |= RADEON_GEM_DOMAIN_GTT;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+index 021b5227e783..1b0f34bd3a03 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+@@ -179,7 +179,6 @@ static int vmw_fb_set_par(struct fb_info *info)
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
+- vmw_write(vmw_priv, SVGA_REG_BYTES_PER_LINE, info->fix.line_length);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+ }
+
+diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
+index ec0ae2d1686a..6866448083b2 100644
+--- a/drivers/gpu/vga/vga_switcheroo.c
++++ b/drivers/gpu/vga/vga_switcheroo.c
+@@ -623,7 +623,8 @@ static int vga_switcheroo_runtime_suspend(struct device *dev)
+ ret = dev->bus->pm->runtime_suspend(dev);
+ if (ret)
+ return ret;
+-
++ if (vgasr_priv.handler->switchto)
++ vgasr_priv.handler->switchto(VGA_SWITCHEROO_IGD);
+ vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_OFF);
+ return 0;
+ }
+diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
+index 93d26e8af3e2..bfd3f3eeabcd 100644
+--- a/drivers/hwmon/ina2xx.c
++++ b/drivers/hwmon/ina2xx.c
+@@ -148,7 +148,8 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg)
+
+ switch (reg) {
+ case INA2XX_SHUNT_VOLTAGE:
+- val = DIV_ROUND_CLOSEST(data->regs[reg],
++ /* signed register */
++ val = DIV_ROUND_CLOSEST((s16)data->regs[reg],
+ data->config->shunt_div);
+ break;
+ case INA2XX_BUS_VOLTAGE:
+@@ -160,8 +161,8 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg)
+ val = data->regs[reg] * data->config->power_lsb;
+ break;
+ case INA2XX_CURRENT:
+- /* LSB=1mA (selected). Is in mA */
+- val = data->regs[reg];
++ /* signed register, LSB=1mA (selected), in mA */
++ val = (s16)data->regs[reg];
+ break;
+ default:
+ /* programmer goofed */
+diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
+index 0cf5f8e06cfc..1e8e94d4db7d 100644
+--- a/drivers/iio/inkern.c
++++ b/drivers/iio/inkern.c
+@@ -183,7 +183,7 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
+ else if (name && index >= 0) {
+ pr_err("ERROR: could not get IIO channel %s:%s(%i)\n",
+ np->full_name, name ? name : "", index);
+- return chan;
++ return NULL;
+ }
+
+ /*
+@@ -193,8 +193,9 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
+ */
+ np = np->parent;
+ if (np && !of_get_property(np, "io-channel-ranges", NULL))
+- break;
++ return NULL;
+ }
++
+ return chan;
+ }
+
+@@ -317,6 +318,7 @@ struct iio_channel *iio_channel_get(struct device *dev,
+ if (channel != NULL)
+ return channel;
+ }
++
+ return iio_channel_get_sys(name, channel_name);
+ }
+ EXPORT_SYMBOL_GPL(iio_channel_get);
+diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c
+index 8527743b5cef..391b9cea73ed 100644
+--- a/drivers/irqchip/spear-shirq.c
++++ b/drivers/irqchip/spear-shirq.c
+@@ -125,7 +125,7 @@ static struct spear_shirq spear320_shirq_ras2 = {
+ };
+
+ static struct spear_shirq spear320_shirq_ras3 = {
+- .irq_nr = 3,
++ .irq_nr = 7,
+ .irq_bit_off = 0,
+ .invalid_irq = 1,
+ .regs = {
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index 94d2ac1b493e..359af3a519b5 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -2925,7 +2925,8 @@ static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
+ */
+ if (pt->adjusted_pf.discard_passdown) {
+ data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
+- limits->discard_granularity = data_limits->discard_granularity;
++ limits->discard_granularity = max(data_limits->discard_granularity,
++ pool->sectors_per_block << SECTOR_SHIFT);
+ } else
+ limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
+ }
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 8b013f87c9d8..73aedcb639c0 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -7505,6 +7505,19 @@ void md_do_sync(struct md_thread *thread)
+ rdev->recovery_offset < j)
+ j = rdev->recovery_offset;
+ rcu_read_unlock();
++
++ /* If there is a bitmap, we need to make sure all
++ * writes that started before we added a spare
++ * complete before we start doing a recovery.
++ * Otherwise the write might complete and (via
++ * bitmap_endwrite) set a bit in the bitmap after the
++ * recovery has checked that bit and skipped that
++ * region.
++ */
++ if (mddev->bitmap) {
++ mddev->pers->quiesce(mddev, 1);
++ mddev->pers->quiesce(mddev, 0);
++ }
+ }
+
+ printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
+diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
+index c46feda07d56..7e1866175e7b 100644
+--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
++++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
+@@ -246,6 +246,9 @@ static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
+ case MMC_RSP_R1:
+ rsp_type = SD_RSP_TYPE_R1;
+ break;
++ case MMC_RSP_R1 & ~MMC_RSP_CRC:
++ rsp_type = SD_RSP_TYPE_R1 | SD_NO_CHECK_CRC7;
++ break;
+ case MMC_RSP_R1B:
+ rsp_type = SD_RSP_TYPE_R1b;
+ break;
+diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
+index bcf60800c3ce..c09ab95a2219 100644
+--- a/drivers/mtd/nand/fsl_elbc_nand.c
++++ b/drivers/mtd/nand/fsl_elbc_nand.c
+@@ -724,6 +724,19 @@ static int fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ return 0;
+ }
+
++/* ECC will be calculated automatically, and errors will be detected in
++ * waitfunc.
++ */
++static int fsl_elbc_write_subpage(struct mtd_info *mtd, struct nand_chip *chip,
++ uint32_t offset, uint32_t data_len,
++ const uint8_t *buf, int oob_required)
++{
++ fsl_elbc_write_buf(mtd, buf, mtd->writesize);
++ fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
++
++ return 0;
++}
++
+ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
+ {
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+@@ -762,6 +775,7 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
+
+ chip->ecc.read_page = fsl_elbc_read_page;
+ chip->ecc.write_page = fsl_elbc_write_page;
++ chip->ecc.write_subpage = fsl_elbc_write_subpage;
+
+ /* If CS Base Register selects full hardware ECC then use it */
+ if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
+diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
+index bf642ceef681..6f55d92dc233 100644
+--- a/drivers/mtd/nand/omap2.c
++++ b/drivers/mtd/nand/omap2.c
+@@ -1451,7 +1451,7 @@ static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
+
+ /* Check if any error reported */
+ if (!is_error_reported)
+- return 0;
++ return stat;
+
+ /* Decode BCH error using ELM module */
+ elm_decode_bch_error_page(info->elm_dev, ecc_vec, err_vec);
+diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
+index 2a7a0b27ac38..51e15fd53108 100644
+--- a/drivers/mtd/nand/pxa3xx_nand.c
++++ b/drivers/mtd/nand/pxa3xx_nand.c
+@@ -128,10 +128,10 @@
+
+ /* macros for registers read/write */
+ #define nand_writel(info, off, val) \
+- __raw_writel((val), (info)->mmio_base + (off))
++ writel_relaxed((val), (info)->mmio_base + (off))
+
+ #define nand_readl(info, off) \
+- __raw_readl((info)->mmio_base + (off))
++ readl_relaxed((info)->mmio_base + (off))
+
+ /* error code and state */
+ enum {
+diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
+index 511f6eecd58b..62db510b70ea 100644
+--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
++++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
+@@ -725,6 +725,7 @@ static int emac_open(struct net_device *dev)
+
+ ret = emac_mdio_probe(dev);
+ if (ret < 0) {
++ free_irq(dev->irq, dev);
+ netdev_err(dev, "cannot probe MDIO bus\n");
+ return ret;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
+index 7e2995ecea6f..84e41920b5ff 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
++++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
+@@ -398,7 +398,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
+
+ MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
+ if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
+- if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_OFFSET) {
++ if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN) {
+ mlx4_err(dev, "VLAN is enforced on this port\n");
+ err = -EPROTONOSUPPORT;
+ goto out;
+diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
+index 50e5ddb12fb3..628ff628e04a 100644
+--- a/drivers/net/wireless/b43/xmit.c
++++ b/drivers/net/wireless/b43/xmit.c
+@@ -810,9 +810,13 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
+ break;
+ case B43_PHYTYPE_G:
+ status.band = IEEE80211_BAND_2GHZ;
+- /* chanid is the radio channel cookie value as used
+- * to tune the radio. */
+- status.freq = chanid + 2400;
++ /* Somewhere between 478.104 and 508.1084 firmware for G-PHY
++ * has been modified to be compatible with N-PHY and others.
++ */
++ if (dev->fw.rev >= 508)
++ status.freq = ieee80211_channel_to_frequency(chanid, status.band);
++ else
++ status.freq = chanid + 2400;
+ break;
+ case B43_PHYTYPE_N:
+ case B43_PHYTYPE_LP:
+diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+index 82bf3c5d3cdc..2de2736eb332 100644
+--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
++++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+@@ -300,10 +300,10 @@ brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
+
+ wrapbase = ci->c_inf[idx].wrapbase;
+
+- /* if core is already in reset, just return */
++ /* if core is already in reset, skip reset */
+ regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL);
+ if ((regdata & BCMA_RESET_CTL_RESET) != 0)
+- return;
++ goto post_reset_config;
+
+ /* configure reset */
+ brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, pre_resetbits |
+@@ -319,6 +319,7 @@ brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
+ SPINWAIT(brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL) !=
+ BCMA_RESET_CTL_RESET, 300);
+
++post_reset_config:
+ /* post reset configure */
+ brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, pre_resetbits |
+ BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
+diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
+index 8d42fd9b0811..16be0c07c64a 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
+@@ -318,6 +318,7 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
+ {
+ int ret;
+ int t = 0;
++ int iter;
+
+ IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
+
+@@ -326,18 +327,23 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
+ if (ret >= 0)
+ return 0;
+
+- /* If HW is not ready, prepare the conditions to check again */
+- iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+- CSR_HW_IF_CONFIG_REG_PREPARE);
++ for (iter = 0; iter < 10; iter++) {
++ /* If HW is not ready, prepare the conditions to check again */
++ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
++ CSR_HW_IF_CONFIG_REG_PREPARE);
++
++ do {
++ ret = iwl_pcie_set_hw_ready(trans);
++ if (ret >= 0)
++ return 0;
+
+- do {
+- ret = iwl_pcie_set_hw_ready(trans);
+- if (ret >= 0)
+- return 0;
++ usleep_range(200, 1000);
++ t += 200;
++ } while (t < 150000);
++ msleep(25);
++ }
+
+- usleep_range(200, 1000);
+- t += 200;
+- } while (t < 150000);
++ IWL_DEBUG_INFO(trans, "got NIC after %d iterations\n", iter);
+
+ return ret;
+ }
+diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
+index 2f1cd929c6f6..a511cccc9f01 100644
+--- a/drivers/net/wireless/rt2x00/rt2500pci.c
++++ b/drivers/net/wireless/rt2x00/rt2500pci.c
+@@ -1681,8 +1681,13 @@ static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
+ /*
+ * Detect if this device has an hardware controlled radio.
+ */
+- if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO))
++ if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) {
+ __set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags);
++ /*
++ * On this device RFKILL initialized during probe does not work.
++ */
++ __set_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags);
++ }
+
+ /*
+ * Check if the BBP tuning should be enabled.
+diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
+index e3b885d8f7db..5d45a1a740a4 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00.h
++++ b/drivers/net/wireless/rt2x00/rt2x00.h
+@@ -693,6 +693,7 @@ enum rt2x00_capability_flags {
+ REQUIRE_SW_SEQNO,
+ REQUIRE_HT_TX_DESC,
+ REQUIRE_PS_AUTOWAKE,
++ REQUIRE_DELAYED_RFKILL,
+
+ /*
+ * Capabilities
+diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
+index 2bde6729f5e6..4fa43a2eeb73 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
++++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
+@@ -1126,9 +1126,10 @@ static void rt2x00lib_uninitialize(struct rt2x00_dev *rt2x00dev)
+ return;
+
+ /*
+- * Unregister extra components.
++ * Stop rfkill polling.
+ */
+- rt2x00rfkill_unregister(rt2x00dev);
++ if (test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
++ rt2x00rfkill_unregister(rt2x00dev);
+
+ /*
+ * Allow the HW to uninitialize.
+@@ -1166,6 +1167,12 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
+
+ set_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags);
+
++ /*
++ * Start rfkill polling.
++ */
++ if (test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
++ rt2x00rfkill_register(rt2x00dev);
++
+ return 0;
+ }
+
+@@ -1375,7 +1382,12 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
+ rt2x00link_register(rt2x00dev);
+ rt2x00leds_register(rt2x00dev);
+ rt2x00debug_register(rt2x00dev);
+- rt2x00rfkill_register(rt2x00dev);
++
++ /*
++ * Start rfkill polling.
++ */
++ if (!test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
++ rt2x00rfkill_register(rt2x00dev);
+
+ return 0;
+
+@@ -1391,6 +1403,12 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
+ clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
+
+ /*
++ * Stop rfkill polling.
++ */
++ if (!test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
++ rt2x00rfkill_unregister(rt2x00dev);
++
++ /*
+ * Disable radio.
+ */
+ rt2x00lib_disable_radio(rt2x00dev);
+diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
+index a87ee9b6585a..87a8ad5d2eb7 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
++++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
+@@ -487,6 +487,8 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ crypto.cipher = rt2x00crypto_key_to_cipher(key);
+ if (crypto.cipher == CIPHER_NONE)
+ return -EOPNOTSUPP;
++ if (crypto.cipher == CIPHER_TKIP && rt2x00_is_usb(rt2x00dev))
++ return -EOPNOTSUPP;
+
+ crypto.cmd = cmd;
+
+diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
+index fa764406df68..c5bb0e0a36b9 100644
+--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
++++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
+@@ -185,6 +185,11 @@ static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
+ if (crq->valid & 0x80) {
+ if (++queue->cur == queue->size)
+ queue->cur = 0;
++
++ /* Ensure the read of the valid bit occurs before reading any
++ * other bits of the CRQ entry
++ */
++ rmb();
+ } else
+ crq = NULL;
+ spin_unlock_irqrestore(&queue->lock, flags);
+@@ -203,6 +208,11 @@ static int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
+ {
+ struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+
++ /*
++ * Ensure the command buffer is flushed to memory before handing it
++ * over to the VIOS to prevent it from fetching any stale data.
++ */
++ mb();
+ return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
+ }
+
+@@ -797,7 +807,8 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
+ evt->hostdata->dev);
+ if (evt->cmnd_done)
+ evt->cmnd_done(evt->cmnd);
+- } else if (evt->done)
++ } else if (evt->done && evt->crq.format != VIOSRP_MAD_FORMAT &&
++ evt->iu.srp.login_req.opcode != SRP_LOGIN_REQ)
+ evt->done(evt);
+ free_event_struct(&evt->hostdata->pool, evt);
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
+diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
+index ef29636d4c9b..edb4d46fa874 100644
+--- a/drivers/scsi/scsi_error.c
++++ b/drivers/scsi/scsi_error.c
+@@ -131,7 +131,7 @@ scmd_eh_abort_handler(struct work_struct *work)
+ "aborting command %p\n", scmd));
+ rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd);
+ if (rtn == SUCCESS) {
+- scmd->result |= DID_TIME_OUT << 16;
++ set_host_byte(scmd, DID_TIME_OUT);
+ if (scsi_host_eh_past_deadline(sdev->host)) {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd,
+@@ -167,7 +167,7 @@ scmd_eh_abort_handler(struct work_struct *work)
+ scmd_printk(KERN_WARNING, scmd,
+ "scmd %p terminate "
+ "aborted command\n", scmd));
+- scmd->result |= DID_TIME_OUT << 16;
++ set_host_byte(scmd, DID_TIME_OUT);
+ scsi_finish_command(scmd);
+ }
+ }
+@@ -290,7 +290,7 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
+ if (scsi_abort_command(scmd) == SUCCESS)
+ return BLK_EH_NOT_HANDLED;
+
+- scmd->result |= DID_TIME_OUT << 16;
++ set_host_byte(scmd, DID_TIME_OUT);
+
+ if (unlikely(rtn == BLK_EH_NOT_HANDLED &&
+ !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)))
+@@ -1773,7 +1773,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
+ break;
+ case DID_ABORT:
+ if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) {
+- scmd->result |= DID_TIME_OUT << 16;
++ set_host_byte(scmd, DID_TIME_OUT);
+ return SUCCESS;
+ }
+ case DID_NO_CONNECT:
+diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
+index d92fe4037e94..6b349e301869 100644
+--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
++++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
+@@ -3000,7 +3000,11 @@ sym_dequeue_from_squeue(struct sym_hcb *np, int i, int target, int lun, int task
+ if ((target == -1 || cp->target == target) &&
+ (lun == -1 || cp->lun == lun) &&
+ (task == -1 || cp->tag == task)) {
++#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
+ sym_set_cam_status(cp->cmd, DID_SOFT_ERROR);
++#else
++ sym_set_cam_status(cp->cmd, DID_REQUEUE);
++#endif
+ sym_remque(&cp->link_ccbq);
+ sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
+ }
+diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
+index db3b494e5926..8490aa411739 100644
+--- a/drivers/scsi/virtio_scsi.c
++++ b/drivers/scsi/virtio_scsi.c
+@@ -273,6 +273,16 @@ static void virtscsi_req_done(struct virtqueue *vq)
+ virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
+ };
+
++static void virtscsi_poll_requests(struct virtio_scsi *vscsi)
++{
++ int i, num_vqs;
++
++ num_vqs = vscsi->num_queues;
++ for (i = 0; i < num_vqs; i++)
++ virtscsi_vq_done(vscsi, &vscsi->req_vqs[i],
++ virtscsi_complete_cmd);
++}
++
+ static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
+ {
+ struct virtio_scsi_cmd *cmd = buf;
+@@ -291,6 +301,8 @@ static void virtscsi_ctrl_done(struct virtqueue *vq)
+ virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
+ };
+
++static void virtscsi_handle_event(struct work_struct *work);
++
+ static int virtscsi_kick_event(struct virtio_scsi *vscsi,
+ struct virtio_scsi_event_node *event_node)
+ {
+@@ -298,6 +310,7 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi,
+ struct scatterlist sg;
+ unsigned long flags;
+
++ INIT_WORK(&event_node->work, virtscsi_handle_event);
+ sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));
+
+ spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
+@@ -415,7 +428,6 @@ static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
+ {
+ struct virtio_scsi_event_node *event_node = buf;
+
+- INIT_WORK(&event_node->work, virtscsi_handle_event);
+ schedule_work(&event_node->work);
+ }
+
+@@ -605,6 +617,18 @@ static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
+ cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
+ ret = SUCCESS;
+
++ /*
++ * The spec guarantees that all requests related to the TMF have
++ * been completed, but the callback might not have run yet if
++ * we're using independent interrupts (e.g. MSI). Poll the
++ * virtqueues once.
++ *
++ * In the abort case, sc->scsi_done will do nothing, because
++ * the block layer must have detected a timeout and as a result
++ * REQ_ATOM_COMPLETE has been set.
++ */
++ virtscsi_poll_requests(vscsi);
++
+ out:
+ mempool_free(cmd, virtscsi_cmd_pool);
+ return ret;
+diff --git a/drivers/staging/iio/adc/ad7291.c b/drivers/staging/iio/adc/ad7291.c
+index 357cef2a6f4c..7194bd138762 100644
+--- a/drivers/staging/iio/adc/ad7291.c
++++ b/drivers/staging/iio/adc/ad7291.c
+@@ -465,7 +465,7 @@ static int ad7291_probe(struct i2c_client *client,
+ struct ad7291_platform_data *pdata = client->dev.platform_data;
+ struct ad7291_chip_info *chip;
+ struct iio_dev *indio_dev;
+- int ret = 0;
++ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
+ if (!indio_dev)
+@@ -475,7 +475,7 @@ static int ad7291_probe(struct i2c_client *client,
+ if (pdata && pdata->use_external_ref) {
+ chip->reg = devm_regulator_get(&client->dev, "vref");
+ if (IS_ERR(chip->reg))
+- return ret;
++ return PTR_ERR(chip->reg);
+
+ ret = regulator_enable(chip->reg);
+ if (ret)
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index e36d1f5ca191..28ac3f3b7ec3 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -1214,15 +1214,16 @@ static void n_tty_receive_parity_error(struct tty_struct *tty, unsigned char c)
+ {
+ struct n_tty_data *ldata = tty->disc_data;
+
+- if (I_IGNPAR(tty))
+- return;
+- if (I_PARMRK(tty)) {
+- put_tty_queue('\377', ldata);
+- put_tty_queue('\0', ldata);
+- put_tty_queue(c, ldata);
+- } else if (I_INPCK(tty))
+- put_tty_queue('\0', ldata);
+- else
++ if (I_INPCK(tty)) {
++ if (I_IGNPAR(tty))
++ return;
++ if (I_PARMRK(tty)) {
++ put_tty_queue('\377', ldata);
++ put_tty_queue('\0', ldata);
++ put_tty_queue(c, ldata);
++ } else
++ put_tty_queue('\0', ldata);
++ } else
+ put_tty_queue(c, ldata);
+ if (waitqueue_active(&tty->read_wait))
+ wake_up_interruptible(&tty->read_wait);
+diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
+index 29a7f632b354..612dfc720738 100644
+--- a/drivers/tty/serial/8250/8250_core.c
++++ b/drivers/tty/serial/8250/8250_core.c
+@@ -2356,7 +2356,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
+ port->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+- if (termios->c_iflag & (BRKINT | PARMRK))
++ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ port->read_status_mask |= UART_LSR_BI;
+
+ /*
+diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
+index 501667e3e3f5..323376668b72 100644
+--- a/drivers/tty/serial/altera_uart.c
++++ b/drivers/tty/serial/altera_uart.c
+@@ -185,6 +185,12 @@ static void altera_uart_set_termios(struct uart_port *port,
+ uart_update_timeout(port, termios->c_cflag, baud);
+ altera_uart_writel(port, baudclk, ALTERA_UART_DIVISOR_REG);
+ spin_unlock_irqrestore(&port->lock, flags);
++
++ /*
++ * FIXME: port->read_status_mask and port->ignore_status_mask
++ * need to be initialized based on termios settings for
++ * INPCK, IGNBRK, IGNPAR, PARMRK, BRKINT
++ */
+ }
+
+ static void altera_uart_rx_chars(struct altera_uart *pp)
+diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
+index 01c9e72433e1..971af1e22d0f 100644
+--- a/drivers/tty/serial/amba-pl010.c
++++ b/drivers/tty/serial/amba-pl010.c
+@@ -420,7 +420,7 @@ pl010_set_termios(struct uart_port *port, struct ktermios *termios,
+ uap->port.read_status_mask = UART01x_RSR_OE;
+ if (termios->c_iflag & INPCK)
+ uap->port.read_status_mask |= UART01x_RSR_FE | UART01x_RSR_PE;
+- if (termios->c_iflag & (BRKINT | PARMRK))
++ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ uap->port.read_status_mask |= UART01x_RSR_BE;
+
+ /*
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index d58783d364e3..b569011ac713 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -1744,7 +1744,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
+ port->read_status_mask = UART011_DR_OE | 255;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
+- if (termios->c_iflag & (BRKINT | PARMRK))
++ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ port->read_status_mask |= UART011_DR_BE;
+
+ /*
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index a49f10d269b2..ce352b81e230 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -1811,7 +1811,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
+ port->read_status_mask = ATMEL_US_OVRE;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
+- if (termios->c_iflag & (BRKINT | PARMRK))
++ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ port->read_status_mask |= ATMEL_US_RXBRK;
+
+ if (atmel_use_pdc_rx(port))
+diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
+index 78e82b017b92..e0b87d507670 100644
+--- a/drivers/tty/serial/bcm63xx_uart.c
++++ b/drivers/tty/serial/bcm63xx_uart.c
+@@ -565,7 +565,7 @@ static void bcm_uart_set_termios(struct uart_port *port,
+ port->read_status_mask |= UART_FIFO_FRAMEERR_MASK;
+ port->read_status_mask |= UART_FIFO_PARERR_MASK;
+ }
+- if (new->c_iflag & (BRKINT))
++ if (new->c_iflag & (IGNBRK | BRKINT))
+ port->read_status_mask |= UART_FIFO_BRKDET_MASK;
+
+ port->ignore_status_mask = 0;
+diff --git a/drivers/tty/serial/bfin_uart.c b/drivers/tty/serial/bfin_uart.c
+index 869ceba2ec57..ac86a20992e9 100644
+--- a/drivers/tty/serial/bfin_uart.c
++++ b/drivers/tty/serial/bfin_uart.c
+@@ -833,7 +833,7 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
+ port->read_status_mask = OE;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= (FE | PE);
+- if (termios->c_iflag & (BRKINT | PARMRK))
++ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ port->read_status_mask |= BI;
+
+ /*
+diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c
+index 2f2b2e538a54..cdbbc788230a 100644
+--- a/drivers/tty/serial/dz.c
++++ b/drivers/tty/serial/dz.c
+@@ -625,7 +625,7 @@ static void dz_set_termios(struct uart_port *uport, struct ktermios *termios,
+ dport->port.read_status_mask = DZ_OERR;
+ if (termios->c_iflag & INPCK)
+ dport->port.read_status_mask |= DZ_FERR | DZ_PERR;
+- if (termios->c_iflag & (BRKINT | PARMRK))
++ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ dport->port.read_status_mask |= DZ_BREAK;
+
+ /* characters to ignore */
+diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c
+index 0eb5b5673ede..948f17b6b497 100644
+--- a/drivers/tty/serial/efm32-uart.c
++++ b/drivers/tty/serial/efm32-uart.c
+@@ -407,7 +407,7 @@ static void efm32_uart_set_termios(struct uart_port *port,
+ if (new->c_iflag & INPCK)
+ port->read_status_mask |=
+ UARTn_RXDATAX_FERR | UARTn_RXDATAX_PERR;
+- if (new->c_iflag & (BRKINT | PARMRK))
++ if (new->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ port->read_status_mask |= SW_UARTn_RXDATAX_BERR;
+
+ port->ignore_status_mask = 0;
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index 8978dc9a58b7..175f123f4f09 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -496,7 +496,7 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
+ sport->port.read_status_mask = 0;
+ if (termios->c_iflag & INPCK)
+ sport->port.read_status_mask |= (UARTSR1_FE | UARTSR1_PE);
+- if (termios->c_iflag & (BRKINT | PARMRK))
++ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ sport->port.read_status_mask |= UARTSR1_FE;
+
+ /* characters to ignore */
+diff --git a/drivers/tty/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c
+index 1d9420548e16..1efd4c36ba0c 100644
+--- a/drivers/tty/serial/ip22zilog.c
++++ b/drivers/tty/serial/ip22zilog.c
+@@ -850,7 +850,7 @@ ip22zilog_convert_to_zs(struct uart_ip22zilog_port *up, unsigned int cflag,
+ up->port.read_status_mask = Rx_OVR;
+ if (iflag & INPCK)
+ up->port.read_status_mask |= CRC_ERR | PAR_ERR;
+- if (iflag & (BRKINT | PARMRK))
++ if (iflag & (IGNBRK | BRKINT | PARMRK))
+ up->port.read_status_mask |= BRK_ABRT;
+
+ up->port.ignore_status_mask = 0;
+diff --git a/drivers/tty/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c
+index 9cd9b4eba9fc..68f2c53e0b54 100644
+--- a/drivers/tty/serial/m32r_sio.c
++++ b/drivers/tty/serial/m32r_sio.c
+@@ -737,7 +737,7 @@ static void m32r_sio_set_termios(struct uart_port *port,
+ up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ if (termios->c_iflag & INPCK)
+ up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+- if (termios->c_iflag & (BRKINT | PARMRK))
++ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ up->port.read_status_mask |= UART_LSR_BI;
+
+ /*
+diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
+index 8d71e4047bb3..558af637cd0c 100644
+--- a/drivers/tty/serial/max310x.c
++++ b/drivers/tty/serial/max310x.c
+@@ -842,7 +842,7 @@ static void max310x_set_termios(struct uart_port *port,
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= MAX310X_LSR_RXPAR_BIT |
+ MAX310X_LSR_FRERR_BIT;
+- if (termios->c_iflag & (BRKINT | PARMRK))
++ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ port->read_status_mask |= MAX310X_LSR_RXBRK_BIT;
+
+ /* Set status ignore mask */
+diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
+index 0edfaf8cd269..a6f085717f94 100644
+--- a/drivers/tty/serial/mcf.c
++++ b/drivers/tty/serial/mcf.c
+@@ -248,6 +248,12 @@ static void mcf_set_termios(struct uart_port *port, struct ktermios *termios,
+ mr1 |= MCFUART_MR1_PARITYNONE;
+ }
+
++ /*
++ * FIXME: port->read_status_mask and port->ignore_status_mask
++ * need to be initialized based on termios settings for
++ * INPCK, IGNBRK, IGNPAR, PARMRK, BRKINT
++ */
++
+ if (termios->c_cflag & CSTOPB)
+ mr2 |= MCFUART_MR2_STOP2;
+ else
+diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c
+index 52c930fac210..445799dc9846 100644
+--- a/drivers/tty/serial/mfd.c
++++ b/drivers/tty/serial/mfd.c
+@@ -977,7 +977,7 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
+ up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ if (termios->c_iflag & INPCK)
+ up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+- if (termios->c_iflag & (BRKINT | PARMRK))
++ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ up->port.read_status_mask |= UART_LSR_BI;
+
+ /* Characters to ignore */
+diff --git a/drivers/tty/serial/mpsc.c b/drivers/tty/serial/mpsc.c
+index e30a3ca3cea3..759c6a6fa74a 100644
+--- a/drivers/tty/serial/mpsc.c
++++ b/drivers/tty/serial/mpsc.c
+@@ -1458,7 +1458,7 @@ static void mpsc_set_termios(struct uart_port *port, struct ktermios *termios,
+ pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_PE
+ | SDMA_DESC_CMDSTAT_FR;
+
+- if (termios->c_iflag & (BRKINT | PARMRK))
++ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_BR;
+
+ /* Characters/events to ignore */
+diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
+index b5d779cd3c2b..c0f2b3e5452f 100644
+--- a/drivers/tty/serial/msm_serial.c
++++ b/drivers/tty/serial/msm_serial.c
+@@ -570,7 +570,7 @@ static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
+ port->read_status_mask = 0;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= UART_SR_PAR_FRAME_ERR;
+- if (termios->c_iflag & (BRKINT | PARMRK))
++ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ port->read_status_mask |= UART_SR_RX_BREAK;
+
+ uart_update_timeout(port, termios->c_cflag, baud);
+diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
+index aa97fd845b4d..d98f1d4a9bd3 100644
+--- a/drivers/tty/serial/mxs-auart.c
++++ b/drivers/tty/serial/mxs-auart.c
+@@ -604,7 +604,7 @@ static void mxs_auart_settermios(struct uart_port *u,
+
+ if (termios->c_iflag & INPCK)
+ u->read_status_mask |= AUART_STAT_PERR;
+- if (termios->c_iflag & (BRKINT | PARMRK))
++ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ u->read_status_mask |= AUART_STAT_BERR;
+
+ /*
+diff --git a/drivers/tty/serial/netx-serial.c b/drivers/tty/serial/netx-serial.c
+index 0a4dd70d29eb..7a6745601d4e 100644
+--- a/drivers/tty/serial/netx-serial.c
++++ b/drivers/tty/serial/netx-serial.c
+@@ -419,7 +419,7 @@ netx_set_termios(struct uart_port *port, struct ktermios *termios,
+ }
+
+ port->read_status_mask = 0;
+- if (termios->c_iflag & (BRKINT | PARMRK))
++ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ port->read_status_mask |= SR_BE;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= SR_PE | SR_FE;
+diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
+index e9d420ff3931..8193635103ee 100644
+--- a/drivers/tty/serial/pmac_zilog.c
++++ b/drivers/tty/serial/pmac_zilog.c
+@@ -1092,7 +1092,7 @@ static void pmz_convert_to_zs(struct uart_pmac_port *uap, unsigned int cflag,
+ uap->port.read_status_mask = Rx_OVR;
+ if (iflag & INPCK)
+ uap->port.read_status_mask |= CRC_ERR | PAR_ERR;
+- if (iflag & (BRKINT | PARMRK))
++ if (iflag & (IGNBRK | BRKINT | PARMRK))
+ uap->port.read_status_mask |= BRK_ABRT;
+
+ uap->port.ignore_status_mask = 0;
+diff --git a/drivers/tty/serial/pnx8xxx_uart.c b/drivers/tty/serial/pnx8xxx_uart.c
+index de6c05c63683..2ba24a45c97f 100644
+--- a/drivers/tty/serial/pnx8xxx_uart.c
++++ b/drivers/tty/serial/pnx8xxx_uart.c
+@@ -477,7 +477,7 @@ pnx8xxx_set_termios(struct uart_port *port, struct ktermios *termios,
+ sport->port.read_status_mask |=
+ FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE) |
+ FIFO_TO_SM(PNX8XXX_UART_FIFO_RXPAR);
+- if (termios->c_iflag & (BRKINT | PARMRK))
++ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ sport->port.read_status_mask |=
+ ISTAT_TO_SM(PNX8XXX_UART_INT_BREAK);
+
+diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
+index f9f20f383760..fc3f308cd6c1 100644
+--- a/drivers/tty/serial/pxa.c
++++ b/drivers/tty/serial/pxa.c
+@@ -492,7 +492,7 @@ serial_pxa_set_termios(struct uart_port *port, struct ktermios *termios,
+ up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ if (termios->c_iflag & INPCK)
+ up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+- if (termios->c_iflag & (BRKINT | PARMRK))
++ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ up->port.read_status_mask |= UART_LSR_BI;
+
+ /*
+diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
+index a7cdec2962dd..771f361c47ea 100644
+--- a/drivers/tty/serial/sb1250-duart.c
++++ b/drivers/tty/serial/sb1250-duart.c
+@@ -596,7 +596,7 @@ static void sbd_set_termios(struct uart_port *uport, struct ktermios *termios,
+ if (termios->c_iflag & INPCK)
+ uport->read_status_mask |= M_DUART_FRM_ERR |
+ M_DUART_PARITY_ERR;
+- if (termios->c_iflag & (BRKINT | PARMRK))
++ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ uport->read_status_mask |= M_DUART_RCVD_BRK;
+
+ uport->ignore_status_mask = 0;
+diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
+index a447f71538ef..75ea83278ba4 100644
+--- a/drivers/tty/serial/sccnxp.c
++++ b/drivers/tty/serial/sccnxp.c
+@@ -667,7 +667,7 @@ static void sccnxp_set_termios(struct uart_port *port,
+ port->read_status_mask = SR_OVR;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= SR_PE | SR_FE;
+- if (termios->c_iflag & (BRKINT | PARMRK))
++ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ port->read_status_mask |= SR_BRK;
+
+ /* Set status ignore mask */
+diff --git a/drivers/tty/serial/serial_ks8695.c b/drivers/tty/serial/serial_ks8695.c
+index e1caa99e3d3b..5c79bdab985d 100644
+--- a/drivers/tty/serial/serial_ks8695.c
++++ b/drivers/tty/serial/serial_ks8695.c
+@@ -437,7 +437,7 @@ static void ks8695uart_set_termios(struct uart_port *port, struct ktermios *term
+ port->read_status_mask = URLS_URROE;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= (URLS_URFE | URLS_URPE);
+- if (termios->c_iflag & (BRKINT | PARMRK))
++ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ port->read_status_mask |= URLS_URBI;
+
+ /*
+diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
+index 90a080b1f9ee..cbf501da5df3 100644
+--- a/drivers/tty/serial/serial_txx9.c
++++ b/drivers/tty/serial/serial_txx9.c
+@@ -702,7 +702,7 @@ serial_txx9_set_termios(struct uart_port *port, struct ktermios *termios,
+ TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS;
+ if (termios->c_iflag & INPCK)
+ up->port.read_status_mask |= TXX9_SIDISR_UFER | TXX9_SIDISR_UPER;
+- if (termios->c_iflag & (BRKINT | PARMRK))
++ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ up->port.read_status_mask |= TXX9_SIDISR_UBRK;
+
+ /*
+diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
+index b7bfe24d4ebc..020fe220d0e9 100644
+--- a/drivers/tty/serial/sirfsoc_uart.c
++++ b/drivers/tty/serial/sirfsoc_uart.c
+@@ -908,7 +908,7 @@ static void sirfsoc_uart_set_termios(struct uart_port *port,
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= uint_en->sirfsoc_frm_err_en;
+ }
+- if (termios->c_iflag & (BRKINT | PARMRK))
++ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ port->read_status_mask |= uint_en->sirfsoc_rxd_brk_en;
+ if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
+ if (termios->c_iflag & IGNPAR)
+diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
+index 21e6e84c0df8..0ee31755cb5a 100644
+--- a/drivers/tty/serial/st-asc.c
++++ b/drivers/tty/serial/st-asc.c
+@@ -547,7 +547,7 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
+ ascport->port.read_status_mask = ASC_RXBUF_DUMMY_OE;
+ if (termios->c_iflag & INPCK)
+ ascport->port.read_status_mask |= ASC_RXBUF_FE | ASC_RXBUF_PE;
+- if (termios->c_iflag & (BRKINT | PARMRK))
++ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ ascport->port.read_status_mask |= ASC_RXBUF_DUMMY_BE;
+
+ /*
+diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
+index 5faa8e905e98..80a58eca785b 100644
+--- a/drivers/tty/serial/sunsab.c
++++ b/drivers/tty/serial/sunsab.c
+@@ -719,7 +719,7 @@ static void sunsab_convert_to_sab(struct uart_sunsab_port *up, unsigned int cfla
+ if (iflag & INPCK)
+ up->port.read_status_mask |= (SAB82532_ISR0_PERR |
+ SAB82532_ISR0_FERR);
+- if (iflag & (BRKINT | PARMRK))
++ if (iflag & (IGNBRK | BRKINT | PARMRK))
+ up->port.read_status_mask |= (SAB82532_ISR1_BRK << 8);
+
+ /*
+diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
+index 9a0f24f83720..5326ae195e5f 100644
+--- a/drivers/tty/serial/sunsu.c
++++ b/drivers/tty/serial/sunsu.c
+@@ -834,7 +834,7 @@ sunsu_change_speed(struct uart_port *port, unsigned int cflag,
+ up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ if (iflag & INPCK)
+ up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+- if (iflag & (BRKINT | PARMRK))
++ if (iflag & (IGNBRK | BRKINT | PARMRK))
+ up->port.read_status_mask |= UART_LSR_BI;
+
+ /*
+diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
+index a2c40ed287d2..a85db8b87156 100644
+--- a/drivers/tty/serial/sunzilog.c
++++ b/drivers/tty/serial/sunzilog.c
+@@ -915,7 +915,7 @@ sunzilog_convert_to_zs(struct uart_sunzilog_port *up, unsigned int cflag,
+ up->port.read_status_mask = Rx_OVR;
+ if (iflag & INPCK)
+ up->port.read_status_mask |= CRC_ERR | PAR_ERR;
+- if (iflag & (BRKINT | PARMRK))
++ if (iflag & (IGNBRK | BRKINT | PARMRK))
+ up->port.read_status_mask |= BRK_ABRT;
+
+ up->port.ignore_status_mask = 0;
+diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
+index d569ca58bab6..1c52074c38df 100644
+--- a/drivers/tty/serial/ucc_uart.c
++++ b/drivers/tty/serial/ucc_uart.c
+@@ -936,7 +936,7 @@ static void qe_uart_set_termios(struct uart_port *port,
+ port->read_status_mask = BD_SC_EMPTY | BD_SC_OV;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= BD_SC_FR | BD_SC_PR;
+- if (termios->c_iflag & (BRKINT | PARMRK))
++ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ port->read_status_mask |= BD_SC_BR;
+
+ /*
+diff --git a/drivers/tty/serial/vr41xx_siu.c b/drivers/tty/serial/vr41xx_siu.c
+index a63c14bc9a24..db0c8a4ab03e 100644
+--- a/drivers/tty/serial/vr41xx_siu.c
++++ b/drivers/tty/serial/vr41xx_siu.c
+@@ -559,7 +559,7 @@ static void siu_set_termios(struct uart_port *port, struct ktermios *new,
+ port->read_status_mask = UART_LSR_THRE | UART_LSR_OE | UART_LSR_DR;
+ if (c_iflag & INPCK)
+ port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+- if (c_iflag & (BRKINT | PARMRK))
++ if (c_iflag & (IGNBRK | BRKINT | PARMRK))
+ port->read_status_mask |= UART_LSR_BI;
+
+ port->ignore_status_mask = 0;
+diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c
+index 6a169877109b..2b65bb7ffb8a 100644
+--- a/drivers/tty/serial/zs.c
++++ b/drivers/tty/serial/zs.c
+@@ -923,7 +923,7 @@ static void zs_set_termios(struct uart_port *uport, struct ktermios *termios,
+ uport->read_status_mask = Rx_OVR;
+ if (termios->c_iflag & INPCK)
+ uport->read_status_mask |= FRM_ERR | PAR_ERR;
+- if (termios->c_iflag & (BRKINT | PARMRK))
++ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+ uport->read_status_mask |= Rx_BRK;
+
+ uport->ignore_status_mask = 0;
+diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
+index 4ab2cb62dfce..3314516018c6 100644
+--- a/drivers/usb/chipidea/udc.c
++++ b/drivers/usb/chipidea/udc.c
+@@ -1326,6 +1326,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
+ struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
+ struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
+ unsigned long flags;
++ struct td_node *node, *tmpnode;
+
+ if (ep == NULL || req == NULL || hwreq->req.status != -EALREADY ||
+ hwep->ep.desc == NULL || list_empty(&hwreq->queue) ||
+@@ -1336,6 +1337,12 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
+
+ hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
+
++ list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
++ dma_pool_free(hwep->td_pool, node->ptr, node->dma);
++ list_del(&node->td);
++ kfree(node);
++ }
++
+ /* pop request */
+ list_del_init(&hwreq->queue);
+
+diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
+index 2b4334394076..5bcf7d001259 100644
+--- a/drivers/usb/gadget/f_fs.c
++++ b/drivers/usb/gadget/f_fs.c
+@@ -1227,11 +1227,13 @@ static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
+ ffs->ep0req->context = ffs;
+
+ lang = ffs->stringtabs;
+- for (lang = ffs->stringtabs; *lang; ++lang) {
+- struct usb_string *str = (*lang)->strings;
+- int id = first_id;
+- for (; str->s; ++id, ++str)
+- str->id = id;
++ if (lang) {
++ for (; *lang; ++lang) {
++ struct usb_string *str = (*lang)->strings;
++ int id = first_id;
++ for (; str->s; ++id, ++str)
++ str->id = id;
++ }
+ }
+
+ ffs->gadget = cdev->gadget;
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index dff9b5ead3b3..65091d9aa997 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1578,8 +1578,11 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
+ xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
+ break;
+ case TRB_RESET_DEV:
+- WARN_ON(slot_id != TRB_TO_SLOT_ID(
+- le32_to_cpu(cmd_trb->generic.field[3])));
++ /* SLOT_ID field in reset device cmd completion event TRB is 0.
++ * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
++ */
++ slot_id = TRB_TO_SLOT_ID(
++ le32_to_cpu(cmd_trb->generic.field[3]));
+ xhci_handle_cmd_reset_dev(xhci, slot_id, event);
+ break;
+ case TRB_NEC_GET_FW:
+@@ -3663,7 +3666,7 @@ static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
+ return 0;
+
+ max_burst = urb->ep->ss_ep_comp.bMaxBurst;
+- return roundup(total_packet_count, max_burst + 1) - 1;
++ return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
+ }
+
+ /*
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index cca289667cf6..ab831048e8a4 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -928,7 +928,7 @@ int xhci_suspend(struct xhci_hcd *xhci)
+ */
+ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ {
+- u32 command, temp = 0;
++ u32 command, temp = 0, status;
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ struct usb_hcd *secondary_hcd;
+ int retval = 0;
+@@ -1046,8 +1046,12 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+
+ done:
+ if (retval == 0) {
+- usb_hcd_resume_root_hub(hcd);
+- usb_hcd_resume_root_hub(xhci->shared_hcd);
++ /* Resume root hubs only when have pending events. */
++ status = readl(&xhci->op_regs->status);
++ if (status & STS_EINT) {
++ usb_hcd_resume_root_hub(hcd);
++ usb_hcd_resume_root_hub(xhci->shared_hcd);
++ }
+ }
+
+ /*
+diff --git a/drivers/usb/musb/musb_am335x.c b/drivers/usb/musb/musb_am335x.c
+index d2353781bd2d..1e58ed2361cc 100644
+--- a/drivers/usb/musb/musb_am335x.c
++++ b/drivers/usb/musb/musb_am335x.c
+@@ -19,21 +19,6 @@ err:
+ return ret;
+ }
+
+-static int of_remove_populated_child(struct device *dev, void *d)
+-{
+- struct platform_device *pdev = to_platform_device(dev);
+-
+- of_device_unregister(pdev);
+- return 0;
+-}
+-
+-static int am335x_child_remove(struct platform_device *pdev)
+-{
+- device_for_each_child(&pdev->dev, NULL, of_remove_populated_child);
+- pm_runtime_disable(&pdev->dev);
+- return 0;
+-}
+-
+ static const struct of_device_id am335x_child_of_match[] = {
+ { .compatible = "ti,am33xx-usb" },
+ { },
+@@ -42,13 +27,17 @@ MODULE_DEVICE_TABLE(of, am335x_child_of_match);
+
+ static struct platform_driver am335x_child_driver = {
+ .probe = am335x_child_probe,
+- .remove = am335x_child_remove,
+ .driver = {
+ .name = "am335x-usb-childs",
+ .of_match_table = am335x_child_of_match,
+ },
+ };
+
+-module_platform_driver(am335x_child_driver);
++static int __init am335x_child_init(void)
++{
++ return platform_driver_register(&am335x_child_driver);
++}
++module_init(am335x_child_init);
++
+ MODULE_DESCRIPTION("AM33xx child devices");
+ MODULE_LICENSE("GPL v2");
+diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
+index f88929609bac..c2d5afc57e22 100644
+--- a/drivers/usb/musb/musb_cppi41.c
++++ b/drivers/usb/musb/musb_cppi41.c
+@@ -266,7 +266,7 @@ static void cppi41_dma_callback(void *private_data)
+ }
+ list_add_tail(&cppi41_channel->tx_check,
+ &controller->early_tx_list);
+- if (!hrtimer_active(&controller->early_tx)) {
++ if (!hrtimer_is_queued(&controller->early_tx)) {
+ hrtimer_start_range_ns(&controller->early_tx,
+ ktime_set(0, 140 * NSEC_PER_USEC),
+ 40 * NSEC_PER_USEC,
+diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
+index c2e45e632723..f202e5088461 100644
+--- a/drivers/usb/musb/ux500.c
++++ b/drivers/usb/musb/ux500.c
+@@ -274,7 +274,6 @@ static int ux500_probe(struct platform_device *pdev)
+ musb->dev.parent = &pdev->dev;
+ musb->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ musb->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask;
+- musb->dev.of_node = pdev->dev.of_node;
+
+ glue->dev = &pdev->dev;
+ glue->musb = musb;
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index edf3b124583c..115662c16dcc 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1566,14 +1566,17 @@ static void ftdi_set_max_packet_size(struct usb_serial_port *port)
+ struct usb_device *udev = serial->dev;
+
+ struct usb_interface *interface = serial->interface;
+- struct usb_endpoint_descriptor *ep_desc = &interface->cur_altsetting->endpoint[1].desc;
++ struct usb_endpoint_descriptor *ep_desc;
+
+ unsigned num_endpoints;
+- int i;
++ unsigned i;
+
+ num_endpoints = interface->cur_altsetting->desc.bNumEndpoints;
+ dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints);
+
++ if (!num_endpoints)
++ return;
++
+ /* NOTE: some customers have programmed FT232R/FT245R devices
+ * with an endpoint size of 0 - not good. In this case, we
+ * want to override the endpoint descriptor setting and use a
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 70ede84f4f6b..e25e8ca09fe2 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -352,6 +352,9 @@ static void option_instat_callback(struct urb *urb);
+ /* Zoom */
+ #define ZOOM_PRODUCT_4597 0x9607
+
++/* SpeedUp SU9800 usb 3g modem */
++#define SPEEDUP_PRODUCT_SU9800 0x9800
++
+ /* Haier products */
+ #define HAIER_VENDOR_ID 0x201e
+ #define HAIER_PRODUCT_CE100 0x2009
+@@ -372,8 +375,12 @@ static void option_instat_callback(struct urb *urb);
+ /* Olivetti products */
+ #define OLIVETTI_VENDOR_ID 0x0b3c
+ #define OLIVETTI_PRODUCT_OLICARD100 0xc000
++#define OLIVETTI_PRODUCT_OLICARD120 0xc001
++#define OLIVETTI_PRODUCT_OLICARD140 0xc002
+ #define OLIVETTI_PRODUCT_OLICARD145 0xc003
++#define OLIVETTI_PRODUCT_OLICARD155 0xc004
+ #define OLIVETTI_PRODUCT_OLICARD200 0xc005
++#define OLIVETTI_PRODUCT_OLICARD160 0xc00a
+ #define OLIVETTI_PRODUCT_OLICARD500 0xc00b
+
+ /* Celot products */
+@@ -1577,6 +1584,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
+ .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
+ },
++ { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
+ { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
+@@ -1611,15 +1619,21 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
+ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
+ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+-
+- { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
++ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD140),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
++ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD155),
++ .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
+- .driver_info = (kernel_ulong_t)&net_intf6_blacklist
+- },
++ .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
++ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD160),
++ .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500),
+- .driver_info = (kernel_ulong_t)&net_intf4_blacklist
+- },
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
+ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
+diff --git a/drivers/video/fb-puv3.c b/drivers/video/fb-puv3.c
+index 6db9ebd042a3..88fa2e70a0bb 100644
+--- a/drivers/video/fb-puv3.c
++++ b/drivers/video/fb-puv3.c
+@@ -18,8 +18,10 @@
+ #include <linux/fb.h>
+ #include <linux/init.h>
+ #include <linux/console.h>
++#include <linux/mm.h>
+
+ #include <asm/sizes.h>
++#include <asm/pgtable.h>
+ #include <mach/hardware.h>
+
+ /* Platform_data reserved for unifb registers. */
+diff --git a/fs/aio.c b/fs/aio.c
+index 19e7d9530dbe..e609e15f36b9 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -112,6 +112,11 @@ struct kioctx {
+
+ struct work_struct free_work;
+
++ /*
++ * signals when all in-flight requests are done
++ */
++ struct completion *requests_done;
++
+ struct {
+ /*
+ * This counts the number of available slots in the ringbuffer,
+@@ -508,6 +513,10 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
+ {
+ struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
+
++ /* At this point we know that there are no any in-flight requests */
++ if (ctx->requests_done)
++ complete(ctx->requests_done);
++
+ INIT_WORK(&ctx->free_work, free_ioctx);
+ schedule_work(&ctx->free_work);
+ }
+@@ -718,7 +727,8 @@ err:
+ * when the processes owning a context have all exited to encourage
+ * the rapid destruction of the kioctx.
+ */
+-static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
++static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
++ struct completion *requests_done)
+ {
+ if (!atomic_xchg(&ctx->dead, 1)) {
+ struct kioctx_table *table;
+@@ -747,7 +757,11 @@ static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
+ if (ctx->mmap_size)
+ vm_munmap(ctx->mmap_base, ctx->mmap_size);
+
++ ctx->requests_done = requests_done;
+ percpu_ref_kill(&ctx->users);
++ } else {
++ if (requests_done)
++ complete(requests_done);
+ }
+ }
+
+@@ -809,7 +823,7 @@ void exit_aio(struct mm_struct *mm)
+ */
+ ctx->mmap_size = 0;
+
+- kill_ioctx(mm, ctx);
++ kill_ioctx(mm, ctx, NULL);
+ }
+ }
+
+@@ -1187,7 +1201,7 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
+ if (!IS_ERR(ioctx)) {
+ ret = put_user(ioctx->user_id, ctxp);
+ if (ret)
+- kill_ioctx(current->mm, ioctx);
++ kill_ioctx(current->mm, ioctx, NULL);
+ percpu_ref_put(&ioctx->users);
+ }
+
+@@ -1205,8 +1219,22 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
+ {
+ struct kioctx *ioctx = lookup_ioctx(ctx);
+ if (likely(NULL != ioctx)) {
+- kill_ioctx(current->mm, ioctx);
++ struct completion requests_done =
++ COMPLETION_INITIALIZER_ONSTACK(requests_done);
++
++ /* Pass requests_done to kill_ioctx() where it can be set
++ * in a thread-safe way. If we try to set it here then we have
++ * a race condition if two io_destroy() called simultaneously.
++ */
++ kill_ioctx(current->mm, ioctx, &requests_done);
+ percpu_ref_put(&ioctx->users);
++
++ /* Wait until all IO for the context are done. Otherwise kernel
++ * keep using user-space buffers even if user thinks the context
++ * is destroyed.
++ */
++ wait_for_completion(&requests_done);
++
+ return 0;
+ }
+ pr_debug("EINVAL: io_destroy: invalid context id\n");
+diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
+index 0227b45ef00a..15e9505aa35f 100644
+--- a/fs/cifs/cifs_unicode.c
++++ b/fs/cifs/cifs_unicode.c
+@@ -290,7 +290,8 @@ int
+ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
+ const struct nls_table *cp, int mapChars)
+ {
+- int i, j, charlen;
++ int i, charlen;
++ int j = 0;
+ char src_char;
+ __le16 dst_char;
+ wchar_t tmp;
+@@ -298,12 +299,11 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
+ if (!mapChars)
+ return cifs_strtoUTF16(target, source, PATH_MAX, cp);
+
+- for (i = 0, j = 0; i < srclen; j++) {
++ for (i = 0; i < srclen; j++) {
+ src_char = source[i];
+ charlen = 1;
+ switch (src_char) {
+ case 0:
+- put_unaligned(0, &target[j]);
+ goto ctoUTF16_out;
+ case ':':
+ dst_char = cpu_to_le16(UNI_COLON);
+@@ -350,6 +350,7 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
+ }
+
+ ctoUTF16_out:
++ put_unaligned(0, &target[j]); /* Null terminate target unicode string */
+ return j;
+ }
+
+diff --git a/fs/cifs/link.c b/fs/cifs/link.c
+index 264ece71bdb2..68559fd557fb 100644
+--- a/fs/cifs/link.c
++++ b/fs/cifs/link.c
+@@ -374,7 +374,7 @@ cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+ oparms.cifs_sb = cifs_sb;
+ oparms.desired_access = GENERIC_WRITE;
+ oparms.create_options = create_options;
+- oparms.disposition = FILE_OPEN;
++ oparms.disposition = FILE_CREATE;
+ oparms.path = path;
+ oparms.fid = &fid;
+ oparms.reconnect = false;
+diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
+index 594009f5f523..e6574d7b6642 100644
+--- a/fs/ext4/indirect.c
++++ b/fs/ext4/indirect.c
+@@ -389,7 +389,13 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
+ return 0;
+ failed:
+ for (; i >= 0; i--) {
+- if (i != indirect_blks && branch[i].bh)
++ /*
++ * We want to ext4_forget() only freshly allocated indirect
++ * blocks. Buffer for new_blocks[i-1] is at branch[i].bh and
++ * buffer at branch[0].bh is indirect block / inode already
++ * existing before ext4_alloc_branch() was called.
++ */
++ if (i > 0 && i != indirect_blks && branch[i].bh)
+ ext4_forget(handle, 1, inode, branch[i].bh,
+ branch[i].bh->b_blocknr);
+ ext4_free_blocks(handle, inode, NULL, new_blocks[i],
+@@ -1312,16 +1318,24 @@ static int free_hole_blocks(handle_t *handle, struct inode *inode,
+ blk = *i_data;
+ if (level > 0) {
+ ext4_lblk_t first2;
++ ext4_lblk_t count2;
++
+ bh = sb_bread(inode->i_sb, le32_to_cpu(blk));
+ if (!bh) {
+ EXT4_ERROR_INODE_BLOCK(inode, le32_to_cpu(blk),
+ "Read failure");
+ return -EIO;
+ }
+- first2 = (first > offset) ? first - offset : 0;
++ if (first > offset) {
++ first2 = first - offset;
++ count2 = count;
++ } else {
++ first2 = 0;
++ count2 = count - (offset - first);
++ }
+ ret = free_hole_blocks(handle, inode, bh,
+ (__le32 *)bh->b_data, level - 1,
+- first2, count - offset,
++ first2, count2,
+ inode->i_sb->s_blocksize >> 2);
+ if (ret) {
+ brelse(bh);
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 9a914e892bb1..f23a6ca37504 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -618,15 +618,6 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+
+ switch (create->cr_type) {
+ case NF4LNK:
+- /* ugh! we have to null-terminate the linktext, or
+- * vfs_symlink() will choke. it is always safe to
+- * null-terminate by brute force, since at worst we
+- * will overwrite the first byte of the create namelen
+- * in the XDR buffer, which has already been extracted
+- * during XDR decode.
+- */
+- create->cr_linkname[create->cr_linklen] = 0;
+-
+ status = nfsd_symlink(rqstp, &cstate->current_fh,
+ create->cr_name, create->cr_namelen,
+ create->cr_linkname, create->cr_linklen,
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index bc11bf68ec7f..86573350350e 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -602,7 +602,18 @@ nfsd4_decode_create(struct nfsd4_compoundargs *argp, struct nfsd4_create *create
+ READ_BUF(4);
+ READ32(create->cr_linklen);
+ READ_BUF(create->cr_linklen);
+- SAVEMEM(create->cr_linkname, create->cr_linklen);
++ /*
++ * The VFS will want a null-terminated string, and
++ * null-terminating in place isn't safe since this might
++ * end on a page boundary:
++ */
++ create->cr_linkname =
++ kmalloc(create->cr_linklen + 1, GFP_KERNEL);
++ if (!create->cr_linkname)
++ return nfserr_jukebox;
++ memcpy(create->cr_linkname, p, create->cr_linklen);
++ create->cr_linkname[create->cr_linklen] = '\0';
++ defer_free(argp, kfree, create->cr_linkname);
+ break;
+ case NF4BLK:
+ case NF4CHR:
+diff --git a/include/uapi/linux/usb/functionfs.h b/include/uapi/linux/usb/functionfs.h
+index d6b01283f85c..29e9c7aa9c66 100644
+--- a/include/uapi/linux/usb/functionfs.h
++++ b/include/uapi/linux/usb/functionfs.h
+@@ -27,6 +27,13 @@ struct usb_endpoint_descriptor_no_audio {
+ __u8 bInterval;
+ } __attribute__((packed));
+
++/* Legacy format, deprecated as of 3.14. */
++struct usb_functionfs_descs_head {
++ __le32 magic;
++ __le32 length;
++ __le32 fs_count;
++ __le32 hs_count;
++} __attribute__((packed, deprecated));
+
+ /*
+ * All numbers must be in little endian order.
+diff --git a/kernel/auditsc.c b/kernel/auditsc.c
+index 37e621606807..619b58d3fcdf 100644
+--- a/kernel/auditsc.c
++++ b/kernel/auditsc.c
+@@ -1991,7 +1991,7 @@ static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid,
+ if (!ab)
+ return;
+ audit_log_format(ab, "pid=%d uid=%u"
+- " old-auid=%u new-auid=%u old-ses=%u new-ses=%u"
++ " old-auid=%u auid=%u old-ses=%u ses=%u"
+ " res=%d",
+ current->pid, uid,
+ oldloginuid, loginuid, oldsessionid, sessionid,
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index aae21e842918..c1b26e176aa6 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -139,7 +139,6 @@ static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
+ /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
+ static int maxolduid = 65535;
+ static int minolduid;
+-static int min_percpu_pagelist_fract = 8;
+
+ static int ngroups_max = NGROUPS_MAX;
+ static const int cap_last_cap = CAP_LAST_CAP;
+@@ -1324,7 +1323,7 @@ static struct ctl_table vm_table[] = {
+ .maxlen = sizeof(percpu_pagelist_fraction),
+ .mode = 0644,
+ .proc_handler = percpu_pagelist_fraction_sysctl_handler,
+- .extra1 = &min_percpu_pagelist_fract,
++ .extra1 = &zero,
+ },
+ #ifdef CONFIG_MMU
+ {
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index f0831c22760c..fd21e601a891 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1355,7 +1355,6 @@ void tracing_start(void)
+
+ arch_spin_unlock(&ftrace_max_lock);
+
+- ftrace_start();
+ out:
+ raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
+ }
+@@ -1402,7 +1401,6 @@ void tracing_stop(void)
+ struct ring_buffer *buffer;
+ unsigned long flags;
+
+- ftrace_stop();
+ raw_spin_lock_irqsave(&global_trace.start_lock, flags);
+ if (global_trace.stop_count++)
+ goto out;
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 06a9bc0a3120..30dd6265a141 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2338,6 +2338,31 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
+ update_mmu_cache(vma, address, ptep);
+ }
+
++static int is_hugetlb_entry_migration(pte_t pte)
++{
++ swp_entry_t swp;
++
++ if (huge_pte_none(pte) || pte_present(pte))
++ return 0;
++ swp = pte_to_swp_entry(pte);
++ if (non_swap_entry(swp) && is_migration_entry(swp))
++ return 1;
++ else
++ return 0;
++}
++
++static int is_hugetlb_entry_hwpoisoned(pte_t pte)
++{
++ swp_entry_t swp;
++
++ if (huge_pte_none(pte) || pte_present(pte))
++ return 0;
++ swp = pte_to_swp_entry(pte);
++ if (non_swap_entry(swp) && is_hwpoison_entry(swp))
++ return 1;
++ else
++ return 0;
++}
+
+ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
+ struct vm_area_struct *vma)
+@@ -2377,10 +2402,26 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
+ dst_ptl = huge_pte_lock(h, dst, dst_pte);
+ src_ptl = huge_pte_lockptr(h, src, src_pte);
+ spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
+- if (!huge_pte_none(huge_ptep_get(src_pte))) {
++ entry = huge_ptep_get(src_pte);
++ if (huge_pte_none(entry)) { /* skip none entry */
++ ;
++ } else if (unlikely(is_hugetlb_entry_migration(entry) ||
++ is_hugetlb_entry_hwpoisoned(entry))) {
++ swp_entry_t swp_entry = pte_to_swp_entry(entry);
++
++ if (is_write_migration_entry(swp_entry) && cow) {
++ /*
++ * COW mappings require pages in both
++ * parent and child to be set to read.
++ */
++ make_migration_entry_read(&swp_entry);
++ entry = swp_entry_to_pte(swp_entry);
++ set_huge_pte_at(src, addr, src_pte, entry);
++ }
++ set_huge_pte_at(dst, addr, dst_pte, entry);
++ } else {
+ if (cow)
+ huge_ptep_set_wrprotect(src, addr, src_pte);
+- entry = huge_ptep_get(src_pte);
+ ptepage = pte_page(entry);
+ get_page(ptepage);
+ page_dup_rmap(ptepage);
+@@ -2396,32 +2437,6 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
+ return ret;
+ }
+
+-static int is_hugetlb_entry_migration(pte_t pte)
+-{
+- swp_entry_t swp;
+-
+- if (huge_pte_none(pte) || pte_present(pte))
+- return 0;
+- swp = pte_to_swp_entry(pte);
+- if (non_swap_entry(swp) && is_migration_entry(swp))
+- return 1;
+- else
+- return 0;
+-}
+-
+-static int is_hugetlb_entry_hwpoisoned(pte_t pte)
+-{
+- swp_entry_t swp;
+-
+- if (huge_pte_none(pte) || pte_present(pte))
+- return 0;
+- swp = pte_to_swp_entry(pte);
+- if (non_swap_entry(swp) && is_hwpoison_entry(swp))
+- return 1;
+- else
+- return 0;
+-}
+-
+ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ struct page *ref_page)
+diff --git a/mm/memory.c b/mm/memory.c
+index 49e930f9ed46..2121d8b8db56 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3756,9 +3756,6 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ }
+ }
+
+- /* THP should already have been handled */
+- BUG_ON(pmd_numa(*pmd));
+-
+ /*
+ * Use __pte_alloc instead of pte_alloc_map, because we can't
+ * run pte_offset_map on the pmd, if an huge pmd could
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 56224d998c39..9c6288aea4f9 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -653,19 +653,18 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma,
+ * @nodes and @flags,) it's isolated and queued to the pagelist which is
+ * passed via @private.)
+ */
+-static struct vm_area_struct *
++static int
+ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
+ const nodemask_t *nodes, unsigned long flags, void *private)
+ {
+- int err;
+- struct vm_area_struct *first, *vma, *prev;
+-
++ int err = 0;
++ struct vm_area_struct *vma, *prev;
+
+- first = find_vma(mm, start);
+- if (!first)
+- return ERR_PTR(-EFAULT);
++ vma = find_vma(mm, start);
++ if (!vma)
++ return -EFAULT;
+ prev = NULL;
+- for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
++ for (; vma && vma->vm_start < end; vma = vma->vm_next) {
+ unsigned long endvma = vma->vm_end;
+
+ if (endvma > end)
+@@ -675,9 +674,9 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
+
+ if (!(flags & MPOL_MF_DISCONTIG_OK)) {
+ if (!vma->vm_next && vma->vm_end < end)
+- return ERR_PTR(-EFAULT);
++ return -EFAULT;
+ if (prev && prev->vm_end < vma->vm_start)
+- return ERR_PTR(-EFAULT);
++ return -EFAULT;
+ }
+
+ if (flags & MPOL_MF_LAZY) {
+@@ -691,15 +690,13 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
+
+ err = queue_pages_pgd_range(vma, start, endvma, nodes,
+ flags, private);
+- if (err) {
+- first = ERR_PTR(err);
++ if (err)
+ break;
+- }
+ }
+ next:
+ prev = vma;
+ }
+- return first;
++ return err;
+ }
+
+ /*
+@@ -1184,16 +1181,17 @@ out:
+
+ /*
+ * Allocate a new page for page migration based on vma policy.
+- * Start assuming that page is mapped by vma pointed to by @private.
++ * Start by assuming the page is mapped by the same vma as contains @start.
+ * Search forward from there, if not. N.B., this assumes that the
+ * list of pages handed to migrate_pages()--which is how we get here--
+ * is in virtual address order.
+ */
+-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
++static struct page *new_page(struct page *page, unsigned long start, int **x)
+ {
+- struct vm_area_struct *vma = (struct vm_area_struct *)private;
++ struct vm_area_struct *vma;
+ unsigned long uninitialized_var(address);
+
++ vma = find_vma(current->mm, start);
+ while (vma) {
+ address = page_address_in_vma(page, vma);
+ if (address != -EFAULT)
+@@ -1223,7 +1221,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
+ return -ENOSYS;
+ }
+
+-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
++static struct page *new_page(struct page *page, unsigned long start, int **x)
+ {
+ return NULL;
+ }
+@@ -1233,7 +1231,6 @@ static long do_mbind(unsigned long start, unsigned long len,
+ unsigned short mode, unsigned short mode_flags,
+ nodemask_t *nmask, unsigned long flags)
+ {
+- struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+ struct mempolicy *new;
+ unsigned long end;
+@@ -1299,11 +1296,9 @@ static long do_mbind(unsigned long start, unsigned long len,
+ if (err)
+ goto mpol_out;
+
+- vma = queue_pages_range(mm, start, end, nmask,
++ err = queue_pages_range(mm, start, end, nmask,
+ flags | MPOL_MF_INVERT, &pagelist);
+-
+- err = PTR_ERR(vma); /* maybe ... */
+- if (!IS_ERR(vma))
++ if (!err)
+ err = mbind_range(mm, start, end, new);
+
+ if (!err) {
+@@ -1311,9 +1306,8 @@ static long do_mbind(unsigned long start, unsigned long len,
+
+ if (!list_empty(&pagelist)) {
+ WARN_ON_ONCE(flags & MPOL_MF_LAZY);
+- nr_failed = migrate_pages(&pagelist, new_vma_page,
+- (unsigned long)vma,
+- MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
++ nr_failed = migrate_pages(&pagelist, new_page,
++ start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
+ if (nr_failed)
+ putback_movable_pages(&pagelist);
+ }
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 4b5d4f6360d2..7e7f94755ab5 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -69,6 +69,7 @@
+
+ /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
+ static DEFINE_MUTEX(pcp_batch_high_lock);
++#define MIN_PERCPU_PAGELIST_FRACTION (8)
+
+ #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
+ DEFINE_PER_CPU(int, numa_node);
+@@ -798,9 +799,21 @@ void __init init_cma_reserved_pageblock(struct page *page)
+ set_page_count(p, 0);
+ } while (++p, --i);
+
+- set_page_refcounted(page);
+ set_pageblock_migratetype(page, MIGRATE_CMA);
+- __free_pages(page, pageblock_order);
++
++ if (pageblock_order >= MAX_ORDER) {
++ i = pageblock_nr_pages;
++ p = page;
++ do {
++ set_page_refcounted(p);
++ __free_pages(p, MAX_ORDER - 1);
++ p += MAX_ORDER_NR_PAGES;
++ } while (i -= MAX_ORDER_NR_PAGES);
++ } else {
++ set_page_refcounted(page);
++ __free_pages(page, pageblock_order);
++ }
++
+ adjust_managed_page_count(page, pageblock_nr_pages);
+ }
+ #endif
+@@ -4106,7 +4119,7 @@ static void __meminit zone_init_free_lists(struct zone *zone)
+ memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
+ #endif
+
+-static int __meminit zone_batchsize(struct zone *zone)
++static int zone_batchsize(struct zone *zone)
+ {
+ #ifdef CONFIG_MMU
+ int batch;
+@@ -4222,8 +4235,8 @@ static void pageset_set_high(struct per_cpu_pageset *p,
+ pageset_update(&p->pcp, high, batch);
+ }
+
+-static void __meminit pageset_set_high_and_batch(struct zone *zone,
+- struct per_cpu_pageset *pcp)
++static void pageset_set_high_and_batch(struct zone *zone,
++ struct per_cpu_pageset *pcp)
+ {
+ if (percpu_pagelist_fraction)
+ pageset_set_high(pcp,
+@@ -5848,23 +5861,38 @@ int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
+ void __user *buffer, size_t *length, loff_t *ppos)
+ {
+ struct zone *zone;
+- unsigned int cpu;
++ int old_percpu_pagelist_fraction;
+ int ret;
+
++ mutex_lock(&pcp_batch_high_lock);
++ old_percpu_pagelist_fraction = percpu_pagelist_fraction;
++
+ ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
+- if (!write || (ret < 0))
+- return ret;
++ if (!write || ret < 0)
++ goto out;
++
++ /* Sanity checking to avoid pcp imbalance */
++ if (percpu_pagelist_fraction &&
++ percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
++ percpu_pagelist_fraction = old_percpu_pagelist_fraction;
++ ret = -EINVAL;
++ goto out;
++ }
++
++ /* No change? */
++ if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
++ goto out;
+
+- mutex_lock(&pcp_batch_high_lock);
+ for_each_populated_zone(zone) {
+- unsigned long high;
+- high = zone->managed_pages / percpu_pagelist_fraction;
++ unsigned int cpu;
++
+ for_each_possible_cpu(cpu)
+- pageset_set_high(per_cpu_ptr(zone->pageset, cpu),
+- high);
++ pageset_set_high_and_batch(zone,
++ per_cpu_ptr(zone->pageset, cpu));
+ }
++out:
+ mutex_unlock(&pcp_batch_high_lock);
+- return 0;
++ return ret;
+ }
+
+ int hashdist = HASHDIST_DEFAULT;
+diff --git a/mm/slab.c b/mm/slab.c
+index b264214c77ea..6dd8d5f3a3ac 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -375,6 +375,39 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
+
+ #endif
+
++#define OBJECT_FREE (0)
++#define OBJECT_ACTIVE (1)
++
++#ifdef CONFIG_DEBUG_SLAB_LEAK
++
++static void set_obj_status(struct page *page, int idx, int val)
++{
++ int freelist_size;
++ char *status;
++ struct kmem_cache *cachep = page->slab_cache;
++
++ freelist_size = cachep->num * sizeof(unsigned int);
++ status = (char *)page->freelist + freelist_size;
++ status[idx] = val;
++}
++
++static inline unsigned int get_obj_status(struct page *page, int idx)
++{
++ int freelist_size;
++ char *status;
++ struct kmem_cache *cachep = page->slab_cache;
++
++ freelist_size = cachep->num * sizeof(unsigned int);
++ status = (char *)page->freelist + freelist_size;
++
++ return status[idx];
++}
++
++#else
++static inline void set_obj_status(struct page *page, int idx, int val) {}
++
++#endif
++
+ /*
+ * Do not go above this order unless 0 objects fit into the slab or
+ * overridden on the command line.
+@@ -565,9 +598,18 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
+ return cachep->array[smp_processor_id()];
+ }
+
+-static size_t slab_mgmt_size(size_t nr_objs, size_t align)
++static size_t calculate_freelist_size(int nr_objs, size_t align)
+ {
+- return ALIGN(nr_objs * sizeof(unsigned int), align);
++ size_t freelist_size;
++
++ freelist_size = nr_objs * sizeof(unsigned int);
++ if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
++ freelist_size += nr_objs * sizeof(char);
++
++ if (align)
++ freelist_size = ALIGN(freelist_size, align);
++
++ return freelist_size;
+ }
+
+ /*
+@@ -600,6 +642,10 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
+ nr_objs = slab_size / buffer_size;
+
+ } else {
++ int extra_space = 0;
++
++ if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
++ extra_space = sizeof(char);
+ /*
+ * Ignore padding for the initial guess. The padding
+ * is at most @align-1 bytes, and @buffer_size is at
+@@ -608,17 +654,18 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
+ * into the memory allocation when taking the padding
+ * into account.
+ */
+- nr_objs = (slab_size) / (buffer_size + sizeof(unsigned int));
++ nr_objs = (slab_size) /
++ (buffer_size + sizeof(unsigned int) + extra_space);
+
+ /*
+ * This calculated number will be either the right
+ * amount, or one greater than what we want.
+ */
+- if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
+- > slab_size)
++ if (calculate_freelist_size(nr_objs, align) >
++ slab_size - nr_objs * buffer_size)
+ nr_objs--;
+
+- mgmt_size = slab_mgmt_size(nr_objs, align);
++ mgmt_size = calculate_freelist_size(nr_objs, align);
+ }
+ *num = nr_objs;
+ *left_over = slab_size - nr_objs*buffer_size - mgmt_size;
+@@ -2011,13 +2058,16 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
+ continue;
+
+ if (flags & CFLGS_OFF_SLAB) {
++ size_t freelist_size_per_obj = sizeof(unsigned int);
+ /*
+ * Max number of objs-per-slab for caches which
+ * use off-slab slabs. Needed to avoid a possible
+ * looping condition in cache_grow().
+ */
++ if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
++ freelist_size_per_obj += sizeof(char);
+ offslab_limit = size;
+- offslab_limit /= sizeof(unsigned int);
++ offslab_limit /= freelist_size_per_obj;
+
+ if (num > offslab_limit)
+ break;
+@@ -2258,8 +2308,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
+ if (!cachep->num)
+ return -E2BIG;
+
+- freelist_size =
+- ALIGN(cachep->num * sizeof(unsigned int), cachep->align);
++ freelist_size = calculate_freelist_size(cachep->num, cachep->align);
+
+ /*
+ * If the slab has been placed off-slab, and we have enough space then
+@@ -2272,7 +2321,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
+
+ if (flags & CFLGS_OFF_SLAB) {
+ /* really off slab. No need for manual alignment */
+- freelist_size = cachep->num * sizeof(unsigned int);
++ freelist_size = calculate_freelist_size(cachep->num, 0);
+
+ #ifdef CONFIG_PAGE_POISONING
+ /* If we're going to use the generic kernel_map_pages()
+@@ -2589,6 +2638,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
+ if (cachep->ctor)
+ cachep->ctor(objp);
+ #endif
++ set_obj_status(page, i, OBJECT_FREE);
+ slab_freelist(page)[i] = i;
+ }
+ }
+@@ -2797,6 +2847,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
+ BUG_ON(objnr >= cachep->num);
+ BUG_ON(objp != index_to_obj(cachep, page, objnr));
+
++ set_obj_status(page, objnr, OBJECT_FREE);
+ if (cachep->flags & SLAB_POISON) {
+ #ifdef CONFIG_DEBUG_PAGEALLOC
+ if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
+@@ -2930,6 +2981,8 @@ static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
+ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
+ gfp_t flags, void *objp, unsigned long caller)
+ {
++ struct page *page;
++
+ if (!objp)
+ return objp;
+ if (cachep->flags & SLAB_POISON) {
+@@ -2960,6 +3013,9 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
+ *dbg_redzone1(cachep, objp) = RED_ACTIVE;
+ *dbg_redzone2(cachep, objp) = RED_ACTIVE;
+ }
++
++ page = virt_to_head_page(objp);
++ set_obj_status(page, obj_to_index(cachep, page, objp), OBJECT_ACTIVE);
+ objp += obj_offset(cachep);
+ if (cachep->ctor && cachep->flags & SLAB_POISON)
+ cachep->ctor(objp);
+@@ -4201,21 +4257,12 @@ static void handle_slab(unsigned long *n, struct kmem_cache *c,
+ struct page *page)
+ {
+ void *p;
+- int i, j;
++ int i;
+
+ if (n[0] == n[1])
+ return;
+ for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
+- bool active = true;
+-
+- for (j = page->active; j < c->num; j++) {
+- /* Skip freed item */
+- if (slab_freelist(page)[j] == i) {
+- active = false;
+- break;
+- }
+- }
+- if (!active)
++ if (get_obj_status(page, i) != OBJECT_ACTIVE)
+ continue;
+
+ if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 9a6bc9df5e81..1c6e950b9ae7 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -759,7 +759,7 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
+ /* If we're already encrypted set the REAUTH_PEND flag,
+ * otherwise set the ENCRYPT_PEND.
+ */
+- if (conn->key_type != 0xff)
++ if (conn->link_mode & HCI_LM_ENCRYPT)
+ set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
+ else
+ set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 66ada7794ed0..2e8c5765e5ea 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -48,6 +48,10 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
+ smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
+ wake_up_bit(&hdev->flags, HCI_INQUIRY);
+
++ hci_dev_lock(hdev);
++ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
++ hci_dev_unlock(hdev);
++
+ hci_conn_check_pending(hdev);
+ }
+
+@@ -3177,8 +3181,11 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
+
+ /* If we're not the initiators request authorization to
+ * proceed from user space (mgmt_user_confirm with
+- * confirm_hint set to 1). */
+- if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
++ * confirm_hint set to 1). The exception is if neither
++ * side had MITM in which case we do auto-accept.
++ */
++ if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
++ (loc_mitm || rem_mitm)) {
+ BT_DBG("Confirming auto-accept as acceptor");
+ confirm_hint = 1;
+ goto confirm;
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index d4b7702d900f..27ae84154586 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -778,11 +778,6 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+
+ /*change security for LE channels */
+ if (chan->scid == L2CAP_CID_ATT) {
+- if (!conn->hcon->out) {
+- err = -EINVAL;
+- break;
+- }
+-
+ if (smp_conn_security(conn->hcon, sec.level))
+ break;
+ sk->sk_state = BT_CONFIG;
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index a03ca3ca91bf..f1ac2a754acc 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -2826,8 +2826,13 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
+ }
+
+ if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
+- /* Continue with pairing via SMP */
++ /* Continue with pairing via SMP. The hdev lock must be
++ * released as SMP may try to recquire it for crypto
++ * purposes.
++ */
++ hci_dev_unlock(hdev);
+ err = smp_user_confirm_reply(conn, mgmt_op, passkey);
++ hci_dev_lock(hdev);
+
+ if (!err)
+ err = cmd_complete(sk, hdev->id, mgmt_op,
+diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
+index ebf80f3abd83..653ce5d9e6e0 100644
+--- a/net/mac80211/debugfs_netdev.c
++++ b/net/mac80211/debugfs_netdev.c
+@@ -34,8 +34,7 @@ static ssize_t ieee80211_if_read(
+ ssize_t ret = -EINVAL;
+
+ read_lock(&dev_base_lock);
+- if (sdata->dev->reg_state == NETREG_REGISTERED)
+- ret = (*format)(sdata, buf, sizeof(buf));
++ ret = (*format)(sdata, buf, sizeof(buf));
+ read_unlock(&dev_base_lock);
+
+ if (ret >= 0)
+@@ -62,8 +61,7 @@ static ssize_t ieee80211_if_write(
+
+ ret = -ENODEV;
+ rtnl_lock();
+- if (sdata->dev->reg_state == NETREG_REGISTERED)
+- ret = (*write)(sdata, buf, count);
++ ret = (*write)(sdata, buf, count);
+ rtnl_unlock();
+
+ return ret;
+diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
+index 2796a198728f..ea7013cb7e52 100644
+--- a/net/mac80211/ibss.c
++++ b/net/mac80211/ibss.c
+@@ -1655,6 +1655,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
+ sdata->u.ibss.control_port = params->control_port;
+ sdata->u.ibss.userspace_handles_dfs = params->userspace_handles_dfs;
+ sdata->u.ibss.basic_rates = params->basic_rates;
++ sdata->u.ibss.last_scan_completed = jiffies;
+
+ /* fix basic_rates if channel does not support these rates */
+ rate_flags = ieee80211_chandef_rate_flags(&params->chandef);
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index 137a192e64bc..bedaeecdab97 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -240,6 +240,7 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
+
+ sta_dbg(sta->sdata, "Destroyed STA %pM\n", sta->sta.addr);
+
++ kfree(rcu_dereference_raw(sta->sta.rates));
+ kfree(sta);
+ }
+
+diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
+index 52ca952b802c..23247a0d2b69 100644
+--- a/net/netfilter/nf_nat_core.c
++++ b/net/netfilter/nf_nat_core.c
+@@ -517,6 +517,39 @@ static int nf_nat_proto_remove(struct nf_conn *i, void *data)
+ return i->status & IPS_NAT_MASK ? 1 : 0;
+ }
+
++static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
++{
++ struct nf_conn_nat *nat = nfct_nat(ct);
++
++ if (nf_nat_proto_remove(ct, data))
++ return 1;
++
++ if (!nat || !nat->ct)
++ return 0;
++
++ /* This netns is being destroyed, and conntrack has nat null binding.
++ * Remove it from bysource hash, as the table will be freed soon.
++ *
++ * Else, when the conntrack is destoyed, nf_nat_cleanup_conntrack()
++ * will delete entry from already-freed table.
++ */
++ if (!del_timer(&ct->timeout))
++ return 1;
++
++ spin_lock_bh(&nf_nat_lock);
++ hlist_del_rcu(&nat->bysource);
++ ct->status &= ~IPS_NAT_DONE_MASK;
++ nat->ct = NULL;
++ spin_unlock_bh(&nf_nat_lock);
++
++ add_timer(&ct->timeout);
++
++ /* don't delete conntrack. Although that would make things a lot
++ * simpler, we'd end up flushing all conntracks on nat rmmod.
++ */
++ return 0;
++}
++
+ static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
+ {
+ struct nf_nat_proto_clean clean = {
+@@ -787,7 +820,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
+ {
+ struct nf_nat_proto_clean clean = {};
+
+- nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean, 0, 0);
++ nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean, 0, 0);
+ synchronize_rcu();
+ nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
+ }
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index ca3256d6fde3..ede4b92710aa 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -1488,7 +1488,8 @@ static void retire_playback_urb(struct snd_usb_substream *subs,
+ * on two reads of a counter updated every ms.
+ */
+ if (abs(est_delay - subs->last_delay) * 1000 > runtime->rate * 2)
+- snd_printk(KERN_DEBUG "delay: estimated %d, actual %d\n",
++ dev_dbg_ratelimited(&subs->dev->dev,
++ "delay: estimated %d, actual %d\n",
+ est_delay, subs->last_delay);
+
+ if (!subs->running) {
+diff --git a/tools/usb/ffs-test.c b/tools/usb/ffs-test.c
+index fe1e66b6ef40..a87e99f37c52 100644
+--- a/tools/usb/ffs-test.c
++++ b/tools/usb/ffs-test.c
+@@ -116,8 +116,8 @@ static const struct {
+ .header = {
+ .magic = cpu_to_le32(FUNCTIONFS_DESCRIPTORS_MAGIC),
+ .length = cpu_to_le32(sizeof descriptors),
+- .fs_count = 3,
+- .hs_count = 3,
++ .fs_count = cpu_to_le32(3),
++ .hs_count = cpu_to_le32(3),
+ },
+ .fs_descs = {
+ .intf = {