public inbox for gentoo-commits@lists.gentoo.org
 help / color / mirror / Atom feed
* [gentoo-commits] proj/linux-patches:4.13 commit in: /
@ 2017-11-18 20:04 Alice Ferrazzi
  0 siblings, 0 replies; 20+ messages in thread
From: Alice Ferrazzi @ 2017-11-18 20:04 UTC (permalink / raw
  To: gentoo-commits

commit:     b52e47c0c5ad60eb4e5ca0ad9658e41efe03911a
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Sat Nov 18 20:04:21 2017 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Sat Nov 18 20:04:21 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b52e47c0

linux kernel 4.13.14

 0000_README              |    4 +
 1013_linux-4.13.14.patch | 1678 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1682 insertions(+)

diff --git a/0000_README b/0000_README
index 5edbdf0..8777a91 100644
--- a/0000_README
+++ b/0000_README
@@ -95,6 +95,10 @@ Patch:  1012_linux-4.13.13.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.13.13
 
+Patch:  1013_linux-4.13.14.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.13.14
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1013_linux-4.13.14.patch b/1013_linux-4.13.14.patch
new file mode 100644
index 0000000..c5443f7
--- /dev/null
+++ b/1013_linux-4.13.14.patch
@@ -0,0 +1,1678 @@
+diff --git a/Makefile b/Makefile
+index 1608a9b71381..4aabae365a6c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 13
+-SUBLEVEL = 13
++SUBLEVEL = 14
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
+index a07ef3d6b3ec..2b478224532e 100644
+--- a/drivers/dma/dmatest.c
++++ b/drivers/dma/dmatest.c
+@@ -696,6 +696,7 @@ static int dmatest_func(void *data)
+ 			 * free it this time?" dancing.  For now, just
+ 			 * leave it dangling.
+ 			 */
++			WARN(1, "dmatest: Kernel stack may be corrupted!!\n");
+ 			dmaengine_unmap_put(um);
+ 			result("test timed out", total_tests, src_off, dst_off,
+ 			       len, 0);
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index 80d860cb0746..7a3b201d51df 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -455,6 +455,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
+ static const struct pci_id_descr pci_dev_descr_ibridge[] = {
+ 		/* Processor Home Agent */
+ 	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0,        0, IMC0) },
++	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1,        1, IMC1) },
+ 
+ 		/* Memory controller */
+ 	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA,     0, IMC0) },
+@@ -465,7 +466,6 @@ static const struct pci_id_descr pci_dev_descr_ibridge[] = {
+ 	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3,   0, IMC0) },
+ 
+ 		/* Optional, mode 2HA */
+-	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1,        1, IMC1) },
+ 	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA,     1, IMC1) },
+ 	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS,    1, IMC1) },
+ 	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0,   1, IMC1) },
+@@ -2260,6 +2260,13 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
+ next_imc:
+ 	sbridge_dev = get_sbridge_dev(bus, dev_descr->dom, multi_bus, sbridge_dev);
+ 	if (!sbridge_dev) {
++		/* If the HA1 wasn't found, don't create EDAC second memory controller */
++		if (dev_descr->dom == IMC1 && devno != 1) {
++			edac_dbg(0, "Skip IMC1: %04x:%04x (since HA1 was absent)\n",
++				 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
++			pci_dev_put(pdev);
++			return 0;
++		}
+ 
+ 		if (dev_descr->dom == SOCK)
+ 			goto out_imc;
+diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
+index f4e8fbec6a94..b5304e264881 100644
+--- a/drivers/input/misc/ims-pcu.c
++++ b/drivers/input/misc/ims-pcu.c
+@@ -1635,13 +1635,25 @@ ims_pcu_get_cdc_union_desc(struct usb_interface *intf)
+ 		return NULL;
+ 	}
+ 
+-	while (buflen > 0) {
++	while (buflen >= sizeof(*union_desc)) {
+ 		union_desc = (struct usb_cdc_union_desc *)buf;
+ 
++		if (union_desc->bLength > buflen) {
++			dev_err(&intf->dev, "Too large descriptor\n");
++			return NULL;
++		}
++
+ 		if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE &&
+ 		    union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) {
+ 			dev_dbg(&intf->dev, "Found union header\n");
+-			return union_desc;
++
++			if (union_desc->bLength >= sizeof(*union_desc))
++				return union_desc;
++
++			dev_err(&intf->dev,
++				"Union descriptor to short (%d vs %zd\n)",
++				union_desc->bLength, sizeof(*union_desc));
++			return NULL;
+ 		}
+ 
+ 		buflen -= union_desc->bLength;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index 7f282e8f4e7f..dc7f952e341f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -78,9 +78,11 @@ struct mlx5e_tc_flow {
+ };
+ 
+ struct mlx5e_tc_flow_parse_attr {
++	struct ip_tunnel_info tun_info;
+ 	struct mlx5_flow_spec spec;
+ 	int num_mod_hdr_actions;
+ 	void *mod_hdr_actions;
++	int mirred_ifindex;
+ };
+ 
+ enum {
+@@ -322,6 +324,12 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
+ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
+ 			       struct mlx5e_tc_flow *flow);
+ 
++static int mlx5e_attach_encap(struct mlx5e_priv *priv,
++			      struct ip_tunnel_info *tun_info,
++			      struct net_device *mirred_dev,
++			      struct net_device **encap_dev,
++			      struct mlx5e_tc_flow *flow);
++
+ static struct mlx5_flow_handle *
+ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
+ 		      struct mlx5e_tc_flow_parse_attr *parse_attr,
+@@ -329,9 +337,27 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
+ {
+ 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+-	struct mlx5_flow_handle *rule;
++	struct net_device *out_dev, *encap_dev = NULL;
++	struct mlx5_flow_handle *rule = NULL;
++	struct mlx5e_rep_priv *rpriv;
++	struct mlx5e_priv *out_priv;
+ 	int err;
+ 
++	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
++		out_dev = __dev_get_by_index(dev_net(priv->netdev),
++					     attr->parse_attr->mirred_ifindex);
++		err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
++					 out_dev, &encap_dev, flow);
++		if (err) {
++			rule = ERR_PTR(err);
++			if (err != -EAGAIN)
++				goto err_attach_encap;
++		}
++		out_priv = netdev_priv(encap_dev);
++		rpriv = out_priv->ppriv;
++		attr->out_rep = rpriv->rep;
++	}
++
+ 	err = mlx5_eswitch_add_vlan_action(esw, attr);
+ 	if (err) {
+ 		rule = ERR_PTR(err);
+@@ -347,10 +373,14 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
+ 		}
+ 	}
+ 
+-	rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
+-	if (IS_ERR(rule))
+-		goto err_add_rule;
+-
++	/* we get here if (1) there's no error (rule being null) or when
++	 * (2) there's an encap action and we're on -EAGAIN (no valid neigh)
++	 */
++	if (rule != ERR_PTR(-EAGAIN)) {
++		rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
++		if (IS_ERR(rule))
++			goto err_add_rule;
++	}
+ 	return rule;
+ 
+ err_add_rule:
+@@ -361,6 +391,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
+ err_add_vlan:
+ 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
+ 		mlx5e_detach_encap(priv, flow);
++err_attach_encap:
+ 	return rule;
+ }
+ 
+@@ -389,6 +420,8 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
+ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
+ 			      struct mlx5e_encap_entry *e)
+ {
++	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
++	struct mlx5_esw_flow_attr *esw_attr;
+ 	struct mlx5e_tc_flow *flow;
+ 	int err;
+ 
+@@ -404,10 +437,9 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
+ 	mlx5e_rep_queue_neigh_stats_work(priv);
+ 
+ 	list_for_each_entry(flow, &e->flows, encap) {
+-		flow->esw_attr->encap_id = e->encap_id;
+-		flow->rule = mlx5e_tc_add_fdb_flow(priv,
+-						   flow->esw_attr->parse_attr,
+-						   flow);
++		esw_attr = flow->esw_attr;
++		esw_attr->encap_id = e->encap_id;
++		flow->rule = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
+ 		if (IS_ERR(flow->rule)) {
+ 			err = PTR_ERR(flow->rule);
+ 			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
+@@ -421,15 +453,13 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
+ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
+ 			      struct mlx5e_encap_entry *e)
+ {
++	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ 	struct mlx5e_tc_flow *flow;
+-	struct mlx5_fc *counter;
+ 
+ 	list_for_each_entry(flow, &e->flows, encap) {
+ 		if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
+ 			flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
+-			counter = mlx5_flow_rule_counter(flow->rule);
+-			mlx5_del_flow_rules(flow->rule);
+-			mlx5_fc_destroy(priv->mdev, counter);
++			mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
+ 		}
+ 	}
+ 
+@@ -1871,7 +1901,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
+ 
+ 		if (is_tcf_mirred_egress_redirect(a)) {
+ 			int ifindex = tcf_mirred_ifindex(a);
+-			struct net_device *out_dev, *encap_dev = NULL;
++			struct net_device *out_dev;
+ 			struct mlx5e_priv *out_priv;
+ 
+ 			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
+@@ -1884,17 +1914,13 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
+ 				rpriv = out_priv->ppriv;
+ 				attr->out_rep = rpriv->rep;
+ 			} else if (encap) {
+-				err = mlx5e_attach_encap(priv, info,
+-							 out_dev, &encap_dev, flow);
+-				if (err && err != -EAGAIN)
+-					return err;
++				parse_attr->mirred_ifindex = ifindex;
++				parse_attr->tun_info = *info;
++				attr->parse_attr = parse_attr;
+ 				attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
+ 					MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ 					MLX5_FLOW_CONTEXT_ACTION_COUNT;
+-				out_priv = netdev_priv(encap_dev);
+-				rpriv = out_priv->ppriv;
+-				attr->out_rep = rpriv->rep;
+-				attr->parse_attr = parse_attr;
++				/* attr->out_rep is resolved when we handle encap */
+ 			} else {
+ 				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
+ 				       priv->netdev->name, out_dev->name);
+@@ -1972,7 +1998,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
+ 	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
+ 		err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
+ 		if (err < 0)
+-			goto err_handle_encap_flow;
++			goto err_free;
+ 		flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
+ 	} else {
+ 		err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
+@@ -1983,10 +2009,13 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
+ 
+ 	if (IS_ERR(flow->rule)) {
+ 		err = PTR_ERR(flow->rule);
+-		goto err_free;
++		if (err != -EAGAIN)
++			goto err_free;
+ 	}
+ 
+-	flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
++	if (err != -EAGAIN)
++		flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
++
+ 	err = rhashtable_insert_fast(&tc->ht, &flow->node,
+ 				     tc->ht_params);
+ 	if (err)
+@@ -2000,16 +2029,6 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
+ err_del_rule:
+ 	mlx5e_tc_del_flow(priv, flow);
+ 
+-err_handle_encap_flow:
+-	if (err == -EAGAIN) {
+-		err = rhashtable_insert_fast(&tc->ht, &flow->node,
+-					     tc->ht_params);
+-		if (err)
+-			mlx5e_tc_del_flow(priv, flow);
+-		else
+-			return 0;
+-	}
+-
+ err_free:
+ 	kvfree(parse_attr);
+ 	kfree(flow);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
+index 4b6b03d6297f..8ccb68a49c65 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
+@@ -356,10 +356,11 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
+ void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
+ {
+ 	struct mlx5_core_health *health = &dev->priv.health;
++	unsigned long flags;
+ 
+-	spin_lock(&health->wq_lock);
++	spin_lock_irqsave(&health->wq_lock, flags);
+ 	set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
+-	spin_unlock(&health->wq_lock);
++	spin_unlock_irqrestore(&health->wq_lock, flags);
+ 	cancel_delayed_work_sync(&dev->priv.health.recover_work);
+ }
+ 
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index 2bbda71818ad..e0a7176e1d39 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -113,13 +113,7 @@ static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
+ 
+ static bool eq_tun_id_and_vni(u8 *tun_id, u8 *vni)
+ {
+-#ifdef __BIG_ENDIAN
+-	return (vni[0] == tun_id[2]) &&
+-	       (vni[1] == tun_id[1]) &&
+-	       (vni[2] == tun_id[0]);
+-#else
+ 	return !memcmp(vni, &tun_id[5], 3);
+-#endif
+ }
+ 
+ static sa_family_t geneve_get_sk_family(struct geneve_sock *gs)
+diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
+index 22f133ea8d7b..2bdd71c4fcd1 100644
+--- a/drivers/net/ipvlan/ipvtap.c
++++ b/drivers/net/ipvlan/ipvtap.c
+@@ -197,8 +197,8 @@ static int ipvtap_init(void)
+ {
+ 	int err;
+ 
+-	err = tap_create_cdev(&ipvtap_cdev, &ipvtap_major, "ipvtap");
+-
++	err = tap_create_cdev(&ipvtap_cdev, &ipvtap_major, "ipvtap",
++			      THIS_MODULE);
+ 	if (err)
+ 		goto out1;
+ 
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 98e4deaa3a6a..5ab1b8849c30 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -742,6 +742,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
+ 	sg_init_table(sg, ret);
+ 	ret = skb_to_sgvec(skb, sg, 0, skb->len);
+ 	if (unlikely(ret < 0)) {
++		aead_request_free(req);
+ 		macsec_txsa_put(tx_sa);
+ 		kfree_skb(skb);
+ 		return ERR_PTR(ret);
+@@ -954,6 +955,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
+ 	sg_init_table(sg, ret);
+ 	ret = skb_to_sgvec(skb, sg, 0, skb->len);
+ 	if (unlikely(ret < 0)) {
++		aead_request_free(req);
+ 		kfree_skb(skb);
+ 		return ERR_PTR(ret);
+ 	}
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 91e7b19bbf86..a84691bd75a7 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -204,8 +204,8 @@ static int macvtap_init(void)
+ {
+ 	int err;
+ 
+-	err = tap_create_cdev(&macvtap_cdev, &macvtap_major, "macvtap");
+-
++	err = tap_create_cdev(&macvtap_cdev, &macvtap_major, "macvtap",
++			      THIS_MODULE);
+ 	if (err)
+ 		goto out1;
+ 
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index a404552555d4..6722b8354618 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -1339,7 +1339,17 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
+ 
+ static int ppp_dev_init(struct net_device *dev)
+ {
++	struct ppp *ppp;
++
+ 	netdev_lockdep_set_classes(dev);
++
++	ppp = netdev_priv(dev);
++	/* Let the netdevice take a reference on the ppp file. This ensures
++	 * that ppp_destroy_interface() won't run before the device gets
++	 * unregistered.
++	 */
++	atomic_inc(&ppp->file.refcnt);
++
+ 	return 0;
+ }
+ 
+@@ -1362,6 +1372,15 @@ static void ppp_dev_uninit(struct net_device *dev)
+ 	wake_up_interruptible(&ppp->file.rwait);
+ }
+ 
++static void ppp_dev_priv_destructor(struct net_device *dev)
++{
++	struct ppp *ppp;
++
++	ppp = netdev_priv(dev);
++	if (atomic_dec_and_test(&ppp->file.refcnt))
++		ppp_destroy_interface(ppp);
++}
++
+ static const struct net_device_ops ppp_netdev_ops = {
+ 	.ndo_init	 = ppp_dev_init,
+ 	.ndo_uninit      = ppp_dev_uninit,
+@@ -1387,6 +1406,7 @@ static void ppp_setup(struct net_device *dev)
+ 	dev->tx_queue_len = 3;
+ 	dev->type = ARPHRD_PPP;
+ 	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
++	dev->priv_destructor = ppp_dev_priv_destructor;
+ 	netif_keep_dst(dev);
+ }
+ 
+diff --git a/drivers/net/tap.c b/drivers/net/tap.c
+index 3570c7576993..38edfcfd3c3a 100644
+--- a/drivers/net/tap.c
++++ b/drivers/net/tap.c
+@@ -517,6 +517,10 @@ static int tap_open(struct inode *inode, struct file *file)
+ 					     &tap_proto, 0);
+ 	if (!q)
+ 		goto err;
++	if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL)) {
++		sk_free(&q->sk);
++		goto err;
++	}
+ 
+ 	RCU_INIT_POINTER(q->sock.wq, &q->wq);
+ 	init_waitqueue_head(&q->wq.wait);
+@@ -540,22 +544,18 @@ static int tap_open(struct inode *inode, struct file *file)
+ 	if ((tap->dev->features & NETIF_F_HIGHDMA) && (tap->dev->features & NETIF_F_SG))
+ 		sock_set_flag(&q->sk, SOCK_ZEROCOPY);
+ 
+-	err = -ENOMEM;
+-	if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL))
+-		goto err_array;
+-
+ 	err = tap_set_queue(tap, file, q);
+-	if (err)
+-		goto err_queue;
++	if (err) {
++		/* tap_sock_destruct() will take care of freeing skb_array */
++		goto err_put;
++	}
+ 
+ 	dev_put(tap->dev);
+ 
+ 	rtnl_unlock();
+ 	return err;
+ 
+-err_queue:
+-	skb_array_cleanup(&q->skb_array);
+-err_array:
++err_put:
+ 	sock_put(&q->sk);
+ err:
+ 	if (tap)
+@@ -1035,6 +1035,8 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
+ 	case TUNSETSNDBUF:
+ 		if (get_user(s, sp))
+ 			return -EFAULT;
++		if (s <= 0)
++			return -EINVAL;
+ 
+ 		q->sk.sk_sndbuf = s;
+ 		return 0;
+@@ -1252,8 +1254,8 @@ static int tap_list_add(dev_t major, const char *device_name)
+ 	return 0;
+ }
+ 
+-int tap_create_cdev(struct cdev *tap_cdev,
+-		    dev_t *tap_major, const char *device_name)
++int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major,
++		    const char *device_name, struct module *module)
+ {
+ 	int err;
+ 
+@@ -1262,6 +1264,7 @@ int tap_create_cdev(struct cdev *tap_cdev,
+ 		goto out1;
+ 
+ 	cdev_init(tap_cdev, &tap_fops);
++	tap_cdev->owner = module;
+ 	err = cdev_add(tap_cdev, *tap_major, TAP_NUM_DEVS);
+ 	if (err)
+ 		goto out2;
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index cb1f7747adad..d1cb1ff83251 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1813,6 +1813,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
+ 
+ 		if (!dev)
+ 			return -ENOMEM;
++		err = dev_get_valid_name(net, dev, name);
++		if (err < 0)
++			goto err_free_dev;
+ 
+ 		dev_net_set(dev, net);
+ 		dev->rtnl_link_ops = &tun_link_ops;
+@@ -2216,6 +2219,10 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
+ 			ret = -EFAULT;
+ 			break;
+ 		}
++		if (sndbuf <= 0) {
++			ret = -EINVAL;
++			break;
++		}
+ 
+ 		tun->sndbuf = sndbuf;
+ 		tun_set_sndbuf(tun);
+diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
+index eee82ca55b7b..cf4f5fff3e50 100644
+--- a/drivers/usb/misc/usbtest.c
++++ b/drivers/usb/misc/usbtest.c
+@@ -202,12 +202,13 @@ get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
+ 			return tmp;
+ 	}
+ 
+-	if (in) {
++	if (in)
+ 		dev->in_pipe = usb_rcvbulkpipe(udev,
+ 			in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
++	if (out)
+ 		dev->out_pipe = usb_sndbulkpipe(udev,
+ 			out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+-	}
++
+ 	if (iso_in) {
+ 		dev->iso_in = &iso_in->desc;
+ 		dev->in_iso_pipe = usb_rcvisocpipe(udev,
+diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h
+index 4837157da0dc..9ae41cdd0d4c 100644
+--- a/include/linux/if_tap.h
++++ b/include/linux/if_tap.h
+@@ -73,8 +73,8 @@ void tap_del_queues(struct tap_dev *tap);
+ int tap_get_minor(dev_t major, struct tap_dev *tap);
+ void tap_free_minor(dev_t major, struct tap_dev *tap);
+ int tap_queue_resize(struct tap_dev *tap);
+-int tap_create_cdev(struct cdev *tap_cdev,
+-		    dev_t *tap_major, const char *device_name);
++int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major,
++		    const char *device_name, struct module *module);
+ void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev);
+ 
+ #endif /*_LINUX_IF_TAP_H_*/
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index c99ba7914c0a..a6d3c8b6cb93 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -3702,6 +3702,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
+ 				    unsigned char name_assign_type,
+ 				    void (*setup)(struct net_device *),
+ 				    unsigned int txqs, unsigned int rxqs);
++int dev_get_valid_name(struct net *net, struct net_device *dev,
++		       const char *name);
++
+ #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
+ 	alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
+ 
+diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
+index aa95053dfc78..db8162dd8c0b 100644
+--- a/include/net/inet_sock.h
++++ b/include/net/inet_sock.h
+@@ -96,7 +96,7 @@ struct inet_request_sock {
+ 	kmemcheck_bitfield_end(flags);
+ 	u32                     ir_mark;
+ 	union {
+-		struct ip_options_rcu	*opt;
++		struct ip_options_rcu __rcu	*ireq_opt;
+ #if IS_ENABLED(CONFIG_IPV6)
+ 		struct {
+ 			struct ipv6_txoptions	*ipv6_opt;
+@@ -132,6 +132,12 @@ static inline int inet_request_bound_dev_if(const struct sock *sk,
+ 	return sk->sk_bound_dev_if;
+ }
+ 
++static inline struct ip_options_rcu *ireq_opt_deref(const struct inet_request_sock *ireq)
++{
++	return rcu_dereference_check(ireq->ireq_opt,
++				     refcount_read(&ireq->req.rsk_refcnt) > 0);
++}
++
+ struct inet_cork {
+ 	unsigned int		flags;
+ 	__be32			addr;
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 48978125947b..150c2c66897a 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1750,12 +1750,12 @@ static inline void tcp_highest_sack_reset(struct sock *sk)
+ 	tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
+ }
+ 
+-/* Called when old skb is about to be deleted (to be combined with new skb) */
+-static inline void tcp_highest_sack_combine(struct sock *sk,
++/* Called when old skb is about to be deleted and replaced by new skb */
++static inline void tcp_highest_sack_replace(struct sock *sk,
+ 					    struct sk_buff *old,
+ 					    struct sk_buff *new)
+ {
+-	if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
++	if (old == tcp_highest_sack(sk))
+ 		tcp_sk(sk)->highest_sack = new;
+ }
+ 
+diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
+index 3bc890716c89..de2152730809 100644
+--- a/net/bridge/br_netlink.c
++++ b/net/bridge/br_netlink.c
+@@ -573,7 +573,7 @@ static int br_process_vlan_info(struct net_bridge *br,
+ 		}
+ 		*vinfo_last = NULL;
+ 
+-		return 0;
++		return err;
+ 	}
+ 
+ 	return br_vlan_info(br, p, cmd, vinfo_curr);
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 6fa30a4c60ef..4f9ec923d21b 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1146,9 +1146,8 @@ static int dev_alloc_name_ns(struct net *net,
+ 	return ret;
+ }
+ 
+-static int dev_get_valid_name(struct net *net,
+-			      struct net_device *dev,
+-			      const char *name)
++int dev_get_valid_name(struct net *net, struct net_device *dev,
++		       const char *name)
+ {
+ 	BUG_ON(!net);
+ 
+@@ -1164,6 +1163,7 @@ static int dev_get_valid_name(struct net *net,
+ 
+ 	return 0;
+ }
++EXPORT_SYMBOL(dev_get_valid_name);
+ 
+ /**
+  *	dev_change_name - change name of a device
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 0967da925022..e48424ddbc6b 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1674,6 +1674,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
+ 		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
+ 
+ 		sock_reset_flag(newsk, SOCK_DONE);
++		cgroup_sk_alloc(&newsk->sk_cgrp_data);
+ 
+ 		rcu_read_lock();
+ 		filter = rcu_dereference(sk->sk_filter);
+@@ -1706,8 +1707,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
+ 		atomic64_set(&newsk->sk_cookie, 0);
+ 
+ 		mem_cgroup_sk_alloc(newsk);
+-		cgroup_sk_alloc(&newsk->sk_cgrp_data);
+-
+ 		/*
+ 		 * Before updating sk_refcnt, we must commit prior changes to memory
+ 		 * (Documentation/RCU/rculist_nulls.txt for details)
+diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
+index eed1ebf7f29d..b1e0dbea1e8c 100644
+--- a/net/core/sock_reuseport.c
++++ b/net/core/sock_reuseport.c
+@@ -36,9 +36,14 @@ int reuseport_alloc(struct sock *sk)
+ 	 * soft irq of receive path or setsockopt from process context
+ 	 */
+ 	spin_lock_bh(&reuseport_lock);
+-	WARN_ONCE(rcu_dereference_protected(sk->sk_reuseport_cb,
+-					    lockdep_is_held(&reuseport_lock)),
+-		  "multiple allocations for the same socket");
++
++	/* Allocation attempts can occur concurrently via the setsockopt path
++	 * and the bind/hash path.  Nothing to do when we lose the race.
++	 */
++	if (rcu_dereference_protected(sk->sk_reuseport_cb,
++				      lockdep_is_held(&reuseport_lock)))
++		goto out;
++
+ 	reuse = __reuseport_alloc(INIT_SOCKS);
+ 	if (!reuse) {
+ 		spin_unlock_bh(&reuseport_lock);
+@@ -49,6 +54,7 @@ int reuseport_alloc(struct sock *sk)
+ 	reuse->num_socks = 1;
+ 	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);
+ 
++out:
+ 	spin_unlock_bh(&reuseport_lock);
+ 
+ 	return 0;
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index 1b202f16531f..bc70d96d762a 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -414,8 +414,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
+ 	sk_daddr_set(newsk, ireq->ir_rmt_addr);
+ 	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
+ 	newinet->inet_saddr	= ireq->ir_loc_addr;
+-	newinet->inet_opt	= ireq->opt;
+-	ireq->opt	   = NULL;
++	RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt));
+ 	newinet->mc_index  = inet_iif(skb);
+ 	newinet->mc_ttl	   = ip_hdr(skb)->ttl;
+ 	newinet->inet_id   = jiffies;
+@@ -430,7 +429,10 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
+ 	if (__inet_inherit_port(sk, newsk) < 0)
+ 		goto put_and_exit;
+ 	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
+-
++	if (*own_req)
++		ireq->ireq_opt = NULL;
++	else
++		newinet->inet_opt = NULL;
+ 	return newsk;
+ 
+ exit_overflow:
+@@ -441,6 +443,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
+ 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
+ 	return NULL;
+ put_and_exit:
++	newinet->inet_opt = NULL;
+ 	inet_csk_prepare_forced_close(newsk);
+ 	dccp_done(newsk);
+ 	goto exit;
+@@ -492,7 +495,7 @@ static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req
+ 							      ireq->ir_rmt_addr);
+ 		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
+ 					    ireq->ir_rmt_addr,
+-					    ireq->opt);
++					    ireq_opt_deref(ireq));
+ 		err = net_xmit_eval(err);
+ 	}
+ 
+@@ -548,7 +551,7 @@ static void dccp_v4_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
+ static void dccp_v4_reqsk_destructor(struct request_sock *req)
+ {
+ 	dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
+-	kfree(inet_rsk(req)->opt);
++	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
+ }
+ 
+ void dccp_syn_ack_timeout(const struct request_sock *req)
+diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
+index 20bc9c56fca0..278b48d70bd9 100644
+--- a/net/dsa/dsa2.c
++++ b/net/dsa/dsa2.c
+@@ -496,14 +496,15 @@ static int dsa_cpu_parse(struct dsa_port *port, u32 index,
+ 		if (!ethernet)
+ 			return -EINVAL;
+ 		ethernet_dev = of_find_net_device_by_node(ethernet);
++		if (!ethernet_dev)
++			return -EPROBE_DEFER;
+ 	} else {
+ 		ethernet_dev = dsa_dev_to_net_device(ds->cd->netdev[index]);
++		if (!ethernet_dev)
++			return -EPROBE_DEFER;
+ 		dev_put(ethernet_dev);
+ 	}
+ 
+-	if (!ethernet_dev)
+-		return -EPROBE_DEFER;
+-
+ 	if (!dst->cpu_dp) {
+ 		dst->cpu_dp = port;
+ 		dst->cpu_dp->netdev = ethernet_dev;
+diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
+index 2ae8f54cb321..82178cc69c96 100644
+--- a/net/ipv4/cipso_ipv4.c
++++ b/net/ipv4/cipso_ipv4.c
+@@ -1951,7 +1951,7 @@ int cipso_v4_req_setattr(struct request_sock *req,
+ 	buf = NULL;
+ 
+ 	req_inet = inet_rsk(req);
+-	opt = xchg(&req_inet->opt, opt);
++	opt = xchg((__force struct ip_options_rcu **)&req_inet->ireq_opt, opt);
+ 	if (opt)
+ 		kfree_rcu(opt, rcu);
+ 
+@@ -1973,11 +1973,13 @@ int cipso_v4_req_setattr(struct request_sock *req,
+  * values on failure.
+  *
+  */
+-static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)
++static int cipso_v4_delopt(struct ip_options_rcu __rcu **opt_ptr)
+ {
++	struct ip_options_rcu *opt = rcu_dereference_protected(*opt_ptr, 1);
+ 	int hdr_delta = 0;
+-	struct ip_options_rcu *opt = *opt_ptr;
+ 
++	if (!opt || opt->opt.cipso == 0)
++		return 0;
+ 	if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) {
+ 		u8 cipso_len;
+ 		u8 cipso_off;
+@@ -2039,14 +2041,10 @@ static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)
+  */
+ void cipso_v4_sock_delattr(struct sock *sk)
+ {
+-	int hdr_delta;
+-	struct ip_options_rcu *opt;
+ 	struct inet_sock *sk_inet;
++	int hdr_delta;
+ 
+ 	sk_inet = inet_sk(sk);
+-	opt = rcu_dereference_protected(sk_inet->inet_opt, 1);
+-	if (!opt || opt->opt.cipso == 0)
+-		return;
+ 
+ 	hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);
+ 	if (sk_inet->is_icsk && hdr_delta > 0) {
+@@ -2066,15 +2064,7 @@ void cipso_v4_sock_delattr(struct sock *sk)
+  */
+ void cipso_v4_req_delattr(struct request_sock *req)
+ {
+-	struct ip_options_rcu *opt;
+-	struct inet_request_sock *req_inet;
+-
+-	req_inet = inet_rsk(req);
+-	opt = req_inet->opt;
+-	if (!opt || opt->opt.cipso == 0)
+-		return;
+-
+-	cipso_v4_delopt(&req_inet->opt);
++	cipso_v4_delopt(&inet_rsk(req)->ireq_opt);
+ }
+ 
+ /**
+diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
+index d5cac99170b1..8c72034df28e 100644
+--- a/net/ipv4/gre_offload.c
++++ b/net/ipv4/gre_offload.c
+@@ -98,7 +98,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
+ 		greh = (struct gre_base_hdr *)skb_transport_header(skb);
+ 		pcsum = (__sum16 *)(greh + 1);
+ 
+-		if (gso_partial) {
++		if (gso_partial && skb_is_gso(skb)) {
+ 			unsigned int partial_adj;
+ 
+ 			/* Adjust checksum to account for the fact that
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 4089c013cb03..4438990cf65e 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -537,9 +537,11 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
+ {
+ 	const struct inet_request_sock *ireq = inet_rsk(req);
+ 	struct net *net = read_pnet(&ireq->ireq_net);
+-	struct ip_options_rcu *opt = ireq->opt;
++	struct ip_options_rcu *opt;
+ 	struct rtable *rt;
+ 
++	opt = ireq_opt_deref(ireq);
++
+ 	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
+ 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
+ 			   sk->sk_protocol, inet_sk_flowi_flags(sk),
+@@ -573,10 +575,9 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
+ 	struct flowi4 *fl4;
+ 	struct rtable *rt;
+ 
++	opt = rcu_dereference(ireq->ireq_opt);
+ 	fl4 = &newinet->cork.fl.u.ip4;
+ 
+-	rcu_read_lock();
+-	opt = rcu_dereference(newinet->inet_opt);
+ 	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
+ 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
+ 			   sk->sk_protocol, inet_sk_flowi_flags(sk),
+@@ -589,13 +590,11 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
+ 		goto no_route;
+ 	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
+ 		goto route_err;
+-	rcu_read_unlock();
+ 	return &rt->dst;
+ 
+ route_err:
+ 	ip_rt_put(rt);
+ no_route:
+-	rcu_read_unlock();
+ 	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
+ 	return NULL;
+ }
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index 2e3389d614d1..3eeecee0b21f 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -449,10 +449,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
+ 			return reuseport_add_sock(sk, sk2);
+ 	}
+ 
+-	/* Initial allocation may have already happened via setsockopt */
+-	if (!rcu_access_pointer(sk->sk_reuseport_cb))
+-		return reuseport_alloc(sk);
+-	return 0;
++	return reuseport_alloc(sk);
+ }
+ 
+ int __inet_hash(struct sock *sk, struct sock *osk)
+diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
+index fb1ad22b5e29..cdd627355ed1 100644
+--- a/net/ipv4/ipip.c
++++ b/net/ipv4/ipip.c
+@@ -128,43 +128,68 @@ static struct rtnl_link_ops ipip_link_ops __read_mostly;
+ 
+ static int ipip_err(struct sk_buff *skb, u32 info)
+ {
+-
+-/* All the routers (except for Linux) return only
+-   8 bytes of packet payload. It means, that precise relaying of
+-   ICMP in the real Internet is absolutely infeasible.
+- */
++	/* All the routers (except for Linux) return only
++	 * 8 bytes of packet payload. It means, that precise relaying of
++	 * ICMP in the real Internet is absolutely infeasible.
++	 */
+ 	struct net *net = dev_net(skb->dev);
+ 	struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
+ 	const struct iphdr *iph = (const struct iphdr *)skb->data;
+-	struct ip_tunnel *t;
+-	int err;
+ 	const int type = icmp_hdr(skb)->type;
+ 	const int code = icmp_hdr(skb)->code;
++	struct ip_tunnel *t;
++	int err = 0;
++
++	switch (type) {
++	case ICMP_DEST_UNREACH:
++		switch (code) {
++		case ICMP_SR_FAILED:
++			/* Impossible event. */
++			goto out;
++		default:
++			/* All others are translated to HOST_UNREACH.
++			 * rfc2003 contains "deep thoughts" about NET_UNREACH,
++			 * I believe they are just ether pollution. --ANK
++			 */
++			break;
++		}
++		break;
++
++	case ICMP_TIME_EXCEEDED:
++		if (code != ICMP_EXC_TTL)
++			goto out;
++		break;
++
++	case ICMP_REDIRECT:
++		break;
++
++	default:
++		goto out;
++	}
+ 
+-	err = -ENOENT;
+ 	t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
+ 			     iph->daddr, iph->saddr, 0);
+-	if (!t)
++	if (!t) {
++		err = -ENOENT;
+ 		goto out;
++	}
+ 
+ 	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
+-		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
+-				 t->parms.link, 0, iph->protocol, 0);
+-		err = 0;
++		ipv4_update_pmtu(skb, net, info, t->parms.link, 0,
++				 iph->protocol, 0);
+ 		goto out;
+ 	}
+ 
+ 	if (type == ICMP_REDIRECT) {
+-		ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
+-			      iph->protocol, 0);
+-		err = 0;
++		ipv4_redirect(skb, net, t->parms.link, 0, iph->protocol, 0);
+ 		goto out;
+ 	}
+ 
+-	if (t->parms.iph.daddr == 0)
++	if (t->parms.iph.daddr == 0) {
++		err = -ENOENT;
+ 		goto out;
++	}
+ 
+-	err = 0;
+ 	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
+ 		goto out;
+ 
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index c5aa25be7108..72afa4cfb022 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2507,7 +2507,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
+ 	struct rtable *ort = (struct rtable *) dst_orig;
+ 	struct rtable *rt;
+ 
+-	rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
++	rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0);
+ 	if (rt) {
+ 		struct dst_entry *new = &rt->dst;
+ 
+diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
+index 03ad8778c395..0f914fda5bf3 100644
+--- a/net/ipv4/syncookies.c
++++ b/net/ipv4/syncookies.c
+@@ -355,7 +355,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
+ 	/* We throwed the options of the initial SYN away, so we hope
+ 	 * the ACK carries the same options again (see RFC1122 4.2.3.8)
+ 	 */
+-	ireq->opt = tcp_v4_save_options(skb);
++	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(skb));
+ 
+ 	if (security_inet_conn_request(sk, skb, req)) {
+ 		reqsk_free(req);
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index bab7f0493098..e92e5dbcb3d6 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -6235,7 +6235,7 @@ struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
+ 		struct inet_request_sock *ireq = inet_rsk(req);
+ 
+ 		kmemcheck_annotate_bitfield(ireq, flags);
+-		ireq->opt = NULL;
++		ireq->ireq_opt = NULL;
+ #if IS_ENABLED(CONFIG_IPV6)
+ 		ireq->pktopts = NULL;
+ #endif
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index b1441bc8192f..78835f681538 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -878,7 +878,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
+ 
+ 		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
+ 					    ireq->ir_rmt_addr,
+-					    ireq->opt);
++					    ireq_opt_deref(ireq));
+ 		err = net_xmit_eval(err);
+ 	}
+ 
+@@ -890,7 +890,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
+  */
+ static void tcp_v4_reqsk_destructor(struct request_sock *req)
+ {
+-	kfree(inet_rsk(req)->opt);
++	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
+ }
+ 
+ #ifdef CONFIG_TCP_MD5SIG
+@@ -1269,7 +1269,7 @@ static void tcp_v4_init_req(struct request_sock *req,
+ 
+ 	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
+ 	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
+-	ireq->opt = tcp_v4_save_options(skb);
++	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(skb));
+ }
+ 
+ static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
+@@ -1356,10 +1356,9 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
+ 	sk_daddr_set(newsk, ireq->ir_rmt_addr);
+ 	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
+ 	newsk->sk_bound_dev_if = ireq->ir_iif;
+-	newinet->inet_saddr	      = ireq->ir_loc_addr;
+-	inet_opt	      = ireq->opt;
+-	rcu_assign_pointer(newinet->inet_opt, inet_opt);
+-	ireq->opt	      = NULL;
++	newinet->inet_saddr   = ireq->ir_loc_addr;
++	inet_opt	      = rcu_dereference(ireq->ireq_opt);
++	RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
+ 	newinet->mc_index     = inet_iif(skb);
+ 	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
+ 	newinet->rcv_tos      = ip_hdr(skb)->tos;
+@@ -1404,9 +1403,12 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
+ 	if (__inet_inherit_port(sk, newsk) < 0)
+ 		goto put_and_exit;
+ 	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
+-	if (*own_req)
++	if (likely(*own_req)) {
+ 		tcp_move_syn(newtp, req);
+-
++		ireq->ireq_opt = NULL;
++	} else {
++		newinet->inet_opt = NULL;
++	}
+ 	return newsk;
+ 
+ exit_overflow:
+@@ -1417,6 +1419,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
+ 	tcp_listendrop(sk);
+ 	return NULL;
+ put_and_exit:
++	newinet->inet_opt = NULL;
+ 	inet_csk_prepare_forced_close(newsk);
+ 	tcp_done(newsk);
+ 	goto exit;
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 40f7c8ee9ba6..58587b0e2b5d 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2094,6 +2094,7 @@ static int tcp_mtu_probe(struct sock *sk)
+ 	nskb->ip_summed = skb->ip_summed;
+ 
+ 	tcp_insert_write_queue_before(nskb, skb, sk);
++	tcp_highest_sack_replace(sk, skb, nskb);
+ 
+ 	len = 0;
+ 	tcp_for_write_queue_from_safe(skb, next, sk) {
+@@ -2271,6 +2272,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
+ 
+ 	sent_pkts = 0;
+ 
++	tcp_mstamp_refresh(tp);
+ 	if (!push_one) {
+ 		/* Do MTU probing. */
+ 		result = tcp_mtu_probe(sk);
+@@ -2282,7 +2284,6 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
+ 	}
+ 
+ 	max_segs = tcp_tso_segs(sk, mss_now);
+-	tcp_mstamp_refresh(tp);
+ 	while ((skb = tcp_send_head(sk))) {
+ 		unsigned int limit;
+ 
+@@ -2694,7 +2695,7 @@ static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
+ 		else if (!skb_shift(skb, next_skb, next_skb_size))
+ 			return false;
+ 	}
+-	tcp_highest_sack_combine(sk, next_skb, skb);
++	tcp_highest_sack_replace(sk, next_skb, skb);
+ 
+ 	tcp_unlink_write_queue(next_skb, sk);
+ 
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 84861d71face..f9e1bcfb6e2d 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -231,10 +231,7 @@ static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
+ 		}
+ 	}
+ 
+-	/* Initial allocation may have already happened via setsockopt */
+-	if (!rcu_access_pointer(sk->sk_reuseport_cb))
+-		return reuseport_alloc(sk);
+-	return 0;
++	return reuseport_alloc(sk);
+ }
+ 
+ /**
+diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
+index 0932c85b42af..6401574cd638 100644
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -122,7 +122,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
+ 		 * will be using a length value equal to only one MSS sized
+ 		 * segment instead of the entire frame.
+ 		 */
+-		if (gso_partial) {
++		if (gso_partial && skb_is_gso(skb)) {
+ 			uh->len = htons(skb_shinfo(skb)->gso_size +
+ 					SKB_GSO_CB(skb)->data_offset +
+ 					skb->head - (unsigned char *)uh);
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index ba757c28a301..bdc93e51427d 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -3367,6 +3367,7 @@ static void addrconf_permanent_addr(struct net_device *dev)
+ 		if ((ifp->flags & IFA_F_PERMANENT) &&
+ 		    fixup_permanent_addr(idev, ifp) < 0) {
+ 			write_unlock_bh(&idev->lock);
++			in6_ifa_hold(ifp);
+ 			ipv6_del_addr(ifp);
+ 			write_lock_bh(&idev->lock);
+ 
+diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
+index 8081bafe441b..15535ee327c5 100644
+--- a/net/ipv6/ip6_flowlabel.c
++++ b/net/ipv6/ip6_flowlabel.c
+@@ -315,6 +315,7 @@ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
+ 	}
+ 	opt_space->dst1opt = fopt->dst1opt;
+ 	opt_space->opt_flen = fopt->opt_flen;
++	opt_space->tot_len = fopt->tot_len;
+ 	return opt_space;
+ }
+ EXPORT_SYMBOL_GPL(fl6_merge_options);
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 1602b491b281..59c121b932ac 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -408,13 +408,16 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ 	case ICMPV6_DEST_UNREACH:
+ 		net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
+ 				    t->parms.name);
+-		break;
++		if (code != ICMPV6_PORT_UNREACH)
++			break;
++		return;
+ 	case ICMPV6_TIME_EXCEED:
+ 		if (code == ICMPV6_EXC_HOPLIMIT) {
+ 			net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
+ 					    t->parms.name);
++			break;
+ 		}
+-		break;
++		return;
+ 	case ICMPV6_PARAMPROB:
+ 		teli = 0;
+ 		if (code == ICMPV6_HDR_FIELD)
+@@ -430,7 +433,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ 			net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
+ 					    t->parms.name);
+ 		}
+-		break;
++		return;
+ 	case ICMPV6_PKT_TOOBIG:
+ 		mtu = be32_to_cpu(info) - offset - t->tun_hlen;
+ 		if (t->dev->type == ARPHRD_ETHER)
+@@ -438,7 +441,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ 		if (mtu < IPV6_MIN_MTU)
+ 			mtu = IPV6_MIN_MTU;
+ 		t->dev->mtu = mtu;
+-		break;
++		return;
+ 	}
+ 
+ 	if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
+@@ -500,8 +503,8 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
+ 			       __u32 *pmtu, __be16 proto)
+ {
+ 	struct ip6_tnl *tunnel = netdev_priv(dev);
+-	__be16 protocol = (dev->type == ARPHRD_ETHER) ?
+-			  htons(ETH_P_TEB) : proto;
++	struct dst_entry *dst = skb_dst(skb);
++	__be16 protocol;
+ 
+ 	if (dev->type == ARPHRD_ETHER)
+ 		IPCB(skb)->flags = 0;
+@@ -515,9 +518,14 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
+ 		tunnel->o_seqno++;
+ 
+ 	/* Push GRE header. */
++	protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;
+ 	gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
+ 			 protocol, tunnel->parms.o_key, htonl(tunnel->o_seqno));
+ 
++	/* TooBig packet may have updated dst->dev's mtu */
++	if (dst && dst_mtu(dst) > dst->dev->mtu)
++		dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu);
++
+ 	return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
+ 			    NEXTHDR_GRE);
+ }
+diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
+index cdb3728faca7..4a87f9428ca5 100644
+--- a/net/ipv6/ip6_offload.c
++++ b/net/ipv6/ip6_offload.c
+@@ -105,7 +105,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
+ 
+ 	for (skb = segs; skb; skb = skb->next) {
+ 		ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
+-		if (gso_partial)
++		if (gso_partial && skb_is_gso(skb))
+ 			payload_len = skb_shinfo(skb)->gso_size +
+ 				      SKB_GSO_CB(skb)->data_offset +
+ 				      skb->head - (unsigned char *)(ipv6h + 1);
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 2dfe50d8d609..3ce9ab29bd37 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1224,11 +1224,11 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
+ 		if (WARN_ON(v6_cork->opt))
+ 			return -EINVAL;
+ 
+-		v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation);
++		v6_cork->opt = kzalloc(sizeof(*opt), sk->sk_allocation);
+ 		if (unlikely(!v6_cork->opt))
+ 			return -ENOBUFS;
+ 
+-		v6_cork->opt->tot_len = opt->tot_len;
++		v6_cork->opt->tot_len = sizeof(*opt);
+ 		v6_cork->opt->opt_flen = opt->opt_flen;
+ 		v6_cork->opt->opt_nflen = opt->opt_nflen;
+ 
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 2d0e7798c793..44eebe738c09 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1251,7 +1251,7 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
+ 	struct dst_entry *new = NULL;
+ 
+ 	rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
+-		       DST_OBSOLETE_NONE, 0);
++		       DST_OBSOLETE_DEAD, 0);
+ 	if (rt) {
+ 		rt6_info_init(rt);
+ 
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index f0edb7209079..412c513d69b3 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -584,6 +584,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ 	u32 tunnel_id, peer_tunnel_id;
+ 	u32 session_id, peer_session_id;
+ 	bool drop_refcnt = false;
++	bool drop_tunnel = false;
+ 	int ver = 2;
+ 	int fd;
+ 
+@@ -652,7 +653,9 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ 	if (tunnel_id == 0)
+ 		goto end;
+ 
+-	tunnel = l2tp_tunnel_find(sock_net(sk), tunnel_id);
++	tunnel = l2tp_tunnel_get(sock_net(sk), tunnel_id);
++	if (tunnel)
++		drop_tunnel = true;
+ 
+ 	/* Special case: create tunnel context if session_id and
+ 	 * peer_session_id is 0. Otherwise look up tunnel using supplied
+@@ -781,6 +784,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ end:
+ 	if (drop_refcnt)
+ 		l2tp_session_dec_refcount(session);
++	if (drop_tunnel)
++		l2tp_tunnel_dec_refcount(tunnel);
+ 	release_sock(sk);
+ 
+ 	return error;
+@@ -993,6 +998,9 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
+ 		 session->name, cmd, arg);
+ 
+ 	sk = ps->sock;
++	if (!sk)
++		return -EBADR;
++
+ 	sock_hold(sk);
+ 
+ 	switch (cmd) {
+diff --git a/net/mac80211/key.c b/net/mac80211/key.c
+index a98fc2b5e0dc..938049395f90 100644
+--- a/net/mac80211/key.c
++++ b/net/mac80211/key.c
+@@ -4,7 +4,7 @@
+  * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
+  * Copyright 2007-2008	Johannes Berg <johannes@sipsolutions.net>
+  * Copyright 2013-2014  Intel Mobile Communications GmbH
+- * Copyright 2015	Intel Deutschland GmbH
++ * Copyright 2015-2017	Intel Deutschland GmbH
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License version 2 as
+@@ -19,6 +19,7 @@
+ #include <linux/slab.h>
+ #include <linux/export.h>
+ #include <net/mac80211.h>
++#include <crypto/algapi.h>
+ #include <asm/unaligned.h>
+ #include "ieee80211_i.h"
+ #include "driver-ops.h"
+@@ -609,6 +610,39 @@ void ieee80211_key_free_unused(struct ieee80211_key *key)
+ 	ieee80211_key_free_common(key);
+ }
+ 
++static bool ieee80211_key_identical(struct ieee80211_sub_if_data *sdata,
++				    struct ieee80211_key *old,
++				    struct ieee80211_key *new)
++{
++	u8 tkip_old[WLAN_KEY_LEN_TKIP], tkip_new[WLAN_KEY_LEN_TKIP];
++	u8 *tk_old, *tk_new;
++
++	if (!old || new->conf.keylen != old->conf.keylen)
++		return false;
++
++	tk_old = old->conf.key;
++	tk_new = new->conf.key;
++
++	/*
++	 * In station mode, don't compare the TX MIC key, as it's never used
++	 * and offloaded rekeying may not care to send it to the host. This
++	 * is the case in iwlwifi, for example.
++	 */
++	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
++	    new->conf.cipher == WLAN_CIPHER_SUITE_TKIP &&
++	    new->conf.keylen == WLAN_KEY_LEN_TKIP &&
++	    !(new->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
++		memcpy(tkip_old, tk_old, WLAN_KEY_LEN_TKIP);
++		memcpy(tkip_new, tk_new, WLAN_KEY_LEN_TKIP);
++		memset(tkip_old + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8);
++		memset(tkip_new + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8);
++		tk_old = tkip_old;
++		tk_new = tkip_new;
++	}
++
++	return !crypto_memneq(tk_old, tk_new, new->conf.keylen);
++}
++
+ int ieee80211_key_link(struct ieee80211_key *key,
+ 		       struct ieee80211_sub_if_data *sdata,
+ 		       struct sta_info *sta)
+@@ -620,9 +654,6 @@ int ieee80211_key_link(struct ieee80211_key *key,
+ 
+ 	pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
+ 	idx = key->conf.keyidx;
+-	key->local = sdata->local;
+-	key->sdata = sdata;
+-	key->sta = sta;
+ 
+ 	mutex_lock(&sdata->local->key_mtx);
+ 
+@@ -633,6 +664,20 @@ int ieee80211_key_link(struct ieee80211_key *key,
+ 	else
+ 		old_key = key_mtx_dereference(sdata->local, sdata->keys[idx]);
+ 
++	/*
++	 * Silently accept key re-installation without really installing the
++	 * new version of the key to avoid nonce reuse or replay issues.
++	 */
++	if (ieee80211_key_identical(sdata, old_key, key)) {
++		ieee80211_key_free_unused(key);
++		ret = 0;
++		goto out;
++	}
++
++	key->local = sdata->local;
++	key->sdata = sdata;
++	key->sta = sta;
++
+ 	increment_tailroom_need_count(sdata);
+ 
+ 	ieee80211_key_replace(sdata, sta, pairwise, old_key, key);
+@@ -648,6 +693,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
+ 		ret = 0;
+ 	}
+ 
++ out:
+ 	mutex_unlock(&sdata->local->key_mtx);
+ 
+ 	return ret;
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 7e794ad50cb0..09c8dbbd2d70 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -2258,16 +2258,17 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ 	cb->min_dump_alloc = control->min_dump_alloc;
+ 	cb->skb = skb;
+ 
++	if (cb->start) {
++		ret = cb->start(cb);
++		if (ret)
++			goto error_unlock;
++	}
++
+ 	nlk->cb_running = true;
+ 
+ 	mutex_unlock(nlk->cb_mutex);
+ 
+-	ret = 0;
+-	if (cb->start)
+-		ret = cb->start(cb);
+-
+-	if (!ret)
+-		ret = netlink_dump(sk);
++	ret = netlink_dump(sk);
+ 
+ 	sock_put(sk);
+ 
+@@ -2298,6 +2299,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
+ 	size_t tlvlen = 0;
+ 	struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk);
+ 	unsigned int flags = 0;
++	bool nlk_has_extack = nlk->flags & NETLINK_F_EXT_ACK;
+ 
+ 	/* Error messages get the original request appened, unless the user
+ 	 * requests to cap the error message, and get extra error data if
+@@ -2308,7 +2310,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
+ 			payload += nlmsg_len(nlh);
+ 		else
+ 			flags |= NLM_F_CAPPED;
+-		if (nlk->flags & NETLINK_F_EXT_ACK && extack) {
++		if (nlk_has_extack && extack) {
+ 			if (extack->_msg)
+ 				tlvlen += nla_total_size(strlen(extack->_msg) + 1);
+ 			if (extack->bad_attr)
+@@ -2317,8 +2319,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
+ 	} else {
+ 		flags |= NLM_F_CAPPED;
+ 
+-		if (nlk->flags & NETLINK_F_EXT_ACK &&
+-		    extack && extack->cookie_len)
++		if (nlk_has_extack && extack && extack->cookie_len)
+ 			tlvlen += nla_total_size(extack->cookie_len);
+ 	}
+ 
+@@ -2346,7 +2347,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
+ 	errmsg->error = err;
+ 	memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) ? nlh->nlmsg_len : sizeof(*nlh));
+ 
+-	if (nlk->flags & NETLINK_F_EXT_ACK && extack) {
++	if (nlk_has_extack && extack) {
+ 		if (err) {
+ 			if (extack->_msg)
+ 				WARN_ON(nla_put_string(skb, NLMSGERR_ATTR_MSG,
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 29d7b7e5b128..b0c8fee3d53d 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1771,7 +1771,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
+ 
+ out:
+ 	if (err && rollover) {
+-		kfree(rollover);
++		kfree_rcu(rollover, rcu);
+ 		po->rollover = NULL;
+ 	}
+ 	mutex_unlock(&fanout_mutex);
+@@ -1798,8 +1798,10 @@ static struct packet_fanout *fanout_release(struct sock *sk)
+ 		else
+ 			f = NULL;
+ 
+-		if (po->rollover)
++		if (po->rollover) {
+ 			kfree_rcu(po->rollover, rcu);
++			po->rollover = NULL;
++		}
+ 	}
+ 	mutex_unlock(&fanout_mutex);
+ 
+@@ -3853,6 +3855,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
+ 	void *data = &val;
+ 	union tpacket_stats_u st;
+ 	struct tpacket_rollover_stats rstats;
++	struct packet_rollover *rollover;
+ 
+ 	if (level != SOL_PACKET)
+ 		return -ENOPROTOOPT;
+@@ -3931,13 +3934,18 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
+ 		       0);
+ 		break;
+ 	case PACKET_ROLLOVER_STATS:
+-		if (!po->rollover)
++		rcu_read_lock();
++		rollover = rcu_dereference(po->rollover);
++		if (rollover) {
++			rstats.tp_all = atomic_long_read(&rollover->num);
++			rstats.tp_huge = atomic_long_read(&rollover->num_huge);
++			rstats.tp_failed = atomic_long_read(&rollover->num_failed);
++			data = &rstats;
++			lv = sizeof(rstats);
++		}
++		rcu_read_unlock();
++		if (!rollover)
+ 			return -EINVAL;
+-		rstats.tp_all = atomic_long_read(&po->rollover->num);
+-		rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
+-		rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
+-		data = &rstats;
+-		lv = sizeof(rstats);
+ 		break;
+ 	case PACKET_TX_HAS_OFF:
+ 		val = po->tp_tx_has_off;
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 4fb5a3222d0d..7935db0d787c 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -307,6 +307,8 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
+ {
+ 	struct Qdisc *q;
+ 
++	if (!handle)
++		return NULL;
+ 	q = qdisc_match_from_root(dev->qdisc, handle);
+ 	if (q)
+ 		goto out;
+diff --git a/net/sctp/input.c b/net/sctp/input.c
+index 41eb2ec10460..1678d9ea7740 100644
+--- a/net/sctp/input.c
++++ b/net/sctp/input.c
+@@ -421,7 +421,7 @@ void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t,
+ {
+ 	struct dst_entry *dst;
+ 
+-	if (!t)
++	if (sock_owned_by_user(sk) || !t)
+ 		return;
+ 	dst = sctp_transport_dst_check(t);
+ 	if (dst)
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index a4b6ffb61495..1344e3a411ae 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -882,8 +882,10 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
+ 			net = sock_net(&opt->inet.sk);
+ 			rcu_read_lock();
+ 			dev = dev_get_by_index_rcu(net, addr->v6.sin6_scope_id);
+-			if (!dev ||
+-			    !ipv6_chk_addr(net, &addr->v6.sin6_addr, dev, 0)) {
++			if (!dev || !(opt->inet.freebind ||
++				      net->ipv6.sysctl.ip_nonlocal_bind ||
++				      ipv6_chk_addr(net, &addr->v6.sin6_addr,
++						    dev, 0))) {
+ 				rcu_read_unlock();
+ 				return 0;
+ 			}
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 8d760863bc41..3d79085eb4e0 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -169,6 +169,36 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
+ 	sk_mem_charge(sk, chunk->skb->truesize);
+ }
+ 
++static void sctp_clear_owner_w(struct sctp_chunk *chunk)
++{
++	skb_orphan(chunk->skb);
++}
++
++static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
++				       void (*cb)(struct sctp_chunk *))
++
++{
++	struct sctp_outq *q = &asoc->outqueue;
++	struct sctp_transport *t;
++	struct sctp_chunk *chunk;
++
++	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
++		list_for_each_entry(chunk, &t->transmitted, transmitted_list)
++			cb(chunk);
++
++	list_for_each_entry(chunk, &q->retransmit, list)
++		cb(chunk);
++
++	list_for_each_entry(chunk, &q->sacked, list)
++		cb(chunk);
++
++	list_for_each_entry(chunk, &q->abandoned, list)
++		cb(chunk);
++
++	list_for_each_entry(chunk, &q->out_chunk_list, list)
++		cb(chunk);
++}
++
+ /* Verify that this is a valid address. */
+ static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
+ 				   int len)
+@@ -8196,7 +8226,9 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
+ 	 * paths won't try to lock it and then oldsk.
+ 	 */
+ 	lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
++	sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w);
+ 	sctp_assoc_migrate(assoc, newsk);
++	sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w);
+ 
+ 	/* If the association on the newsk is already closed before accept()
+ 	 * is called, set RCV_SHUTDOWN flag.
+diff --git a/net/unix/diag.c b/net/unix/diag.c
+index 4d9679701a6d..384c84e83462 100644
+--- a/net/unix/diag.c
++++ b/net/unix/diag.c
+@@ -257,6 +257,8 @@ static int unix_diag_get_exact(struct sk_buff *in_skb,
+ 	err = -ENOENT;
+ 	if (sk == NULL)
+ 		goto out_nosk;
++	if (!net_eq(sock_net(sk), net))
++		goto out;
+ 
+ 	err = sock_diag_check_cookie(sk, req->udiag_cookie);
+ 	if (err)


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.13 commit in: /
@ 2017-11-24  9:41 Alice Ferrazzi
  0 siblings, 0 replies; 20+ messages in thread
From: Alice Ferrazzi @ 2017-11-24  9:41 UTC (permalink / raw
  To: gentoo-commits

commit:     64881833b8854f3c3a82270811c619b49e013da7
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Fri Nov 24 09:40:53 2017 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Fri Nov 24 09:40:53 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=64881833

linux kernel 4.13.16

 0000_README              |   4 +
 1015_linux-4.13.16.patch | 946 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 950 insertions(+)

diff --git a/0000_README b/0000_README
index 7f93bc3..846eb5a 100644
--- a/0000_README
+++ b/0000_README
@@ -103,6 +103,10 @@ Patch:  1014_linux-4.13.15.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.13.15
 
+Patch:  1015_linux-4.13.16.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.13.16
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1015_linux-4.13.16.patch b/1015_linux-4.13.16.patch
new file mode 100644
index 0000000..4f9c8c1
--- /dev/null
+++ b/1015_linux-4.13.16.patch
@@ -0,0 +1,946 @@
+diff --git a/Makefile b/Makefile
+index 3bd5d9d148d3..bc9a897e0431 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 13
+-SUBLEVEL = 15
++SUBLEVEL = 16
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
+index c55fb2cb2acc..24f749324c0f 100644
+--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
+@@ -811,7 +811,24 @@ static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
+ 	struct cacheinfo *this_leaf;
+ 	int i, sibling;
+ 
+-	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
++	/*
++	 * For L3, always use the pre-calculated cpu_llc_shared_mask
++	 * to derive shared_cpu_map.
++	 */
++	if (index == 3) {
++		for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
++			this_cpu_ci = get_cpu_cacheinfo(i);
++			if (!this_cpu_ci->info_list)
++				continue;
++			this_leaf = this_cpu_ci->info_list + index;
++			for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
++				if (!cpu_online(sibling))
++					continue;
++				cpumask_set_cpu(sibling,
++						&this_leaf->shared_cpu_map);
++			}
++		}
++	} else if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
+ 		unsigned int apicid, nshared, first, last;
+ 
+ 		this_leaf = this_cpu_ci->info_list + index;
+@@ -839,19 +856,6 @@ static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
+ 						&this_leaf->shared_cpu_map);
+ 			}
+ 		}
+-	} else if (index == 3) {
+-		for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
+-			this_cpu_ci = get_cpu_cacheinfo(i);
+-			if (!this_cpu_ci->info_list)
+-				continue;
+-			this_leaf = this_cpu_ci->info_list + index;
+-			for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
+-				if (!cpu_online(sibling))
+-					continue;
+-				cpumask_set_cpu(sibling,
+-						&this_leaf->shared_cpu_map);
+-			}
+-		}
+ 	} else
+ 		return 0;
+ 
+diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
+index 810b138f5897..c82d9fd2f05a 100644
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -4030,7 +4030,8 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
+ }
+ 
+ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
+-			      struct list_head *timeouts, long timeout_period,
++			      struct list_head *timeouts,
++			      unsigned long timeout_period,
+ 			      int slot, unsigned long *flags,
+ 			      unsigned int *waiting_msgs)
+ {
+@@ -4043,8 +4044,8 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
+ 	if (!ent->inuse)
+ 		return;
+ 
+-	ent->timeout -= timeout_period;
+-	if (ent->timeout > 0) {
++	if (timeout_period < ent->timeout) {
++		ent->timeout -= timeout_period;
+ 		(*waiting_msgs)++;
+ 		return;
+ 	}
+@@ -4110,7 +4111,8 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
+ 	}
+ }
+ 
+-static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period)
++static unsigned int ipmi_timeout_handler(ipmi_smi_t intf,
++					 unsigned long timeout_period)
+ {
+ 	struct list_head     timeouts;
+ 	struct ipmi_recv_msg *msg, *msg2;
+diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
+index 610638a80383..461bf0b8a094 100644
+--- a/drivers/char/tpm/tpm-dev-common.c
++++ b/drivers/char/tpm/tpm-dev-common.c
+@@ -110,6 +110,12 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
+ 		return -EFAULT;
+ 	}
+ 
++	if (in_size < 6 ||
++	    in_size < be32_to_cpu(*((__be32 *) (priv->data_buffer + 2)))) {
++		mutex_unlock(&priv->buffer_mutex);
++		return -EINVAL;
++	}
++
+ 	/* atomic tpm command send and result receive. We only hold the ops
+ 	 * lock during this period so that the tpm can be unregistered even if
+ 	 * the char dev is held open.
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index c99dc59d729b..76e8054bfc4e 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3253,7 +3253,7 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
+ 	hash ^= (hash >> 16);
+ 	hash ^= (hash >> 8);
+ 
+-	return hash;
++	return hash >> 1;
+ }
+ 
+ /*-------------------------- Device entry points ----------------------------*/
+diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
+index c28fa5a8734c..ba15eeadfe21 100644
+--- a/drivers/net/ethernet/broadcom/bcmsysport.c
++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
+@@ -1743,15 +1743,17 @@ static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
+ 
+ static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
+ {
+-	u32 __maybe_unused reg;
++	u32 reg;
+ 
+-	/* Include Broadcom tag in pad extension */
++	reg = gib_readl(priv, GIB_CONTROL);
++	/* Include Broadcom tag in pad extension and fix up IPG_LENGTH */
+ 	if (netdev_uses_dsa(priv->netdev)) {
+-		reg = gib_readl(priv, GIB_CONTROL);
+ 		reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
+ 		reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
+-		gib_writel(priv, reg, GIB_CONTROL);
+ 	}
++	reg &= ~(GIB_IPG_LEN_MASK << GIB_IPG_LEN_SHIFT);
++	reg |= 12 << GIB_IPG_LEN_SHIFT;
++	gib_writel(priv, reg, GIB_CONTROL);
+ }
+ 
+ static int bcm_sysport_open(struct net_device *dev)
+diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
+index e92859dab7ae..e191c4ebeaf4 100644
+--- a/drivers/net/ethernet/fealnx.c
++++ b/drivers/net/ethernet/fealnx.c
+@@ -257,8 +257,8 @@ enum rx_desc_status_bits {
+ 	RXFSD = 0x00000800,	/* first descriptor */
+ 	RXLSD = 0x00000400,	/* last descriptor */
+ 	ErrorSummary = 0x80,	/* error summary */
+-	RUNT = 0x40,		/* runt packet received */
+-	LONG = 0x20,		/* long packet received */
++	RUNTPKT = 0x40,		/* runt packet received */
++	LONGPKT = 0x20,		/* long packet received */
+ 	FAE = 0x10,		/* frame align error */
+ 	CRC = 0x08,		/* crc error */
+ 	RXER = 0x04,		/* receive error */
+@@ -1632,7 +1632,7 @@ static int netdev_rx(struct net_device *dev)
+ 					       dev->name, rx_status);
+ 
+ 				dev->stats.rx_errors++;	/* end of a packet. */
+-				if (rx_status & (LONG | RUNT))
++				if (rx_status & (LONGPKT | RUNTPKT))
+ 					dev->stats.rx_length_errors++;
+ 				if (rx_status & RXER)
+ 					dev->stats.rx_frame_errors++;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index 7344433259fc..1c513dc0105e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -213,22 +213,20 @@ static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
+ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
+ 					  struct mlx5e_dma_info *dma_info)
+ {
+-	struct page *page;
+-
+ 	if (mlx5e_rx_cache_get(rq, dma_info))
+ 		return 0;
+ 
+-	page = dev_alloc_pages(rq->buff.page_order);
+-	if (unlikely(!page))
++	dma_info->page = dev_alloc_pages(rq->buff.page_order);
++	if (unlikely(!dma_info->page))
+ 		return -ENOMEM;
+ 
+-	dma_info->addr = dma_map_page(rq->pdev, page, 0,
++	dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0,
+ 				      RQ_PAGE_SIZE(rq), rq->buff.map_dir);
+ 	if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
+-		put_page(page);
++		put_page(dma_info->page);
++		dma_info->page = NULL;
+ 		return -ENOMEM;
+ 	}
+-	dma_info->page = page;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 16885827367b..553bc230d70d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -1545,9 +1545,16 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
+ 		return -EAGAIN;
+ 	}
+ 
++	/* Panic tear down fw command will stop the PCI bus communication
++	 * with the HCA, so the health polll is no longer needed.
++	 */
++	mlx5_drain_health_wq(dev);
++	mlx5_stop_health_poll(dev);
++
+ 	ret = mlx5_cmd_force_teardown_hca(dev);
+ 	if (ret) {
+ 		mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
++		mlx5_start_health_poll(dev);
+ 		return ret;
+ 	}
+ 
+diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
+index b2ff88e69a81..3d4f7959dabb 100644
+--- a/drivers/net/usb/asix_devices.c
++++ b/drivers/net/usb/asix_devices.c
+@@ -626,7 +626,7 @@ static int asix_suspend(struct usb_interface *intf, pm_message_t message)
+ 	struct usbnet *dev = usb_get_intfdata(intf);
+ 	struct asix_common_private *priv = dev->driver_priv;
+ 
+-	if (priv->suspend)
++	if (priv && priv->suspend)
+ 		priv->suspend(dev);
+ 
+ 	return usbnet_suspend(intf, message);
+@@ -678,7 +678,7 @@ static int asix_resume(struct usb_interface *intf)
+ 	struct usbnet *dev = usb_get_intfdata(intf);
+ 	struct asix_common_private *priv = dev->driver_priv;
+ 
+-	if (priv->resume)
++	if (priv && priv->resume)
+ 		priv->resume(dev);
+ 
+ 	return usbnet_resume(intf);
+diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
+index 8ab281b478f2..4f88f64cccb4 100644
+--- a/drivers/net/usb/cdc_ether.c
++++ b/drivers/net/usb/cdc_ether.c
+@@ -221,7 +221,7 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
+ 			goto bad_desc;
+ 	}
+ 
+-	if (header.usb_cdc_ether_desc) {
++	if (header.usb_cdc_ether_desc && info->ether->wMaxSegmentSize) {
+ 		dev->hard_mtu = le16_to_cpu(info->ether->wMaxSegmentSize);
+ 		/* because of Zaurus, we may be ignoring the host
+ 		 * side link address we were given.
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index 9c80e80c5493..8d5e97251efe 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -771,7 +771,7 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
+ 	int err;
+ 	u8 iface_no;
+ 	struct usb_cdc_parsed_header hdr;
+-	u16 curr_ntb_format;
++	__le16 curr_ntb_format;
+ 
+ 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ 	if (!ctx)
+@@ -889,7 +889,7 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
+ 			goto error2;
+ 		}
+ 
+-		if (curr_ntb_format == USB_CDC_NCM_NTB32_FORMAT) {
++		if (curr_ntb_format == cpu_to_le16(USB_CDC_NCM_NTB32_FORMAT)) {
+ 			dev_info(&intf->dev, "resetting NTB format to 16-bit");
+ 			err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
+ 					       USB_TYPE_CLASS | USB_DIR_OUT
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 8c3733608271..8d4a6f7cba61 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -499,6 +499,7 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ 		return 1;
+ 	}
+ 	if (rawip) {
++		skb_reset_mac_header(skb);
+ 		skb->dev = dev->net; /* normally set by eth_type_trans */
+ 		skb->protocol = proto;
+ 		return 1;
+@@ -681,7 +682,7 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
+ 	}
+ 
+ 	/* errors aren't fatal - we can live with the dynamic address */
+-	if (cdc_ether) {
++	if (cdc_ether && cdc_ether->wMaxSegmentSize) {
+ 		dev->hard_mtu = le16_to_cpu(cdc_ether->wMaxSegmentSize);
+ 		usbnet_get_ethernet_addr(dev, cdc_ether->iMACAddress);
+ 	}
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index 8a1eaf3c302a..e91ef5e236cc 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -1271,7 +1271,7 @@ static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
+ 	frh->family = family;
+ 	frh->action = FR_ACT_TO_TBL;
+ 
+-	if (nla_put_u32(skb, FRA_L3MDEV, 1))
++	if (nla_put_u8(skb, FRA_L3MDEV, 1))
+ 		goto nla_put_failure;
+ 
+ 	if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF))
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index e17baac70f43..436154720bf8 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -1632,26 +1632,19 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request,
+ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
+ {
+ 	struct vxlan_dev *vxlan = netdev_priv(dev);
+-	struct nd_msg *msg;
+-	const struct ipv6hdr *iphdr;
+ 	const struct in6_addr *daddr;
+-	struct neighbour *n;
++	const struct ipv6hdr *iphdr;
+ 	struct inet6_dev *in6_dev;
++	struct neighbour *n;
++	struct nd_msg *msg;
+ 
+ 	in6_dev = __in6_dev_get(dev);
+ 	if (!in6_dev)
+ 		goto out;
+ 
+-	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
+-		goto out;
+-
+ 	iphdr = ipv6_hdr(skb);
+ 	daddr = &iphdr->daddr;
+-
+ 	msg = (struct nd_msg *)(iphdr + 1);
+-	if (msg->icmph.icmp6_code != 0 ||
+-	    msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
+-		goto out;
+ 
+ 	if (ipv6_addr_loopback(daddr) ||
+ 	    ipv6_addr_is_multicast(&msg->target))
+@@ -2258,11 +2251,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ 	struct vxlan_dev *vxlan = netdev_priv(dev);
++	struct vxlan_rdst *rdst, *fdst = NULL;
+ 	const struct ip_tunnel_info *info;
+-	struct ethhdr *eth;
+ 	bool did_rsc = false;
+-	struct vxlan_rdst *rdst, *fdst = NULL;
+ 	struct vxlan_fdb *f;
++	struct ethhdr *eth;
+ 	__be32 vni = 0;
+ 
+ 	info = skb_tunnel_info(skb);
+@@ -2287,12 +2280,14 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		if (ntohs(eth->h_proto) == ETH_P_ARP)
+ 			return arp_reduce(dev, skb, vni);
+ #if IS_ENABLED(CONFIG_IPV6)
+-		else if (ntohs(eth->h_proto) == ETH_P_IPV6) {
+-			struct ipv6hdr *hdr, _hdr;
+-			if ((hdr = skb_header_pointer(skb,
+-						      skb_network_offset(skb),
+-						      sizeof(_hdr), &_hdr)) &&
+-			    hdr->nexthdr == IPPROTO_ICMPV6)
++		else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
++			 pskb_may_pull(skb, sizeof(struct ipv6hdr) +
++					    sizeof(struct nd_msg)) &&
++			 ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
++			struct nd_msg *m = (struct nd_msg *)(ipv6_hdr(skb) + 1);
++
++			if (m->icmph.icmp6_code == 0 &&
++			    m->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
+ 				return neigh_reduce(dev, skb, vni);
+ 		}
+ #endif
+diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
+index e500f7dd2470..4bd376c08b59 100644
+--- a/drivers/tty/serial/8250/8250_fintek.c
++++ b/drivers/tty/serial/8250/8250_fintek.c
+@@ -118,6 +118,9 @@ static int fintek_8250_enter_key(u16 base_port, u8 key)
+ 	if (!request_muxed_region(base_port, 2, "8250_fintek"))
+ 		return -EBUSY;
+ 
++	/* Force to deactive all SuperIO in this base_port */
++	outb(EXIT_KEY, base_port + ADDR_PORT);
++
+ 	outb(key, base_port + ADDR_PORT);
+ 	outb(key, base_port + ADDR_PORT);
+ 	return 0;
+diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
+index 1ea05ac57aa7..670f7e334f93 100644
+--- a/drivers/tty/serial/omap-serial.c
++++ b/drivers/tty/serial/omap-serial.c
+@@ -693,7 +693,7 @@ static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl)
+ 	if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
+ 		up->efr |= UART_EFR_RTS;
+ 	else
+-		up->efr &= UART_EFR_RTS;
++		up->efr &= ~UART_EFR_RTS;
+ 	serial_out(up, UART_EFR, up->efr);
+ 	serial_out(up, UART_LCR, lcr);
+ 
+diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c
+index e82357c89979..8cf16d8c5261 100644
+--- a/fs/coda/upcall.c
++++ b/fs/coda/upcall.c
+@@ -446,8 +446,7 @@ int venus_fsync(struct super_block *sb, struct CodaFid *fid)
+ 	UPARG(CODA_FSYNC);
+ 
+ 	inp->coda_fsync.VFid = *fid;
+-	error = coda_upcall(coda_vcp(sb), sizeof(union inputArgs),
+-			    &outsize, inp);
++	error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
+ 
+ 	CODA_FREE(inp, insize);
+ 	return error;
+diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
+index 74407c6dd592..ec8f75813beb 100644
+--- a/fs/ocfs2/dlm/dlmrecovery.c
++++ b/fs/ocfs2/dlm/dlmrecovery.c
+@@ -2419,6 +2419,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
+ 					dlm_lockres_put(res);
+ 					continue;
+ 				}
++				dlm_move_lockres_to_recovery_list(dlm, res);
+ 			} else if (res->owner == dlm->node_num) {
+ 				dlm_free_dead_locks(dlm, res, dead_node);
+ 				__dlm_lockres_calc_usage(dlm, res);
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index bfeb647459d9..2fc8e65c07c5 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -1168,6 +1168,13 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
+ 	}
+ 	size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
+ 	if (size_change) {
++		/*
++		 * Here we should wait dio to finish before inode lock
++		 * to avoid a deadlock between ocfs2_setattr() and
++		 * ocfs2_dio_end_io_write()
++		 */
++		inode_dio_wait(inode);
++
+ 		status = ocfs2_rw_lock(inode, 1);
+ 		if (status < 0) {
+ 			mlog_errno(status);
+@@ -1207,8 +1214,6 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
+ 		if (status)
+ 			goto bail_unlock;
+ 
+-		inode_dio_wait(inode);
+-
+ 		if (i_size_read(inode) >= attr->ia_size) {
+ 			if (ocfs2_should_order_data(inode)) {
+ 				status = ocfs2_begin_ordered_truncate(inode,
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index fc14b8b3f6ce..1d86e09f17c1 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -691,7 +691,8 @@ typedef struct pglist_data {
+ 	 * is the first PFN that needs to be initialised.
+ 	 */
+ 	unsigned long first_deferred_pfn;
+-	unsigned long static_init_size;
++	/* Number of non-deferred pages */
++	unsigned long static_init_pgcnt;
+ #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
+ 
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 63df75ae70ee..baf2dd102686 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -3655,6 +3655,13 @@ static inline void nf_reset_trace(struct sk_buff *skb)
+ #endif
+ }
+ 
++static inline void ipvs_reset(struct sk_buff *skb)
++{
++#if IS_ENABLED(CONFIG_IP_VS)
++	skb->ipvs_property = 0;
++#endif
++}
++
+ /* Note: This doesn't put any conntrack and bridge info in dst. */
+ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
+ 			     bool copy)
+diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
+index 908b309d60d7..b8f51dffeae9 100644
+--- a/kernel/rcu/tree_plugin.h
++++ b/kernel/rcu/tree_plugin.h
+@@ -1493,7 +1493,7 @@ static void rcu_prepare_for_idle(void)
+ 	rdtp->last_accelerate = jiffies;
+ 	for_each_rcu_flavor(rsp) {
+ 		rdp = this_cpu_ptr(rsp->rda);
+-		if (rcu_segcblist_pend_cbs(&rdp->cblist))
++		if (!rcu_segcblist_pend_cbs(&rdp->cblist))
+ 			continue;
+ 		rnp = rdp->mynode;
+ 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 1423da8dd16f..3bd0999c266f 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -289,28 +289,37 @@ EXPORT_SYMBOL(nr_online_nodes);
+ int page_group_by_mobility_disabled __read_mostly;
+ 
+ #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
++
++/*
++ * Determine how many pages need to be initialized durig early boot
++ * (non-deferred initialization).
++ * The value of first_deferred_pfn will be set later, once non-deferred pages
++ * are initialized, but for now set it ULONG_MAX.
++ */
+ static inline void reset_deferred_meminit(pg_data_t *pgdat)
+ {
+-	unsigned long max_initialise;
+-	unsigned long reserved_lowmem;
++	phys_addr_t start_addr, end_addr;
++	unsigned long max_pgcnt;
++	unsigned long reserved;
+ 
+ 	/*
+ 	 * Initialise at least 2G of a node but also take into account that
+ 	 * two large system hashes that can take up 1GB for 0.25TB/node.
+ 	 */
+-	max_initialise = max(2UL << (30 - PAGE_SHIFT),
+-		(pgdat->node_spanned_pages >> 8));
++	max_pgcnt = max(2UL << (30 - PAGE_SHIFT),
++			(pgdat->node_spanned_pages >> 8));
+ 
+ 	/*
+ 	 * Compensate the all the memblock reservations (e.g. crash kernel)
+ 	 * from the initial estimation to make sure we will initialize enough
+ 	 * memory to boot.
+ 	 */
+-	reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn,
+-			pgdat->node_start_pfn + max_initialise);
+-	max_initialise += reserved_lowmem;
++	start_addr = PFN_PHYS(pgdat->node_start_pfn);
++	end_addr = PFN_PHYS(pgdat->node_start_pfn + max_pgcnt);
++	reserved = memblock_reserved_memory_within(start_addr, end_addr);
++	max_pgcnt += PHYS_PFN(reserved);
+ 
+-	pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages);
++	pgdat->static_init_pgcnt = min(max_pgcnt, pgdat->node_spanned_pages);
+ 	pgdat->first_deferred_pfn = ULONG_MAX;
+ }
+ 
+@@ -337,7 +346,7 @@ static inline bool update_defer_init(pg_data_t *pgdat,
+ 	if (zone_end < pgdat_end_pfn(pgdat))
+ 		return true;
+ 	(*nr_initialised)++;
+-	if ((*nr_initialised > pgdat->static_init_size) &&
++	if ((*nr_initialised > pgdat->static_init_pgcnt) &&
+ 	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
+ 		pgdat->first_deferred_pfn = pfn;
+ 		return false;
+diff --git a/mm/page_ext.c b/mm/page_ext.c
+index 88ccc044b09a..9dbabbfc4557 100644
+--- a/mm/page_ext.c
++++ b/mm/page_ext.c
+@@ -124,7 +124,6 @@ struct page_ext *lookup_page_ext(struct page *page)
+ 	struct page_ext *base;
+ 
+ 	base = NODE_DATA(page_to_nid(page))->node_page_ext;
+-#if defined(CONFIG_DEBUG_VM)
+ 	/*
+ 	 * The sanity checks the page allocator does upon freeing a
+ 	 * page can reach here before the page_ext arrays are
+@@ -133,7 +132,6 @@ struct page_ext *lookup_page_ext(struct page *page)
+ 	 */
+ 	if (unlikely(!base))
+ 		return NULL;
+-#endif
+ 	index = pfn - round_down(node_start_pfn(page_to_nid(page)),
+ 					MAX_ORDER_NR_PAGES);
+ 	return get_entry(base, index);
+@@ -198,7 +196,6 @@ struct page_ext *lookup_page_ext(struct page *page)
+ {
+ 	unsigned long pfn = page_to_pfn(page);
+ 	struct mem_section *section = __pfn_to_section(pfn);
+-#if defined(CONFIG_DEBUG_VM)
+ 	/*
+ 	 * The sanity checks the page allocator does upon freeing a
+ 	 * page can reach here before the page_ext arrays are
+@@ -207,7 +204,6 @@ struct page_ext *lookup_page_ext(struct page *page)
+ 	 */
+ 	if (!section->page_ext)
+ 		return NULL;
+-#endif
+ 	return get_entry(section->page_ext, pfn);
+ }
+ 
+diff --git a/mm/pagewalk.c b/mm/pagewalk.c
+index 1a4197965415..7d973f63088c 100644
+--- a/mm/pagewalk.c
++++ b/mm/pagewalk.c
+@@ -187,8 +187,12 @@ static int walk_hugetlb_range(unsigned long addr, unsigned long end,
+ 	do {
+ 		next = hugetlb_entry_end(h, addr, end);
+ 		pte = huge_pte_offset(walk->mm, addr & hmask, sz);
+-		if (pte && walk->hugetlb_entry)
++
++		if (pte)
+ 			err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
++		else if (walk->pte_hole)
++			err = walk->pte_hole(addr, next, walk);
++
+ 		if (err)
+ 			break;
+ 	} while (addr = next, addr != end);
+diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
+index 9649579b5b9f..4a72ee4e2ae9 100644
+--- a/net/8021q/vlan.c
++++ b/net/8021q/vlan.c
+@@ -376,6 +376,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
+ 			dev->name);
+ 		vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
+ 	}
++	if (event == NETDEV_DOWN &&
++	    (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
++		vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
+ 
+ 	vlan_info = rtnl_dereference(dev->vlan_info);
+ 	if (!vlan_info)
+@@ -423,9 +426,6 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
+ 		struct net_device *tmp;
+ 		LIST_HEAD(close_list);
+ 
+-		if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+-			vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
+-
+ 		/* Put all VLANs for this dev in the down state too.  */
+ 		vlan_group_for_each_dev(grp, i, vlandev) {
+ 			flgs = vlandev->flags;
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 72eb23d2426f..a0155578e951 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -4476,6 +4476,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
+ 	if (!xnet)
+ 		return;
+ 
++	ipvs_reset(skb);
+ 	skb_orphan(skb);
+ 	skb->mark = 0;
+ }
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index e92e5dbcb3d6..ffe96de8a079 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2613,7 +2613,6 @@ void tcp_simple_retransmit(struct sock *sk)
+ 	struct tcp_sock *tp = tcp_sk(sk);
+ 	struct sk_buff *skb;
+ 	unsigned int mss = tcp_current_mss(sk);
+-	u32 prior_lost = tp->lost_out;
+ 
+ 	tcp_for_write_queue(skb, sk) {
+ 		if (skb == tcp_send_head(sk))
+@@ -2630,7 +2629,7 @@ void tcp_simple_retransmit(struct sock *sk)
+ 
+ 	tcp_clear_retrans_hints_partial(tp);
+ 
+-	if (prior_lost == tp->lost_out)
++	if (!tp->lost_out)
+ 		return;
+ 
+ 	if (tcp_is_reno(tp))
+diff --git a/net/ipv4/tcp_nv.c b/net/ipv4/tcp_nv.c
+index 6d650ed3cb59..5c871666c561 100644
+--- a/net/ipv4/tcp_nv.c
++++ b/net/ipv4/tcp_nv.c
+@@ -263,7 +263,7 @@ static void tcpnv_acked(struct sock *sk, const struct ack_sample *sample)
+ 
+ 	/* rate in 100's bits per second */
+ 	rate64 = ((u64)sample->in_flight) * 8000000;
+-	rate = (u32)div64_u64(rate64, (u64)(avg_rtt * 100));
++	rate = (u32)div64_u64(rate64, (u64)(avg_rtt ?: 1) * 100);
+ 
+ 	/* Remember the maximum rate seen during this RTT
+ 	 * Note: It may be more than one RTT. This function should be
+diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
+index 11f69bbf9307..b6a2aa1dcf56 100644
+--- a/net/ipv4/tcp_offload.c
++++ b/net/ipv4/tcp_offload.c
+@@ -149,11 +149,19 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
+ 	 * is freed by GSO engine
+ 	 */
+ 	if (copy_destructor) {
++		int delta;
++
+ 		swap(gso_skb->sk, skb->sk);
+ 		swap(gso_skb->destructor, skb->destructor);
+ 		sum_truesize += skb->truesize;
+-		refcount_add(sum_truesize - gso_skb->truesize,
+-			   &skb->sk->sk_wmem_alloc);
++		delta = sum_truesize - gso_skb->truesize;
++		/* In some pathological cases, delta can be negative.
++		 * We need to either use refcount_add() or refcount_sub_and_test()
++		 */
++		if (likely(delta >= 0))
++			refcount_add(delta, &skb->sk->sk_wmem_alloc);
++		else
++			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
+ 	}
+ 
+ 	delta = htonl(oldlen + (skb_tail_pointer(skb) -
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 58587b0e2b5d..e359840f46c0 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -3207,13 +3207,8 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
+ 	th->source = htons(ireq->ir_num);
+ 	th->dest = ireq->ir_rmt_port;
+ 	skb->mark = ireq->ir_mark;
+-	/* Setting of flags are superfluous here for callers (and ECE is
+-	 * not even correctly set)
+-	 */
+-	tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
+-			     TCPHDR_SYN | TCPHDR_ACK);
+-
+-	th->seq = htonl(TCP_SKB_CB(skb)->seq);
++	skb->ip_summed = CHECKSUM_PARTIAL;
++	th->seq = htonl(tcp_rsk(req)->snt_isn);
+ 	/* XXX data is queued and acked as is. No buffer/window check */
+ 	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
+ 
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index 4d322c1b7233..e4280b6568b4 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -123,6 +123,7 @@ static int l2tp_ip_recv(struct sk_buff *skb)
+ 	unsigned char *ptr, *optr;
+ 	struct l2tp_session *session;
+ 	struct l2tp_tunnel *tunnel = NULL;
++	struct iphdr *iph;
+ 	int length;
+ 
+ 	if (!pskb_may_pull(skb, 4))
+@@ -178,24 +179,17 @@ static int l2tp_ip_recv(struct sk_buff *skb)
+ 		goto discard;
+ 
+ 	tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
+-	tunnel = l2tp_tunnel_find(net, tunnel_id);
+-	if (tunnel) {
+-		sk = tunnel->sock;
+-		sock_hold(sk);
+-	} else {
+-		struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
+-
+-		read_lock_bh(&l2tp_ip_lock);
+-		sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr,
+-					   inet_iif(skb), tunnel_id);
+-		if (!sk) {
+-			read_unlock_bh(&l2tp_ip_lock);
+-			goto discard;
+-		}
++	iph = (struct iphdr *)skb_network_header(skb);
+ 
+-		sock_hold(sk);
++	read_lock_bh(&l2tp_ip_lock);
++	sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr, inet_iif(skb),
++				   tunnel_id);
++	if (!sk) {
+ 		read_unlock_bh(&l2tp_ip_lock);
++		goto discard;
+ 	}
++	sock_hold(sk);
++	read_unlock_bh(&l2tp_ip_lock);
+ 
+ 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
+ 		goto discard_put;
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index 88b397c30d86..8bcaa975b432 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -136,6 +136,7 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
+ 	unsigned char *ptr, *optr;
+ 	struct l2tp_session *session;
+ 	struct l2tp_tunnel *tunnel = NULL;
++	struct ipv6hdr *iph;
+ 	int length;
+ 
+ 	if (!pskb_may_pull(skb, 4))
+@@ -192,24 +193,17 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
+ 		goto discard;
+ 
+ 	tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
+-	tunnel = l2tp_tunnel_find(net, tunnel_id);
+-	if (tunnel) {
+-		sk = tunnel->sock;
+-		sock_hold(sk);
+-	} else {
+-		struct ipv6hdr *iph = ipv6_hdr(skb);
+-
+-		read_lock_bh(&l2tp_ip6_lock);
+-		sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr,
+-					    inet6_iif(skb), tunnel_id);
+-		if (!sk) {
+-			read_unlock_bh(&l2tp_ip6_lock);
+-			goto discard;
+-		}
++	iph = ipv6_hdr(skb);
+ 
+-		sock_hold(sk);
++	read_lock_bh(&l2tp_ip6_lock);
++	sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr,
++				    inet6_iif(skb), tunnel_id);
++	if (!sk) {
+ 		read_unlock_bh(&l2tp_ip6_lock);
++		goto discard;
+ 	}
++	sock_hold(sk);
++	read_unlock_bh(&l2tp_ip6_lock);
+ 
+ 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
+ 		goto discard_put;
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 09c8dbbd2d70..2939a6b87c27 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -2128,7 +2128,7 @@ static int netlink_dump(struct sock *sk)
+ 	struct sk_buff *skb = NULL;
+ 	struct nlmsghdr *nlh;
+ 	struct module *module;
+-	int len, err = -ENOBUFS;
++	int err = -ENOBUFS;
+ 	int alloc_min_size;
+ 	int alloc_size;
+ 
+@@ -2175,9 +2175,11 @@ static int netlink_dump(struct sock *sk)
+ 	skb_reserve(skb, skb_tailroom(skb) - alloc_size);
+ 	netlink_skb_set_owner_r(skb, sk);
+ 
+-	len = cb->dump(skb, cb);
++	if (nlk->dump_done_errno > 0)
++		nlk->dump_done_errno = cb->dump(skb, cb);
+ 
+-	if (len > 0) {
++	if (nlk->dump_done_errno > 0 ||
++	    skb_tailroom(skb) < nlmsg_total_size(sizeof(nlk->dump_done_errno))) {
+ 		mutex_unlock(nlk->cb_mutex);
+ 
+ 		if (sk_filter(sk, skb))
+@@ -2187,13 +2189,15 @@ static int netlink_dump(struct sock *sk)
+ 		return 0;
+ 	}
+ 
+-	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
+-	if (!nlh)
++	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE,
++			       sizeof(nlk->dump_done_errno), NLM_F_MULTI);
++	if (WARN_ON(!nlh))
+ 		goto errout_skb;
+ 
+ 	nl_dump_check_consistent(cb, nlh);
+ 
+-	memcpy(nlmsg_data(nlh), &len, sizeof(len));
++	memcpy(nlmsg_data(nlh), &nlk->dump_done_errno,
++	       sizeof(nlk->dump_done_errno));
+ 
+ 	if (sk_filter(sk, skb))
+ 		kfree_skb(skb);
+@@ -2265,6 +2269,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ 	}
+ 
+ 	nlk->cb_running = true;
++	nlk->dump_done_errno = INT_MAX;
+ 
+ 	mutex_unlock(nlk->cb_mutex);
+ 
+diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
+index 3490f2430532..8908fc2d3de0 100644
+--- a/net/netlink/af_netlink.h
++++ b/net/netlink/af_netlink.h
+@@ -33,6 +33,7 @@ struct netlink_sock {
+ 	wait_queue_head_t	wait;
+ 	bool			bound;
+ 	bool			cb_running;
++	int			dump_done_errno;
+ 	struct netlink_callback	cb;
+ 	struct mutex		*cb_mutex;
+ 	struct mutex		cb_def_mutex;
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index 1344e3a411ae..edb462b0b73b 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -807,9 +807,10 @@ static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname,
+ 		addr->v6.sin6_flowinfo = 0;
+ 		addr->v6.sin6_port = sh->source;
+ 		addr->v6.sin6_addr = ipv6_hdr(skb)->saddr;
+-		if (ipv6_addr_type(&addr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) {
++		if (ipv6_addr_type(&addr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL)
+ 			addr->v6.sin6_scope_id = sctp_v6_skb_iif(skb);
+-		}
++		else
++			addr->v6.sin6_scope_id = 0;
+ 	}
+ 
+ 	*addr_len = sctp_v6_addr_to_user(sctp_sk(skb->sk), addr);
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 3d79085eb4e0..083da13e1af4 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -4924,6 +4924,10 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
+ 	struct socket *sock;
+ 	int err = 0;
+ 
++	/* Do not peel off from one netns to another one. */
++	if (!net_eq(current->nsproxy->net_ns, sock_net(sk)))
++		return -EINVAL;
++
+ 	if (!asoc)
+ 		return -EINVAL;
+ 
+diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
+index 809ba70fbbbf..7d769b948de8 100644
+--- a/security/integrity/ima/ima_appraise.c
++++ b/security/integrity/ima/ima_appraise.c
+@@ -320,6 +320,9 @@ void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file)
+ 	if (iint->flags & IMA_DIGSIG)
+ 		return;
+ 
++	if (iint->ima_file_status != INTEGRITY_PASS)
++		return;
++
+ 	rc = ima_collect_measurement(iint, file, NULL, 0, ima_hash_algo);
+ 	if (rc < 0)
+ 		return;


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.13 commit in: /
@ 2017-11-21  9:43 Alice Ferrazzi
  0 siblings, 0 replies; 20+ messages in thread
From: Alice Ferrazzi @ 2017-11-21  9:43 UTC (permalink / raw
  To: gentoo-commits

commit:     1e22ee21bb5c0cf7b0f171214fe2df3323b61db9
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Tue Nov 21 09:38:28 2017 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Tue Nov 21 09:38:28 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1e22ee21

linux kernel 4.13.15

 0000_README              |    4 +
 1014_linux-4.13.15.patch | 1057 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1061 insertions(+)

diff --git a/0000_README b/0000_README
index 8777a91..7f93bc3 100644
--- a/0000_README
+++ b/0000_README
@@ -99,6 +99,10 @@ Patch:  1013_linux-4.13.14.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.13.14
 
+Patch:  1014_linux-4.13.15.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.13.15
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1014_linux-4.13.15.patch b/1014_linux-4.13.15.patch
new file mode 100644
index 0000000..9e3a0bd
--- /dev/null
+++ b/1014_linux-4.13.15.patch
@@ -0,0 +1,1057 @@
+diff --git a/Makefile b/Makefile
+index 4aabae365a6c..3bd5d9d148d3 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 13
+-SUBLEVEL = 14
++SUBLEVEL = 15
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
+index 87cc9ab7a13c..4b8187639c2d 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
++++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
+@@ -245,6 +245,9 @@ static int mce_severity_amd(struct mce *m, int tolerant, char **msg, bool is_exc
+ 
+ 	if (m->status & MCI_STATUS_UC) {
+ 
++		if (ctx == IN_KERNEL)
++			return MCE_PANIC_SEVERITY;
++
+ 		/*
+ 		 * On older systems where overflow_recov flag is not present, we
+ 		 * should simply panic if an error overflow occurs. If
+@@ -255,10 +258,6 @@ static int mce_severity_amd(struct mce *m, int tolerant, char **msg, bool is_exc
+ 			if (mce_flags.smca)
+ 				return mce_severity_amd_smca(m, ctx);
+ 
+-			/* software can try to contain */
+-			if (!(m->mcgstatus & MCG_STATUS_RIPV) && (ctx == IN_KERNEL))
+-				return MCE_PANIC_SEVERITY;
+-
+ 			/* kill current process */
+ 			return MCE_AR_SEVERITY;
+ 		} else {
+diff --git a/crypto/dh.c b/crypto/dh.c
+index b1032a5c1bfa..aadaf36fb56f 100644
+--- a/crypto/dh.c
++++ b/crypto/dh.c
+@@ -21,19 +21,12 @@ struct dh_ctx {
+ 	MPI xa;
+ };
+ 
+-static inline void dh_clear_params(struct dh_ctx *ctx)
++static void dh_clear_ctx(struct dh_ctx *ctx)
+ {
+ 	mpi_free(ctx->p);
+ 	mpi_free(ctx->g);
+-	ctx->p = NULL;
+-	ctx->g = NULL;
+-}
+-
+-static void dh_free_ctx(struct dh_ctx *ctx)
+-{
+-	dh_clear_params(ctx);
+ 	mpi_free(ctx->xa);
+-	ctx->xa = NULL;
++	memset(ctx, 0, sizeof(*ctx));
+ }
+ 
+ /*
+@@ -71,10 +64,8 @@ static int dh_set_params(struct dh_ctx *ctx, struct dh *params)
+ 		return -EINVAL;
+ 
+ 	ctx->g = mpi_read_raw_data(params->g, params->g_size);
+-	if (!ctx->g) {
+-		mpi_free(ctx->p);
++	if (!ctx->g)
+ 		return -EINVAL;
+-	}
+ 
+ 	return 0;
+ }
+@@ -86,21 +77,23 @@ static int dh_set_secret(struct crypto_kpp *tfm, const void *buf,
+ 	struct dh params;
+ 
+ 	/* Free the old MPI key if any */
+-	dh_free_ctx(ctx);
++	dh_clear_ctx(ctx);
+ 
+ 	if (crypto_dh_decode_key(buf, len, &params) < 0)
+-		return -EINVAL;
++		goto err_clear_ctx;
+ 
+ 	if (dh_set_params(ctx, &params) < 0)
+-		return -EINVAL;
++		goto err_clear_ctx;
+ 
+ 	ctx->xa = mpi_read_raw_data(params.key, params.key_size);
+-	if (!ctx->xa) {
+-		dh_clear_params(ctx);
+-		return -EINVAL;
+-	}
++	if (!ctx->xa)
++		goto err_clear_ctx;
+ 
+ 	return 0;
++
++err_clear_ctx:
++	dh_clear_ctx(ctx);
++	return -EINVAL;
+ }
+ 
+ static int dh_compute_value(struct kpp_request *req)
+@@ -158,7 +151,7 @@ static void dh_exit_tfm(struct crypto_kpp *tfm)
+ {
+ 	struct dh_ctx *ctx = dh_get_ctx(tfm);
+ 
+-	dh_free_ctx(ctx);
++	dh_clear_ctx(ctx);
+ }
+ 
+ static struct kpp_alg dh = {
+diff --git a/crypto/dh_helper.c b/crypto/dh_helper.c
+index 8ba8a3f82620..7f00c771fe8d 100644
+--- a/crypto/dh_helper.c
++++ b/crypto/dh_helper.c
+@@ -83,6 +83,14 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
+ 	if (secret.len != crypto_dh_key_len(params))
+ 		return -EINVAL;
+ 
++	/*
++	 * Don't permit the buffer for 'key' or 'g' to be larger than 'p', since
++	 * some drivers assume otherwise.
++	 */
++	if (params->key_size > params->p_size ||
++	    params->g_size > params->p_size)
++		return -EINVAL;
++
+ 	/* Don't allocate memory. Set pointers to data within
+ 	 * the given buffer
+ 	 */
+@@ -90,6 +98,14 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
+ 	params->p = (void *)(ptr + params->key_size);
+ 	params->g = (void *)(ptr + params->key_size + params->p_size);
+ 
++	/*
++	 * Don't permit 'p' to be 0.  It's not a prime number, and it's subject
++	 * to corner cases such as 'mod 0' being undefined or
++	 * crypto_kpp_maxsize() returning 0.
++	 */
++	if (memchr_inv(params->p, 0, params->p_size) == NULL)
++		return -EINVAL;
++
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(crypto_dh_decode_key);
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index e331e212f5fc..99c97f65149e 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -3068,6 +3068,12 @@ static int btusb_probe(struct usb_interface *intf,
+ 	if (id->driver_info & BTUSB_QCA_ROME) {
+ 		data->setup_on_usb = btusb_setup_qca;
+ 		hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
++
++		/* QCA Rome devices lose their updated firmware over suspend,
++		 * but the USB hub doesn't notice any status change.
++		 * Explicitly request a device reset on resume.
++		 */
++		set_bit(BTUSB_RESET_RESUME, &data->flags);
+ 	}
+ 
+ #ifdef CONFIG_BT_HCIBTUSB_RTL
+diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
+index 8b27211f6c50..b8093b3bf7c7 100644
+--- a/drivers/hid/Kconfig
++++ b/drivers/hid/Kconfig
+@@ -230,7 +230,7 @@ config HID_CMEDIA
+ 
+ config HID_CP2112
+ 	tristate "Silicon Labs CP2112 HID USB-to-SMBus Bridge support"
+-	depends on USB_HID && I2C && GPIOLIB
++	depends on USB_HID && HIDRAW && I2C && GPIOLIB
+ 	select GPIOLIB_IRQCHIP
+ 	---help---
+ 	Support for Silicon Labs CP2112 HID USB to SMBus Master Bridge.
+diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
+index 8a03654048bf..feb62fd4dfc3 100644
+--- a/drivers/hid/wacom_wac.h
++++ b/drivers/hid/wacom_wac.h
+@@ -166,6 +166,7 @@
+ 				 ((f)->physical == HID_DG_PEN) || \
+ 				 ((f)->application == HID_DG_PEN) || \
+ 				 ((f)->application == HID_DG_DIGITIZER) || \
++				 ((f)->application == WACOM_HID_WD_PEN) || \
+ 				 ((f)->application == WACOM_HID_WD_DIGITIZER) || \
+ 				 ((f)->application == WACOM_HID_G9_PEN) || \
+ 				 ((f)->application == WACOM_HID_G11_PEN))
+diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
+index bd76534a2749..9672b696d428 100644
+--- a/drivers/media/rc/imon.c
++++ b/drivers/media/rc/imon.c
+@@ -2516,6 +2516,11 @@ static int imon_probe(struct usb_interface *interface,
+ 	mutex_lock(&driver_lock);
+ 
+ 	first_if = usb_ifnum_to_if(usbdev, 0);
++	if (!first_if) {
++		ret = -ENODEV;
++		goto fail;
++	}
++
+ 	first_if_ctx = usb_get_intfdata(first_if);
+ 
+ 	if (ifnum == 0) {
+diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
+index 6a57fc6d3472..a04101d1e716 100644
+--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
++++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
+@@ -291,7 +291,7 @@ static int stk7700P2_frontend_attach(struct dvb_usb_adapter *adap)
+ 					     stk7700d_dib7000p_mt2266_config)
+ 		    != 0) {
+ 			err("%s: state->dib7000p_ops.i2c_enumeration failed.  Cannot continue\n", __func__);
+-			dvb_detach(&state->dib7000p_ops);
++			dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ 			return -ENODEV;
+ 		}
+ 	}
+@@ -325,7 +325,7 @@ static int stk7700d_frontend_attach(struct dvb_usb_adapter *adap)
+ 					     stk7700d_dib7000p_mt2266_config)
+ 		    != 0) {
+ 			err("%s: state->dib7000p_ops.i2c_enumeration failed.  Cannot continue\n", __func__);
+-			dvb_detach(&state->dib7000p_ops);
++			dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ 			return -ENODEV;
+ 		}
+ 	}
+@@ -478,7 +478,7 @@ static int stk7700ph_frontend_attach(struct dvb_usb_adapter *adap)
+ 				     &stk7700ph_dib7700_xc3028_config) != 0) {
+ 		err("%s: state->dib7000p_ops.i2c_enumeration failed.  Cannot continue\n",
+ 		    __func__);
+-		dvb_detach(&state->dib7000p_ops);
++		dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ 		return -ENODEV;
+ 	}
+ 
+@@ -1010,7 +1010,7 @@ static int stk7070p_frontend_attach(struct dvb_usb_adapter *adap)
+ 				     &dib7070p_dib7000p_config) != 0) {
+ 		err("%s: state->dib7000p_ops.i2c_enumeration failed.  Cannot continue\n",
+ 		    __func__);
+-		dvb_detach(&state->dib7000p_ops);
++		dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ 		return -ENODEV;
+ 	}
+ 
+@@ -1068,7 +1068,7 @@ static int stk7770p_frontend_attach(struct dvb_usb_adapter *adap)
+ 				     &dib7770p_dib7000p_config) != 0) {
+ 		err("%s: state->dib7000p_ops.i2c_enumeration failed.  Cannot continue\n",
+ 		    __func__);
+-		dvb_detach(&state->dib7000p_ops);
++		dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ 		return -ENODEV;
+ 	}
+ 
+@@ -3056,7 +3056,7 @@ static int nim7090_frontend_attach(struct dvb_usb_adapter *adap)
+ 
+ 	if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x10, &nim7090_dib7000p_config) != 0) {
+ 		err("%s: state->dib7000p_ops.i2c_enumeration failed.  Cannot continue\n", __func__);
+-		dvb_detach(&state->dib7000p_ops);
++		dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ 		return -ENODEV;
+ 	}
+ 	adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x80, &nim7090_dib7000p_config);
+@@ -3109,7 +3109,7 @@ static int tfe7090pvr_frontend0_attach(struct dvb_usb_adapter *adap)
+ 	/* initialize IC 0 */
+ 	if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x20, &tfe7090pvr_dib7000p_config[0]) != 0) {
+ 		err("%s: state->dib7000p_ops.i2c_enumeration failed.  Cannot continue\n", __func__);
+-		dvb_detach(&state->dib7000p_ops);
++		dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ 		return -ENODEV;
+ 	}
+ 
+@@ -3139,7 +3139,7 @@ static int tfe7090pvr_frontend1_attach(struct dvb_usb_adapter *adap)
+ 	i2c = state->dib7000p_ops.get_i2c_master(adap->dev->adapter[0].fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_6_7, 1);
+ 	if (state->dib7000p_ops.i2c_enumeration(i2c, 1, 0x10, &tfe7090pvr_dib7000p_config[1]) != 0) {
+ 		err("%s: state->dib7000p_ops.i2c_enumeration failed.  Cannot continue\n", __func__);
+-		dvb_detach(&state->dib7000p_ops);
++		dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ 		return -ENODEV;
+ 	}
+ 
+@@ -3214,7 +3214,7 @@ static int tfe7790p_frontend_attach(struct dvb_usb_adapter *adap)
+ 				1, 0x10, &tfe7790p_dib7000p_config) != 0) {
+ 		err("%s: state->dib7000p_ops.i2c_enumeration failed.  Cannot continue\n",
+ 				__func__);
+-		dvb_detach(&state->dib7000p_ops);
++		dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ 		return -ENODEV;
+ 	}
+ 	adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap,
+@@ -3309,7 +3309,7 @@ static int stk7070pd_frontend_attach0(struct dvb_usb_adapter *adap)
+ 				     stk7070pd_dib7000p_config) != 0) {
+ 		err("%s: state->dib7000p_ops.i2c_enumeration failed.  Cannot continue\n",
+ 		    __func__);
+-		dvb_detach(&state->dib7000p_ops);
++		dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ 		return -ENODEV;
+ 	}
+ 
+@@ -3384,7 +3384,7 @@ static int novatd_frontend_attach(struct dvb_usb_adapter *adap)
+ 					     stk7070pd_dib7000p_config) != 0) {
+ 			err("%s: state->dib7000p_ops.i2c_enumeration failed.  Cannot continue\n",
+ 			    __func__);
+-			dvb_detach(&state->dib7000p_ops);
++			dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ 			return -ENODEV;
+ 		}
+ 	}
+@@ -3620,7 +3620,7 @@ static int pctv340e_frontend_attach(struct dvb_usb_adapter *adap)
+ 
+ 	if (state->dib7000p_ops.dib7000pc_detection(&adap->dev->i2c_adap) == 0) {
+ 		/* Demodulator not found for some reason? */
+-		dvb_detach(&state->dib7000p_ops);
++		dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ 		return -ENODEV;
+ 	}
+ 
+diff --git a/drivers/platform/x86/peaq-wmi.c b/drivers/platform/x86/peaq-wmi.c
+index 77d1f90b0794..684e9593547a 100644
+--- a/drivers/platform/x86/peaq-wmi.c
++++ b/drivers/platform/x86/peaq-wmi.c
+@@ -8,6 +8,7 @@
+  */
+ 
+ #include <linux/acpi.h>
++#include <linux/dmi.h>
+ #include <linux/input-polldev.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+@@ -64,8 +65,23 @@ static void peaq_wmi_poll(struct input_polled_dev *dev)
+ 	}
+ }
+ 
++/* Some other devices (Shuttle XS35) use the same WMI GUID for other purposes */
++static const struct dmi_system_id peaq_dmi_table[] = {
++	{
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "PEAQ"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "PEAQ PMM C1010 MD99187"),
++		},
++	},
++	{}
++};
++
+ static int __init peaq_wmi_init(void)
+ {
++	/* WMI GUID is not unique, also check for a DMI match */
++	if (!dmi_check_system(peaq_dmi_table))
++		return -ENODEV;
++
+ 	if (!wmi_has_guid(PEAQ_DOLBY_BUTTON_GUID))
+ 		return -ENODEV;
+ 
+@@ -86,6 +102,9 @@ static int __init peaq_wmi_init(void)
+ 
+ static void __exit peaq_wmi_exit(void)
+ {
++	if (!dmi_check_system(peaq_dmi_table))
++		return;
++
+ 	if (!wmi_has_guid(PEAQ_DOLBY_BUTTON_GUID))
+ 		return;
+ 
+diff --git a/drivers/staging/ccree/cc_lli_defs.h b/drivers/staging/ccree/cc_lli_defs.h
+index 851d3907167e..a9c417b07b04 100644
+--- a/drivers/staging/ccree/cc_lli_defs.h
++++ b/drivers/staging/ccree/cc_lli_defs.h
+@@ -59,7 +59,7 @@ static inline void cc_lli_set_addr(u32 *lli_p, dma_addr_t addr)
+ 	lli_p[LLI_WORD0_OFFSET] = (addr & U32_MAX);
+ #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ 	lli_p[LLI_WORD1_OFFSET] &= ~LLI_HADDR_MASK;
+-	lli_p[LLI_WORD1_OFFSET] |= FIELD_PREP(LLI_HADDR_MASK, (addr >> 16));
++	lli_p[LLI_WORD1_OFFSET] |= FIELD_PREP(LLI_HADDR_MASK, (addr >> 32));
+ #endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
+ }
+ 
+diff --git a/drivers/staging/greybus/spilib.c b/drivers/staging/greybus/spilib.c
+index e97b19148497..1e7321a1404c 100644
+--- a/drivers/staging/greybus/spilib.c
++++ b/drivers/staging/greybus/spilib.c
+@@ -544,11 +544,14 @@ int gb_spilib_master_init(struct gb_connection *connection, struct device *dev,
+ 
+ 	return 0;
+ 
+-exit_spi_unregister:
+-	spi_unregister_master(master);
+ exit_spi_put:
+ 	spi_master_put(master);
+ 
++	return ret;
++
++exit_spi_unregister:
++	spi_unregister_master(master);
++
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(gb_spilib_master_init);
+@@ -558,7 +561,6 @@ void gb_spilib_master_exit(struct gb_connection *connection)
+ 	struct spi_master *master = gb_connection_get_data(connection);
+ 
+ 	spi_unregister_master(master);
+-	spi_master_put(master);
+ }
+ EXPORT_SYMBOL_GPL(gb_spilib_master_exit);
+ 
+diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
+index 3fd5f4102b36..afb9dadc1cfe 100644
+--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
++++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
+@@ -259,10 +259,12 @@ static int recvframe_chkmic(struct adapter *adapter,
+ 			}
+ 
+ 			/* icv_len included the mic code */
+-			datalen = precvframe->pkt->len-prxattrib->hdrlen - 8;
++			datalen = precvframe->pkt->len-prxattrib->hdrlen -
++				  prxattrib->iv_len-prxattrib->icv_len-8;
+ 			pframe = precvframe->pkt->data;
+-			payload = pframe+prxattrib->hdrlen;
++			payload = pframe+prxattrib->hdrlen+prxattrib->iv_len;
+ 
++			RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n prxattrib->iv_len=%d prxattrib->icv_len=%d\n", prxattrib->iv_len, prxattrib->icv_len));
+ 			rtw_seccalctkipmic(mickey, pframe, payload, datalen, &miccode[0],
+ 					   (unsigned char)prxattrib->priority); /* care the length of the data */
+ 
+@@ -407,15 +409,9 @@ static struct recv_frame *decryptor(struct adapter *padapter,
+ 		default:
+ 			break;
+ 		}
+-		if (res != _FAIL) {
+-			memmove(precv_frame->pkt->data + precv_frame->attrib.iv_len, precv_frame->pkt->data, precv_frame->attrib.hdrlen);
+-			skb_pull(precv_frame->pkt, precv_frame->attrib.iv_len);
+-			skb_trim(precv_frame->pkt, precv_frame->pkt->len - precv_frame->attrib.icv_len);
+-		}
+ 	} else if (prxattrib->bdecrypted == 1 && prxattrib->encrypt > 0 &&
+-		   (psecuritypriv->busetkipkey == 1 || prxattrib->encrypt != _TKIP_)) {
+-		psecuritypriv->hw_decrypted = true;
+-	}
++		   (psecuritypriv->busetkipkey == 1 || prxattrib->encrypt != _TKIP_))
++			psecuritypriv->hw_decrypted = true;
+ 
+ 	if (res == _FAIL) {
+ 		rtw_free_recvframe(return_packet, &padapter->recvpriv.free_recv_queue);
+@@ -456,7 +452,7 @@ static struct recv_frame *portctrl(struct adapter *adapter,
+ 
+ 	if (auth_alg == 2) {
+ 		/* get ether_type */
+-		ptr = ptr + pfhdr->attrib.hdrlen + LLC_HEADER_SIZE;
++		ptr = ptr + pfhdr->attrib.hdrlen + LLC_HEADER_SIZE + pfhdr->attrib.iv_len;
+ 		memcpy(&be_tmp, ptr, 2);
+ 		ether_type = ntohs(be_tmp);
+ 
+@@ -1138,8 +1134,6 @@ static int validate_recv_data_frame(struct adapter *adapter,
+ 	}
+ 
+ 	if (pattrib->privacy) {
+-		struct sk_buff *skb = precv_frame->pkt;
+-
+ 		RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("validate_recv_data_frame:pattrib->privacy=%x\n", pattrib->privacy));
+ 		RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n ^^^^^^^^^^^IS_MCAST(pattrib->ra(0x%02x))=%d^^^^^^^^^^^^^^^6\n", pattrib->ra[0], IS_MCAST(pattrib->ra)));
+ 
+@@ -1148,13 +1142,6 @@ static int validate_recv_data_frame(struct adapter *adapter,
+ 		RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n pattrib->encrypt=%d\n", pattrib->encrypt));
+ 
+ 		SET_ICE_IV_LEN(pattrib->iv_len, pattrib->icv_len, pattrib->encrypt);
+-
+-		if (pattrib->bdecrypted == 1 && pattrib->encrypt > 0) {
+-			memmove(skb->data + pattrib->iv_len,
+-				skb->data, pattrib->hdrlen);
+-			skb_pull(skb, pattrib->iv_len);
+-			skb_trim(skb, skb->len - pattrib->icv_len);
+-		}
+ 	} else {
+ 		pattrib->encrypt = 0;
+ 		pattrib->iv_len = 0;
+@@ -1274,7 +1261,6 @@ static int validate_recv_frame(struct adapter *adapter,
+ 	 * Hence forward the frame to the monitor anyway to preserve the order
+ 	 * in which frames were received.
+ 	 */
+-
+ 	rtl88eu_mon_recv_hook(adapter->pmondev, precv_frame);
+ 
+ exit:
+@@ -1296,8 +1282,11 @@ static int wlanhdr_to_ethhdr(struct recv_frame *precvframe)
+ 	u8 *ptr = precvframe->pkt->data;
+ 	struct rx_pkt_attrib *pattrib = &precvframe->attrib;
+ 
+-	psnap = (struct ieee80211_snap_hdr *)(ptr+pattrib->hdrlen);
+-	psnap_type = ptr+pattrib->hdrlen + SNAP_SIZE;
++	if (pattrib->encrypt)
++		skb_trim(precvframe->pkt, precvframe->pkt->len - pattrib->icv_len);
++
++	psnap = (struct ieee80211_snap_hdr *)(ptr+pattrib->hdrlen + pattrib->iv_len);
++	psnap_type = ptr+pattrib->hdrlen + pattrib->iv_len+SNAP_SIZE;
+ 	/* convert hdr + possible LLC headers into Ethernet header */
+ 	if ((!memcmp(psnap, rtw_rfc1042_header, SNAP_SIZE) &&
+ 	     (!memcmp(psnap_type, SNAP_ETH_TYPE_IPX, 2) == false) &&
+@@ -1310,9 +1299,12 @@ static int wlanhdr_to_ethhdr(struct recv_frame *precvframe)
+ 		bsnaphdr = false;
+ 	}
+ 
+-	rmv_len = pattrib->hdrlen + (bsnaphdr ? SNAP_SIZE : 0);
++	rmv_len = pattrib->hdrlen + pattrib->iv_len + (bsnaphdr ? SNAP_SIZE : 0);
+ 	len = precvframe->pkt->len - rmv_len;
+ 
++	RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
++		 ("\n===pattrib->hdrlen: %x,  pattrib->iv_len:%x===\n\n", pattrib->hdrlen,  pattrib->iv_len));
++
+ 	memcpy(&be_tmp, ptr+rmv_len, 2);
+ 	eth_type = ntohs(be_tmp); /* pattrib->ether_type */
+ 	pattrib->eth_type = eth_type;
+@@ -1337,6 +1329,7 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter,
+ 					   struct __queue *defrag_q)
+ {
+ 	struct list_head *plist, *phead;
++	u8 wlanhdr_offset;
+ 	u8	curfragnum;
+ 	struct recv_frame *pfhdr, *pnfhdr;
+ 	struct recv_frame *prframe, *pnextrframe;
+@@ -1385,7 +1378,12 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter,
+ 		/* copy the 2nd~n fragment frame's payload to the first fragment */
+ 		/* get the 2nd~last fragment frame's payload */
+ 
+-		skb_pull(pnextrframe->pkt, pnfhdr->attrib.hdrlen);
++		wlanhdr_offset = pnfhdr->attrib.hdrlen + pnfhdr->attrib.iv_len;
++
++		skb_pull(pnextrframe->pkt, wlanhdr_offset);
++
++		/* append  to first fragment frame's tail (if privacy frame, pull the ICV) */
++		skb_trim(prframe->pkt, prframe->pkt->len - pfhdr->attrib.icv_len);
+ 
+ 		/* memcpy */
+ 		memcpy(skb_tail_pointer(pfhdr->pkt), pnfhdr->pkt->data,
+@@ -1393,7 +1391,7 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter,
+ 
+ 		skb_put(prframe->pkt, pnfhdr->pkt->len);
+ 
+-		pfhdr->attrib.icv_len = 0;
++		pfhdr->attrib.icv_len = pnfhdr->attrib.icv_len;
+ 		plist = plist->next;
+ 	}
+ 
+@@ -1519,6 +1517,11 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe)
+ 	nr_subframes = 0;
+ 	pattrib = &prframe->attrib;
+ 
++	skb_pull(prframe->pkt, prframe->attrib.hdrlen);
++
++	if (prframe->attrib.iv_len > 0)
++		skb_pull(prframe->pkt, prframe->attrib.iv_len);
++
+ 	a_len = prframe->pkt->len;
+ 
+ 	pdata = prframe->pkt->data;
+@@ -1887,6 +1890,24 @@ static int process_recv_indicatepkts(struct adapter *padapter,
+ 	return retval;
+ }
+ 
++static int recv_func_prehandle(struct adapter *padapter,
++			       struct recv_frame *rframe)
++{
++	int ret = _SUCCESS;
++	struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
++
++	/* check the frame crtl field and decache */
++	ret = validate_recv_frame(padapter, rframe);
++	if (ret != _SUCCESS) {
++		RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("recv_func: validate_recv_frame fail! drop pkt\n"));
++		rtw_free_recvframe(rframe, pfree_recv_queue);/* free this recv_frame */
++		goto exit;
++	}
++
++exit:
++	return ret;
++}
++
+ static int recv_func_posthandle(struct adapter *padapter,
+ 				struct recv_frame *prframe)
+ {
+@@ -1939,7 +1960,6 @@ static int recv_func(struct adapter *padapter, struct recv_frame *rframe)
+ 	struct rx_pkt_attrib *prxattrib = &rframe->attrib;
+ 	struct security_priv *psecuritypriv = &padapter->securitypriv;
+ 	struct mlme_priv *mlmepriv = &padapter->mlmepriv;
+-	struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
+ 
+ 	/* check if need to handle uc_swdec_pending_queue*/
+ 	if (check_fwstate(mlmepriv, WIFI_STATION_STATE) && psecuritypriv->busetkipkey) {
+@@ -1951,12 +1971,9 @@ static int recv_func(struct adapter *padapter, struct recv_frame *rframe)
+ 		}
+ 	}
+ 
+-	/* check the frame crtl field and decache */
+-	ret = validate_recv_frame(padapter, rframe);
+-	if (ret != _SUCCESS) {
+-		RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("recv_func: validate_recv_frame fail! drop pkt\n"));
+-		rtw_free_recvframe(rframe, pfree_recv_queue);/* free this recv_frame */
+-	} else {
++	ret = recv_func_prehandle(padapter, rframe);
++
++	if (ret == _SUCCESS) {
+ 		/* check if need to enqueue into uc_swdec_pending_queue*/
+ 		if (check_fwstate(mlmepriv, WIFI_STATION_STATE) &&
+ 		    !IS_MCAST(prxattrib->ra) && prxattrib->encrypt > 0 &&
+diff --git a/drivers/staging/rtl8188eu/os_dep/mon.c b/drivers/staging/rtl8188eu/os_dep/mon.c
+index 37fd52d7364f..225c23fc69dc 100644
+--- a/drivers/staging/rtl8188eu/os_dep/mon.c
++++ b/drivers/staging/rtl8188eu/os_dep/mon.c
+@@ -66,34 +66,6 @@ static void mon_recv_decrypted(struct net_device *dev, const u8 *data,
+ 	netif_rx(skb);
+ }
+ 
+-static void mon_recv_decrypted_recv(struct net_device *dev, const u8 *data,
+-				    int data_len)
+-{
+-	struct sk_buff *skb;
+-	struct ieee80211_hdr *hdr;
+-	int hdr_len;
+-
+-	skb = netdev_alloc_skb(dev, data_len);
+-	if (!skb)
+-		return;
+-	memcpy(skb_put(skb, data_len), data, data_len);
+-
+-	/*
+-	 * Frame data is not encrypted. Strip off protection so
+-	 * userspace doesn't think that it is.
+-	 */
+-
+-	hdr = (struct ieee80211_hdr *)skb->data;
+-	hdr_len = ieee80211_hdrlen(hdr->frame_control);
+-
+-	if (ieee80211_has_protected(hdr->frame_control))
+-		hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+-
+-	skb->ip_summed = CHECKSUM_UNNECESSARY;
+-	skb->protocol = eth_type_trans(skb, dev);
+-	netif_rx(skb);
+-}
+-
+ static void mon_recv_encrypted(struct net_device *dev, const u8 *data,
+ 			       int data_len)
+ {
+@@ -110,6 +82,7 @@ static void mon_recv_encrypted(struct net_device *dev, const u8 *data,
+ void rtl88eu_mon_recv_hook(struct net_device *dev, struct recv_frame *frame)
+ {
+ 	struct rx_pkt_attrib *attr;
++	int iv_len, icv_len;
+ 	int data_len;
+ 	u8 *data;
+ 
+@@ -122,8 +95,11 @@ void rtl88eu_mon_recv_hook(struct net_device *dev, struct recv_frame *frame)
+ 	data = frame->pkt->data;
+ 	data_len = frame->pkt->len;
+ 
++	/* Broadcast and multicast frames don't have attr->{iv,icv}_len set */
++	SET_ICE_IV_LEN(iv_len, icv_len, attr->encrypt);
++
+ 	if (attr->bdecrypted)
+-		mon_recv_decrypted_recv(dev, data, data_len);
++		mon_recv_decrypted(dev, data, data_len, iv_len, icv_len);
+ 	else
+ 		mon_recv_encrypted(dev, data, data_len);
+ }
+diff --git a/drivers/staging/sm750fb/ddk750_chip.h b/drivers/staging/sm750fb/ddk750_chip.h
+index 2c7a9b9a7c8a..d9d4c485e54c 100644
+--- a/drivers/staging/sm750fb/ddk750_chip.h
++++ b/drivers/staging/sm750fb/ddk750_chip.h
+@@ -17,7 +17,7 @@ static inline u32 peek32(u32 addr)
+ 	return readl(addr + mmio750);
+ }
+ 
+-static inline void poke32(u32 data, u32 addr)
++static inline void poke32(u32 addr, u32 data)
+ {
+ 	writel(data, addr + mmio750);
+ }
+diff --git a/drivers/staging/vboxvideo/vbox_drv.h b/drivers/staging/vboxvideo/vbox_drv.h
+index 4b9302703b36..eeac4f0cb2c6 100644
+--- a/drivers/staging/vboxvideo/vbox_drv.h
++++ b/drivers/staging/vboxvideo/vbox_drv.h
+@@ -137,8 +137,8 @@ struct vbox_connector {
+ 	char name[32];
+ 	struct vbox_crtc *vbox_crtc;
+ 	struct {
+-		u16 width;
+-		u16 height;
++		u32 width;
++		u32 height;
+ 		bool disconnected;
+ 	} mode_hint;
+ };
+@@ -150,8 +150,8 @@ struct vbox_crtc {
+ 	unsigned int crtc_id;
+ 	u32 fb_offset;
+ 	bool cursor_enabled;
+-	u16 x_hint;
+-	u16 y_hint;
++	u32 x_hint;
++	u32 y_hint;
+ };
+ 
+ struct vbox_encoder {
+diff --git a/drivers/staging/vboxvideo/vbox_irq.c b/drivers/staging/vboxvideo/vbox_irq.c
+index 3ca8bec62ac4..74abdf02d9fd 100644
+--- a/drivers/staging/vboxvideo/vbox_irq.c
++++ b/drivers/staging/vboxvideo/vbox_irq.c
+@@ -150,8 +150,8 @@ static void vbox_update_mode_hints(struct vbox_private *vbox)
+ 
+ 		disconnected = !(hints->enabled);
+ 		crtc_id = vbox_conn->vbox_crtc->crtc_id;
+-		vbox_conn->mode_hint.width = hints->cx & 0x8fff;
+-		vbox_conn->mode_hint.height = hints->cy & 0x8fff;
++		vbox_conn->mode_hint.width = hints->cx;
++		vbox_conn->mode_hint.height = hints->cy;
+ 		vbox_conn->vbox_crtc->x_hint = hints->dx;
+ 		vbox_conn->vbox_crtc->y_hint = hints->dy;
+ 		vbox_conn->mode_hint.disconnected = disconnected;
+diff --git a/drivers/staging/vboxvideo/vbox_mode.c b/drivers/staging/vboxvideo/vbox_mode.c
+index f2b85f3256fa..14b59ab782d0 100644
+--- a/drivers/staging/vboxvideo/vbox_mode.c
++++ b/drivers/staging/vboxvideo/vbox_mode.c
+@@ -560,12 +560,22 @@ static int vbox_get_modes(struct drm_connector *connector)
+ 		++num_modes;
+ 	}
+ 	vbox_set_edid(connector, preferred_width, preferred_height);
+-	drm_object_property_set_value(
+-		&connector->base, vbox->dev->mode_config.suggested_x_property,
+-		vbox_connector->vbox_crtc->x_hint);
+-	drm_object_property_set_value(
+-		&connector->base, vbox->dev->mode_config.suggested_y_property,
+-		vbox_connector->vbox_crtc->y_hint);
++
++	if (vbox_connector->vbox_crtc->x_hint != -1)
++		drm_object_property_set_value(&connector->base,
++			vbox->dev->mode_config.suggested_x_property,
++			vbox_connector->vbox_crtc->x_hint);
++	else
++		drm_object_property_set_value(&connector->base,
++			vbox->dev->mode_config.suggested_x_property, 0);
++
++	if (vbox_connector->vbox_crtc->y_hint != -1)
++		drm_object_property_set_value(&connector->base,
++			vbox->dev->mode_config.suggested_y_property,
++			vbox_connector->vbox_crtc->y_hint);
++	else
++		drm_object_property_set_value(&connector->base,
++			vbox->dev->mode_config.suggested_y_property, 0);
+ 
+ 	return num_modes;
+ }
+@@ -650,9 +660,9 @@ static int vbox_connector_init(struct drm_device *dev,
+ 
+ 	drm_mode_create_suggested_offset_properties(dev);
+ 	drm_object_attach_property(&connector->base,
+-				   dev->mode_config.suggested_x_property, -1);
++				   dev->mode_config.suggested_x_property, 0);
+ 	drm_object_attach_property(&connector->base,
+-				   dev->mode_config.suggested_y_property, -1);
++				   dev->mode_config.suggested_y_property, 0);
+ 	drm_connector_register(connector);
+ 
+ 	drm_mode_connector_attach_encoder(connector, encoder);
+diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c
+index 9addef1f1e12..f49dfa82f1b8 100644
+--- a/drivers/staging/wilc1000/wilc_wlan.c
++++ b/drivers/staging/wilc1000/wilc_wlan.c
+@@ -714,7 +714,7 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
+ 					char *bssid = ((struct tx_complete_data *)(tqe->priv))->bssid;
+ 
+ 					buffer_offset = ETH_ETHERNET_HDR_OFFSET;
+-					memcpy(&txb[offset + 4], bssid, 6);
++					memcpy(&txb[offset + 8], bssid, 6);
+ 				} else {
+ 					buffer_offset = HOST_HDR_OFFSET;
+ 				}
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index 066b58cb6c98..54d7134ea991 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -1833,6 +1833,18 @@ static int proc_unlinkurb(struct usb_dev_state *ps, void __user *arg)
+ 	return 0;
+ }
+ 
++static void compute_isochronous_actual_length(struct urb *urb)
++{
++	unsigned int i;
++
++	if (urb->number_of_packets > 0) {
++		urb->actual_length = 0;
++		for (i = 0; i < urb->number_of_packets; i++)
++			urb->actual_length +=
++					urb->iso_frame_desc[i].actual_length;
++	}
++}
++
+ static int processcompl(struct async *as, void __user * __user *arg)
+ {
+ 	struct urb *urb = as->urb;
+@@ -1840,6 +1852,7 @@ static int processcompl(struct async *as, void __user * __user *arg)
+ 	void __user *addr = as->userurb;
+ 	unsigned int i;
+ 
++	compute_isochronous_actual_length(urb);
+ 	if (as->userbuffer && urb->actual_length) {
+ 		if (copy_urb_data_to_user(as->userbuffer, urb))
+ 			goto err_out;
+@@ -2008,6 +2021,7 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
+ 	void __user *addr = as->userurb;
+ 	unsigned int i;
+ 
++	compute_isochronous_actual_length(urb);
+ 	if (as->userbuffer && urb->actual_length) {
+ 		if (copy_urb_data_to_user(as->userbuffer, urb))
+ 			return -EFAULT;
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index a6aaf2f193a4..37c418e581fb 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -221,6 +221,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* Corsair Strafe RGB */
+ 	{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
+ 
++	/* Corsair K70 LUX */
++	{ USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT },
++
+ 	/* MIDI keyboard WORLDE MINI */
+ 	{ USB_DEVICE(0x1c75, 0x0204), .driver_info =
+ 			USB_QUIRK_CONFIG_INTF_STRINGS },
+diff --git a/drivers/usb/early/xhci-dbc.h b/drivers/usb/early/xhci-dbc.h
+index 2df0f6e613fe..a516cab0bf4a 100644
+--- a/drivers/usb/early/xhci-dbc.h
++++ b/drivers/usb/early/xhci-dbc.h
+@@ -90,8 +90,8 @@ struct xdbc_context {
+ 
+ #define XDBC_INFO_CONTEXT_SIZE		48
+ #define XDBC_MAX_STRING_LENGTH		64
+-#define XDBC_STRING_MANUFACTURER	"Linux"
+-#define XDBC_STRING_PRODUCT		"Remote GDB"
++#define XDBC_STRING_MANUFACTURER	"Linux Foundation"
++#define XDBC_STRING_PRODUCT		"Linux USB GDB Target"
+ #define XDBC_STRING_SERIAL		"0001"
+ 
+ struct xdbc_strings {
+@@ -103,7 +103,7 @@ struct xdbc_strings {
+ 
+ #define XDBC_PROTOCOL		1	/* GNU Remote Debug Command Set */
+ #define XDBC_VENDOR_ID		0x1d6b	/* Linux Foundation 0x1d6b */
+-#define XDBC_PRODUCT_ID		0x0004	/* __le16 idProduct; device 0004 */
++#define XDBC_PRODUCT_ID		0x0011	/* __le16 idProduct; device 0011 */
+ #define XDBC_DEVICE_REV		0x0010	/* 0.10 */
+ 
+ /*
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index d21874b35cf6..6121ab4b29bb 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -3669,6 +3669,7 @@ static void ffs_closed(struct ffs_data *ffs)
+ 		goto done;
+ 
+ 	ffs_obj->desc_ready = false;
++	ffs_obj->ffs_data = NULL;
+ 
+ 	if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) &&
+ 	    ffs_obj->ffs_closed_callback)
+diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
+index b2f2e87aed94..91e7e3a166a5 100644
+--- a/drivers/usb/serial/garmin_gps.c
++++ b/drivers/usb/serial/garmin_gps.c
+@@ -138,6 +138,7 @@ struct garmin_data {
+ 	__u8   privpkt[4*6];
+ 	spinlock_t lock;
+ 	struct list_head pktlist;
++	struct usb_anchor write_urbs;
+ };
+ 
+ 
+@@ -905,13 +906,19 @@ static int garmin_init_session(struct usb_serial_port *port)
+ 					sizeof(GARMIN_START_SESSION_REQ), 0);
+ 
+ 			if (status < 0)
+-				break;
++				goto err_kill_urbs;
+ 		}
+ 
+ 		if (status > 0)
+ 			status = 0;
+ 	}
+ 
++	return status;
++
++err_kill_urbs:
++	usb_kill_anchored_urbs(&garmin_data_p->write_urbs);
++	usb_kill_urb(port->interrupt_in_urb);
++
+ 	return status;
+ }
+ 
+@@ -930,7 +937,6 @@ static int garmin_open(struct tty_struct *tty, struct usb_serial_port *port)
+ 	spin_unlock_irqrestore(&garmin_data_p->lock, flags);
+ 
+ 	/* shutdown any bulk reads that might be going on */
+-	usb_kill_urb(port->write_urb);
+ 	usb_kill_urb(port->read_urb);
+ 
+ 	if (garmin_data_p->state == STATE_RESET)
+@@ -953,7 +959,7 @@ static void garmin_close(struct usb_serial_port *port)
+ 
+ 	/* shutdown our urbs */
+ 	usb_kill_urb(port->read_urb);
+-	usb_kill_urb(port->write_urb);
++	usb_kill_anchored_urbs(&garmin_data_p->write_urbs);
+ 
+ 	/* keep reset state so we know that we must start a new session */
+ 	if (garmin_data_p->state != STATE_RESET)
+@@ -1037,12 +1043,14 @@ static int garmin_write_bulk(struct usb_serial_port *port,
+ 	}
+ 
+ 	/* send it down the pipe */
++	usb_anchor_urb(urb, &garmin_data_p->write_urbs);
+ 	status = usb_submit_urb(urb, GFP_ATOMIC);
+ 	if (status) {
+ 		dev_err(&port->dev,
+ 		   "%s - usb_submit_urb(write bulk) failed with status = %d\n",
+ 				__func__, status);
+ 		count = status;
++		usb_unanchor_urb(urb);
+ 		kfree(buffer);
+ 	}
+ 
+@@ -1401,9 +1409,16 @@ static int garmin_port_probe(struct usb_serial_port *port)
+ 	garmin_data_p->state = 0;
+ 	garmin_data_p->flags = 0;
+ 	garmin_data_p->count = 0;
++	init_usb_anchor(&garmin_data_p->write_urbs);
+ 	usb_set_serial_port_data(port, garmin_data_p);
+ 
+ 	status = garmin_init_session(port);
++	if (status)
++		goto err_free;
++
++	return 0;
++err_free:
++	kfree(garmin_data_p);
+ 
+ 	return status;
+ }
+@@ -1413,6 +1428,7 @@ static int garmin_port_remove(struct usb_serial_port *port)
+ {
+ 	struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
+ 
++	usb_kill_anchored_urbs(&garmin_data_p->write_urbs);
+ 	usb_kill_urb(port->interrupt_in_urb);
+ 	del_timer_sync(&garmin_data_p->timer);
+ 	kfree(garmin_data_p);
+diff --git a/drivers/usb/serial/metro-usb.c b/drivers/usb/serial/metro-usb.c
+index 14511d6a7d44..3950d44b80f1 100644
+--- a/drivers/usb/serial/metro-usb.c
++++ b/drivers/usb/serial/metro-usb.c
+@@ -189,7 +189,7 @@ static int metrousb_open(struct tty_struct *tty, struct usb_serial_port *port)
+ 		dev_err(&port->dev,
+ 			"%s - failed submitting interrupt in urb, error code=%d\n",
+ 			__func__, result);
+-		goto exit;
++		return result;
+ 	}
+ 
+ 	/* Send activate cmd to device */
+@@ -198,9 +198,14 @@ static int metrousb_open(struct tty_struct *tty, struct usb_serial_port *port)
+ 		dev_err(&port->dev,
+ 			"%s - failed to configure device, error code=%d\n",
+ 			__func__, result);
+-		goto exit;
++		goto err_kill_urb;
+ 	}
+-exit:
++
++	return 0;
++
++err_kill_urb:
++	usb_kill_urb(port->interrupt_in_urb);
++
+ 	return result;
+ }
+ 
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index eb9928963a53..9f9d3a904464 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -148,6 +148,7 @@ static const struct usb_device_id id_table[] = {
+ 	{DEVICE_SWI(0x1199, 0x68a2)},	/* Sierra Wireless MC7710 */
+ 	{DEVICE_SWI(0x1199, 0x68c0)},	/* Sierra Wireless MC7304/MC7354 */
+ 	{DEVICE_SWI(0x1199, 0x901c)},	/* Sierra Wireless EM7700 */
++	{DEVICE_SWI(0x1199, 0x901e)},	/* Sierra Wireless EM7355 QDL */
+ 	{DEVICE_SWI(0x1199, 0x901f)},	/* Sierra Wireless EM7355 */
+ 	{DEVICE_SWI(0x1199, 0x9040)},	/* Sierra Wireless Modem */
+ 	{DEVICE_SWI(0x1199, 0x9041)},	/* Sierra Wireless MC7305/MC7355 */
+diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
+index 12f4c5a91e62..48f285a1ad00 100644
+--- a/drivers/usb/serial/usb_debug.c
++++ b/drivers/usb/serial/usb_debug.c
+@@ -34,13 +34,13 @@ static const struct usb_device_id id_table[] = {
+ };
+ 
+ static const struct usb_device_id dbc_id_table[] = {
+-	{ USB_DEVICE(0x1d6b, 0x0004) },
++	{ USB_DEVICE(0x1d6b, 0x0011) },
+ 	{ },
+ };
+ 
+ static const struct usb_device_id id_table_combined[] = {
+ 	{ USB_DEVICE(0x0525, 0x127a) },
+-	{ USB_DEVICE(0x1d6b, 0x0004) },
++	{ USB_DEVICE(0x1d6b, 0x0011) },
+ 	{ },
+ };
+ MODULE_DEVICE_TABLE(usb, id_table_combined);
+diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
+index 3237bc010e1c..3c54d5c40952 100644
+--- a/tools/testing/selftests/x86/protection_keys.c
++++ b/tools/testing/selftests/x86/protection_keys.c
+@@ -188,17 +188,29 @@ void lots_o_noops_around_write(int *write_to_me)
+ #define u64 uint64_t
+ 
+ #ifdef __i386__
+-#define SYS_mprotect_key 380
+-#define SYS_pkey_alloc	 381
+-#define SYS_pkey_free	 382
++
++#ifndef SYS_mprotect_key
++# define SYS_mprotect_key 380
++#endif
++#ifndef SYS_pkey_alloc
++# define SYS_pkey_alloc	 381
++# define SYS_pkey_free	 382
++#endif
+ #define REG_IP_IDX REG_EIP
+ #define si_pkey_offset 0x14
++
+ #else
+-#define SYS_mprotect_key 329
+-#define SYS_pkey_alloc	 330
+-#define SYS_pkey_free	 331
++
++#ifndef SYS_mprotect_key
++# define SYS_mprotect_key 329
++#endif
++#ifndef SYS_pkey_alloc
++# define SYS_pkey_alloc	 330
++# define SYS_pkey_free	 331
++#endif
+ #define REG_IP_IDX REG_RIP
+ #define si_pkey_offset 0x20
++
+ #endif
+ 
+ void dump_mem(void *dumpme, int len_bytes)


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.13 commit in: /
@ 2017-11-15 11:17 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2017-11-15 11:17 UTC (permalink / raw
  To: gentoo-commits

commit:     ed6af92327ec6ed1cc218729516e64ff22a09fc6
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Nov 15 11:17:38 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Nov 15 11:17:38 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ed6af923

Linux patch 4.13.13

 0000_README              |    4 +
 1012_linux-4.13.13.patch | 1467 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1471 insertions(+)

diff --git a/0000_README b/0000_README
index 878d286..5edbdf0 100644
--- a/0000_README
+++ b/0000_README
@@ -91,6 +91,10 @@ Patch:  1011_linux-4.13.12.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.13.12
 
+Patch:  1012_linux-4.13.13.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.13.13
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1012_linux-4.13.13.patch b/1012_linux-4.13.13.patch
new file mode 100644
index 0000000..dce5871
--- /dev/null
+++ b/1012_linux-4.13.13.patch
@@ -0,0 +1,1467 @@
+diff --git a/Makefile b/Makefile
+index a7c847f495b0..1608a9b71381 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 13
+-SUBLEVEL = 12
++SUBLEVEL = 13
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index 948c648fea00..0fcd82f01388 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -154,30 +154,26 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
+ 	set_fs(fs);
+ }
+ 
+-static void dump_instr(const char *lvl, struct pt_regs *regs)
++static void __dump_instr(const char *lvl, struct pt_regs *regs)
+ {
+ 	unsigned long addr = instruction_pointer(regs);
+ 	const int thumb = thumb_mode(regs);
+ 	const int width = thumb ? 4 : 8;
+-	mm_segment_t fs;
+ 	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
+ 	int i;
+ 
+ 	/*
+-	 * We need to switch to kernel mode so that we can use __get_user
+-	 * to safely read from kernel space.  Note that we now dump the
+-	 * code first, just in case the backtrace kills us.
++	 * Note that we now dump the code first, just in case the backtrace
++	 * kills us.
+ 	 */
+-	fs = get_fs();
+-	set_fs(KERNEL_DS);
+ 
+ 	for (i = -4; i < 1 + !!thumb; i++) {
+ 		unsigned int val, bad;
+ 
+ 		if (thumb)
+-			bad = __get_user(val, &((u16 *)addr)[i]);
++			bad = get_user(val, &((u16 *)addr)[i]);
+ 		else
+-			bad = __get_user(val, &((u32 *)addr)[i]);
++			bad = get_user(val, &((u32 *)addr)[i]);
+ 
+ 		if (!bad)
+ 			p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
+@@ -188,8 +184,20 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
+ 		}
+ 	}
+ 	printk("%sCode: %s\n", lvl, str);
++}
+ 
+-	set_fs(fs);
++static void dump_instr(const char *lvl, struct pt_regs *regs)
++{
++	mm_segment_t fs;
++
++	if (!user_mode(regs)) {
++		fs = get_fs();
++		set_fs(KERNEL_DS);
++		__dump_instr(lvl, regs);
++		set_fs(fs);
++	} else {
++		__dump_instr(lvl, regs);
++	}
+ }
+ 
+ #ifdef CONFIG_ARM_UNWIND
+diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
+index df7acea3747a..4674f1efbe7a 100644
+--- a/arch/mips/ar7/platform.c
++++ b/arch/mips/ar7/platform.c
+@@ -575,6 +575,7 @@ static int __init ar7_register_uarts(void)
+ 	uart_port.type		= PORT_AR7;
+ 	uart_port.uartclk	= clk_get_rate(bus_clk) / 2;
+ 	uart_port.iotype	= UPIO_MEM32;
++	uart_port.flags		= UPF_FIXED_TYPE;
+ 	uart_port.regshift	= 2;
+ 
+ 	uart_port.line		= 0;
+@@ -653,6 +654,10 @@ static int __init ar7_register_devices(void)
+ 	u32 val;
+ 	int res;
+ 
++	res = ar7_gpio_init();
++	if (res)
++		pr_warn("unable to register gpios: %d\n", res);
++
+ 	res = ar7_register_uarts();
+ 	if (res)
+ 		pr_err("unable to setup uart(s): %d\n", res);
+diff --git a/arch/mips/ar7/prom.c b/arch/mips/ar7/prom.c
+index 4fd83336131a..dd53987a690f 100644
+--- a/arch/mips/ar7/prom.c
++++ b/arch/mips/ar7/prom.c
+@@ -246,8 +246,6 @@ void __init prom_init(void)
+ 	ar7_init_cmdline(fw_arg0, (char **)fw_arg1);
+ 	ar7_init_env((struct env_var *)fw_arg2);
+ 	console_config();
+-
+-	ar7_gpio_init();
+ }
+ 
+ #define PORT(offset) (KSEG1ADDR(AR7_REGS_UART0 + (offset * 4)))
+diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
+index cfdbab015769..163317fd3d7e 100644
+--- a/arch/mips/include/asm/mips-cm.h
++++ b/arch/mips/include/asm/mips-cm.h
+@@ -240,8 +240,8 @@ BUILD_CM_Cx_R_(tcid_8_priority,	0x80)
+ #define CM_GCR_BASE_GCRBASE_MSK			(_ULCAST_(0x1ffff) << 15)
+ #define CM_GCR_BASE_CMDEFTGT_SHF		0
+ #define CM_GCR_BASE_CMDEFTGT_MSK		(_ULCAST_(0x3) << 0)
+-#define  CM_GCR_BASE_CMDEFTGT_DISABLED		0
+-#define  CM_GCR_BASE_CMDEFTGT_MEM		1
++#define  CM_GCR_BASE_CMDEFTGT_MEM		0
++#define  CM_GCR_BASE_CMDEFTGT_RESERVED		1
+ #define  CM_GCR_BASE_CMDEFTGT_IOCU0		2
+ #define  CM_GCR_BASE_CMDEFTGT_IOCU1		3
+ 
+diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
+index 1b070a76fcdd..5e0d87f4958f 100644
+--- a/arch/mips/kernel/smp-bmips.c
++++ b/arch/mips/kernel/smp-bmips.c
+@@ -589,11 +589,11 @@ void __init bmips_cpu_setup(void)
+ 
+ 		/* Flush and enable RAC */
+ 		cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
+-		__raw_writel(cfg | 0x100, BMIPS_RAC_CONFIG);
++		__raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG);
+ 		__raw_readl(cbr + BMIPS_RAC_CONFIG);
+ 
+ 		cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
+-		__raw_writel(cfg | 0xf, BMIPS_RAC_CONFIG);
++		__raw_writel(cfg | 0xf, cbr + BMIPS_RAC_CONFIG);
+ 		__raw_readl(cbr + BMIPS_RAC_CONFIG);
+ 
+ 		cfg = __raw_readl(cbr + BMIPS_RAC_ADDRESS_RANGE);
+diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
+index b42812e014c0..1fcc30ff9569 100644
+--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
++++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
+@@ -645,6 +645,16 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ 		hnow_v = hpte_new_to_old_v(hnow_v, hnow_r);
+ 		hnow_r = hpte_new_to_old_r(hnow_r);
+ 	}
++
++	/*
++	 * If the HPT is being resized, don't update the HPTE,
++	 * instead let the guest retry after the resize operation is complete.
++	 * The synchronization for hpte_setup_done test vs. set is provided
++	 * by the HPTE lock.
++	 */
++	if (!kvm->arch.hpte_setup_done)
++		goto out_unlock;
++
+ 	if ((hnow_v & ~HPTE_V_HVLOCK) != hpte[0] || hnow_r != hpte[1] ||
+ 	    rev->guest_rpte != hpte[2])
+ 		/* HPTE has been changed under us; let the guest retry */
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 9ecd9aea0b54..c059541743f0 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -2688,11 +2688,14 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
+ 	 * Hard-disable interrupts, and check resched flag and signals.
+ 	 * If we need to reschedule or deliver a signal, clean up
+ 	 * and return without going into the guest(s).
++	 * If the hpte_setup_done flag has been cleared, don't go into the
++	 * guest because that means a HPT resize operation is in progress.
+ 	 */
+ 	local_irq_disable();
+ 	hard_irq_disable();
+ 	if (lazy_irq_pending() || need_resched() ||
+-	    recheck_signals(&core_info)) {
++	    recheck_signals(&core_info) ||
++	    (!kvm_is_radix(vc->kvm) && !vc->kvm->arch.hpte_setup_done)) {
+ 		local_irq_enable();
+ 		vc->vcore_state = VCORE_INACTIVE;
+ 		/* Unlock all except the primary vcore */
+@@ -3061,7 +3064,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
+ 
+ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+ {
+-	int n_ceded, i;
++	int n_ceded, i, r;
+ 	struct kvmppc_vcore *vc;
+ 	struct kvm_vcpu *v;
+ 
+@@ -3115,6 +3118,20 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+ 
+ 	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
+ 	       !signal_pending(current)) {
++		/* See if the HPT and VRMA are ready to go */
++		if (!kvm_is_radix(vcpu->kvm) &&
++		    !vcpu->kvm->arch.hpte_setup_done) {
++			spin_unlock(&vc->lock);
++			r = kvmppc_hv_setup_htab_rma(vcpu);
++			spin_lock(&vc->lock);
++			if (r) {
++				kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
++				kvm_run->fail_entry.hardware_entry_failure_reason = 0;
++				vcpu->arch.ret = r;
++				break;
++			}
++		}
++
+ 		if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
+ 			kvmppc_vcore_end_preempt(vc);
+ 
+@@ -3232,13 +3249,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
+ 	/* Order vcpus_running vs. hpte_setup_done, see kvmppc_alloc_reset_hpt */
+ 	smp_mb();
+ 
+-	/* On the first time here, set up HTAB and VRMA */
+-	if (!kvm_is_radix(vcpu->kvm) && !vcpu->kvm->arch.hpte_setup_done) {
+-		r = kvmppc_hv_setup_htab_rma(vcpu);
+-		if (r)
+-			goto out;
+-	}
+-
+ 	flush_all_to_thread(current);
+ 
+ 	/* Save userspace EBB and other register values */
+@@ -3286,7 +3296,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
+ 	}
+ 	mtspr(SPRN_VRSAVE, user_vrsave);
+ 
+- out:
+ 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
+ 	atomic_dec(&vcpu->kvm->arch.vcpus_running);
+ 	return r;
+diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
+index 93b945597ecf..7cfba738f104 100644
+--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
++++ b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
+@@ -157,8 +157,8 @@ LABEL skip_ %I
+ .endr
+ 
+ 	# Find min length
+-	vmovdqa _lens+0*16(state), %xmm0
+-	vmovdqa _lens+1*16(state), %xmm1
++	vmovdqu _lens+0*16(state), %xmm0
++	vmovdqu _lens+1*16(state), %xmm1
+ 
+ 	vpminud %xmm1, %xmm0, %xmm2     # xmm2 has {D,C,B,A}
+ 	vpalignr $8, %xmm2, %xmm3, %xmm3   # xmm3 has {x,x,D,C}
+@@ -178,8 +178,8 @@ LABEL skip_ %I
+ 	vpsubd  %xmm2, %xmm0, %xmm0
+ 	vpsubd  %xmm2, %xmm1, %xmm1
+ 
+-	vmovdqa %xmm0, _lens+0*16(state)
+-	vmovdqa %xmm1, _lens+1*16(state)
++	vmovdqu %xmm0, _lens+0*16(state)
++	vmovdqu %xmm1, _lens+1*16(state)
+ 
+ 	# "state" and "args" are the same address, arg1
+ 	# len is arg2
+@@ -235,8 +235,8 @@ ENTRY(sha1_mb_mgr_get_comp_job_avx2)
+ 	jc      .return_null
+ 
+ 	# Find min length
+-	vmovdqa _lens(state), %xmm0
+-	vmovdqa _lens+1*16(state), %xmm1
++	vmovdqu _lens(state), %xmm0
++	vmovdqu _lens+1*16(state), %xmm1
+ 
+ 	vpminud %xmm1, %xmm0, %xmm2        # xmm2 has {D,C,B,A}
+ 	vpalignr $8, %xmm2, %xmm3, %xmm3   # xmm3 has {x,x,D,C}
+diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
+index 8fe6338bcc84..16c4ccb1f154 100644
+--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
++++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
+@@ -155,8 +155,8 @@ LABEL skip_ %I
+ .endr
+ 
+ 	# Find min length
+-	vmovdqa _lens+0*16(state), %xmm0
+-	vmovdqa _lens+1*16(state), %xmm1
++	vmovdqu _lens+0*16(state), %xmm0
++	vmovdqu _lens+1*16(state), %xmm1
+ 
+ 	vpminud %xmm1, %xmm0, %xmm2		# xmm2 has {D,C,B,A}
+ 	vpalignr $8, %xmm2, %xmm3, %xmm3	# xmm3 has {x,x,D,C}
+@@ -176,8 +176,8 @@ LABEL skip_ %I
+ 	vpsubd	%xmm2, %xmm0, %xmm0
+ 	vpsubd	%xmm2, %xmm1, %xmm1
+ 
+-	vmovdqa	%xmm0, _lens+0*16(state)
+-	vmovdqa	%xmm1, _lens+1*16(state)
++	vmovdqu	%xmm0, _lens+0*16(state)
++	vmovdqu	%xmm1, _lens+1*16(state)
+ 
+ 	# "state" and "args" are the same address, arg1
+ 	# len is arg2
+@@ -234,8 +234,8 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2)
+ 	jc	.return_null
+ 
+ 	# Find min length
+-	vmovdqa	_lens(state), %xmm0
+-	vmovdqa	_lens+1*16(state), %xmm1
++	vmovdqu	_lens(state), %xmm0
++	vmovdqu	_lens+1*16(state), %xmm1
+ 
+ 	vpminud	%xmm1, %xmm0, %xmm2		# xmm2 has {D,C,B,A}
+ 	vpalignr $8, %xmm2, %xmm3, %xmm3	# xmm3 has {x,x,D,C}
+diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
+index 836877e2da22..cdf82492b770 100644
+--- a/arch/x86/kernel/cpu/Makefile
++++ b/arch/x86/kernel/cpu/Makefile
+@@ -21,7 +21,7 @@ obj-y			+= common.o
+ obj-y			+= rdrand.o
+ obj-y			+= match.o
+ obj-y			+= bugs.o
+-obj-y			+= aperfmperf.o
++obj-$(CONFIG_CPU_FREQ)	+= aperfmperf.o
+ 
+ obj-$(CONFIG_PROC_FS)	+= proc.o
+ obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
+diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
+index 957813e0180d..0ee83321a313 100644
+--- a/arch/x86/kernel/cpu/aperfmperf.c
++++ b/arch/x86/kernel/cpu/aperfmperf.c
+@@ -42,6 +42,10 @@ static void aperfmperf_snapshot_khz(void *dummy)
+ 	s64 time_delta = ktime_ms_delta(now, s->time);
+ 	unsigned long flags;
+ 
++	/* Don't bother re-computing within the cache threshold time. */
++	if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
++		return;
++
+ 	local_irq_save(flags);
+ 	rdmsrl(MSR_IA32_APERF, aperf);
+ 	rdmsrl(MSR_IA32_MPERF, mperf);
+@@ -70,7 +74,6 @@ static void aperfmperf_snapshot_khz(void *dummy)
+ 
+ unsigned int arch_freq_get_on_cpu(int cpu)
+ {
+-	s64 time_delta;
+ 	unsigned int khz;
+ 
+ 	if (!cpu_khz)
+@@ -79,12 +82,6 @@ unsigned int arch_freq_get_on_cpu(int cpu)
+ 	if (!static_cpu_has(X86_FEATURE_APERFMPERF))
+ 		return 0;
+ 
+-	/* Don't bother re-computing within the cache threshold time. */
+-	time_delta = ktime_ms_delta(ktime_get(), per_cpu(samples.time, cpu));
+-	khz = per_cpu(samples.khz, cpu);
+-	if (khz && time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
+-		return khz;
+-
+ 	smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);
+ 	khz = per_cpu(samples.khz, cpu);
+ 	if (khz)
+diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
+index 510e69596278..6df621ae62a7 100644
+--- a/arch/x86/kernel/cpu/proc.c
++++ b/arch/x86/kernel/cpu/proc.c
+@@ -77,10 +77,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
+ 		seq_printf(m, "microcode\t: 0x%x\n", c->microcode);
+ 
+ 	if (cpu_has(c, X86_FEATURE_TSC)) {
+-		unsigned int freq = arch_freq_get_on_cpu(cpu);
++		unsigned int freq = cpufreq_quick_get(cpu);
+ 
+-		if (!freq)
+-			freq = cpufreq_quick_get(cpu);
+ 		if (!freq)
+ 			freq = cpu_khz;
+ 		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 54b9e89d4d6b..893fd8c849e2 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -193,6 +193,12 @@ static void smp_callin(void)
+ 	 */
+ 	smp_store_cpu_info(cpuid);
+ 
++	/*
++	 * The topology information must be up to date before
++	 * calibrate_delay() and notify_cpu_starting().
++	 */
++	set_cpu_sibling_map(raw_smp_processor_id());
++
+ 	/*
+ 	 * Get our bogomips.
+ 	 * Update loops_per_jiffy in cpu_data. Previous call to
+@@ -203,11 +209,6 @@ static void smp_callin(void)
+ 	cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
+ 	pr_debug("Stack at about %p\n", &cpuid);
+ 
+-	/*
+-	 * This must be done before setting cpu_online_mask
+-	 * or calling notify_cpu_starting.
+-	 */
+-	set_cpu_sibling_map(raw_smp_processor_id());
+ 	wmb();
+ 
+ 	notify_cpu_starting(cpuid);
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index bf54309b85da..b2157d4a5338 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -221,9 +221,6 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
+ 		if (fixup_exception(regs, trapnr))
+ 			return 0;
+ 
+-		if (fixup_bug(regs, trapnr))
+-			return 0;
+-
+ 		tsk->thread.error_code = error_code;
+ 		tsk->thread.trap_nr = trapnr;
+ 		die(str, regs, error_code);
+@@ -304,6 +301,13 @@ static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
+ 
+ 	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
+ 
++	/*
++	 * WARN*()s end up here; fix them up before we call the
++	 * notifier chain.
++	 */
++	if (!user_mode(regs) && fixup_bug(regs, trapnr))
++		return;
++
+ 	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
+ 			NOTIFY_STOP) {
+ 		cond_local_irq_enable(regs);
+diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
+index 796d96bb0821..ad2b925a808e 100644
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -1346,12 +1346,10 @@ void __init tsc_init(void)
+ unsigned long calibrate_delay_is_known(void)
+ {
+ 	int sibling, cpu = smp_processor_id();
+-	struct cpumask *mask = topology_core_cpumask(cpu);
++	int constant_tsc = cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC);
++	const struct cpumask *mask = topology_core_cpumask(cpu);
+ 
+-	if (!tsc_disabled && !cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC))
+-		return 0;
+-
+-	if (!mask)
++	if (tsc_disabled || !constant_tsc || !mask)
+ 		return 0;
+ 
+ 	sibling = cpumask_any_but(mask, cpu);
+diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
+index 350f7096baac..7913b6921959 100644
+--- a/arch/x86/oprofile/op_model_ppro.c
++++ b/arch/x86/oprofile/op_model_ppro.c
+@@ -212,8 +212,8 @@ static void arch_perfmon_setup_counters(void)
+ 	eax.full = cpuid_eax(0xa);
+ 
+ 	/* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */
+-	if (eax.split.version_id == 0 && __this_cpu_read(cpu_info.x86) == 6 &&
+-		__this_cpu_read(cpu_info.x86_model) == 15) {
++	if (eax.split.version_id == 0 && boot_cpu_data.x86 == 6 &&
++	    boot_cpu_data.x86_model == 15) {
+ 		eax.split.version_id = 2;
+ 		eax.split.num_counters = 2;
+ 		eax.split.bit_width = 40;
+diff --git a/crypto/ccm.c b/crypto/ccm.c
+index 1ce37ae0ce56..0a083342ec8c 100644
+--- a/crypto/ccm.c
++++ b/crypto/ccm.c
+@@ -363,7 +363,7 @@ static int crypto_ccm_decrypt(struct aead_request *req)
+ 	unsigned int cryptlen = req->cryptlen;
+ 	u8 *authtag = pctx->auth_tag;
+ 	u8 *odata = pctx->odata;
+-	u8 *iv = req->iv;
++	u8 *iv = pctx->idata;
+ 	int err;
+ 
+ 	cryptlen -= authsize;
+@@ -379,6 +379,8 @@ static int crypto_ccm_decrypt(struct aead_request *req)
+ 	if (req->src != req->dst)
+ 		dst = pctx->dst;
+ 
++	memcpy(iv, req->iv, 16);
++
+ 	skcipher_request_set_tfm(skreq, ctx->ctr);
+ 	skcipher_request_set_callback(skreq, pctx->flags,
+ 				      crypto_ccm_decrypt_done, req);
+diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
+index 9c941947a063..3a3cb8624f41 100644
+--- a/drivers/acpi/acpica/evgpeblk.c
++++ b/drivers/acpi/acpica/evgpeblk.c
+@@ -440,9 +440,11 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+ 			     void *ignored)
+ {
+ 	acpi_status status;
++	acpi_event_status event_status;
+ 	struct acpi_gpe_event_info *gpe_event_info;
+ 	u32 gpe_enabled_count;
+ 	u32 gpe_index;
++	u32 gpe_number;
+ 	u32 i;
+ 	u32 j;
+ 
+@@ -470,30 +472,40 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+ 
+ 			gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
+ 			gpe_event_info = &gpe_block->event_info[gpe_index];
++			gpe_number = gpe_block->block_base_number + gpe_index;
+ 
+ 			/*
+ 			 * Ignore GPEs that have no corresponding _Lxx/_Exx method
+-			 * and GPEs that are used to wake the system
++			 * and GPEs that are used for wakeup
+ 			 */
+-			if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
+-			     ACPI_GPE_DISPATCH_NONE)
+-			    || (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
+-				ACPI_GPE_DISPATCH_HANDLER)
+-			    || (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
+-				ACPI_GPE_DISPATCH_RAW_HANDLER)
++			if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
++			     ACPI_GPE_DISPATCH_METHOD)
+ 			    || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
+ 				continue;
+ 			}
+ 
++			event_status = 0;
++			(void)acpi_hw_get_gpe_status(gpe_event_info,
++						     &event_status);
++
+ 			status = acpi_ev_add_gpe_reference(gpe_event_info);
+ 			if (ACPI_FAILURE(status)) {
+ 				ACPI_EXCEPTION((AE_INFO, status,
+ 					"Could not enable GPE 0x%02X",
+-					gpe_index +
+-					gpe_block->block_base_number));
++					gpe_number));
+ 				continue;
+ 			}
+ 
++			gpe_event_info->flags |= ACPI_GPE_AUTO_ENABLED;
++
++			if (event_status & ACPI_EVENT_FLAG_STATUS_SET) {
++				ACPI_INFO(("GPE 0x%02X active on init",
++					   gpe_number));
++				(void)acpi_ev_gpe_dispatch(gpe_block->node,
++							   gpe_event_info,
++							   gpe_number);
++			}
++
+ 			gpe_enabled_count++;
+ 		}
+ 	}
+diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
+index 57718a3e029a..67c7c4ce276c 100644
+--- a/drivers/acpi/acpica/evxfgpe.c
++++ b/drivers/acpi/acpica/evxfgpe.c
+@@ -435,6 +435,14 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
+ 		 */
+ 		gpe_event_info->flags =
+ 		    (ACPI_GPE_DISPATCH_NOTIFY | ACPI_GPE_LEVEL_TRIGGERED);
++	} else if (gpe_event_info->flags & ACPI_GPE_AUTO_ENABLED) {
++		/*
++		 * A reference to this GPE has been added during the GPE block
++		 * initialization, so drop it now to prevent the GPE from being
++		 * permanently enabled and clear its ACPI_GPE_AUTO_ENABLED flag.
++		 */
++		(void)acpi_ev_remove_gpe_reference(gpe_event_info);
++		gpe_event_info->flags &= ~ACPI_GPE_AUTO_ENABLED;
+ 	}
+ 
+ 	/*
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index 70fd5502c284..b7bdf9d0f5c0 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -2058,6 +2058,9 @@ int __init acpi_scan_init(void)
+ 			acpi_get_spcr_uart_addr();
+ 	}
+ 
++	acpi_gpe_apply_masked_gpes();
++	acpi_update_all_gpes();
++
+ 	mutex_lock(&acpi_scan_lock);
+ 	/*
+ 	 * Enumerate devices in the ACPI namespace.
+@@ -2082,9 +2085,6 @@ int __init acpi_scan_init(void)
+ 		}
+ 	}
+ 
+-	acpi_gpe_apply_masked_gpes();
+-	acpi_update_all_gpes();
+-
+ 	acpi_scan_initialized = true;
+ 
+  out:
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index fa8243c5c062..59b78e42a58b 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -160,6 +160,14 @@ static int __init init_nvs_nosave(const struct dmi_system_id *d)
+ 	return 0;
+ }
+ 
++static bool acpi_sleep_no_lps0;
++
++static int __init init_no_lps0(const struct dmi_system_id *d)
++{
++	acpi_sleep_no_lps0 = true;
++	return 0;
++}
++
+ static struct dmi_system_id acpisleep_dmi_table[] __initdata = {
+ 	{
+ 	.callback = init_old_suspend_ordering,
+@@ -343,6 +351,19 @@ static struct dmi_system_id acpisleep_dmi_table[] __initdata = {
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
+ 		},
+ 	},
++	/*
++	 * https://bugzilla.kernel.org/show_bug.cgi?id=196907
++	 * Some Dell XPS13 9360 cannot do suspend-to-idle using the Low Power
++	 * S0 Idle firmware interface.
++	 */
++	{
++	.callback = init_no_lps0,
++	.ident = "Dell XPS13 9360",
++	.matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++		DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
++		},
++	},
+ 	{},
+ };
+ 
+@@ -485,6 +506,7 @@ static void acpi_pm_end(void)
+ }
+ #else /* !CONFIG_ACPI_SLEEP */
+ #define acpi_target_sleep_state	ACPI_STATE_S0
++#define acpi_sleep_no_lps0	(false)
+ static inline void acpi_sleep_dmi_check(void) {}
+ #endif /* CONFIG_ACPI_SLEEP */
+ 
+@@ -702,6 +724,12 @@ static int lps0_device_attach(struct acpi_device *adev,
+ 	if (lps0_device_handle)
+ 		return 0;
+ 
++	if (acpi_sleep_no_lps0) {
++		acpi_handle_info(adev->handle,
++				 "Low Power S0 Idle interface disabled\n");
++		return 0;
++	}
++
+ 	if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
+ 		return 0;
+ 
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index b008b6a98098..cf54a1cf8c55 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -2692,7 +2692,7 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
+ 	 * from the parent.
+ 	 */
+ 	page_count = (u32)calc_pages_for(0, length);
+-	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
++	pages = ceph_alloc_page_vector(page_count, GFP_NOIO);
+ 	if (IS_ERR(pages)) {
+ 		result = PTR_ERR(pages);
+ 		pages = NULL;
+@@ -2827,7 +2827,7 @@ static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
+ 	 */
+ 	size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
+ 	page_count = (u32)calc_pages_for(0, size);
+-	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
++	pages = ceph_alloc_page_vector(page_count, GFP_NOIO);
+ 	if (IS_ERR(pages)) {
+ 		ret = PTR_ERR(pages);
+ 		goto fail_stat_request;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+index 4436d53ae16c..f160a66b7098 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -722,7 +722,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
+ 		 * allocation taken by fbdev
+ 		 */
+ 		if (!(dev_priv->capabilities & SVGA_CAP_3D))
+-			mem_size *= 2;
++			mem_size *= 3;
+ 
+ 		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
+ 		dev_priv->prim_bb_mem =
+diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
+index a6b762271a40..47a9696e7874 100644
+--- a/drivers/input/mouse/elan_i2c_core.c
++++ b/drivers/input/mouse/elan_i2c_core.c
+@@ -1253,6 +1253,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
+ 	{ "ELAN0605", 0 },
+ 	{ "ELAN0609", 0 },
+ 	{ "ELAN060B", 0 },
++	{ "ELAN060C", 0 },
+ 	{ "ELAN0611", 0 },
+ 	{ "ELAN1000", 0 },
+ 	{ }
+diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
+index cf7c18947189..d065c0e2d18e 100644
+--- a/drivers/net/can/c_can/c_can_pci.c
++++ b/drivers/net/can/c_can/c_can_pci.c
+@@ -178,7 +178,6 @@ static int c_can_pci_probe(struct pci_dev *pdev,
+ 		break;
+ 	case BOSCH_D_CAN:
+ 		priv->regs = reg_map_d_can;
+-		priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+ 		break;
+ 	default:
+ 		ret = -EINVAL;
+diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
+index e36d10520e24..717530eac70c 100644
+--- a/drivers/net/can/c_can/c_can_platform.c
++++ b/drivers/net/can/c_can/c_can_platform.c
+@@ -320,7 +320,6 @@ static int c_can_plat_probe(struct platform_device *pdev)
+ 		break;
+ 	case BOSCH_D_CAN:
+ 		priv->regs = reg_map_d_can;
+-		priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+ 		priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
+ 		priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
+ 		priv->read_reg32 = d_can_plat_read_reg32;
+diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
+index 4d1fe8d95042..2772d05ff11c 100644
+--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
++++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
+@@ -670,9 +670,9 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
+ 	       priv->base + IFI_CANFD_FTIME);
+ 
+ 	/* Configure transmitter delay */
+-	tdc = (dbt->brp * (dbt->phase_seg1 + 1)) & IFI_CANFD_TDELAY_MASK;
+-	writel(IFI_CANFD_TDELAY_EN | IFI_CANFD_TDELAY_ABS | tdc,
+-	       priv->base + IFI_CANFD_TDELAY);
++	tdc = dbt->brp * (dbt->prop_seg + dbt->phase_seg1);
++	tdc &= IFI_CANFD_TDELAY_MASK;
++	writel(IFI_CANFD_TDELAY_EN | tdc, priv->base + IFI_CANFD_TDELAY);
+ }
+ 
+ static void ifi_canfd_set_filter(struct net_device *ndev, const u32 id,
+diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
+index 51c2d182a33a..b4efd711f824 100644
+--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
++++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
+@@ -29,14 +29,19 @@
+ #include "peak_canfd_user.h"
+ 
+ MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
+-MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe FD family cards");
+-MODULE_SUPPORTED_DEVICE("PEAK PCAN PCIe FD CAN cards");
++MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe/M.2 FD family cards");
++MODULE_SUPPORTED_DEVICE("PEAK PCAN PCIe/M.2 FD CAN cards");
+ MODULE_LICENSE("GPL v2");
+ 
+ #define PCIEFD_DRV_NAME		"peak_pciefd"
+ 
+ #define PEAK_PCI_VENDOR_ID	0x001c	/* The PCI device and vendor IDs */
+ #define PEAK_PCIEFD_ID		0x0013	/* for PCIe slot cards */
++#define PCAN_CPCIEFD_ID		0x0014	/* for Compact-PCI Serial slot cards */
++#define PCAN_PCIE104FD_ID	0x0017	/* for PCIe-104 Express slot cards */
++#define PCAN_MINIPCIEFD_ID      0x0018	/* for mini-PCIe slot cards */
++#define PCAN_PCIEFD_OEM_ID      0x0019	/* for PCIe slot OEM cards */
++#define PCAN_M2_ID		0x001a	/* for M2 slot cards */
+ 
+ /* PEAK PCIe board access description */
+ #define PCIEFD_BAR0_SIZE		(64 * 1024)
+@@ -203,6 +208,11 @@ struct pciefd_board {
+ /* supported device ids. */
+ static const struct pci_device_id peak_pciefd_tbl[] = {
+ 	{PEAK_PCI_VENDOR_ID, PEAK_PCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
++	{PEAK_PCI_VENDOR_ID, PCAN_CPCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
++	{PEAK_PCI_VENDOR_ID, PCAN_PCIE104FD_ID, PCI_ANY_ID, PCI_ANY_ID,},
++	{PEAK_PCI_VENDOR_ID, PCAN_MINIPCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
++	{PEAK_PCI_VENDOR_ID, PCAN_PCIEFD_OEM_ID, PCI_ANY_ID, PCI_ANY_ID,},
++	{PEAK_PCI_VENDOR_ID, PCAN_M2_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ 	{0,}
+ };
+ 
+diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
+index b0c80859f746..1ac2090a1721 100644
+--- a/drivers/net/can/sun4i_can.c
++++ b/drivers/net/can/sun4i_can.c
+@@ -539,6 +539,13 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
+ 		}
+ 		stats->rx_over_errors++;
+ 		stats->rx_errors++;
++
++		/* reset the CAN IP by entering reset mode
++		 * ignoring timeout error
++		 */
++		set_reset_mode(dev);
++		set_normal_mode(dev);
++
+ 		/* clear bit */
+ 		sun4i_can_write_cmdreg(priv, SUN4I_CMD_CLEAR_OR_FLAG);
+ 	}
+@@ -653,8 +660,9 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id)
+ 			netif_wake_queue(dev);
+ 			can_led_event(dev, CAN_LED_EVENT_TX);
+ 		}
+-		if (isrc & SUN4I_INT_RBUF_VLD) {
+-			/* receive interrupt */
++		if ((isrc & SUN4I_INT_RBUF_VLD) &&
++		    !(isrc & SUN4I_INT_DATA_OR)) {
++			/* receive interrupt - don't read if overrun occurred */
+ 			while (status & SUN4I_STA_RBUF_RDY) {
+ 				/* RX buffer is not empty */
+ 				sun4i_can_rx(dev);
+diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
+index 2fcbaec8b368..71eddf645566 100644
+--- a/include/acpi/actypes.h
++++ b/include/acpi/actypes.h
+@@ -775,7 +775,7 @@ typedef u32 acpi_event_status;
+  *   |  | | |  +-- Type of dispatch:to method, handler, notify, or none
+  *   |  | | +----- Interrupt type: edge or level triggered
+  *   |  | +------- Is a Wake GPE
+- *   |  +--------- Is GPE masked by the software GPE masking mechanism
++ *   |  +--------- Has been enabled automatically at init time
+  *   +------------ <Reserved>
+  */
+ #define ACPI_GPE_DISPATCH_NONE          (u8) 0x00
+@@ -791,6 +791,7 @@ typedef u32 acpi_event_status;
+ #define ACPI_GPE_XRUPT_TYPE_MASK        (u8) 0x08
+ 
+ #define ACPI_GPE_CAN_WAKE               (u8) 0x10
++#define ACPI_GPE_AUTO_ENABLED           (u8) 0x20
+ 
+ /*
+  * Flags for GPE and Lock interfaces
+diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
+index 48407569585d..a7c2cee39570 100644
+--- a/include/net/netfilter/nf_conntrack.h
++++ b/include/net/netfilter/nf_conntrack.h
+@@ -17,7 +17,6 @@
+ #include <linux/bitops.h>
+ #include <linux/compiler.h>
+ #include <linux/atomic.h>
+-#include <linux/rhashtable.h>
+ 
+ #include <linux/netfilter/nf_conntrack_tcp.h>
+ #include <linux/netfilter/nf_conntrack_dccp.h>
+@@ -83,7 +82,7 @@ struct nf_conn {
+ 	possible_net_t ct_net;
+ 
+ #if IS_ENABLED(CONFIG_NF_NAT)
+-	struct rhlist_head nat_bysource;
++	struct hlist_node	nat_bysource;
+ #endif
+ 	/* all members below initialized via memset */
+ 	u8 __nfct_init_offset[0];
+diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
+index 05c82a1a4267..b71701302e61 100644
+--- a/include/net/netfilter/nf_nat.h
++++ b/include/net/netfilter/nf_nat.h
+@@ -1,6 +1,5 @@
+ #ifndef _NF_NAT_H
+ #define _NF_NAT_H
+-#include <linux/rhashtable.h>
+ #include <linux/netfilter_ipv4.h>
+ #include <linux/netfilter/nf_nat.h>
+ #include <net/netfilter/nf_conntrack_tuple.h>
+diff --git a/include/sound/seq_kernel.h b/include/sound/seq_kernel.h
+index feb58d455560..4b9ee3009aa0 100644
+--- a/include/sound/seq_kernel.h
++++ b/include/sound/seq_kernel.h
+@@ -49,7 +49,8 @@ typedef union snd_seq_timestamp snd_seq_timestamp_t;
+ #define SNDRV_SEQ_DEFAULT_CLIENT_EVENTS	200
+ 
+ /* max delivery path length */
+-#define SNDRV_SEQ_MAX_HOPS		10
++/* NOTE: this shouldn't be greater than MAX_LOCKDEP_SUBCLASSES */
++#define SNDRV_SEQ_MAX_HOPS		8
+ 
+ /* max size of event size */
+ #define SNDRV_SEQ_MAX_EVENT_LEN		0x3fffffff
+diff --git a/include/sound/timer.h b/include/sound/timer.h
+index c4d76ff056c6..7ae226ab6990 100644
+--- a/include/sound/timer.h
++++ b/include/sound/timer.h
+@@ -90,6 +90,8 @@ struct snd_timer {
+ 	struct list_head ack_list_head;
+ 	struct list_head sack_list_head; /* slow ack list head */
+ 	struct tasklet_struct task_queue;
++	int max_instances;	/* upper limit of timer instances */
++	int num_instances;	/* current number of timer instances */
+ };
+ 
+ struct snd_timer_instance {
+diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
+index 8635417c587b..29fa81f0f51a 100644
+--- a/kernel/workqueue_internal.h
++++ b/kernel/workqueue_internal.h
+@@ -9,6 +9,7 @@
+ 
+ #include <linux/workqueue.h>
+ #include <linux/kthread.h>
++#include <linux/preempt.h>
+ 
+ struct worker_pool;
+ 
+@@ -59,7 +60,7 @@ struct worker {
+  */
+ static inline struct worker *current_wq_worker(void)
+ {
+-	if (current->flags & PF_WQ_WORKER)
++	if (in_task() && (current->flags & PF_WQ_WORKER))
+ 		return kthread_data(current);
+ 	return NULL;
+ }
+diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
+index fef5d2e114be..1ef0cec38d78 100644
+--- a/lib/asn1_decoder.c
++++ b/lib/asn1_decoder.c
+@@ -228,7 +228,7 @@ int asn1_ber_decoder(const struct asn1_decoder *decoder,
+ 		hdr = 2;
+ 
+ 		/* Extract a tag from the data */
+-		if (unlikely(dp >= datalen - 1))
++		if (unlikely(datalen - dp < 2))
+ 			goto data_overrun_error;
+ 		tag = data[dp++];
+ 		if (unlikely((tag & 0x1f) == ASN1_LONG_TAG))
+@@ -274,7 +274,7 @@ int asn1_ber_decoder(const struct asn1_decoder *decoder,
+ 				int n = len - 0x80;
+ 				if (unlikely(n > 2))
+ 					goto length_too_long;
+-				if (unlikely(dp >= datalen - n))
++				if (unlikely(n > datalen - dp))
+ 					goto data_overrun_error;
+ 				hdr += n;
+ 				for (len = 0; n > 0; n--) {
+diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
+index b1d3740ae36a..2fb80a4bfb34 100644
+--- a/net/netfilter/nf_nat_core.c
++++ b/net/netfilter/nf_nat_core.c
+@@ -30,19 +30,17 @@
+ #include <net/netfilter/nf_conntrack_zones.h>
+ #include <linux/netfilter/nf_nat.h>
+ 
++static DEFINE_SPINLOCK(nf_nat_lock);
++
+ static DEFINE_MUTEX(nf_nat_proto_mutex);
+ static const struct nf_nat_l3proto __rcu *nf_nat_l3protos[NFPROTO_NUMPROTO]
+ 						__read_mostly;
+ static const struct nf_nat_l4proto __rcu **nf_nat_l4protos[NFPROTO_NUMPROTO]
+ 						__read_mostly;
+ 
+-struct nf_nat_conn_key {
+-	const struct net *net;
+-	const struct nf_conntrack_tuple *tuple;
+-	const struct nf_conntrack_zone *zone;
+-};
+-
+-static struct rhltable nf_nat_bysource_table;
++static struct hlist_head *nf_nat_bysource __read_mostly;
++static unsigned int nf_nat_htable_size __read_mostly;
++static unsigned int nf_nat_hash_rnd __read_mostly;
+ 
+ inline const struct nf_nat_l3proto *
+ __nf_nat_l3proto_find(u8 family)
+@@ -118,17 +116,19 @@ int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
+ EXPORT_SYMBOL(nf_xfrm_me_harder);
+ #endif /* CONFIG_XFRM */
+ 
+-static u32 nf_nat_bysource_hash(const void *data, u32 len, u32 seed)
++/* We keep an extra hash for each conntrack, for fast searching. */
++static unsigned int
++hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
+ {
+-	const struct nf_conntrack_tuple *t;
+-	const struct nf_conn *ct = data;
++	unsigned int hash;
++
++	get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd));
+ 
+-	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+ 	/* Original src, to ensure we map it consistently if poss. */
++	hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
++		      tuple->dst.protonum ^ nf_nat_hash_rnd ^ net_hash_mix(n));
+ 
+-	seed ^= net_hash_mix(nf_ct_net(ct));
+-	return jhash2((const u32 *)&t->src, sizeof(t->src) / sizeof(u32),
+-		      t->dst.protonum ^ seed);
++	return reciprocal_scale(hash, nf_nat_htable_size);
+ }
+ 
+ /* Is this tuple already taken? (not by us) */
+@@ -184,28 +184,6 @@ same_src(const struct nf_conn *ct,
+ 		t->src.u.all == tuple->src.u.all);
+ }
+ 
+-static int nf_nat_bysource_cmp(struct rhashtable_compare_arg *arg,
+-			       const void *obj)
+-{
+-	const struct nf_nat_conn_key *key = arg->key;
+-	const struct nf_conn *ct = obj;
+-
+-	if (!same_src(ct, key->tuple) ||
+-	    !net_eq(nf_ct_net(ct), key->net) ||
+-	    !nf_ct_zone_equal(ct, key->zone, IP_CT_DIR_ORIGINAL))
+-		return 1;
+-
+-	return 0;
+-}
+-
+-static struct rhashtable_params nf_nat_bysource_params = {
+-	.head_offset = offsetof(struct nf_conn, nat_bysource),
+-	.obj_hashfn = nf_nat_bysource_hash,
+-	.obj_cmpfn = nf_nat_bysource_cmp,
+-	.nelem_hint = 256,
+-	.min_size = 1024,
+-};
+-
+ /* Only called for SRC manip */
+ static int
+ find_appropriate_src(struct net *net,
+@@ -216,26 +194,22 @@ find_appropriate_src(struct net *net,
+ 		     struct nf_conntrack_tuple *result,
+ 		     const struct nf_nat_range *range)
+ {
++	unsigned int h = hash_by_src(net, tuple);
+ 	const struct nf_conn *ct;
+-	struct nf_nat_conn_key key = {
+-		.net = net,
+-		.tuple = tuple,
+-		.zone = zone
+-	};
+-	struct rhlist_head *hl, *h;
+-
+-	hl = rhltable_lookup(&nf_nat_bysource_table, &key,
+-			     nf_nat_bysource_params);
+ 
+-	rhl_for_each_entry_rcu(ct, h, hl, nat_bysource) {
+-		nf_ct_invert_tuplepr(result,
+-				     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+-		result->dst = tuple->dst;
+-
+-		if (in_range(l3proto, l4proto, result, range))
+-			return 1;
++	hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) {
++		if (same_src(ct, tuple) &&
++		    net_eq(net, nf_ct_net(ct)) &&
++		    nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
++			/* Copy source part from reply tuple. */
++			nf_ct_invert_tuplepr(result,
++				       &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
++			result->dst = tuple->dst;
++
++			if (in_range(l3proto, l4proto, result, range))
++				return 1;
++		}
+ 	}
+-
+ 	return 0;
+ }
+ 
+@@ -408,6 +382,7 @@ nf_nat_setup_info(struct nf_conn *ct,
+ 		  const struct nf_nat_range *range,
+ 		  enum nf_nat_manip_type maniptype)
+ {
++	struct net *net = nf_ct_net(ct);
+ 	struct nf_conntrack_tuple curr_tuple, new_tuple;
+ 
+ 	/* Can't setup nat info for confirmed ct. */
+@@ -447,19 +422,14 @@ nf_nat_setup_info(struct nf_conn *ct,
+ 	}
+ 
+ 	if (maniptype == NF_NAT_MANIP_SRC) {
+-		struct nf_nat_conn_key key = {
+-			.net = nf_ct_net(ct),
+-			.tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+-			.zone = nf_ct_zone(ct),
+-		};
+-		int err;
+-
+-		err = rhltable_insert_key(&nf_nat_bysource_table,
+-					  &key,
+-					  &ct->nat_bysource,
+-					  nf_nat_bysource_params);
+-		if (err)
+-			return NF_DROP;
++		unsigned int srchash;
++
++		srchash = hash_by_src(net,
++				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
++		spin_lock_bh(&nf_nat_lock);
++		hlist_add_head_rcu(&ct->nat_bysource,
++				   &nf_nat_bysource[srchash]);
++		spin_unlock_bh(&nf_nat_lock);
+ 	}
+ 
+ 	/* It's done. */
+@@ -568,8 +538,9 @@ static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
+ 	 * will delete entry from already-freed table.
+ 	 */
+ 	clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
+-	rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
+-			nf_nat_bysource_params);
++	spin_lock_bh(&nf_nat_lock);
++	hlist_del_rcu(&ct->nat_bysource);
++	spin_unlock_bh(&nf_nat_lock);
+ 
+ 	/* don't delete conntrack.  Although that would make things a lot
+ 	 * simpler, we'd end up flushing all conntracks on nat rmmod.
+@@ -697,9 +668,11 @@ EXPORT_SYMBOL_GPL(nf_nat_l3proto_unregister);
+ /* No one using conntrack by the time this called. */
+ static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
+ {
+-	if (ct->status & IPS_SRC_NAT_DONE)
+-		rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
+-				nf_nat_bysource_params);
++	if (ct->status & IPS_SRC_NAT_DONE) {
++		spin_lock_bh(&nf_nat_lock);
++		hlist_del_rcu(&ct->nat_bysource);
++		spin_unlock_bh(&nf_nat_lock);
++	}
+ }
+ 
+ static struct nf_ct_ext_type nat_extend __read_mostly = {
+@@ -823,13 +796,16 @@ static int __init nf_nat_init(void)
+ {
+ 	int ret;
+ 
+-	ret = rhltable_init(&nf_nat_bysource_table, &nf_nat_bysource_params);
+-	if (ret)
+-		return ret;
++	/* Leave them the same for the moment. */
++	nf_nat_htable_size = nf_conntrack_htable_size;
++
++	nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0);
++	if (!nf_nat_bysource)
++		return -ENOMEM;
+ 
+ 	ret = nf_ct_extend_register(&nat_extend);
+ 	if (ret < 0) {
+-		rhltable_destroy(&nf_nat_bysource_table);
++		nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
+ 		printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
+ 		return ret;
+ 	}
+@@ -863,8 +839,8 @@ static void __exit nf_nat_cleanup(void)
+ 
+ 	for (i = 0; i < NFPROTO_NUMPROTO; i++)
+ 		kfree(nf_nat_l4protos[i]);
+-
+-	rhltable_destroy(&nf_nat_bysource_table);
++	synchronize_net();
++	nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
+ }
+ 
+ MODULE_LICENSE("GPL");
+diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
+index 0fa01d772c5e..9c0d5a7ce5f9 100644
+--- a/net/netfilter/nft_set_hash.c
++++ b/net/netfilter/nft_set_hash.c
+@@ -643,7 +643,6 @@ nft_hash_select_ops(const struct nft_ctx *ctx, const struct nft_set_desc *desc,
+ {
+ 	if (desc->size) {
+ 		switch (desc->klen) {
+-		case 2:
+ 		case 4:
+ 			return &nft_hash_fast_ops;
+ 		default:
+diff --git a/sound/core/hrtimer.c b/sound/core/hrtimer.c
+index 1ac0c423903e..6e47b823bcaa 100644
+--- a/sound/core/hrtimer.c
++++ b/sound/core/hrtimer.c
+@@ -159,6 +159,7 @@ static int __init snd_hrtimer_init(void)
+ 	timer->hw = hrtimer_hw;
+ 	timer->hw.resolution = resolution;
+ 	timer->hw.ticks = NANO_SEC / resolution;
++	timer->max_instances = 100; /* lower the limit */
+ 
+ 	err = snd_timer_global_register(timer);
+ 	if (err < 0) {
+diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
+index aaff9ee32695..b30b2139e3f0 100644
+--- a/sound/core/seq/oss/seq_oss_midi.c
++++ b/sound/core/seq/oss/seq_oss_midi.c
+@@ -612,9 +612,7 @@ send_midi_event(struct seq_oss_devinfo *dp, struct snd_seq_event *ev, struct seq
+ 	if (!dp->timer->running)
+ 		len = snd_seq_oss_timer_start(dp->timer);
+ 	if (ev->type == SNDRV_SEQ_EVENT_SYSEX) {
+-		if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) == SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
+-			snd_seq_oss_readq_puts(dp->readq, mdev->seq_device,
+-					       ev->data.ext.ptr, ev->data.ext.len);
++		snd_seq_oss_readq_sysex(dp->readq, mdev->seq_device, ev);
+ 	} else {
+ 		len = snd_midi_event_decode(mdev->coder, msg, sizeof(msg), ev);
+ 		if (len > 0)
+diff --git a/sound/core/seq/oss/seq_oss_readq.c b/sound/core/seq/oss/seq_oss_readq.c
+index 046cb586fb2f..06b21226b4e7 100644
+--- a/sound/core/seq/oss/seq_oss_readq.c
++++ b/sound/core/seq/oss/seq_oss_readq.c
+@@ -117,6 +117,35 @@ snd_seq_oss_readq_puts(struct seq_oss_readq *q, int dev, unsigned char *data, in
+ 	return 0;
+ }
+ 
++/*
++ * put MIDI sysex bytes; the event buffer may be chained, thus it has
++ * to be expanded via snd_seq_dump_var_event().
++ */
++struct readq_sysex_ctx {
++	struct seq_oss_readq *readq;
++	int dev;
++};
++
++static int readq_dump_sysex(void *ptr, void *buf, int count)
++{
++	struct readq_sysex_ctx *ctx = ptr;
++
++	return snd_seq_oss_readq_puts(ctx->readq, ctx->dev, buf, count);
++}
++
++int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev,
++			    struct snd_seq_event *ev)
++{
++	struct readq_sysex_ctx ctx = {
++		.readq = q,
++		.dev = dev
++	};
++
++	if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
++		return 0;
++	return snd_seq_dump_var_event(ev, readq_dump_sysex, &ctx);
++}
++
+ /*
+  * copy an event to input queue:
+  * return zero if enqueued
+diff --git a/sound/core/seq/oss/seq_oss_readq.h b/sound/core/seq/oss/seq_oss_readq.h
+index f1463f1f449e..8d033ca2d23f 100644
+--- a/sound/core/seq/oss/seq_oss_readq.h
++++ b/sound/core/seq/oss/seq_oss_readq.h
+@@ -44,6 +44,8 @@ void snd_seq_oss_readq_delete(struct seq_oss_readq *q);
+ void snd_seq_oss_readq_clear(struct seq_oss_readq *readq);
+ unsigned int snd_seq_oss_readq_poll(struct seq_oss_readq *readq, struct file *file, poll_table *wait);
+ int snd_seq_oss_readq_puts(struct seq_oss_readq *readq, int dev, unsigned char *data, int len);
++int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev,
++			    struct snd_seq_event *ev);
+ int snd_seq_oss_readq_put_event(struct seq_oss_readq *readq, union evrec *ev);
+ int snd_seq_oss_readq_put_timestamp(struct seq_oss_readq *readq, unsigned long curt, int seq_mode);
+ int snd_seq_oss_readq_pick(struct seq_oss_readq *q, union evrec *rec);
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index a9b9a277e00c..c8e4d0da13b4 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -180,7 +180,7 @@ static void snd_timer_request(struct snd_timer_id *tid)
+  *
+  * call this with register_mutex down.
+  */
+-static void snd_timer_check_slave(struct snd_timer_instance *slave)
++static int snd_timer_check_slave(struct snd_timer_instance *slave)
+ {
+ 	struct snd_timer *timer;
+ 	struct snd_timer_instance *master;
+@@ -190,16 +190,21 @@ static void snd_timer_check_slave(struct snd_timer_instance *slave)
+ 		list_for_each_entry(master, &timer->open_list_head, open_list) {
+ 			if (slave->slave_class == master->slave_class &&
+ 			    slave->slave_id == master->slave_id) {
++				if (master->timer->num_instances >=
++				    master->timer->max_instances)
++					return -EBUSY;
+ 				list_move_tail(&slave->open_list,
+ 					       &master->slave_list_head);
++				master->timer->num_instances++;
+ 				spin_lock_irq(&slave_active_lock);
+ 				slave->master = master;
+ 				slave->timer = master->timer;
+ 				spin_unlock_irq(&slave_active_lock);
+-				return;
++				return 0;
+ 			}
+ 		}
+ 	}
++	return 0;
+ }
+ 
+ /*
+@@ -208,7 +213,7 @@ static void snd_timer_check_slave(struct snd_timer_instance *slave)
+  *
+  * call this with register_mutex down.
+  */
+-static void snd_timer_check_master(struct snd_timer_instance *master)
++static int snd_timer_check_master(struct snd_timer_instance *master)
+ {
+ 	struct snd_timer_instance *slave, *tmp;
+ 
+@@ -216,7 +221,11 @@ static void snd_timer_check_master(struct snd_timer_instance *master)
+ 	list_for_each_entry_safe(slave, tmp, &snd_timer_slave_list, open_list) {
+ 		if (slave->slave_class == master->slave_class &&
+ 		    slave->slave_id == master->slave_id) {
++			if (master->timer->num_instances >=
++			    master->timer->max_instances)
++				return -EBUSY;
+ 			list_move_tail(&slave->open_list, &master->slave_list_head);
++			master->timer->num_instances++;
+ 			spin_lock_irq(&slave_active_lock);
+ 			spin_lock(&master->timer->lock);
+ 			slave->master = master;
+@@ -228,8 +237,11 @@ static void snd_timer_check_master(struct snd_timer_instance *master)
+ 			spin_unlock_irq(&slave_active_lock);
+ 		}
+ 	}
++	return 0;
+ }
+ 
++static int snd_timer_close_locked(struct snd_timer_instance *timeri);
++
+ /*
+  * open a timer instance
+  * when opening a master, the slave id must be here given.
+@@ -240,6 +252,7 @@ int snd_timer_open(struct snd_timer_instance **ti,
+ {
+ 	struct snd_timer *timer;
+ 	struct snd_timer_instance *timeri = NULL;
++	int err;
+ 
+ 	if (tid->dev_class == SNDRV_TIMER_CLASS_SLAVE) {
+ 		/* open a slave instance */
+@@ -259,10 +272,14 @@ int snd_timer_open(struct snd_timer_instance **ti,
+ 		timeri->slave_id = tid->device;
+ 		timeri->flags |= SNDRV_TIMER_IFLG_SLAVE;
+ 		list_add_tail(&timeri->open_list, &snd_timer_slave_list);
+-		snd_timer_check_slave(timeri);
++		err = snd_timer_check_slave(timeri);
++		if (err < 0) {
++			snd_timer_close_locked(timeri);
++			timeri = NULL;
++		}
+ 		mutex_unlock(&register_mutex);
+ 		*ti = timeri;
+-		return 0;
++		return err;
+ 	}
+ 
+ 	/* open a master instance */
+@@ -288,6 +305,10 @@ int snd_timer_open(struct snd_timer_instance **ti,
+ 			return -EBUSY;
+ 		}
+ 	}
++	if (timer->num_instances >= timer->max_instances) {
++		mutex_unlock(&register_mutex);
++		return -EBUSY;
++	}
+ 	timeri = snd_timer_instance_new(owner, timer);
+ 	if (!timeri) {
+ 		mutex_unlock(&register_mutex);
+@@ -314,25 +335,27 @@ int snd_timer_open(struct snd_timer_instance **ti,
+ 	}
+ 
+ 	list_add_tail(&timeri->open_list, &timer->open_list_head);
+-	snd_timer_check_master(timeri);
++	timer->num_instances++;
++	err = snd_timer_check_master(timeri);
++	if (err < 0) {
++		snd_timer_close_locked(timeri);
++		timeri = NULL;
++	}
+ 	mutex_unlock(&register_mutex);
+ 	*ti = timeri;
+-	return 0;
++	return err;
+ }
+ EXPORT_SYMBOL(snd_timer_open);
+ 
+ /*
+  * close a timer instance
++ * call this with register_mutex down.
+  */
+-int snd_timer_close(struct snd_timer_instance *timeri)
++static int snd_timer_close_locked(struct snd_timer_instance *timeri)
+ {
+ 	struct snd_timer *timer = NULL;
+ 	struct snd_timer_instance *slave, *tmp;
+ 
+-	if (snd_BUG_ON(!timeri))
+-		return -ENXIO;
+-
+-	mutex_lock(&register_mutex);
+ 	list_del(&timeri->open_list);
+ 
+ 	/* force to stop the timer */
+@@ -340,6 +363,7 @@ int snd_timer_close(struct snd_timer_instance *timeri)
+ 
+ 	timer = timeri->timer;
+ 	if (timer) {
++		timer->num_instances--;
+ 		/* wait, until the active callback is finished */
+ 		spin_lock_irq(&timer->lock);
+ 		while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) {
+@@ -355,6 +379,7 @@ int snd_timer_close(struct snd_timer_instance *timeri)
+ 		list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head,
+ 					 open_list) {
+ 			list_move_tail(&slave->open_list, &snd_timer_slave_list);
++			timer->num_instances--;
+ 			slave->master = NULL;
+ 			slave->timer = NULL;
+ 			list_del_init(&slave->ack_list);
+@@ -382,9 +407,24 @@ int snd_timer_close(struct snd_timer_instance *timeri)
+ 		module_put(timer->module);
+ 	}
+ 
+-	mutex_unlock(&register_mutex);
+ 	return 0;
+ }
++
++/*
++ * close a timer instance
++ */
++int snd_timer_close(struct snd_timer_instance *timeri)
++{
++	int err;
++
++	if (snd_BUG_ON(!timeri))
++		return -ENXIO;
++
++	mutex_lock(&register_mutex);
++	err = snd_timer_close_locked(timeri);
++	mutex_unlock(&register_mutex);
++	return err;
++}
+ EXPORT_SYMBOL(snd_timer_close);
+ 
+ unsigned long snd_timer_resolution(struct snd_timer_instance *timeri)
+@@ -855,6 +895,7 @@ int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid,
+ 	spin_lock_init(&timer->lock);
+ 	tasklet_init(&timer->task_queue, snd_timer_tasklet,
+ 		     (unsigned long)timer);
++	timer->max_instances = 1000; /* default limit per timer */
+ 	if (card != NULL) {
+ 		timer->module = card->module;
+ 		err = snd_device_new(card, SNDRV_DEV_TIMER, timer, &ops);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index fe4d06398fc3..c5f0e8d42d22 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6527,6 +6527,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ 		{0x14, 0x90170110},
+ 		{0x1b, 0x90a70130},
+ 		{0x21, 0x03211020}),
++	SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
++		{0x12, 0xb7a60130},
++		{0x13, 0xb8a61140},
++		{0x16, 0x90170110},
++		{0x21, 0x04211020}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
+ 		{0x12, 0x90a60130},
+ 		{0x14, 0x90170110},
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 0e54fe490458..f910c4fd932b 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1373,6 +1373,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+ 			case 0x199:
+ 				return SNDRV_PCM_FMTBIT_DSD_U32_LE;
+ 			case 0x19b:
++			case 0x203:
+ 				return SNDRV_PCM_FMTBIT_DSD_U32_BE;
+ 			default:
+ 				break;


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.13 commit in: /
@ 2017-11-08 13:48 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2017-11-08 13:48 UTC (permalink / raw
  To: gentoo-commits

commit:     38dbdf600ccd3f9bbf4038202e0d001db9abedeb
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Nov  8 13:48:09 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Nov  8 13:48:09 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=38dbdf60

Linux patch 4.13.12

 0000_README              |    4 +
 1011_linux-4.13.12.patch | 1438 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1442 insertions(+)

diff --git a/0000_README b/0000_README
index bca516e..878d286 100644
--- a/0000_README
+++ b/0000_README
@@ -87,6 +87,10 @@ Patch:  1010_linux-4.13.11.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.13.11
 
+Patch:  1011_linux-4.13.12.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.13.12
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1011_linux-4.13.12.patch b/1011_linux-4.13.12.patch
new file mode 100644
index 0000000..ed5088c
--- /dev/null
+++ b/1011_linux-4.13.12.patch
@@ -0,0 +1,1438 @@
+diff --git a/Makefile b/Makefile
+index 8280953c8a45..a7c847f495b0 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 13
+-SUBLEVEL = 11
++SUBLEVEL = 12
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi
+index 50c5e8417802..10b99530280a 100644
+--- a/arch/arm/boot/dts/armada-375.dtsi
++++ b/arch/arm/boot/dts/armada-375.dtsi
+@@ -178,9 +178,9 @@
+ 				reg = <0x8000 0x1000>;
+ 				cache-unified;
+ 				cache-level = <2>;
+-				arm,double-linefill-incr = <1>;
++				arm,double-linefill-incr = <0>;
+ 				arm,double-linefill-wrap = <0>;
+-				arm,double-linefill = <1>;
++				arm,double-linefill = <0>;
+ 				prefetch-data = <1>;
+ 			};
+ 
+diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
+index af31f5d6c0e5..c3448622e79e 100644
+--- a/arch/arm/boot/dts/armada-38x.dtsi
++++ b/arch/arm/boot/dts/armada-38x.dtsi
+@@ -143,9 +143,9 @@
+ 				reg = <0x8000 0x1000>;
+ 				cache-unified;
+ 				cache-level = <2>;
+-				arm,double-linefill-incr = <1>;
++				arm,double-linefill-incr = <0>;
+ 				arm,double-linefill-wrap = <0>;
+-				arm,double-linefill = <1>;
++				arm,double-linefill = <0>;
+ 				prefetch-data = <1>;
+ 			};
+ 
+diff --git a/arch/arm/boot/dts/armada-39x.dtsi b/arch/arm/boot/dts/armada-39x.dtsi
+index 60fbfd5907c7..55d02641d930 100644
+--- a/arch/arm/boot/dts/armada-39x.dtsi
++++ b/arch/arm/boot/dts/armada-39x.dtsi
+@@ -111,9 +111,9 @@
+ 				reg = <0x8000 0x1000>;
+ 				cache-unified;
+ 				cache-level = <2>;
+-				arm,double-linefill-incr = <1>;
++				arm,double-linefill-incr = <0>;
+ 				arm,double-linefill-wrap = <0>;
+-				arm,double-linefill = <1>;
++				arm,double-linefill = <0>;
+ 				prefetch-data = <1>;
+ 			};
+ 
+diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
+index 721ab5ecfb9b..0f2c8a2a8131 100644
+--- a/arch/arm/include/asm/Kbuild
++++ b/arch/arm/include/asm/Kbuild
+@@ -20,7 +20,6 @@ generic-y += simd.h
+ generic-y += sizes.h
+ generic-y += timex.h
+ generic-y += trace_clock.h
+-generic-y += unaligned.h
+ 
+ generated-y += mach-types.h
+ generated-y += unistd-nr.h
+diff --git a/arch/arm/include/asm/unaligned.h b/arch/arm/include/asm/unaligned.h
+new file mode 100644
+index 000000000000..ab905ffcf193
+--- /dev/null
++++ b/arch/arm/include/asm/unaligned.h
+@@ -0,0 +1,27 @@
++#ifndef __ASM_ARM_UNALIGNED_H
++#define __ASM_ARM_UNALIGNED_H
++
++/*
++ * We generally want to set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS on ARMv6+,
++ * but we don't want to use linux/unaligned/access_ok.h since that can lead
++ * to traps on unaligned stm/ldm or strd/ldrd.
++ */
++#include <asm/byteorder.h>
++
++#if defined(__LITTLE_ENDIAN)
++# include <linux/unaligned/le_struct.h>
++# include <linux/unaligned/be_byteshift.h>
++# include <linux/unaligned/generic.h>
++# define get_unaligned	__get_unaligned_le
++# define put_unaligned	__put_unaligned_le
++#elif defined(__BIG_ENDIAN)
++# include <linux/unaligned/be_struct.h>
++# include <linux/unaligned/le_byteshift.h>
++# include <linux/unaligned/generic.h>
++# define get_unaligned	__get_unaligned_be
++# define put_unaligned	__put_unaligned_be
++#else
++# error need to define endianess
++#endif
++
++#endif /* __ASM_ARM_UNALIGNED_H */
+diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
+index 0064b86a2c87..30a13647c54c 100644
+--- a/arch/arm/kvm/emulate.c
++++ b/arch/arm/kvm/emulate.c
+@@ -227,7 +227,7 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
+ 	u32 return_offset = (is_thumb) ? 2 : 4;
+ 
+ 	kvm_update_psr(vcpu, UND_MODE);
+-	*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) - return_offset;
++	*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
+ 
+ 	/* Branch to exception vector */
+ 	*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
+@@ -239,10 +239,8 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
+  */
+ static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
+ {
+-	unsigned long cpsr = *vcpu_cpsr(vcpu);
+-	bool is_thumb = (cpsr & PSR_T_BIT);
+ 	u32 vect_offset;
+-	u32 return_offset = (is_thumb) ? 4 : 0;
++	u32 return_offset = (is_pabt) ? 4 : 8;
+ 	bool is_lpae;
+ 
+ 	kvm_update_psr(vcpu, ABT_MODE);
+diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
+index 8679405b0b2b..92eab1d51785 100644
+--- a/arch/arm/kvm/hyp/Makefile
++++ b/arch/arm/kvm/hyp/Makefile
+@@ -2,7 +2,7 @@
+ # Makefile for Kernel-based Virtual Machine module, HYP part
+ #
+ 
+-ccflags-y += -fno-stack-protector
++ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
+ 
+ KVM=../../../../virt/kvm
+ 
+diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
+index 8a62648848e5..c99ffd8dce27 100644
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -116,7 +116,7 @@ static void __dump_instr(const char *lvl, struct pt_regs *regs)
+ 	for (i = -4; i < 1; i++) {
+ 		unsigned int val, bad;
+ 
+-		bad = __get_user(val, &((u32 *)addr)[i]);
++		bad = get_user(val, &((u32 *)addr)[i]);
+ 
+ 		if (!bad)
+ 			p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
+diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
+index 14c4e3b14bcb..48b03547a969 100644
+--- a/arch/arm64/kvm/hyp/Makefile
++++ b/arch/arm64/kvm/hyp/Makefile
+@@ -2,7 +2,7 @@
+ # Makefile for Kernel-based Virtual Machine module, HYP part
+ #
+ 
+-ccflags-y += -fno-stack-protector
++ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
+ 
+ KVM=../../../../virt/kvm
+ 
+diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
+index da6a8cfa54a0..3556715a774e 100644
+--- a/arch/arm64/kvm/inject_fault.c
++++ b/arch/arm64/kvm/inject_fault.c
+@@ -33,12 +33,26 @@
+ #define LOWER_EL_AArch64_VECTOR		0x400
+ #define LOWER_EL_AArch32_VECTOR		0x600
+ 
++/*
++ * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
++ */
++static const u8 return_offsets[8][2] = {
++	[0] = { 0, 0 },		/* Reset, unused */
++	[1] = { 4, 2 },		/* Undefined */
++	[2] = { 0, 0 },		/* SVC, unused */
++	[3] = { 4, 4 },		/* Prefetch abort */
++	[4] = { 8, 8 },		/* Data abort */
++	[5] = { 0, 0 },		/* HVC, unused */
++	[6] = { 4, 4 },		/* IRQ, unused */
++	[7] = { 4, 4 },		/* FIQ, unused */
++};
++
+ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
+ {
+ 	unsigned long cpsr;
+ 	unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
+ 	bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
+-	u32 return_offset = (is_thumb) ? 4 : 0;
++	u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
+ 	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
+ 
+ 	cpsr = mode | COMPAT_PSR_I_BIT;
+diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
+index 76923349b4fe..797da807916f 100644
+--- a/arch/mips/kernel/smp-cmp.c
++++ b/arch/mips/kernel/smp-cmp.c
+@@ -19,7 +19,7 @@
+ #undef DEBUG
+ 
+ #include <linux/kernel.h>
+-#include <linux/sched.h>
++#include <linux/sched/task_stack.h>
+ #include <linux/smp.h>
+ #include <linux/cpumask.h>
+ #include <linux/interrupt.h>
+diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
+index 6bace7695788..20d7bc5f0eb5 100644
+--- a/arch/mips/kernel/smp.c
++++ b/arch/mips/kernel/smp.c
+@@ -66,6 +66,7 @@ EXPORT_SYMBOL(cpu_sibling_map);
+ cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
+ EXPORT_SYMBOL(cpu_core_map);
+ 
++static DECLARE_COMPLETION(cpu_starting);
+ static DECLARE_COMPLETION(cpu_running);
+ 
+ /*
+@@ -376,6 +377,12 @@ asmlinkage void start_secondary(void)
+ 	cpumask_set_cpu(cpu, &cpu_coherent_mask);
+ 	notify_cpu_starting(cpu);
+ 
++	/* Notify boot CPU that we're starting & ready to sync counters */
++	complete(&cpu_starting);
++
++	synchronise_count_slave(cpu);
++
++	/* The CPU is running and counters synchronised, now mark it online */
+ 	set_cpu_online(cpu, true);
+ 
+ 	set_cpu_sibling_map(cpu);
+@@ -383,8 +390,11 @@ asmlinkage void start_secondary(void)
+ 
+ 	calculate_cpu_foreign_map();
+ 
++	/*
++	 * Notify boot CPU that we're up & online and it can safely return
++	 * from __cpu_up
++	 */
+ 	complete(&cpu_running);
+-	synchronise_count_slave(cpu);
+ 
+ 	/*
+ 	 * irq will be enabled in ->smp_finish(), enabling it too early
+@@ -443,17 +453,17 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
+ {
+ 	mp_ops->boot_secondary(cpu, tidle);
+ 
+-	/*
+-	 * We must check for timeout here, as the CPU will not be marked
+-	 * online until the counters are synchronised.
+-	 */
+-	if (!wait_for_completion_timeout(&cpu_running,
++	/* Wait for CPU to start and be ready to sync counters */
++	if (!wait_for_completion_timeout(&cpu_starting,
+ 					 msecs_to_jiffies(1000))) {
+ 		pr_crit("CPU%u: failed to start\n", cpu);
+ 		return -EIO;
+ 	}
+ 
+ 	synchronise_count_master(cpu);
++
++	/* Wait for CPU to finish startup & mark itself online before return */
++	wait_for_completion(&cpu_running);
+ 	return 0;
+ }
+ 
+diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
+index c28ff53c8da0..cdb5a191b9d5 100644
+--- a/arch/mips/mm/uasm-micromips.c
++++ b/arch/mips/mm/uasm-micromips.c
+@@ -80,7 +80,7 @@ static const struct insn const insn_table_MM[insn_invalid] = {
+ 	[insn_jr]	= {M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS},
+ 	[insn_lb]	= {M(mm_lb32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
+ 	[insn_ld]	= {0, 0},
+-	[insn_lh]	= {M(mm_lh32_op, 0, 0, 0, 0, 0), RS | RS | SIMM},
++	[insn_lh]	= {M(mm_lh32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
+ 	[insn_ll]	= {M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM},
+ 	[insn_lld]	= {0, 0},
+ 	[insn_lui]	= {M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM},
+diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
+index 401776f92288..e45f05cc510d 100644
+--- a/arch/mips/net/ebpf_jit.c
++++ b/arch/mips/net/ebpf_jit.c
+@@ -1485,7 +1485,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
+ 		}
+ 		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
+ 		if (src < 0)
+-			return dst;
++			return src;
+ 		if (BPF_MODE(insn->code) == BPF_XADD) {
+ 			switch (BPF_SIZE(insn->code)) {
+ 			case BPF_W:
+diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
+index 5482928eea1b..abef812de7f8 100644
+--- a/arch/powerpc/include/asm/code-patching.h
++++ b/arch/powerpc/include/asm/code-patching.h
+@@ -83,16 +83,8 @@ static inline unsigned long ppc_function_entry(void *func)
+ 	 * On PPC64 ABIv1 the function pointer actually points to the
+ 	 * function's descriptor. The first entry in the descriptor is the
+ 	 * address of the function text.
+-	 *
+-	 * However, we may also receive pointer to an assembly symbol. To
+-	 * detect that, we first check if the function pointer we receive
+-	 * already points to kernel/module text and we only dereference it
+-	 * if it doesn't.
+ 	 */
+-	if (kernel_text_address((unsigned long)func))
+-		return (unsigned long)func;
+-	else
+-		return ((func_descr_t *)func)->entry;
++	return ((func_descr_t *)func)->entry;
+ #else
+ 	return (unsigned long)func;
+ #endif
+diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
+index 367494dc67d9..bebc3007a793 100644
+--- a/arch/powerpc/kernel/kprobes.c
++++ b/arch/powerpc/kernel/kprobes.c
+@@ -600,7 +600,12 @@ NOKPROBE_SYMBOL(kprobe_fault_handler);
+ 
+ unsigned long arch_deref_entry_point(void *entry)
+ {
+-	return ppc_global_function_entry(entry);
++#ifdef PPC64_ELF_ABI_v1
++	if (!kernel_text_address((unsigned long)entry))
++		return ppc_global_function_entry(entry);
++	else
++#endif
++		return (unsigned long)entry;
+ }
+ NOKPROBE_SYMBOL(arch_deref_entry_point);
+ 
+diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
+index cdf82492b770..836877e2da22 100644
+--- a/arch/x86/kernel/cpu/Makefile
++++ b/arch/x86/kernel/cpu/Makefile
+@@ -21,7 +21,7 @@ obj-y			+= common.o
+ obj-y			+= rdrand.o
+ obj-y			+= match.o
+ obj-y			+= bugs.o
+-obj-$(CONFIG_CPU_FREQ)	+= aperfmperf.o
++obj-y			+= aperfmperf.o
+ 
+ obj-$(CONFIG_PROC_FS)	+= proc.o
+ obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
+diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
+index 0ee83321a313..957813e0180d 100644
+--- a/arch/x86/kernel/cpu/aperfmperf.c
++++ b/arch/x86/kernel/cpu/aperfmperf.c
+@@ -42,10 +42,6 @@ static void aperfmperf_snapshot_khz(void *dummy)
+ 	s64 time_delta = ktime_ms_delta(now, s->time);
+ 	unsigned long flags;
+ 
+-	/* Don't bother re-computing within the cache threshold time. */
+-	if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
+-		return;
+-
+ 	local_irq_save(flags);
+ 	rdmsrl(MSR_IA32_APERF, aperf);
+ 	rdmsrl(MSR_IA32_MPERF, mperf);
+@@ -74,6 +70,7 @@ static void aperfmperf_snapshot_khz(void *dummy)
+ 
+ unsigned int arch_freq_get_on_cpu(int cpu)
+ {
++	s64 time_delta;
+ 	unsigned int khz;
+ 
+ 	if (!cpu_khz)
+@@ -82,6 +79,12 @@ unsigned int arch_freq_get_on_cpu(int cpu)
+ 	if (!static_cpu_has(X86_FEATURE_APERFMPERF))
+ 		return 0;
+ 
++	/* Don't bother re-computing within the cache threshold time. */
++	time_delta = ktime_ms_delta(ktime_get(), per_cpu(samples.time, cpu));
++	khz = per_cpu(samples.khz, cpu);
++	if (khz && time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
++		return khz;
++
+ 	smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);
+ 	khz = per_cpu(samples.khz, cpu);
+ 	if (khz)
+diff --git a/arch/x86/kernel/cpu/mcheck/dev-mcelog.c b/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
+index 10cec43aac38..7f85b76f43bc 100644
+--- a/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
++++ b/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
+@@ -24,14 +24,6 @@ static DEFINE_MUTEX(mce_chrdev_read_mutex);
+ static char mce_helper[128];
+ static char *mce_helper_argv[2] = { mce_helper, NULL };
+ 
+-#define mce_log_get_idx_check(p) \
+-({ \
+-	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
+-			 !lockdep_is_held(&mce_chrdev_read_mutex), \
+-			 "suspicious mce_log_get_idx_check() usage"); \
+-	smp_load_acquire(&(p)); \
+-})
+-
+ /*
+  * Lockless MCE logging infrastructure.
+  * This avoids deadlocks on printk locks without having to break locks. Also
+@@ -53,43 +45,32 @@ static int dev_mce_log(struct notifier_block *nb, unsigned long val,
+ 				void *data)
+ {
+ 	struct mce *mce = (struct mce *)data;
+-	unsigned int next, entry;
+-
+-	wmb();
+-	for (;;) {
+-		entry = mce_log_get_idx_check(mcelog.next);
+-		for (;;) {
+-
+-			/*
+-			 * When the buffer fills up discard new entries.
+-			 * Assume that the earlier errors are the more
+-			 * interesting ones:
+-			 */
+-			if (entry >= MCE_LOG_LEN) {
+-				set_bit(MCE_OVERFLOW,
+-					(unsigned long *)&mcelog.flags);
+-				return NOTIFY_OK;
+-			}
+-			/* Old left over entry. Skip: */
+-			if (mcelog.entry[entry].finished) {
+-				entry++;
+-				continue;
+-			}
+-			break;
+-		}
+-		smp_rmb();
+-		next = entry + 1;
+-		if (cmpxchg(&mcelog.next, entry, next) == entry)
+-			break;
++	unsigned int entry;
++
++	mutex_lock(&mce_chrdev_read_mutex);
++
++	entry = mcelog.next;
++
++	/*
++	 * When the buffer fills up discard new entries. Assume that the
++	 * earlier errors are the more interesting ones:
++	 */
++	if (entry >= MCE_LOG_LEN) {
++		set_bit(MCE_OVERFLOW, (unsigned long *)&mcelog.flags);
++		goto unlock;
+ 	}
++
++	mcelog.next = entry + 1;
++
+ 	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
+-	wmb();
+ 	mcelog.entry[entry].finished = 1;
+-	wmb();
+ 
+ 	/* wake processes polling /dev/mcelog */
+ 	wake_up_interruptible(&mce_chrdev_wait);
+ 
++unlock:
++	mutex_unlock(&mce_chrdev_read_mutex);
++
+ 	return NOTIFY_OK;
+ }
+ 
+@@ -177,13 +158,6 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
+ 	return 0;
+ }
+ 
+-static void collect_tscs(void *data)
+-{
+-	unsigned long *cpu_tsc = (unsigned long *)data;
+-
+-	cpu_tsc[smp_processor_id()] = rdtsc();
+-}
+-
+ static int mce_apei_read_done;
+ 
+ /* Collect MCE record of previous boot in persistent storage via APEI ERST. */
+@@ -231,14 +205,9 @@ static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
+ 				size_t usize, loff_t *off)
+ {
+ 	char __user *buf = ubuf;
+-	unsigned long *cpu_tsc;
+-	unsigned prev, next;
++	unsigned next;
+ 	int i, err;
+ 
+-	cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
+-	if (!cpu_tsc)
+-		return -ENOMEM;
+-
+ 	mutex_lock(&mce_chrdev_read_mutex);
+ 
+ 	if (!mce_apei_read_done) {
+@@ -247,65 +216,29 @@ static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
+ 			goto out;
+ 	}
+ 
+-	next = mce_log_get_idx_check(mcelog.next);
+-
+ 	/* Only supports full reads right now */
+ 	err = -EINVAL;
+ 	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
+ 		goto out;
+ 
++	next = mcelog.next;
+ 	err = 0;
+-	prev = 0;
+-	do {
+-		for (i = prev; i < next; i++) {
+-			unsigned long start = jiffies;
+-			struct mce *m = &mcelog.entry[i];
+-
+-			while (!m->finished) {
+-				if (time_after_eq(jiffies, start + 2)) {
+-					memset(m, 0, sizeof(*m));
+-					goto timeout;
+-				}
+-				cpu_relax();
+-			}
+-			smp_rmb();
+-			err |= copy_to_user(buf, m, sizeof(*m));
+-			buf += sizeof(*m);
+-timeout:
+-			;
+-		}
+-
+-		memset(mcelog.entry + prev, 0,
+-		       (next - prev) * sizeof(struct mce));
+-		prev = next;
+-		next = cmpxchg(&mcelog.next, prev, 0);
+-	} while (next != prev);
+-
+-	synchronize_sched();
+ 
+-	/*
+-	 * Collect entries that were still getting written before the
+-	 * synchronize.
+-	 */
+-	on_each_cpu(collect_tscs, cpu_tsc, 1);
+-
+-	for (i = next; i < MCE_LOG_LEN; i++) {
++	for (i = 0; i < next; i++) {
+ 		struct mce *m = &mcelog.entry[i];
+ 
+-		if (m->finished && m->tsc < cpu_tsc[m->cpu]) {
+-			err |= copy_to_user(buf, m, sizeof(*m));
+-			smp_rmb();
+-			buf += sizeof(*m);
+-			memset(m, 0, sizeof(*m));
+-		}
++		err |= copy_to_user(buf, m, sizeof(*m));
++		buf += sizeof(*m);
+ 	}
+ 
++	memset(mcelog.entry, 0, next * sizeof(struct mce));
++	mcelog.next = 0;
++
+ 	if (err)
+ 		err = -EFAULT;
+ 
+ out:
+ 	mutex_unlock(&mce_chrdev_read_mutex);
+-	kfree(cpu_tsc);
+ 
+ 	return err ? err : buf - ubuf;
+ }
+diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
+index 218f79825b3c..510e69596278 100644
+--- a/arch/x86/kernel/cpu/proc.c
++++ b/arch/x86/kernel/cpu/proc.c
+@@ -2,6 +2,7 @@
+ #include <linux/timex.h>
+ #include <linux/string.h>
+ #include <linux/seq_file.h>
++#include <linux/cpufreq.h>
+ 
+ /*
+  *	Get CPU information for use by the procfs.
+@@ -75,9 +76,16 @@ static int show_cpuinfo(struct seq_file *m, void *v)
+ 	if (c->microcode)
+ 		seq_printf(m, "microcode\t: 0x%x\n", c->microcode);
+ 
+-	if (cpu_has(c, X86_FEATURE_TSC))
++	if (cpu_has(c, X86_FEATURE_TSC)) {
++		unsigned int freq = arch_freq_get_on_cpu(cpu);
++
++		if (!freq)
++			freq = cpufreq_quick_get(cpu);
++		if (!freq)
++			freq = cpu_khz;
+ 		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
+-			   cpu_khz / 1000, (cpu_khz % 1000));
++			   freq / 1000, (freq % 1000));
++	}
+ 
+ 	/* Cache size */
+ 	if (c->x86_cache_size >= 0)
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index d3d5523862c2..b49952b5a189 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -593,10 +593,22 @@ static int virtblk_map_queues(struct blk_mq_tag_set *set)
+ 	return blk_mq_virtio_map_queues(set, vblk->vdev, 0);
+ }
+ 
++#ifdef CONFIG_VIRTIO_BLK_SCSI
++static void virtblk_initialize_rq(struct request *req)
++{
++	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
++
++	scsi_req_init(&vbr->sreq);
++}
++#endif
++
+ static const struct blk_mq_ops virtio_mq_ops = {
+ 	.queue_rq	= virtio_queue_rq,
+ 	.complete	= virtblk_request_done,
+ 	.init_request	= virtblk_init_request,
++#ifdef CONFIG_VIRTIO_BLK_SCSI
++	.initialize_rq_fn = virtblk_initialize_rq,
++#endif
+ 	.map_queues	= virtblk_map_queues,
+ };
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+index 31db356476f8..1086cf86354f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+@@ -93,6 +93,10 @@ static int uvd_v6_0_early_init(void *handle)
+ {
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ 
++	if (!(adev->flags & AMD_IS_APU) &&
++	    (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
++		return -ENOENT;
++
+ 	uvd_v6_0_set_ring_funcs(adev);
+ 	uvd_v6_0_set_irq_funcs(adev);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+index 90332f55cfba..cf81065e3c5a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+@@ -365,15 +365,10 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
+ {
+ 	u32 tmp;
+ 
+-	/* Fiji, Stoney, Polaris10, Polaris11, Polaris12 are single pipe */
+ 	if ((adev->asic_type == CHIP_FIJI) ||
+-	    (adev->asic_type == CHIP_STONEY) ||
+-	    (adev->asic_type == CHIP_POLARIS10) ||
+-	    (adev->asic_type == CHIP_POLARIS11) ||
+-	    (adev->asic_type == CHIP_POLARIS12))
++	    (adev->asic_type == CHIP_STONEY))
+ 		return AMDGPU_VCE_HARVEST_VCE1;
+ 
+-	/* Tonga and CZ are dual or single pipe */
+ 	if (adev->flags & AMD_IS_APU)
+ 		tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
+ 		       VCE_HARVEST_FUSE_MACRO__MASK) >>
+@@ -391,6 +386,11 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
+ 	case 3:
+ 		return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
+ 	default:
++		if ((adev->asic_type == CHIP_POLARIS10) ||
++		    (adev->asic_type == CHIP_POLARIS11) ||
++		    (adev->asic_type == CHIP_POLARIS12))
++			return AMDGPU_VCE_HARVEST_VCE1;
++
+ 		return 0;
+ 	}
+ }
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index 61c313e21a91..169843de91cb 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -3687,9 +3687,16 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
+ 
+ 	}
+ 
+-	/* Read the eDP Display control capabilities registers */
+-	if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
+-	    drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
++	/*
++	 * Read the eDP display control registers.
++	 *
++	 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
++	 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
++	 * set, but require eDP 1.4+ detection (e.g. for supported link rates
++	 * method). The display control registers should read zero if they're
++	 * not supported anyway.
++	 */
++	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
+ 			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
+ 			     sizeof(intel_dp->edp_dpcd))
+ 		DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
+diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
+index d93efb49a2e2..954e9454625e 100644
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -495,7 +495,6 @@ struct intel_crtc_scaler_state {
+ 
+ struct intel_pipe_wm {
+ 	struct intel_wm_level wm[5];
+-	struct intel_wm_level raw_wm[5];
+ 	uint32_t linetime;
+ 	bool fbc_wm_enabled;
+ 	bool pipe_enabled;
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index 40b224b44d1b..1427cec843b9 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -2696,9 +2696,9 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
+ 				 const struct intel_crtc *intel_crtc,
+ 				 int level,
+ 				 struct intel_crtc_state *cstate,
+-				 struct intel_plane_state *pristate,
+-				 struct intel_plane_state *sprstate,
+-				 struct intel_plane_state *curstate,
++				 const struct intel_plane_state *pristate,
++				 const struct intel_plane_state *sprstate,
++				 const struct intel_plane_state *curstate,
+ 				 struct intel_wm_level *result)
+ {
+ 	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
+@@ -3016,28 +3016,24 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
+ 	struct intel_pipe_wm *pipe_wm;
+ 	struct drm_device *dev = state->dev;
+ 	const struct drm_i915_private *dev_priv = to_i915(dev);
+-	struct intel_plane *intel_plane;
+-	struct intel_plane_state *pristate = NULL;
+-	struct intel_plane_state *sprstate = NULL;
+-	struct intel_plane_state *curstate = NULL;
++	struct drm_plane *plane;
++	const struct drm_plane_state *plane_state;
++	const struct intel_plane_state *pristate = NULL;
++	const struct intel_plane_state *sprstate = NULL;
++	const struct intel_plane_state *curstate = NULL;
+ 	int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
+ 	struct ilk_wm_maximums max;
+ 
+ 	pipe_wm = &cstate->wm.ilk.optimal;
+ 
+-	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+-		struct intel_plane_state *ps;
+-
+-		ps = intel_atomic_get_existing_plane_state(state,
+-							   intel_plane);
+-		if (!ps)
+-			continue;
++	drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &cstate->base) {
++		const struct intel_plane_state *ps = to_intel_plane_state(plane_state);
+ 
+-		if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
++		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ 			pristate = ps;
+-		else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
++		else if (plane->type == DRM_PLANE_TYPE_OVERLAY)
+ 			sprstate = ps;
+-		else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
++		else if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ 			curstate = ps;
+ 	}
+ 
+@@ -3059,11 +3055,9 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
+ 	if (pipe_wm->sprites_scaled)
+ 		usable_level = 0;
+ 
+-	ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
+-			     pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);
+-
+ 	memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
+-	pipe_wm->wm[0] = pipe_wm->raw_wm[0];
++	ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
++			     pristate, sprstate, curstate, &pipe_wm->wm[0]);
+ 
+ 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ 		pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
+@@ -3073,8 +3067,8 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
+ 
+ 	ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
+ 
+-	for (level = 1; level <= max_level; level++) {
+-		struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
++	for (level = 1; level <= usable_level; level++) {
++		struct intel_wm_level *wm = &pipe_wm->wm[level];
+ 
+ 		ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
+ 				     pristate, sprstate, curstate, wm);
+@@ -3084,13 +3078,10 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
+ 		 * register maximums since such watermarks are
+ 		 * always invalid.
+ 		 */
+-		if (level > usable_level)
+-			continue;
+-
+-		if (ilk_validate_wm_level(level, &max, wm))
+-			pipe_wm->wm[level] = *wm;
+-		else
+-			usable_level = level;
++		if (!ilk_validate_wm_level(level, &max, wm)) {
++			memset(wm, 0, sizeof(*wm));
++			break;
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/irqchip/irq-mvebu-gicp.c b/drivers/irqchip/irq-mvebu-gicp.c
+index b283fc90be1e..17a4a7b6cdbb 100644
+--- a/drivers/irqchip/irq-mvebu-gicp.c
++++ b/drivers/irqchip/irq-mvebu-gicp.c
+@@ -194,6 +194,7 @@ static int mvebu_gicp_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	gicp->dev = &pdev->dev;
++	spin_lock_init(&gicp->spi_lock);
+ 
+ 	gicp->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	if (!gicp->res)
+diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
+index e702d48bd023..81ba6e0d88d8 100644
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -204,7 +204,8 @@ check_name(struct dentry *direntry, struct cifs_tcon *tcon)
+ 	struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
+ 	int i;
+ 
+-	if (unlikely(direntry->d_name.len >
++	if (unlikely(tcon->fsAttrInfo.MaxPathNameComponentLength &&
++		     direntry->d_name.len >
+ 		     le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength)))
+ 		return -ENAMETOOLONG;
+ 
+@@ -520,7 +521,7 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
+ 
+ 	rc = check_name(direntry, tcon);
+ 	if (rc)
+-		goto out_free_xid;
++		goto out;
+ 
+ 	server = tcon->ses->server;
+ 
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index 28d2753be094..a9e3b26e1b72 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -855,9 +855,12 @@ static int hugetlbfs_error_remove_page(struct address_space *mapping,
+ 				struct page *page)
+ {
+ 	struct inode *inode = mapping->host;
++	pgoff_t index = page->index;
+ 
+ 	remove_huge_page(page);
+-	hugetlb_fix_reserve_counts(inode);
++	if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
++		hugetlb_fix_reserve_counts(inode);
++
+ 	return 0;
+ }
+ 
+diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
+index fb15a96df0b6..386aecce881d 100644
+--- a/fs/ocfs2/alloc.c
++++ b/fs/ocfs2/alloc.c
+@@ -7310,13 +7310,24 @@ int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
+ 
+ static int ocfs2_trim_extent(struct super_block *sb,
+ 			     struct ocfs2_group_desc *gd,
+-			     u32 start, u32 count)
++			     u64 group, u32 start, u32 count)
+ {
+ 	u64 discard, bcount;
++	struct ocfs2_super *osb = OCFS2_SB(sb);
+ 
+ 	bcount = ocfs2_clusters_to_blocks(sb, count);
+-	discard = le64_to_cpu(gd->bg_blkno) +
+-			ocfs2_clusters_to_blocks(sb, start);
++	discard = ocfs2_clusters_to_blocks(sb, start);
++
++	/*
++	 * For the first cluster group, the gd->bg_blkno is not at the start
++	 * of the group, but at an offset from the start. If we add it while
++	 * calculating discard for first group, we will wrongly start fstrim a
++	 * few blocks after the desried start block and the range can cross
++	 * over into the next cluster group. So, add it only if this is not
++	 * the first cluster group.
++	 */
++	if (group != osb->first_cluster_group_blkno)
++		discard += le64_to_cpu(gd->bg_blkno);
+ 
+ 	trace_ocfs2_trim_extent(sb, (unsigned long long)discard, bcount);
+ 
+@@ -7324,7 +7335,7 @@ static int ocfs2_trim_extent(struct super_block *sb,
+ }
+ 
+ static int ocfs2_trim_group(struct super_block *sb,
+-			    struct ocfs2_group_desc *gd,
++			    struct ocfs2_group_desc *gd, u64 group,
+ 			    u32 start, u32 max, u32 minbits)
+ {
+ 	int ret = 0, count = 0, next;
+@@ -7343,7 +7354,7 @@ static int ocfs2_trim_group(struct super_block *sb,
+ 		next = ocfs2_find_next_bit(bitmap, max, start);
+ 
+ 		if ((next - start) >= minbits) {
+-			ret = ocfs2_trim_extent(sb, gd,
++			ret = ocfs2_trim_extent(sb, gd, group,
+ 						start, next - start);
+ 			if (ret < 0) {
+ 				mlog_errno(ret);
+@@ -7441,7 +7452,8 @@ int ocfs2_trim_fs(struct super_block *sb, struct fstrim_range *range)
+ 		}
+ 
+ 		gd = (struct ocfs2_group_desc *)gd_bh->b_data;
+-		cnt = ocfs2_trim_group(sb, gd, first_bit, last_bit, minlen);
++		cnt = ocfs2_trim_group(sb, gd, group,
++				       first_bit, last_bit, minlen);
+ 		brelse(gd_bh);
+ 		gd_bh = NULL;
+ 		if (cnt < 0) {
+diff --git a/include/linux/swap.h b/include/linux/swap.h
+index d83d28e53e62..a615eda102ae 100644
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -246,6 +246,10 @@ struct swap_info_struct {
+ 					 * both locks need hold, hold swap_lock
+ 					 * first.
+ 					 */
++	spinlock_t cont_lock;		/*
++					 * protect swap count continuation page
++					 * list.
++					 */
+ 	struct work_struct discard_work; /* discard worker */
+ 	struct swap_cluster_list discard_clusters; /* discard clusters list */
+ };
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 7242a6e1ec76..95bbe99e4e6c 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -901,9 +901,11 @@ list_update_cgroup_event(struct perf_event *event,
+ 	cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
+ 	/* cpuctx->cgrp is NULL unless a cgroup event is active in this CPU .*/
+ 	if (add) {
++		struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
++
+ 		list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list));
+-		if (perf_cgroup_from_task(current, ctx) == event->cgrp)
+-			cpuctx->cgrp = event->cgrp;
++		if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
++			cpuctx->cgrp = cgrp;
+ 	} else {
+ 		list_del(cpuctx_entry);
+ 		cpuctx->cgrp = NULL;
+diff --git a/kernel/futex.c b/kernel/futex.c
+index bf57ab12ffe8..a6639b346373 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -901,11 +901,27 @@ void exit_pi_state_list(struct task_struct *curr)
+ 	 */
+ 	raw_spin_lock_irq(&curr->pi_lock);
+ 	while (!list_empty(head)) {
+-
+ 		next = head->next;
+ 		pi_state = list_entry(next, struct futex_pi_state, list);
+ 		key = pi_state->key;
+ 		hb = hash_futex(&key);
++
++		/*
++		 * We can race against put_pi_state() removing itself from the
++		 * list (a waiter going away). put_pi_state() will first
++		 * decrement the reference count and then modify the list, so
++		 * its possible to see the list entry but fail this reference
++		 * acquire.
++		 *
++		 * In that case; drop the locks to let put_pi_state() make
++		 * progress and retry the loop.
++		 */
++		if (!atomic_inc_not_zero(&pi_state->refcount)) {
++			raw_spin_unlock_irq(&curr->pi_lock);
++			cpu_relax();
++			raw_spin_lock_irq(&curr->pi_lock);
++			continue;
++		}
+ 		raw_spin_unlock_irq(&curr->pi_lock);
+ 
+ 		spin_lock(&hb->lock);
+@@ -916,8 +932,10 @@ void exit_pi_state_list(struct task_struct *curr)
+ 		 * task still owns the PI-state:
+ 		 */
+ 		if (head->next != next) {
++			/* retain curr->pi_lock for the loop invariant */
+ 			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
+ 			spin_unlock(&hb->lock);
++			put_pi_state(pi_state);
+ 			continue;
+ 		}
+ 
+@@ -925,9 +943,8 @@ void exit_pi_state_list(struct task_struct *curr)
+ 		WARN_ON(list_empty(&pi_state->list));
+ 		list_del_init(&pi_state->list);
+ 		pi_state->owner = NULL;
+-		raw_spin_unlock(&curr->pi_lock);
+ 
+-		get_pi_state(pi_state);
++		raw_spin_unlock(&curr->pi_lock);
+ 		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+ 		spin_unlock(&hb->lock);
+ 
+diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
+index 0bd8a611eb83..fef5d2e114be 100644
+--- a/lib/asn1_decoder.c
++++ b/lib/asn1_decoder.c
+@@ -284,6 +284,9 @@ int asn1_ber_decoder(const struct asn1_decoder *decoder,
+ 				if (unlikely(len > datalen - dp))
+ 					goto data_overrun_error;
+ 			}
++		} else {
++			if (unlikely(len > datalen - dp))
++				goto data_overrun_error;
+ 		}
+ 
+ 		if (flags & FLAG_CONS) {
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 31e207cb399b..011725849f52 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3977,6 +3977,9 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
+ 			    unsigned long src_addr,
+ 			    struct page **pagep)
+ {
++	struct address_space *mapping;
++	pgoff_t idx;
++	unsigned long size;
+ 	int vm_shared = dst_vma->vm_flags & VM_SHARED;
+ 	struct hstate *h = hstate_vma(dst_vma);
+ 	pte_t _dst_pte;
+@@ -4014,13 +4017,24 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
+ 	__SetPageUptodate(page);
+ 	set_page_huge_active(page);
+ 
++	mapping = dst_vma->vm_file->f_mapping;
++	idx = vma_hugecache_offset(h, dst_vma, dst_addr);
++
+ 	/*
+ 	 * If shared, add to page cache
+ 	 */
+ 	if (vm_shared) {
+-		struct address_space *mapping = dst_vma->vm_file->f_mapping;
+-		pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
++		size = i_size_read(mapping->host) >> huge_page_shift(h);
++		ret = -EFAULT;
++		if (idx >= size)
++			goto out_release_nounlock;
+ 
++		/*
++		 * Serialization between remove_inode_hugepages() and
++		 * huge_add_to_page_cache() below happens through the
++		 * hugetlb_fault_mutex_table that here must be hold by
++		 * the caller.
++		 */
+ 		ret = huge_add_to_page_cache(page, mapping, idx);
+ 		if (ret)
+ 			goto out_release_nounlock;
+@@ -4029,6 +4043,20 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
+ 	ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
+ 	spin_lock(ptl);
+ 
++	/*
++	 * Recheck the i_size after holding PT lock to make sure not
++	 * to leave any page mapped (as page_mapped()) beyond the end
++	 * of the i_size (remove_inode_hugepages() is strict about
++	 * enforcing that). If we bail out here, we'll also leave a
++	 * page in the radix tree in the vm_shared case beyond the end
++	 * of the i_size, but remove_inode_hugepages() will take care
++	 * of it as soon as we drop the hugetlb_fault_mutex_table.
++	 */
++	size = i_size_read(mapping->host) >> huge_page_shift(h);
++	ret = -EFAULT;
++	if (idx >= size)
++		goto out_release_unlock;
++
+ 	ret = -EEXIST;
+ 	if (!huge_pte_none(huge_ptep_get(dst_pte)))
+ 		goto out_release_unlock;
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index a8952b6563c6..3191465b0ccf 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -2635,6 +2635,7 @@ static struct swap_info_struct *alloc_swap_info(void)
+ 	p->flags = SWP_USED;
+ 	spin_unlock(&swap_lock);
+ 	spin_lock_init(&p->lock);
++	spin_lock_init(&p->cont_lock);
+ 
+ 	return p;
+ }
+@@ -3307,6 +3308,7 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
+ 	head = vmalloc_to_page(si->swap_map + offset);
+ 	offset &= ~PAGE_MASK;
+ 
++	spin_lock(&si->cont_lock);
+ 	/*
+ 	 * Page allocation does not initialize the page's lru field,
+ 	 * but it does always reset its private field.
+@@ -3326,7 +3328,7 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
+ 		 * a continuation page, free our allocation and use this one.
+ 		 */
+ 		if (!(count & COUNT_CONTINUED))
+-			goto out;
++			goto out_unlock_cont;
+ 
+ 		map = kmap_atomic(list_page) + offset;
+ 		count = *map;
+@@ -3337,11 +3339,13 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
+ 		 * free our allocation and use this one.
+ 		 */
+ 		if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
+-			goto out;
++			goto out_unlock_cont;
+ 	}
+ 
+ 	list_add_tail(&page->lru, &head->lru);
+ 	page = NULL;			/* now it's attached, don't free it */
++out_unlock_cont:
++	spin_unlock(&si->cont_lock);
+ out:
+ 	unlock_cluster(ci);
+ 	spin_unlock(&si->lock);
+@@ -3366,6 +3370,7 @@ static bool swap_count_continued(struct swap_info_struct *si,
+ 	struct page *head;
+ 	struct page *page;
+ 	unsigned char *map;
++	bool ret;
+ 
+ 	head = vmalloc_to_page(si->swap_map + offset);
+ 	if (page_private(head) != SWP_CONTINUED) {
+@@ -3373,6 +3378,7 @@ static bool swap_count_continued(struct swap_info_struct *si,
+ 		return false;		/* need to add count continuation */
+ 	}
+ 
++	spin_lock(&si->cont_lock);
+ 	offset &= ~PAGE_MASK;
+ 	page = list_entry(head->lru.next, struct page, lru);
+ 	map = kmap_atomic(page) + offset;
+@@ -3393,8 +3399,10 @@ static bool swap_count_continued(struct swap_info_struct *si,
+ 		if (*map == SWAP_CONT_MAX) {
+ 			kunmap_atomic(map);
+ 			page = list_entry(page->lru.next, struct page, lru);
+-			if (page == head)
+-				return false;	/* add count continuation */
++			if (page == head) {
++				ret = false;	/* add count continuation */
++				goto out;
++			}
+ 			map = kmap_atomic(page) + offset;
+ init_map:		*map = 0;		/* we didn't zero the page */
+ 		}
+@@ -3407,7 +3415,7 @@ init_map:		*map = 0;		/* we didn't zero the page */
+ 			kunmap_atomic(map);
+ 			page = list_entry(page->lru.prev, struct page, lru);
+ 		}
+-		return true;			/* incremented */
++		ret = true;			/* incremented */
+ 
+ 	} else {				/* decrementing */
+ 		/*
+@@ -3433,8 +3441,11 @@ init_map:		*map = 0;		/* we didn't zero the page */
+ 			kunmap_atomic(map);
+ 			page = list_entry(page->lru.prev, struct page, lru);
+ 		}
+-		return count == COUNT_CONTINUED;
++		ret = count == COUNT_CONTINUED;
+ 	}
++out:
++	spin_unlock(&si->cont_lock);
++	return ret;
+ }
+ 
+ /*
+diff --git a/security/keys/keyring.c b/security/keys/keyring.c
+index 06173b091a74..c04032302a25 100644
+--- a/security/keys/keyring.c
++++ b/security/keys/keyring.c
+@@ -459,34 +459,33 @@ static long keyring_read(const struct key *keyring,
+ 			 char __user *buffer, size_t buflen)
+ {
+ 	struct keyring_read_iterator_context ctx;
+-	unsigned long nr_keys;
+-	int ret;
++	long ret;
+ 
+ 	kenter("{%d},,%zu", key_serial(keyring), buflen);
+ 
+ 	if (buflen & (sizeof(key_serial_t) - 1))
+ 		return -EINVAL;
+ 
+-	nr_keys = keyring->keys.nr_leaves_on_tree;
+-	if (nr_keys == 0)
+-		return 0;
+-
+-	/* Calculate how much data we could return */
+-	if (!buffer || !buflen)
+-		return nr_keys * sizeof(key_serial_t);
+-
+-	/* Copy the IDs of the subscribed keys into the buffer */
+-	ctx.buffer = (key_serial_t __user *)buffer;
+-	ctx.buflen = buflen;
+-	ctx.count = 0;
+-	ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx);
+-	if (ret < 0) {
+-		kleave(" = %d [iterate]", ret);
+-		return ret;
++	/* Copy as many key IDs as fit into the buffer */
++	if (buffer && buflen) {
++		ctx.buffer = (key_serial_t __user *)buffer;
++		ctx.buflen = buflen;
++		ctx.count = 0;
++		ret = assoc_array_iterate(&keyring->keys,
++					  keyring_read_iterator, &ctx);
++		if (ret < 0) {
++			kleave(" = %ld [iterate]", ret);
++			return ret;
++		}
+ 	}
+ 
+-	kleave(" = %zu [ok]", ctx.count);
+-	return ctx.count;
++	/* Return the size of the buffer needed */
++	ret = keyring->keys.nr_leaves_on_tree * sizeof(key_serial_t);
++	if (ret <= buflen)
++		kleave("= %ld [ok]", ret);
++	else
++		kleave("= %ld [buffer too small]", ret);
++	return ret;
+ }
+ 
+ /*
+diff --git a/security/keys/trusted.c b/security/keys/trusted.c
+index bd85315cbfeb..98aa89ff7bfd 100644
+--- a/security/keys/trusted.c
++++ b/security/keys/trusted.c
+@@ -1147,20 +1147,21 @@ static long trusted_read(const struct key *key, char __user *buffer,
+ 	p = dereference_key_locked(key);
+ 	if (!p)
+ 		return -EINVAL;
+-	if (!buffer || buflen <= 0)
+-		return 2 * p->blob_len;
+-	ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL);
+-	if (!ascii_buf)
+-		return -ENOMEM;
+ 
+-	bufp = ascii_buf;
+-	for (i = 0; i < p->blob_len; i++)
+-		bufp = hex_byte_pack(bufp, p->blob[i]);
+-	if ((copy_to_user(buffer, ascii_buf, 2 * p->blob_len)) != 0) {
++	if (buffer && buflen >= 2 * p->blob_len) {
++		ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL);
++		if (!ascii_buf)
++			return -ENOMEM;
++
++		bufp = ascii_buf;
++		for (i = 0; i < p->blob_len; i++)
++			bufp = hex_byte_pack(bufp, p->blob[i]);
++		if (copy_to_user(buffer, ascii_buf, 2 * p->blob_len) != 0) {
++			kzfree(ascii_buf);
++			return -EFAULT;
++		}
+ 		kzfree(ascii_buf);
+-		return -EFAULT;
+ 	}
+-	kzfree(ascii_buf);
+ 	return 2 * p->blob_len;
+ }
+ 
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index 6c9cba2166d9..d10c780dfd54 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -663,7 +663,7 @@ static int deliver_to_subscribers(struct snd_seq_client *client,
+ 	if (atomic)
+ 		read_lock(&grp->list_lock);
+ 	else
+-		down_read(&grp->list_mutex);
++		down_read_nested(&grp->list_mutex, hop);
+ 	list_for_each_entry(subs, &grp->list_head, src_list) {
+ 		/* both ports ready? */
+ 		if (atomic_read(&subs->ref_count) != 2)
+diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c
+index 6a437eb66115..59127b6ef39e 100644
+--- a/sound/core/timer_compat.c
++++ b/sound/core/timer_compat.c
+@@ -133,7 +133,8 @@ enum {
+ #endif /* CONFIG_X86_X32 */
+ };
+ 
+-static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
++static long __snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd,
++					  unsigned long arg)
+ {
+ 	void __user *argp = compat_ptr(arg);
+ 
+@@ -153,7 +154,7 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns
+ 	case SNDRV_TIMER_IOCTL_PAUSE:
+ 	case SNDRV_TIMER_IOCTL_PAUSE_OLD:
+ 	case SNDRV_TIMER_IOCTL_NEXT_DEVICE:
+-		return snd_timer_user_ioctl(file, cmd, (unsigned long)argp);
++		return __snd_timer_user_ioctl(file, cmd, (unsigned long)argp);
+ 	case SNDRV_TIMER_IOCTL_GPARAMS32:
+ 		return snd_timer_user_gparams_compat(file, argp);
+ 	case SNDRV_TIMER_IOCTL_INFO32:
+@@ -167,3 +168,15 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns
+ 	}
+ 	return -ENOIOCTLCMD;
+ }
++
++static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd,
++					unsigned long arg)
++{
++	struct snd_timer_user *tu = file->private_data;
++	long ret;
++
++	mutex_lock(&tu->ioctl_lock);
++	ret = __snd_timer_user_ioctl_compat(file, cmd, arg);
++	mutex_unlock(&tu->ioctl_lock);
++	return ret;
++}
+diff --git a/sound/soc/codecs/adau17x1.c b/sound/soc/codecs/adau17x1.c
+index 2c1bd2763864..6758f789b712 100644
+--- a/sound/soc/codecs/adau17x1.c
++++ b/sound/soc/codecs/adau17x1.c
+@@ -90,6 +90,27 @@ static int adau17x1_pll_event(struct snd_soc_dapm_widget *w,
+ 	return 0;
+ }
+ 
++static int adau17x1_adc_fixup(struct snd_soc_dapm_widget *w,
++	struct snd_kcontrol *kcontrol, int event)
++{
++	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
++	struct adau *adau = snd_soc_codec_get_drvdata(codec);
++
++	/*
++	 * If we are capturing, toggle the ADOSR bit in Converter Control 0 to
++	 * avoid losing SNR (workaround from ADI). This must be done after
++	 * the ADC(s) have been enabled. According to the data sheet, it is
++	 * normally illegal to set this bit when the sampling rate is 96 kHz,
++	 * but according to ADI it is acceptable for this workaround.
++	 */
++	regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER0,
++		ADAU17X1_CONVERTER0_ADOSR, ADAU17X1_CONVERTER0_ADOSR);
++	regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER0,
++		ADAU17X1_CONVERTER0_ADOSR, 0);
++
++	return 0;
++}
++
+ static const char * const adau17x1_mono_stereo_text[] = {
+ 	"Stereo",
+ 	"Mono Left Channel (L+R)",
+@@ -121,7 +142,8 @@ static const struct snd_soc_dapm_widget adau17x1_dapm_widgets[] = {
+ 	SND_SOC_DAPM_MUX("Right DAC Mode Mux", SND_SOC_NOPM, 0, 0,
+ 		&adau17x1_dac_mode_mux),
+ 
+-	SND_SOC_DAPM_ADC("Left Decimator", NULL, ADAU17X1_ADC_CONTROL, 0, 0),
++	SND_SOC_DAPM_ADC_E("Left Decimator", NULL, ADAU17X1_ADC_CONTROL, 0, 0,
++			   adau17x1_adc_fixup, SND_SOC_DAPM_POST_PMU),
+ 	SND_SOC_DAPM_ADC("Right Decimator", NULL, ADAU17X1_ADC_CONTROL, 1, 0),
+ 	SND_SOC_DAPM_DAC("Left DAC", NULL, ADAU17X1_DAC_CONTROL0, 0, 0),
+ 	SND_SOC_DAPM_DAC("Right DAC", NULL, ADAU17X1_DAC_CONTROL0, 1, 0),
+diff --git a/sound/soc/codecs/adau17x1.h b/sound/soc/codecs/adau17x1.h
+index bf04b7efee40..db350035fad7 100644
+--- a/sound/soc/codecs/adau17x1.h
++++ b/sound/soc/codecs/adau17x1.h
+@@ -129,5 +129,7 @@ bool adau17x1_has_dsp(struct adau *adau);
+ 
+ #define ADAU17X1_CONVERTER0_CONVSR_MASK		0x7
+ 
++#define ADAU17X1_CONVERTER0_ADOSR		BIT(3)
++
+ 
+ #endif
+diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
+index aa6b68db80b4..b606f1643fe5 100644
+--- a/virt/kvm/arm/vgic/vgic-its.c
++++ b/virt/kvm/arm/vgic/vgic-its.c
+@@ -1803,37 +1803,33 @@ typedef int (*entry_fn_t)(struct vgic_its *its, u32 id, void *entry,
+ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, int esz,
+ 			  int start_id, entry_fn_t fn, void *opaque)
+ {
+-	void *entry = kzalloc(esz, GFP_KERNEL);
+ 	struct kvm *kvm = its->dev->kvm;
+ 	unsigned long len = size;
+ 	int id = start_id;
+ 	gpa_t gpa = base;
++	char entry[esz];
+ 	int ret;
+ 
++	memset(entry, 0, esz);
++
+ 	while (len > 0) {
+ 		int next_offset;
+ 		size_t byte_offset;
+ 
+ 		ret = kvm_read_guest(kvm, gpa, entry, esz);
+ 		if (ret)
+-			goto out;
++			return ret;
+ 
+ 		next_offset = fn(its, id, entry, opaque);
+-		if (next_offset <= 0) {
+-			ret = next_offset;
+-			goto out;
+-		}
++		if (next_offset <= 0)
++			return next_offset;
+ 
+ 		byte_offset = next_offset * esz;
+ 		id += next_offset;
+ 		gpa += byte_offset;
+ 		len -= byte_offset;
+ 	}
+-	ret =  1;
+-
+-out:
+-	kfree(entry);
+-	return ret;
++	return 1;
+ }
+ 
+ /**


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.13 commit in: /
@ 2017-11-02 10:04 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2017-11-02 10:04 UTC (permalink / raw
  To: gentoo-commits

commit:     50174d7ad05946e5ba0c2ac8252400afb4c9f9bd
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Nov  2 10:04:40 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Nov  2 10:04:40 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=50174d7a

Linux patch 4.13.11

 0000_README              |    4 +
 1010_linux-4.13.11.patch | 1567 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1571 insertions(+)

diff --git a/0000_README b/0000_README
index 053794d..bca516e 100644
--- a/0000_README
+++ b/0000_README
@@ -83,6 +83,10 @@ Patch:  1009_linux-4.13.10.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.13.10
 
+Patch:  1010_linux-4.13.11.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.13.11
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1010_linux-4.13.11.patch b/1010_linux-4.13.11.patch
new file mode 100644
index 0000000..21d2944
--- /dev/null
+++ b/1010_linux-4.13.11.patch
@@ -0,0 +1,1567 @@
+diff --git a/Makefile b/Makefile
+index 0e30a0d282e8..8280953c8a45 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 13
+-SUBLEVEL = 10
++SUBLEVEL = 11
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
+index 53766e2bc029..58f6fbc7df39 100644
+--- a/arch/powerpc/kvm/book3s_64_vio.c
++++ b/arch/powerpc/kvm/book3s_64_vio.c
+@@ -479,28 +479,30 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+ 		return ret;
+ 
+ 	dir = iommu_tce_direction(tce);
++
++	idx = srcu_read_lock(&vcpu->kvm->srcu);
++
+ 	if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
+-			tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
+-		return H_PARAMETER;
++			tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) {
++		ret = H_PARAMETER;
++		goto unlock_exit;
++	}
+ 
+ 	entry = ioba >> stt->page_shift;
+ 
+ 	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
+-		if (dir == DMA_NONE) {
++		if (dir == DMA_NONE)
+ 			ret = kvmppc_tce_iommu_unmap(vcpu->kvm,
+ 					stit->tbl, entry);
+-		} else {
+-			idx = srcu_read_lock(&vcpu->kvm->srcu);
++		else
+ 			ret = kvmppc_tce_iommu_map(vcpu->kvm, stit->tbl,
+ 					entry, ua, dir);
+-			srcu_read_unlock(&vcpu->kvm->srcu, idx);
+-		}
+ 
+ 		if (ret == H_SUCCESS)
+ 			continue;
+ 
+ 		if (ret == H_TOO_HARD)
+-			return ret;
++			goto unlock_exit;
+ 
+ 		WARN_ON_ONCE(1);
+ 		kvmppc_clear_tce(stit->tbl, entry);
+@@ -508,7 +510,10 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+ 
+ 	kvmppc_tce_put(stt, entry, tce);
+ 
+-	return H_SUCCESS;
++unlock_exit:
++	srcu_read_unlock(&vcpu->kvm->srcu, idx);
++
++	return ret;
+ }
+ EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
+ 
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index dc58c2a560f9..e92cb5fd28f2 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -1296,6 +1296,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+ 	bne	3f
+ BEGIN_FTR_SECTION
+ 	PPC_MSGSYNC
++	lwsync
+ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+ 	lbz	r0, HSTATE_HOST_IPI(r13)
+ 	cmpwi	r0, 0
+@@ -2767,6 +2768,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+ 	PPC_MSGCLR(6)
+ 	/* see if it's a host IPI */
+ 	li	r3, 1
++BEGIN_FTR_SECTION
++	PPC_MSGSYNC
++	lwsync
++END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+ 	lbz	r0, HSTATE_HOST_IPI(r13)
+ 	cmpwi	r0, 0
+ 	bnelr
+diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
+index 1a75c0b5f4ca..86468190d4b2 100644
+--- a/arch/powerpc/kvm/powerpc.c
++++ b/arch/powerpc/kvm/powerpc.c
+@@ -639,8 +639,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
+ 		break;
+ #endif
+ 	case KVM_CAP_PPC_HTM:
+-		r = cpu_has_feature(CPU_FTR_TM_COMP) &&
+-		    is_kvmppc_hv_enabled(kvm);
++		r = cpu_has_feature(CPU_FTR_TM_COMP) && hv_enabled;
+ 		break;
+ 	default:
+ 		r = 0;
+diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
+index 6595462b1fc8..6e0c9dee724f 100644
+--- a/arch/powerpc/sysdev/xive/common.c
++++ b/arch/powerpc/sysdev/xive/common.c
+@@ -447,7 +447,7 @@ static int xive_find_target_in_mask(const struct cpumask *mask,
+ 	int cpu, first, num, i;
+ 
+ 	/* Pick up a starting point CPU in the mask based on  fuzz */
+-	num = cpumask_weight(mask);
++	num = min_t(int, cpumask_weight(mask), nr_cpu_ids);
+ 	first = fuzz % num;
+ 
+ 	/* Locate it */
+diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
+index 21900e1cee9c..d185aa3965bf 100644
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -521,12 +521,15 @@ ENTRY(pgm_check_handler)
+ 	tmhh	%r8,0x0001		# test problem state bit
+ 	jnz	2f			# -> fault in user space
+ #if IS_ENABLED(CONFIG_KVM)
+-	# cleanup critical section for sie64a
++	# cleanup critical section for program checks in sie64a
+ 	lgr	%r14,%r9
+ 	slg	%r14,BASED(.Lsie_critical_start)
+ 	clg	%r14,BASED(.Lsie_critical_length)
+ 	jhe	0f
+-	brasl	%r14,.Lcleanup_sie
++	lg	%r14,__SF_EMPTY(%r15)		# get control block pointer
++	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
++	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
++	larl	%r9,sie_exit			# skip forward to sie_exit
+ #endif
+ 0:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
+ 	jnz	1f			# -> enabled, can't be a double fault
+diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
+index 458da8509b75..6db28f17ff28 100644
+--- a/arch/x86/kernel/amd_nb.c
++++ b/arch/x86/kernel/amd_nb.c
+@@ -27,6 +27,8 @@ static const struct pci_device_id amd_root_ids[] = {
+ 	{}
+ };
+ 
++#define PCI_DEVICE_ID_AMD_CNB17H_F4     0x1704
++
+ const struct pci_device_id amd_nb_misc_ids[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
+@@ -37,6 +39,7 @@ const struct pci_device_id amd_nb_misc_ids[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
+ 	{}
+ };
+ EXPORT_SYMBOL_GPL(amd_nb_misc_ids);
+@@ -48,6 +51,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
+ 	{}
+ };
+ 
+@@ -402,11 +406,48 @@ void amd_flush_garts(void)
+ }
+ EXPORT_SYMBOL_GPL(amd_flush_garts);
+ 
++static void __fix_erratum_688(void *info)
++{
++#define MSR_AMD64_IC_CFG 0xC0011021
++
++	msr_set_bit(MSR_AMD64_IC_CFG, 3);
++	msr_set_bit(MSR_AMD64_IC_CFG, 14);
++}
++
++/* Apply erratum 688 fix so machines without a BIOS fix work. */
++static __init void fix_erratum_688(void)
++{
++	struct pci_dev *F4;
++	u32 val;
++
++	if (boot_cpu_data.x86 != 0x14)
++		return;
++
++	if (!amd_northbridges.num)
++		return;
++
++	F4 = node_to_amd_nb(0)->link;
++	if (!F4)
++		return;
++
++	if (pci_read_config_dword(F4, 0x164, &val))
++		return;
++
++	if (val & BIT(2))
++		return;
++
++	on_each_cpu(__fix_erratum_688, NULL, 0);
++
++	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
++}
++
+ static __init int init_amd_nbs(void)
+ {
+ 	amd_cache_northbridges();
+ 	amd_cache_gart();
+ 
++	fix_erratum_688();
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index da7043893249..ebb60db0e499 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -386,6 +386,15 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
+ 	return result;
+ }
+ 
++/*
++ * Different settings for sk->sk_sndtimeo can result in different return values
++ * if there is a signal pending when we enter sendmsg, because reasons?
++ */
++static inline int was_interrupted(int result)
++{
++	return result == -ERESTARTSYS || result == -EINTR;
++}
++
+ /* always call with the tx_lock held */
+ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
+ {
+@@ -458,7 +467,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
+ 	result = sock_xmit(nbd, index, 1, &from,
+ 			(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
+ 	if (result <= 0) {
+-		if (result == -ERESTARTSYS) {
++		if (was_interrupted(result)) {
+ 			/* If we havne't sent anything we can just return BUSY,
+ 			 * however if we have sent something we need to make
+ 			 * sure we only allow this req to be sent until we are
+@@ -502,7 +511,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
+ 			}
+ 			result = sock_xmit(nbd, index, 1, &from, flags, &sent);
+ 			if (result <= 0) {
+-				if (result == -ERESTARTSYS) {
++				if (was_interrupted(result)) {
+ 					/* We've already sent the header, we
+ 					 * have no choice but to set pending and
+ 					 * return BUSY.
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index 1f01020ce3a9..6cdf43a8bf6a 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -830,7 +830,7 @@ uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr)
+ {
+ 	uint32_t reference_clock, tmp;
+ 	struct cgs_display_info info = {0};
+-	struct cgs_mode_info mode_info;
++	struct cgs_mode_info mode_info = {0};
+ 
+ 	info.mode_info = &mode_info;
+ 
+@@ -3951,10 +3951,9 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
+ 	uint32_t ref_clock;
+ 	uint32_t refresh_rate = 0;
+ 	struct cgs_display_info info = {0};
+-	struct cgs_mode_info mode_info;
++	struct cgs_mode_info mode_info = {0};
+ 
+ 	info.mode_info = &mode_info;
+-
+ 	cgs_get_active_displays_info(hwmgr->device, &info);
+ 	num_active_displays = info.display_count;
+ 
+@@ -3970,6 +3969,7 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
+ 	frame_time_in_us = 1000000 / refresh_rate;
+ 
+ 	pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
++
+ 	data->frame_time_x2 = frame_time_in_us * 2 / 100;
+ 
+ 	display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
+diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
+index f33d90226704..1c3f92533778 100644
+--- a/drivers/gpu/drm/i915/i915_perf.c
++++ b/drivers/gpu/drm/i915/i915_perf.c
+@@ -2480,6 +2480,10 @@ static const struct file_operations fops = {
+ 	.poll		= i915_perf_poll,
+ 	.read		= i915_perf_read,
+ 	.unlocked_ioctl	= i915_perf_ioctl,
++	/* Our ioctl have no arguments, so it's safe to use the same function
++	 * to handle 32bits compatibility.
++	 */
++	.compat_ioctl   = i915_perf_ioctl,
+ };
+ 
+ 
+diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
+index cfbc8ba4c96c..a6b762271a40 100644
+--- a/drivers/input/mouse/elan_i2c_core.c
++++ b/drivers/input/mouse/elan_i2c_core.c
+@@ -1253,6 +1253,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
+ 	{ "ELAN0605", 0 },
+ 	{ "ELAN0609", 0 },
+ 	{ "ELAN060B", 0 },
++	{ "ELAN0611", 0 },
+ 	{ "ELAN1000", 0 },
+ 	{ }
+ };
+diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
+index b796e891e2ee..4b8b9d7aa75e 100644
+--- a/drivers/input/tablet/gtco.c
++++ b/drivers/input/tablet/gtco.c
+@@ -230,13 +230,17 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
+ 
+ 	/* Walk  this report and pull out the info we need */
+ 	while (i < length) {
+-		prefix = report[i];
+-
+-		/* Skip over prefix */
+-		i++;
++		prefix = report[i++];
+ 
+ 		/* Determine data size and save the data in the proper variable */
+-		size = PREF_SIZE(prefix);
++		size = (1U << PREF_SIZE(prefix)) >> 1;
++		if (i + size > length) {
++			dev_err(ddev,
++				"Not enough data (need %d, have %d)\n",
++				i + size, length);
++			break;
++		}
++
+ 		switch (size) {
+ 		case 1:
+ 			data = report[i];
+@@ -244,8 +248,7 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
+ 		case 2:
+ 			data16 = get_unaligned_le16(&report[i]);
+ 			break;
+-		case 3:
+-			size = 4;
++		case 4:
+ 			data32 = get_unaligned_le32(&report[i]);
+ 			break;
+ 		}
+diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
+index 68ef0a4cd821..b0c80859f746 100644
+--- a/drivers/net/can/sun4i_can.c
++++ b/drivers/net/can/sun4i_can.c
+@@ -342,7 +342,7 @@ static int sun4i_can_start(struct net_device *dev)
+ 
+ 	/* enter the selected mode */
+ 	mod_reg_val = readl(priv->base + SUN4I_REG_MSEL_ADDR);
+-	if (priv->can.ctrlmode & CAN_CTRLMODE_PRESUME_ACK)
++	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+ 		mod_reg_val |= SUN4I_MSEL_LOOPBACK_MODE;
+ 	else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ 		mod_reg_val |= SUN4I_MSEL_LISTEN_ONLY_MODE;
+@@ -811,7 +811,6 @@ static int sun4ican_probe(struct platform_device *pdev)
+ 	priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING |
+ 				       CAN_CTRLMODE_LISTENONLY |
+ 				       CAN_CTRLMODE_LOOPBACK |
+-				       CAN_CTRLMODE_PRESUME_ACK |
+ 				       CAN_CTRLMODE_3_SAMPLES;
+ 	priv->base = addr;
+ 	priv->clk = clk;
+diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
+index 18cc529fb807..9b18d96ef526 100644
+--- a/drivers/net/can/usb/kvaser_usb.c
++++ b/drivers/net/can/usb/kvaser_usb.c
+@@ -137,6 +137,7 @@ static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
+ #define CMD_RESET_ERROR_COUNTER		49
+ #define CMD_TX_ACKNOWLEDGE		50
+ #define CMD_CAN_ERROR_EVENT		51
++#define CMD_FLUSH_QUEUE_REPLY		68
+ 
+ #define CMD_LEAF_USB_THROTTLE		77
+ #define CMD_LEAF_LOG_MESSAGE		106
+@@ -1301,6 +1302,11 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
+ 			goto warn;
+ 		break;
+ 
++	case CMD_FLUSH_QUEUE_REPLY:
++		if (dev->family != KVASER_LEAF)
++			goto warn;
++		break;
++
+ 	default:
+ warn:		dev_warn(dev->udev->dev.parent,
+ 			 "Unhandled message (%d)\n", msg->id);
+@@ -1609,7 +1615,8 @@ static int kvaser_usb_close(struct net_device *netdev)
+ 	if (err)
+ 		netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
+ 
+-	if (kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel))
++	err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel);
++	if (err)
+ 		netdev_warn(netdev, "Cannot reset card, error %d\n", err);
+ 
+ 	err = kvaser_usb_stop_chip(priv);
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
+index 5c2a08ef08ba..0cc96e9ae6ff 100644
+--- a/drivers/nvme/host/fc.c
++++ b/drivers/nvme/host/fc.c
+@@ -2477,10 +2477,10 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
+ 	nvme_fc_abort_aen_ops(ctrl);
+ 
+ 	/* wait for all io that had to be aborted */
+-	spin_lock_irqsave(&ctrl->lock, flags);
++	spin_lock_irq(&ctrl->lock);
+ 	wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
+ 	ctrl->flags &= ~FCCTRL_TERMIO;
+-	spin_unlock_irqrestore(&ctrl->lock, flags);
++	spin_unlock_irq(&ctrl->lock);
+ 
+ 	nvme_fc_term_aen_ops(ctrl);
+ 
+@@ -2693,6 +2693,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
+ 	ctrl->rport = rport;
+ 	ctrl->dev = lport->dev;
+ 	ctrl->cnum = idx;
++	init_waitqueue_head(&ctrl->ioabort_wait);
+ 
+ 	get_device(ctrl->dev);
+ 	kref_init(&ctrl->ref);
+diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
+index 60f431831582..ca29d49fdadd 100644
+--- a/drivers/regulator/fan53555.c
++++ b/drivers/regulator/fan53555.c
+@@ -476,7 +476,10 @@ static const struct i2c_device_id fan53555_id[] = {
+ 		.name = "fan53555",
+ 		.driver_data = FAN53555_VENDOR_FAIRCHILD
+ 	}, {
+-		.name = "syr82x",
++		.name = "syr827",
++		.driver_data = FAN53555_VENDOR_SILERGY
++	}, {
++		.name = "syr828",
+ 		.driver_data = FAN53555_VENDOR_SILERGY
+ 	},
+ 	{ },
+diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
+index bcc8f3dfd4c4..b3f9243cfed5 100644
+--- a/drivers/s390/scsi/zfcp_aux.c
++++ b/drivers/s390/scsi/zfcp_aux.c
+@@ -358,6 +358,8 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
+ 
+ 	adapter->next_port_scan = jiffies;
+ 
++	adapter->erp_action.adapter = adapter;
++
+ 	if (zfcp_qdio_setup(adapter))
+ 		goto failed;
+ 
+@@ -514,6 +516,9 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
+ 	port->dev.groups = zfcp_port_attr_groups;
+ 	port->dev.release = zfcp_port_release;
+ 
++	port->erp_action.adapter = adapter;
++	port->erp_action.port = port;
++
+ 	if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
+ 		kfree(port);
+ 		goto err_out;
+diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
+index 7ccfce559034..3b23d6754598 100644
+--- a/drivers/s390/scsi/zfcp_erp.c
++++ b/drivers/s390/scsi/zfcp_erp.c
+@@ -193,9 +193,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
+ 		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE,
+ 				&zfcp_sdev->status);
+ 		erp_action = &zfcp_sdev->erp_action;
+-		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
+-		erp_action->port = port;
+-		erp_action->sdev = sdev;
++		WARN_ON_ONCE(erp_action->port != port);
++		WARN_ON_ONCE(erp_action->sdev != sdev);
+ 		if (!(atomic_read(&zfcp_sdev->status) &
+ 		      ZFCP_STATUS_COMMON_RUNNING))
+ 			act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
+@@ -208,8 +207,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
+ 		zfcp_erp_action_dismiss_port(port);
+ 		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
+ 		erp_action = &port->erp_action;
+-		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
+-		erp_action->port = port;
++		WARN_ON_ONCE(erp_action->port != port);
++		WARN_ON_ONCE(erp_action->sdev != NULL);
+ 		if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING))
+ 			act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
+ 		break;
+@@ -219,7 +218,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
+ 		zfcp_erp_action_dismiss_adapter(adapter);
+ 		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
+ 		erp_action = &adapter->erp_action;
+-		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
++		WARN_ON_ONCE(erp_action->port != NULL);
++		WARN_ON_ONCE(erp_action->sdev != NULL);
+ 		if (!(atomic_read(&adapter->status) &
+ 		      ZFCP_STATUS_COMMON_RUNNING))
+ 			act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
+@@ -229,7 +229,11 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
+ 		return NULL;
+ 	}
+ 
+-	erp_action->adapter = adapter;
++	WARN_ON_ONCE(erp_action->adapter != adapter);
++	memset(&erp_action->list, 0, sizeof(erp_action->list));
++	memset(&erp_action->timer, 0, sizeof(erp_action->timer));
++	erp_action->step = ZFCP_ERP_STEP_UNINITIALIZED;
++	erp_action->fsf_req_id = 0;
+ 	erp_action->action = need;
+ 	erp_action->status = act_status;
+ 
+diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
+index a1eeeaaa0fca..1c11b8402b41 100644
+--- a/drivers/s390/scsi/zfcp_scsi.c
++++ b/drivers/s390/scsi/zfcp_scsi.c
+@@ -115,10 +115,15 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
+ 	struct zfcp_unit *unit;
+ 	int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE;
+ 
++	zfcp_sdev->erp_action.adapter = adapter;
++	zfcp_sdev->erp_action.sdev = sdev;
++
+ 	port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
+ 	if (!port)
+ 		return -ENXIO;
+ 
++	zfcp_sdev->erp_action.port = port;
++
+ 	unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
+ 	if (unit)
+ 		put_device(&unit->dev);
+diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
+index 9ee025b1d0e0..289b6fe306fd 100644
+--- a/drivers/scsi/aacraid/comminit.c
++++ b/drivers/scsi/aacraid/comminit.c
+@@ -302,9 +302,11 @@ int aac_send_shutdown(struct aac_dev * dev)
+ 		return -ENOMEM;
+ 	aac_fib_init(fibctx);
+ 
+-	mutex_lock(&dev->ioctl_mutex);
+-	dev->adapter_shutdown = 1;
+-	mutex_unlock(&dev->ioctl_mutex);
++	if (!dev->adapter_shutdown) {
++		mutex_lock(&dev->ioctl_mutex);
++		dev->adapter_shutdown = 1;
++		mutex_unlock(&dev->ioctl_mutex);
++	}
+ 
+ 	cmd = (struct aac_close *) fib_data(fibctx);
+ 	cmd->command = cpu_to_le32(VM_CloseAll);
+diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
+index 0f277df73af0..231bd3345f44 100644
+--- a/drivers/scsi/aacraid/linit.c
++++ b/drivers/scsi/aacraid/linit.c
+@@ -1401,8 +1401,9 @@ static void __aac_shutdown(struct aac_dev * aac)
+ {
+ 	int i;
+ 
++	mutex_lock(&aac->ioctl_mutex);
+ 	aac->adapter_shutdown = 1;
+-	aac_send_shutdown(aac);
++	mutex_unlock(&aac->ioctl_mutex);
+ 
+ 	if (aac->aif_thread) {
+ 		int i;
+@@ -1415,7 +1416,11 @@ static void __aac_shutdown(struct aac_dev * aac)
+ 		}
+ 		kthread_stop(aac->thread);
+ 	}
++
++	aac_send_shutdown(aac);
++
+ 	aac_adapter_disable_int(aac);
++
+ 	if (aac_is_src(aac)) {
+ 		if (aac->max_msix > 1) {
+ 			for (i = 0; i < aac->max_msix; i++) {
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 5da006d81900..2bf6d4022af0 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -3051,6 +3051,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	    host->max_cmd_len, host->max_channel, host->max_lun,
+ 	    host->transportt, sht->vendor_id);
+ 
++	INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
++
+ 	/* Set up the irqs */
+ 	ret = qla2x00_request_irqs(ha, rsp);
+ 	if (ret)
+@@ -3165,8 +3167,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	    host->can_queue, base_vha->req,
+ 	    base_vha->mgmt_svr_loop_id, host->sg_tablesize);
+ 
+-	INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
+-
+ 	if (ha->mqenable) {
+ 		bool mq = false;
+ 		bool startit = false;
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index 1a9de8419997..d99b10c73c55 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -837,7 +837,7 @@ sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
+ 
+ 	val = 0;
+ 	list_for_each_entry(srp, &sfp->rq_list, entry) {
+-		if (val > SG_MAX_QUEUE)
++		if (val >= SG_MAX_QUEUE)
+ 			break;
+ 		rinfo[val].req_state = srp->done + 1;
+ 		rinfo[val].problem =
+diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
+index 6c7d7a460689..53a04267eb38 100644
+--- a/drivers/spi/spi-armada-3700.c
++++ b/drivers/spi/spi-armada-3700.c
+@@ -161,7 +161,7 @@ static void a3700_spi_deactivate_cs(struct a3700_spi *a3700_spi,
+ }
+ 
+ static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi,
+-				  unsigned int pin_mode)
++				  unsigned int pin_mode, bool receiving)
+ {
+ 	u32 val;
+ 
+@@ -177,6 +177,9 @@ static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi,
+ 		break;
+ 	case SPI_NBITS_QUAD:
+ 		val |= A3700_SPI_DATA_PIN1;
++		/* RX during address reception uses 4-pin */
++		if (receiving)
++			val |= A3700_SPI_ADDR_PIN;
+ 		break;
+ 	default:
+ 		dev_err(&a3700_spi->master->dev, "wrong pin mode %u", pin_mode);
+@@ -392,7 +395,8 @@ static bool a3700_spi_wait_completion(struct spi_device *spi)
+ 
+ 	spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0);
+ 
+-	return true;
++	/* Timeout was reached */
++	return false;
+ }
+ 
+ static bool a3700_spi_transfer_wait(struct spi_device *spi,
+@@ -653,7 +657,7 @@ static int a3700_spi_transfer_one(struct spi_master *master,
+ 	else if (xfer->rx_buf)
+ 		nbits = xfer->rx_nbits;
+ 
+-	a3700_spi_pin_mode_set(a3700_spi, nbits);
++	a3700_spi_pin_mode_set(a3700_spi, nbits, xfer->rx_buf ? true : false);
+ 
+ 	if (xfer->rx_buf) {
+ 		/* Set read data length */
+diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
+index b19722ba908c..31eb882ed62d 100644
+--- a/drivers/spi/spi-bcm-qspi.c
++++ b/drivers/spi/spi-bcm-qspi.c
+@@ -1278,7 +1278,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
+ 			goto qspi_probe_err;
+ 		}
+ 	} else {
+-		goto qspi_probe_err;
++		goto qspi_resource_err;
+ 	}
+ 
+ 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
+@@ -1300,7 +1300,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
+ 		qspi->base[CHIP_SELECT]  = devm_ioremap_resource(dev, res);
+ 		if (IS_ERR(qspi->base[CHIP_SELECT])) {
+ 			ret = PTR_ERR(qspi->base[CHIP_SELECT]);
+-			goto qspi_probe_err;
++			goto qspi_resource_err;
+ 		}
+ 	}
+ 
+@@ -1308,7 +1308,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
+ 				GFP_KERNEL);
+ 	if (!qspi->dev_ids) {
+ 		ret = -ENOMEM;
+-		goto qspi_probe_err;
++		goto qspi_resource_err;
+ 	}
+ 
+ 	for (val = 0; val < num_irqs; val++) {
+@@ -1397,8 +1397,9 @@ int bcm_qspi_probe(struct platform_device *pdev,
+ 	bcm_qspi_hw_uninit(qspi);
+ 	clk_disable_unprepare(qspi->clk);
+ qspi_probe_err:
+-	spi_master_put(master);
+ 	kfree(qspi->dev_ids);
++qspi_resource_err:
++	spi_master_put(master);
+ 	return ret;
+ }
+ /* probe function to be called by SoC specific platform driver probe */
+diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
+index 82360594fa8e..57efbd3b053b 100644
+--- a/drivers/xen/gntdev.c
++++ b/drivers/xen/gntdev.c
+@@ -1024,6 +1024,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
+ 	mutex_unlock(&priv->lock);
+ 
+ 	if (use_ptemod) {
++		map->pages_vm_start = vma->vm_start;
+ 		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
+ 					  vma->vm_end - vma->vm_start,
+ 					  find_grant_ptes, map);
+@@ -1061,7 +1062,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
+ 					    set_grant_ptes_as_special, NULL);
+ 		}
+ #endif
+-		map->pages_vm_start = vma->vm_start;
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
+index e89136ab851e..b437fccd4e62 100644
+--- a/drivers/xen/xen-balloon.c
++++ b/drivers/xen/xen-balloon.c
+@@ -57,7 +57,7 @@ static int register_balloon(struct device *dev);
+ static void watch_target(struct xenbus_watch *watch,
+ 			 const char *path, const char *token)
+ {
+-	unsigned long long new_target;
++	unsigned long long new_target, static_max;
+ 	int err;
+ 	static bool watch_fired;
+ 	static long target_diff;
+@@ -72,13 +72,20 @@ static void watch_target(struct xenbus_watch *watch,
+ 	 * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10.
+ 	 */
+ 	new_target >>= PAGE_SHIFT - 10;
+-	if (watch_fired) {
+-		balloon_set_new_target(new_target - target_diff);
+-		return;
++
++	if (!watch_fired) {
++		watch_fired = true;
++		err = xenbus_scanf(XBT_NIL, "memory", "static-max", "%llu",
++				   &static_max);
++		if (err != 1)
++			static_max = new_target;
++		else
++			static_max >>= PAGE_SHIFT - 10;
++		target_diff = xen_pv_domain() ? 0
++				: static_max - balloon_stats.target_pages;
+ 	}
+ 
+-	watch_fired = true;
+-	target_diff = new_target - balloon_stats.target_pages;
++	balloon_set_new_target(new_target - target_diff);
+ }
+ static struct xenbus_watch target_watch = {
+ 	.node = "memory/target",
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index 7007ae2a5ad2..388f0267cec5 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -1985,6 +1985,7 @@ static int try_flush_caps(struct inode *inode, u64 *ptid)
+ retry:
+ 	spin_lock(&ci->i_ceph_lock);
+ 	if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
++		spin_unlock(&ci->i_ceph_lock);
+ 		dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
+ 		goto out;
+ 	}
+@@ -2002,8 +2003,10 @@ static int try_flush_caps(struct inode *inode, u64 *ptid)
+ 			mutex_lock(&session->s_mutex);
+ 			goto retry;
+ 		}
+-		if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
++		if (cap->session->s_state < CEPH_MDS_SESSION_OPEN) {
++			spin_unlock(&ci->i_ceph_lock);
+ 			goto out;
++		}
+ 
+ 		flushing = __mark_caps_flushing(inode, session, true,
+ 						&flush_tid, &oldest_flush_tid);
+diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
+index f7243617316c..d5b2e12b5d02 100644
+--- a/fs/cifs/Kconfig
++++ b/fs/cifs/Kconfig
+@@ -5,9 +5,14 @@ config CIFS
+ 	select CRYPTO
+ 	select CRYPTO_MD4
+ 	select CRYPTO_MD5
++	select CRYPTO_SHA256
++	select CRYPTO_CMAC
+ 	select CRYPTO_HMAC
+ 	select CRYPTO_ARC4
++	select CRYPTO_AEAD2
++	select CRYPTO_CCM
+ 	select CRYPTO_ECB
++	select CRYPTO_AES
+ 	select CRYPTO_DES
+ 	help
+ 	  This is the client VFS module for the SMB3 family of NAS protocols,
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 03b6eae0ae28..ab69d895d1e9 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -661,7 +661,9 @@ struct TCP_Server_Info {
+ #endif
+ 	unsigned int	max_read;
+ 	unsigned int	max_write;
+-	__u8		preauth_hash[512];
++#ifdef CONFIG_CIFS_SMB311
++	__u8	preauth_sha_hash[64]; /* save initital negprot hash */
++#endif /* 3.1.1 */
+ 	struct delayed_work reconnect; /* reconnect workqueue job */
+ 	struct mutex reconnect_mutex; /* prevent simultaneous reconnects */
+ 	unsigned long echo_interval;
+@@ -849,7 +851,9 @@ struct cifs_ses {
+ 	__u8 smb3signingkey[SMB3_SIGN_KEY_SIZE];
+ 	__u8 smb3encryptionkey[SMB3_SIGN_KEY_SIZE];
+ 	__u8 smb3decryptionkey[SMB3_SIGN_KEY_SIZE];
+-	__u8 preauth_hash[512];
++#ifdef CONFIG_CIFS_SMB311
++	__u8 preauth_sha_hash[64];
++#endif /* 3.1.1 */
+ };
+ 
+ static inline bool
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index ddc633ef6064..834d18dbfb58 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -1243,7 +1243,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
+ 	struct smb2_tree_connect_req *req;
+ 	struct smb2_tree_connect_rsp *rsp = NULL;
+ 	struct kvec iov[2];
+-	struct kvec rsp_iov;
++	struct kvec rsp_iov = { NULL, 0 };
+ 	int rc = 0;
+ 	int resp_buftype;
+ 	int unc_path_len;
+@@ -1360,7 +1360,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
+ 	return rc;
+ 
+ tcon_error_exit:
+-	if (rsp->hdr.sync_hdr.Status == STATUS_BAD_NETWORK_NAME) {
++	if (rsp && rsp->hdr.sync_hdr.Status == STATUS_BAD_NETWORK_NAME) {
+ 		cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
+ 	}
+ 	goto tcon_exit;
+@@ -1963,6 +1963,9 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
+ 	} else
+ 		iov[0].iov_len = get_rfc1002_length(req) + 4;
+ 
++	/* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */
++	if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
++		req->hdr.sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
+ 
+ 	rc = SendReceive2(xid, ses, iov, n_iov, &resp_buftype, flags, &rsp_iov);
+ 	cifs_small_buf_release(req);
+diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
+index 67367cf1f8cd..99493946e2f9 100644
+--- a/fs/cifs/smb2transport.c
++++ b/fs/cifs/smb2transport.c
+@@ -390,6 +390,7 @@ generate_smb30signingkey(struct cifs_ses *ses)
+ 	return generate_smb3signingkey(ses, &triplet);
+ }
+ 
++#ifdef CONFIG_CIFS_SMB311
+ int
+ generate_smb311signingkey(struct cifs_ses *ses)
+ 
+@@ -398,25 +399,26 @@ generate_smb311signingkey(struct cifs_ses *ses)
+ 	struct derivation *d;
+ 
+ 	d = &triplet.signing;
+-	d->label.iov_base = "SMB2AESCMAC";
+-	d->label.iov_len = 12;
+-	d->context.iov_base = "SmbSign";
+-	d->context.iov_len = 8;
++	d->label.iov_base = "SMBSigningKey";
++	d->label.iov_len = 14;
++	d->context.iov_base = ses->preauth_sha_hash;
++	d->context.iov_len = 64;
+ 
+ 	d = &triplet.encryption;
+-	d->label.iov_base = "SMB2AESCCM";
+-	d->label.iov_len = 11;
+-	d->context.iov_base = "ServerIn ";
+-	d->context.iov_len = 10;
++	d->label.iov_base = "SMBC2SCipherKey";
++	d->label.iov_len = 16;
++	d->context.iov_base = ses->preauth_sha_hash;
++	d->context.iov_len = 64;
+ 
+ 	d = &triplet.decryption;
+-	d->label.iov_base = "SMB2AESCCM";
+-	d->label.iov_len = 11;
+-	d->context.iov_base = "ServerOut";
+-	d->context.iov_len = 10;
++	d->label.iov_base = "SMBS2CCipherKey";
++	d->label.iov_len = 16;
++	d->context.iov_base = ses->preauth_sha_hash;
++	d->context.iov_len = 64;
+ 
+ 	return generate_smb3signingkey(ses, &triplet);
+ }
++#endif /* 311 */
+ 
+ int
+ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index 00800c07ba1c..4fbcb8721b2f 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -1312,7 +1312,8 @@ static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
+ 			*/
+ 			over = !dir_emit(ctx, dirent->name, dirent->namelen,
+ 				       dirent->ino, dirent->type);
+-			ctx->pos = dirent->off;
++			if (!over)
++				ctx->pos = dirent->off;
+ 		}
+ 
+ 		buf += reclen;
+diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
+index ef55c926463c..7b43b89defad 100644
+--- a/fs/overlayfs/inode.c
++++ b/fs/overlayfs/inode.c
+@@ -595,18 +595,30 @@ static bool ovl_verify_inode(struct inode *inode, struct dentry *lowerdentry,
+ 	return true;
+ }
+ 
+-struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry)
++struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry,
++			    struct dentry *index)
+ {
+ 	struct dentry *lowerdentry = ovl_dentry_lower(dentry);
+ 	struct inode *realinode = upperdentry ? d_inode(upperdentry) : NULL;
+ 	struct inode *inode;
++	/* Already indexed or could be indexed on copy up? */
++	bool indexed = (index || (ovl_indexdir(dentry->d_sb) && !upperdentry));
++
++	if (WARN_ON(upperdentry && indexed && !lowerdentry))
++		return ERR_PTR(-EIO);
+ 
+ 	if (!realinode)
+ 		realinode = d_inode(lowerdentry);
+ 
+-	if (!S_ISDIR(realinode->i_mode) &&
+-	    (upperdentry || (lowerdentry && ovl_indexdir(dentry->d_sb)))) {
+-		struct inode *key = d_inode(lowerdentry ?: upperdentry);
++	/*
++	 * Copy up origin (lower) may exist for non-indexed upper, but we must
++	 * not use lower as hash key in that case.
++	 * Hash inodes that are or could be indexed by origin inode and
++	 * non-indexed upper inodes that could be hard linked by upper inode.
++	 */
++	if (!S_ISDIR(realinode->i_mode) && (upperdentry || indexed)) {
++		struct inode *key = d_inode(indexed ? lowerdentry :
++						      upperdentry);
+ 		unsigned int nlink;
+ 
+ 		inode = iget5_locked(dentry->d_sb, (unsigned long) key,
+diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
+index 9deec68075dc..0223ef4acbe4 100644
+--- a/fs/overlayfs/namei.c
++++ b/fs/overlayfs/namei.c
+@@ -405,14 +405,13 @@ int ovl_verify_index(struct dentry *index, struct path *lowerstack,
+ 	 * be treated as stale (i.e. after unlink of the overlay inode).
+ 	 * We don't know the verification rules for directory and whiteout
+ 	 * index entries, because they have not been implemented yet, so return
+-	 * EROFS if those entries are found to avoid corrupting an index that
+-	 * was created by a newer kernel.
++	 * EINVAL if those entries are found to abort the mount to avoid
++	 * corrupting an index that was created by a newer kernel.
+ 	 */
+-	err = -EROFS;
++	err = -EINVAL;
+ 	if (d_is_dir(index) || ovl_is_whiteout(index))
+ 		goto fail;
+ 
+-	err = -EINVAL;
+ 	if (index->d_name.len < sizeof(struct ovl_fh)*2)
+ 		goto fail;
+ 
+@@ -507,6 +506,10 @@ static struct dentry *ovl_lookup_index(struct dentry *dentry,
+ 	index = lookup_one_len_unlocked(name.name, ofs->indexdir, name.len);
+ 	if (IS_ERR(index)) {
+ 		err = PTR_ERR(index);
++		if (err == -ENOENT) {
++			index = NULL;
++			goto out;
++		}
+ 		pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%*s, err=%i);\n"
+ 				    "overlayfs: mount with '-o index=off' to disable inodes index.\n",
+ 				    d_inode(origin)->i_ino, name.len, name.name,
+@@ -516,18 +519,9 @@ static struct dentry *ovl_lookup_index(struct dentry *dentry,
+ 
+ 	inode = d_inode(index);
+ 	if (d_is_negative(index)) {
+-		if (upper && d_inode(origin)->i_nlink > 1) {
+-			pr_warn_ratelimited("overlayfs: hard link with origin but no index (ino=%lu).\n",
+-					    d_inode(origin)->i_ino);
+-			goto fail;
+-		}
+-
+-		dput(index);
+-		index = NULL;
++		goto out_dput;
+ 	} else if (upper && d_inode(upper) != inode) {
+-		pr_warn_ratelimited("overlayfs: wrong index found (index=%pd2, ino=%lu, upper ino=%lu).\n",
+-				    index, inode->i_ino, d_inode(upper)->i_ino);
+-		goto fail;
++		goto out_dput;
+ 	} else if (ovl_dentry_weird(index) || ovl_is_whiteout(index) ||
+ 		   ((inode->i_mode ^ d_inode(origin)->i_mode) & S_IFMT)) {
+ 		/*
+@@ -547,6 +541,11 @@ static struct dentry *ovl_lookup_index(struct dentry *dentry,
+ 	kfree(name.name);
+ 	return index;
+ 
++out_dput:
++	dput(index);
++	index = NULL;
++	goto out;
++
+ fail:
+ 	dput(index);
+ 	index = ERR_PTR(-EIO);
+@@ -709,7 +708,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
+ 		upperdentry = dget(index);
+ 
+ 	if (upperdentry || ctr) {
+-		inode = ovl_get_inode(dentry, upperdentry);
++		inode = ovl_get_inode(dentry, upperdentry, index);
+ 		err = PTR_ERR(inode);
+ 		if (IS_ERR(inode))
+ 			goto out_free_oe;
+diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
+index f57f47742f5f..bccb1e683387 100644
+--- a/fs/overlayfs/overlayfs.h
++++ b/fs/overlayfs/overlayfs.h
+@@ -284,7 +284,8 @@ int ovl_update_time(struct inode *inode, struct timespec *ts, int flags);
+ bool ovl_is_private_xattr(const char *name);
+ 
+ struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev);
+-struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry);
++struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry,
++			    struct dentry *index);
+ static inline void ovl_copyattr(struct inode *from, struct inode *to)
+ {
+ 	to->i_uid = from->i_uid;
+diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
+index 74f7ead442f0..3ff960372cb9 100644
+--- a/fs/overlayfs/readdir.c
++++ b/fs/overlayfs/readdir.c
+@@ -704,13 +704,12 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt,
+ 			break;
+ 		}
+ 		err = ovl_verify_index(index, lowerstack, numlower);
+-		if (err) {
+-			if (err == -EROFS)
+-				break;
++		/* Cleanup stale and orphan index entries */
++		if (err && (err == -ESTALE || err == -ENOENT))
+ 			err = ovl_cleanup(dir, index);
+-			if (err)
+-				break;
+-		}
++		if (err)
++			break;
++
+ 		dput(index);
+ 		index = NULL;
+ 	}
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index a1464905c1ea..e2192e1eb564 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -174,6 +174,9 @@ static struct inode *ovl_alloc_inode(struct super_block *sb)
+ {
+ 	struct ovl_inode *oi = kmem_cache_alloc(ovl_inode_cachep, GFP_KERNEL);
+ 
++	if (!oi)
++		return NULL;
++
+ 	oi->cache = NULL;
+ 	oi->redirect = NULL;
+ 	oi->version = 0;
+diff --git a/include/uapi/linux/spi/spidev.h b/include/uapi/linux/spi/spidev.h
+index dd5f21e75805..856de39d0b89 100644
+--- a/include/uapi/linux/spi/spidev.h
++++ b/include/uapi/linux/spi/spidev.h
+@@ -23,6 +23,7 @@
+ #define SPIDEV_H
+ 
+ #include <linux/types.h>
++#include <linux/ioctl.h>
+ 
+ /* User space versions of kernel symbols for SPI clocking modes,
+  * matching <linux/spi/spi.h>
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index ca937b0c3a96..d5bf849e0f48 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -68,6 +68,7 @@ enum {
+ 	 * attach_mutex to avoid changing binding state while
+ 	 * worker_attach_to_pool() is in progress.
+ 	 */
++	POOL_MANAGER_ACTIVE	= 1 << 0,	/* being managed */
+ 	POOL_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
+ 
+ 	/* worker flags */
+@@ -165,7 +166,6 @@ struct worker_pool {
+ 						/* L: hash of busy workers */
+ 
+ 	/* see manage_workers() for details on the two manager mutexes */
+-	struct mutex		manager_arb;	/* manager arbitration */
+ 	struct worker		*manager;	/* L: purely informational */
+ 	struct mutex		attach_mutex;	/* attach/detach exclusion */
+ 	struct list_head	workers;	/* A: attached workers */
+@@ -299,6 +299,7 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
+ 
+ static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
+ static DEFINE_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
++static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
+ 
+ static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
+ static bool workqueue_freezing;		/* PL: have wqs started freezing? */
+@@ -801,7 +802,7 @@ static bool need_to_create_worker(struct worker_pool *pool)
+ /* Do we have too many workers and should some go away? */
+ static bool too_many_workers(struct worker_pool *pool)
+ {
+-	bool managing = mutex_is_locked(&pool->manager_arb);
++	bool managing = pool->flags & POOL_MANAGER_ACTIVE;
+ 	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
+ 	int nr_busy = pool->nr_workers - nr_idle;
+ 
+@@ -1980,24 +1981,17 @@ static bool manage_workers(struct worker *worker)
+ {
+ 	struct worker_pool *pool = worker->pool;
+ 
+-	/*
+-	 * Anyone who successfully grabs manager_arb wins the arbitration
+-	 * and becomes the manager.  mutex_trylock() on pool->manager_arb
+-	 * failure while holding pool->lock reliably indicates that someone
+-	 * else is managing the pool and the worker which failed trylock
+-	 * can proceed to executing work items.  This means that anyone
+-	 * grabbing manager_arb is responsible for actually performing
+-	 * manager duties.  If manager_arb is grabbed and released without
+-	 * actual management, the pool may stall indefinitely.
+-	 */
+-	if (!mutex_trylock(&pool->manager_arb))
++	if (pool->flags & POOL_MANAGER_ACTIVE)
+ 		return false;
++
++	pool->flags |= POOL_MANAGER_ACTIVE;
+ 	pool->manager = worker;
+ 
+ 	maybe_create_worker(pool);
+ 
+ 	pool->manager = NULL;
+-	mutex_unlock(&pool->manager_arb);
++	pool->flags &= ~POOL_MANAGER_ACTIVE;
++	wake_up(&wq_manager_wait);
+ 	return true;
+ }
+ 
+@@ -3215,7 +3209,6 @@ static int init_worker_pool(struct worker_pool *pool)
+ 	setup_timer(&pool->mayday_timer, pool_mayday_timeout,
+ 		    (unsigned long)pool);
+ 
+-	mutex_init(&pool->manager_arb);
+ 	mutex_init(&pool->attach_mutex);
+ 	INIT_LIST_HEAD(&pool->workers);
+ 
+@@ -3285,13 +3278,15 @@ static void put_unbound_pool(struct worker_pool *pool)
+ 	hash_del(&pool->hash_node);
+ 
+ 	/*
+-	 * Become the manager and destroy all workers.  Grabbing
+-	 * manager_arb prevents @pool's workers from blocking on
+-	 * attach_mutex.
++	 * Become the manager and destroy all workers.  This prevents
++	 * @pool's workers from blocking on attach_mutex.  We're the last
++	 * manager and @pool gets freed with the flag set.
+ 	 */
+-	mutex_lock(&pool->manager_arb);
+-
+ 	spin_lock_irq(&pool->lock);
++	wait_event_lock_irq(wq_manager_wait,
++			    !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
++	pool->flags |= POOL_MANAGER_ACTIVE;
++
+ 	while ((worker = first_idle_worker(pool)))
+ 		destroy_worker(worker);
+ 	WARN_ON(pool->nr_workers || pool->nr_idle);
+@@ -3305,8 +3300,6 @@ static void put_unbound_pool(struct worker_pool *pool)
+ 	if (pool->detach_completion)
+ 		wait_for_completion(pool->detach_completion);
+ 
+-	mutex_unlock(&pool->manager_arb);
+-
+ 	/* shut down the timers */
+ 	del_timer_sync(&pool->idle_timer);
+ 	del_timer_sync(&pool->mayday_timer);
+diff --git a/lib/assoc_array.c b/lib/assoc_array.c
+index 59fd7c0b119c..5cd093589c5a 100644
+--- a/lib/assoc_array.c
++++ b/lib/assoc_array.c
+@@ -598,21 +598,31 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
+ 		if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0)
+ 			goto all_leaves_cluster_together;
+ 
+-		/* Otherwise we can just insert a new node ahead of the old
+-		 * one.
++		/* Otherwise all the old leaves cluster in the same slot, but
++		 * the new leaf wants to go into a different slot - so we
++		 * create a new node (n0) to hold the new leaf and a pointer to
++		 * a new node (n1) holding all the old leaves.
++		 *
++		 * This can be done by falling through to the node splitting
++		 * path.
+ 		 */
+-		goto present_leaves_cluster_but_not_new_leaf;
++		pr_devel("present leaves cluster but not new leaf\n");
+ 	}
+ 
+ split_node:
+ 	pr_devel("split node\n");
+ 
+-	/* We need to split the current node; we know that the node doesn't
+-	 * simply contain a full set of leaves that cluster together (it
+-	 * contains meta pointers and/or non-clustering leaves).
++	/* We need to split the current node.  The node must contain anything
++	 * from a single leaf (in the one leaf case, this leaf will cluster
++	 * with the new leaf) and the rest meta-pointers, to all leaves, some
++	 * of which may cluster.
++	 *
++	 * It won't contain the case in which all the current leaves plus the
++	 * new leaves want to cluster in the same slot.
+ 	 *
+ 	 * We need to expel at least two leaves out of a set consisting of the
+-	 * leaves in the node and the new leaf.
++	 * leaves in the node and the new leaf.  The current meta pointers can
++	 * just be copied as they shouldn't cluster with any of the leaves.
+ 	 *
+ 	 * We need a new node (n0) to replace the current one and a new node to
+ 	 * take the expelled nodes (n1).
+@@ -717,33 +727,6 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
+ 	pr_devel("<--%s() = ok [split node]\n", __func__);
+ 	return true;
+ 
+-present_leaves_cluster_but_not_new_leaf:
+-	/* All the old leaves cluster in the same slot, but the new leaf wants
+-	 * to go into a different slot, so we create a new node to hold the new
+-	 * leaf and a pointer to a new node holding all the old leaves.
+-	 */
+-	pr_devel("present leaves cluster but not new leaf\n");
+-
+-	new_n0->back_pointer = node->back_pointer;
+-	new_n0->parent_slot = node->parent_slot;
+-	new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
+-	new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
+-	new_n1->parent_slot = edit->segment_cache[0];
+-	new_n1->nr_leaves_on_branch = node->nr_leaves_on_branch;
+-	edit->adjust_count_on = new_n0;
+-
+-	for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
+-		new_n1->slots[i] = node->slots[i];
+-
+-	new_n0->slots[edit->segment_cache[0]] = assoc_array_node_to_ptr(new_n0);
+-	edit->leaf_p = &new_n0->slots[edit->segment_cache[ASSOC_ARRAY_FAN_OUT]];
+-
+-	edit->set[0].ptr = &assoc_array_ptr_to_node(node->back_pointer)->slots[node->parent_slot];
+-	edit->set[0].to = assoc_array_node_to_ptr(new_n0);
+-	edit->excised_meta[0] = assoc_array_node_to_ptr(node);
+-	pr_devel("<--%s() = ok [insert node before]\n", __func__);
+-	return true;
+-
+ all_leaves_cluster_together:
+ 	/* All the leaves, new and old, want to cluster together in this node
+ 	 * in the same slot, so we have to replace this node with a shortcut to
+diff --git a/net/wireless/sme.c b/net/wireless/sme.c
+index 0a49b88070d0..b6533ecbf5b1 100644
+--- a/net/wireless/sme.c
++++ b/net/wireless/sme.c
+@@ -522,11 +522,6 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
+ 		return -EOPNOTSUPP;
+ 
+ 	if (wdev->current_bss) {
+-		if (!prev_bssid)
+-			return -EALREADY;
+-		if (prev_bssid &&
+-		    !ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid))
+-			return -ENOTCONN;
+ 		cfg80211_unhold_bss(wdev->current_bss);
+ 		cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
+ 		wdev->current_bss = NULL;
+@@ -1063,11 +1058,35 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
+ 
+ 	ASSERT_WDEV_LOCK(wdev);
+ 
+-	if (WARN_ON(wdev->connect_keys)) {
+-		kzfree(wdev->connect_keys);
+-		wdev->connect_keys = NULL;
++	/*
++	 * If we have an ssid_len, we're trying to connect or are
++	 * already connected, so reject a new SSID unless it's the
++	 * same (which is the case for re-association.)
++	 */
++	if (wdev->ssid_len &&
++	    (wdev->ssid_len != connect->ssid_len ||
++	     memcmp(wdev->ssid, connect->ssid, wdev->ssid_len)))
++		return -EALREADY;
++
++	/*
++	 * If connected, reject (re-)association unless prev_bssid
++	 * matches the current BSSID.
++	 */
++	if (wdev->current_bss) {
++		if (!prev_bssid)
++			return -EALREADY;
++		if (!ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid))
++			return -ENOTCONN;
+ 	}
+ 
++	/*
++	 * Reject if we're in the process of connecting with WEP,
++	 * this case isn't very interesting and trying to handle
++	 * it would make the code much more complex.
++	 */
++	if (wdev->connect_keys)
++		return -EINPROGRESS;
++
+ 	cfg80211_oper_and_ht_capa(&connect->ht_capa_mask,
+ 				  rdev->wiphy.ht_capa_mod_mask);
+ 
+@@ -1118,7 +1137,12 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
+ 
+ 	if (err) {
+ 		wdev->connect_keys = NULL;
+-		wdev->ssid_len = 0;
++		/*
++		 * This could be reassoc getting refused, don't clear
++		 * ssid_len in that case.
++		 */
++		if (!wdev->current_bss)
++			wdev->ssid_len = 0;
+ 		return err;
+ 	}
+ 
+@@ -1145,6 +1169,14 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
+ 	else if (wdev->ssid_len)
+ 		err = rdev_disconnect(rdev, dev, reason);
+ 
++	/*
++	 * Clear ssid_len unless we actually were fully connected,
++	 * in which case cfg80211_disconnected() will take care of
++	 * this later.
++	 */
++	if (!wdev->current_bss)
++		wdev->ssid_len = 0;
++
+ 	return err;
+ }
+ 
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index 9391ced05259..c0a6cdd42ff2 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -1684,32 +1684,34 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
+ 
+ static int xfrm_dump_policy_done(struct netlink_callback *cb)
+ {
+-	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
++	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
+ 	struct net *net = sock_net(cb->skb->sk);
+ 
+ 	xfrm_policy_walk_done(walk, net);
+ 	return 0;
+ }
+ 
++static int xfrm_dump_policy_start(struct netlink_callback *cb)
++{
++	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
++
++	BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args));
++
++	xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
++	return 0;
++}
++
+ static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
+ {
+ 	struct net *net = sock_net(skb->sk);
+-	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
++	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
+ 	struct xfrm_dump_info info;
+ 
+-	BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
+-		     sizeof(cb->args) - sizeof(cb->args[0]));
+-
+ 	info.in_skb = cb->skb;
+ 	info.out_skb = skb;
+ 	info.nlmsg_seq = cb->nlh->nlmsg_seq;
+ 	info.nlmsg_flags = NLM_F_MULTI;
+ 
+-	if (!cb->args[0]) {
+-		cb->args[0] = 1;
+-		xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
+-	}
+-
+ 	(void) xfrm_policy_walk(net, walk, dump_one_policy, &info);
+ 
+ 	return skb->len;
+@@ -2467,6 +2469,7 @@ static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
+ 
+ static const struct xfrm_link {
+ 	int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
++	int (*start)(struct netlink_callback *);
+ 	int (*dump)(struct sk_buff *, struct netlink_callback *);
+ 	int (*done)(struct netlink_callback *);
+ 	const struct nla_policy *nla_pol;
+@@ -2480,6 +2483,7 @@ static const struct xfrm_link {
+ 	[XFRM_MSG_NEWPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_add_policy    },
+ 	[XFRM_MSG_DELPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_get_policy    },
+ 	[XFRM_MSG_GETPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
++						   .start = xfrm_dump_policy_start,
+ 						   .dump = xfrm_dump_policy,
+ 						   .done = xfrm_dump_policy_done },
+ 	[XFRM_MSG_ALLOCSPI    - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
+@@ -2532,6 +2536,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
+ 
+ 		{
+ 			struct netlink_dump_control c = {
++				.start = link->start,
+ 				.dump = link->dump,
+ 				.done = link->done,
+ 			};
+diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c
+index 446beb7ac48d..5522692100ba 100644
+--- a/samples/trace_events/trace-events-sample.c
++++ b/samples/trace_events/trace-events-sample.c
+@@ -78,7 +78,7 @@ static int simple_thread_fn(void *arg)
+ }
+ 
+ static DEFINE_MUTEX(thread_mutex);
+-static bool simple_thread_cnt;
++static int simple_thread_cnt;
+ 
+ int foo_bar_reg(void)
+ {
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 217bb582aff1..fe4d06398fc3 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -327,6 +327,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
+ 	case 0x10ec0215:
+ 	case 0x10ec0225:
+ 	case 0x10ec0233:
++	case 0x10ec0236:
+ 	case 0x10ec0255:
+ 	case 0x10ec0256:
+ 	case 0x10ec0282:
+@@ -911,6 +912,7 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = {
+ 	{ 0x10ec0275, 0x1028, 0, "ALC3260" },
+ 	{ 0x10ec0899, 0x1028, 0, "ALC3861" },
+ 	{ 0x10ec0298, 0x1028, 0, "ALC3266" },
++	{ 0x10ec0236, 0x1028, 0, "ALC3204" },
+ 	{ 0x10ec0256, 0x1028, 0, "ALC3246" },
+ 	{ 0x10ec0225, 0x1028, 0, "ALC3253" },
+ 	{ 0x10ec0295, 0x1028, 0, "ALC3254" },
+@@ -3930,6 +3932,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
+ 		alc_process_coef_fw(codec, coef0255_1);
+ 		alc_process_coef_fw(codec, coef0255);
+ 		break;
++	case 0x10ec0236:
+ 	case 0x10ec0256:
+ 		alc_process_coef_fw(codec, coef0256);
+ 		alc_process_coef_fw(codec, coef0255);
+@@ -4028,6 +4031,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
+ 	};
+ 
+ 	switch (codec->core.vendor_id) {
++	case 0x10ec0236:
+ 	case 0x10ec0255:
+ 	case 0x10ec0256:
+ 		alc_write_coef_idx(codec, 0x45, 0xc489);
+@@ -4160,6 +4164,7 @@ static void alc_headset_mode_default(struct hda_codec *codec)
+ 		alc_process_coef_fw(codec, alc225_pre_hsmode);
+ 		alc_process_coef_fw(codec, coef0225);
+ 		break;
++	case 0x10ec0236:
+ 	case 0x10ec0255:
+ 	case 0x10ec0256:
+ 		alc_process_coef_fw(codec, coef0255);
+@@ -4256,6 +4261,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
+ 	case 0x10ec0255:
+ 		alc_process_coef_fw(codec, coef0255);
+ 		break;
++	case 0x10ec0236:
+ 	case 0x10ec0256:
+ 		alc_process_coef_fw(codec, coef0256);
+ 		break;
+@@ -4366,6 +4372,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
+ 	case 0x10ec0255:
+ 		alc_process_coef_fw(codec, coef0255);
+ 		break;
++	case 0x10ec0236:
+ 	case 0x10ec0256:
+ 		alc_process_coef_fw(codec, coef0256);
+ 		break;
+@@ -4451,6 +4458,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
+ 	};
+ 
+ 	switch (codec->core.vendor_id) {
++	case 0x10ec0236:
+ 	case 0x10ec0255:
+ 	case 0x10ec0256:
+ 		alc_process_coef_fw(codec, coef0255);
+@@ -4705,6 +4713,7 @@ static void alc255_set_default_jack_type(struct hda_codec *codec)
+ 	case 0x10ec0255:
+ 		alc_process_coef_fw(codec, alc255fw);
+ 		break;
++	case 0x10ec0236:
+ 	case 0x10ec0256:
+ 		alc_process_coef_fw(codec, alc256fw);
+ 		break;
+@@ -6402,6 +6411,14 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ 		ALC225_STANDARD_PINS,
+ 		{0x12, 0xb7a60130},
+ 		{0x1b, 0x90170110}),
++	SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
++		{0x12, 0x90a60140},
++		{0x14, 0x90170110},
++		{0x21, 0x02211020}),
++	SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
++		{0x12, 0x90a60140},
++		{0x14, 0x90170150},
++		{0x21, 0x02211020}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
+ 		{0x14, 0x90170110},
+ 		{0x21, 0x02211020}),
+@@ -6789,6 +6806,7 @@ static int patch_alc269(struct hda_codec *codec)
+ 	case 0x10ec0255:
+ 		spec->codec_variant = ALC269_TYPE_ALC255;
+ 		break;
++	case 0x10ec0236:
+ 	case 0x10ec0256:
+ 		spec->codec_variant = ALC269_TYPE_ALC256;
+ 		spec->shutup = alc256_shutup;
+@@ -7840,6 +7858,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
+ 	HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
+ 	HDA_CODEC_ENTRY(0x10ec0234, "ALC234", patch_alc269),
+ 	HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269),
++	HDA_CODEC_ENTRY(0x10ec0236, "ALC236", patch_alc269),
+ 	HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269),
+ 	HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269),
+ 	HDA_CODEC_ENTRY(0x10ec0260, "ALC260", patch_alc260),


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.13 commit in: /
@ 2017-10-27 10:26 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2017-10-27 10:26 UTC (permalink / raw
  To: gentoo-commits

commit:     f01e5c477daa550b36769d87c9212c40a8636d20
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Oct 27 10:25:59 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Oct 27 10:25:59 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=f01e5c47

Linux patch 4.13.10

 0000_README              |    4 +
 1009_linux-4.13.10.patch | 3818 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3822 insertions(+)

diff --git a/0000_README b/0000_README
index 9628e89..053794d 100644
--- a/0000_README
+++ b/0000_README
@@ -79,6 +79,10 @@ Patch:  1008_linux-4.13.9.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.13.9
 
+Patch:  1009_linux-4.13.10.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.13.10
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1009_linux-4.13.10.patch b/1009_linux-4.13.10.patch
new file mode 100644
index 0000000..56c56ee
--- /dev/null
+++ b/1009_linux-4.13.10.patch
@@ -0,0 +1,3818 @@
+diff --git a/Makefile b/Makefile
+index aa0267950444..0e30a0d282e8 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 13
+-SUBLEVEL = 9
++SUBLEVEL = 10
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
+index aebc3f9dc7b6..fa1bbdca1710 100644
+--- a/arch/arm/boot/dts/sun6i-a31.dtsi
++++ b/arch/arm/boot/dts/sun6i-a31.dtsi
+@@ -311,8 +311,8 @@
+ 					#size-cells = <0>;
+ 					reg = <0>;
+ 
+-					tcon1_in_drc1: endpoint@0 {
+-						reg = <0>;
++					tcon1_in_drc1: endpoint@1 {
++						reg = <1>;
+ 						remote-endpoint = <&drc1_out_tcon1>;
+ 					};
+ 				};
+@@ -1012,8 +1012,8 @@
+ 					#size-cells = <0>;
+ 					reg = <1>;
+ 
+-					be1_out_drc1: endpoint@0 {
+-						reg = <0>;
++					be1_out_drc1: endpoint@1 {
++						reg = <1>;
+ 						remote-endpoint = <&drc1_in_be1>;
+ 					};
+ 				};
+@@ -1042,8 +1042,8 @@
+ 					#size-cells = <0>;
+ 					reg = <0>;
+ 
+-					drc1_in_be1: endpoint@0 {
+-						reg = <0>;
++					drc1_in_be1: endpoint@1 {
++						reg = <1>;
+ 						remote-endpoint = <&be1_out_drc1>;
+ 					};
+ 				};
+@@ -1053,8 +1053,8 @@
+ 					#size-cells = <0>;
+ 					reg = <1>;
+ 
+-					drc1_out_tcon1: endpoint@0 {
+-						reg = <0>;
++					drc1_out_tcon1: endpoint@1 {
++						reg = <1>;
+ 						remote-endpoint = <&tcon1_in_drc1>;
+ 					};
+ 				};
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
+index ba1d9810ad1e..c27242a7d5e7 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
+@@ -370,10 +370,10 @@
+ 				regulator-always-on;
+ 				regulator-boot-on;
+ 				regulator-min-microvolt = <1800000>;
+-				regulator-max-microvolt = <3300000>;
++				regulator-max-microvolt = <3000000>;
+ 				regulator-state-mem {
+ 					regulator-on-in-suspend;
+-					regulator-suspend-microvolt = <3300000>;
++					regulator-suspend-microvolt = <3000000>;
+ 				};
+ 			};
+ 
+diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
+index 23de307c3052..41e60a9c7db2 100644
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -742,7 +742,7 @@ lws_compare_and_swap_2:
+ 10:	ldd	0(%r25), %r25
+ 11:	ldd	0(%r24), %r24
+ #else
+-	/* Load new value into r22/r23 - high/low */
++	/* Load old value into r22/r23 - high/low */
+ 10:	ldw	0(%r25), %r22
+ 11:	ldw	4(%r25), %r23
+ 	/* Load new value into fr4 for atomic store later */
+@@ -834,11 +834,11 @@ cas2_action:
+ 	copy	%r0, %r28
+ #else
+ 	/* Compare first word */
+-19:	ldw,ma	0(%r26), %r29
++19:	ldw	0(%r26), %r29
+ 	sub,=	%r29, %r22, %r0
+ 	b,n	cas2_end
+ 	/* Compare second word */
+-20:	ldw,ma	4(%r26), %r29
++20:	ldw	4(%r26), %r29
+ 	sub,=	%r29, %r23, %r0
+ 	b,n	cas2_end
+ 	/* Perform the store */
+diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
+index 2d956aa0a38a..8c0105a49839 100644
+--- a/arch/parisc/kernel/time.c
++++ b/arch/parisc/kernel/time.c
+@@ -253,7 +253,10 @@ static int __init init_cr16_clocksource(void)
+ 		cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
+ 
+ 		for_each_online_cpu(cpu) {
+-			if (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc)
++			if (cpu == 0)
++				continue;
++			if ((cpu0_loc != 0) &&
++			    (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc))
+ 				continue;
+ 
+ 			clocksource_cr16.name = "cr16_unstable";
+diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
+index 1020a11a24e5..03895b6db719 100644
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -293,7 +293,10 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
+ 	lc->lpp = LPP_MAGIC;
+ 	lc->current_pid = tsk->pid;
+ 	lc->user_timer = tsk->thread.user_timer;
++	lc->guest_timer = tsk->thread.guest_timer;
+ 	lc->system_timer = tsk->thread.system_timer;
++	lc->hardirq_timer = tsk->thread.hardirq_timer;
++	lc->softirq_timer = tsk->thread.softirq_timer;
+ 	lc->steal_timer = 0;
+ }
+ 
+diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
+index 59edbe9d4ccb..636a5fcfdeb7 100644
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -34,6 +34,7 @@
+ #include <linux/mm.h>
+ 
+ #include <asm/microcode_intel.h>
++#include <asm/intel-family.h>
+ #include <asm/processor.h>
+ #include <asm/tlbflush.h>
+ #include <asm/setup.h>
+@@ -917,6 +918,18 @@ static int get_ucode_fw(void *to, const void *from, size_t n)
+ 	return 0;
+ }
+ 
++static bool is_blacklisted(unsigned int cpu)
++{
++	struct cpuinfo_x86 *c = &cpu_data(cpu);
++
++	if (c->x86 == 6 && c->x86_model == INTEL_FAM6_BROADWELL_X) {
++		pr_err_once("late loading on model 79 is disabled.\n");
++		return true;
++	}
++
++	return false;
++}
++
+ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
+ 					     bool refresh_fw)
+ {
+@@ -925,6 +938,9 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
+ 	const struct firmware *firmware;
+ 	enum ucode_state ret;
+ 
++	if (is_blacklisted(cpu))
++		return UCODE_NFOUND;
++
+ 	sprintf(name, "intel-ucode/%02x-%02x-%02x",
+ 		c->x86, c->x86_model, c->x86_mask);
+ 
+@@ -949,6 +965,9 @@ static int get_ucode_user(void *to, const void *from, size_t n)
+ static enum ucode_state
+ request_microcode_user(int cpu, const void __user *buf, size_t size)
+ {
++	if (is_blacklisted(cpu))
++		return UCODE_NFOUND;
++
+ 	return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
+ }
+ 
+diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c
+index af4cd8649117..d140d8bb2c96 100644
+--- a/crypto/asymmetric_keys/pkcs7_parser.c
++++ b/crypto/asymmetric_keys/pkcs7_parser.c
+@@ -88,6 +88,9 @@ static int pkcs7_check_authattrs(struct pkcs7_message *msg)
+ 	bool want = false;
+ 
+ 	sinfo = msg->signed_infos;
++	if (!sinfo)
++		goto inconsistent;
++
+ 	if (sinfo->authattrs) {
+ 		want = true;
+ 		msg->have_authattrs = true;
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 5bdf923294a5..da7043893249 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -243,7 +243,6 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
+ 	struct nbd_config *config = nbd->config;
+ 	config->blksize = blocksize;
+ 	config->bytesize = blocksize * nr_blocks;
+-	nbd_size_update(nbd);
+ }
+ 
+ static void nbd_complete_rq(struct request *req)
+@@ -1090,6 +1089,7 @@ static int nbd_start_device(struct nbd_device *nbd)
+ 		args->index = i;
+ 		queue_work(recv_workqueue, &args->work);
+ 	}
++	nbd_size_update(nbd);
+ 	return error;
+ }
+ 
+diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
+index c7f396903184..70db4d5638a6 100644
+--- a/drivers/bus/mvebu-mbus.c
++++ b/drivers/bus/mvebu-mbus.c
+@@ -720,7 +720,7 @@ mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
+ 			if (mbus->hw_io_coherency)
+ 				w->mbus_attr |= ATTR_HW_COHERENCY;
+ 			w->base = base & DDR_BASE_CS_LOW_MASK;
+-			w->size = (size | ~DDR_SIZE_MASK) + 1;
++			w->size = (u64)(size | ~DDR_SIZE_MASK) + 1;
+ 		}
+ 	}
+ 	mvebu_mbus_dram_info.num_cs = cs;
+diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
+index a1df588343f2..1de8cac99a0e 100644
+--- a/drivers/clocksource/cs5535-clockevt.c
++++ b/drivers/clocksource/cs5535-clockevt.c
+@@ -117,7 +117,8 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id)
+ 	/* Turn off the clock (and clear the event) */
+ 	disable_timer(cs5535_event_clock);
+ 
+-	if (clockevent_state_shutdown(&cs5535_clockevent))
++	if (clockevent_state_detached(&cs5535_clockevent) ||
++	    clockevent_state_shutdown(&cs5535_clockevent))
+ 		return IRQ_HANDLED;
+ 
+ 	/* Clear the counter */
+diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
+index d3b3252a8742..384bf4695e99 100644
+--- a/drivers/gpu/drm/i915/intel_ddi.c
++++ b/drivers/gpu/drm/i915/intel_ddi.c
+@@ -664,8 +664,8 @@ intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
+ 			    int *n_entries)
+ {
+ 	if (IS_BROADWELL(dev_priv)) {
+-		*n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
+-		return hsw_ddi_translations_fdi;
++		*n_entries = ARRAY_SIZE(bdw_ddi_translations_fdi);
++		return bdw_ddi_translations_fdi;
+ 	} else if (IS_HASWELL(dev_priv)) {
+ 		*n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
+ 		return hsw_ddi_translations_fdi;
+diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
+index 3d35ea3e95db..92ff3e4ca013 100644
+--- a/drivers/gpu/drm/nouveau/nv50_display.c
++++ b/drivers/gpu/drm/nouveau/nv50_display.c
+@@ -3281,11 +3281,14 @@ nv50_mstm = {
+ void
+ nv50_mstm_service(struct nv50_mstm *mstm)
+ {
+-	struct drm_dp_aux *aux = mstm->mgr.aux;
++	struct drm_dp_aux *aux = mstm ? mstm->mgr.aux : NULL;
+ 	bool handled = true;
+ 	int ret;
+ 	u8 esi[8] = {};
+ 
++	if (!aux)
++		return;
++
+ 	while (handled) {
+ 		ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
+ 		if (ret != 8) {
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
+index 8e2e24a74774..44e116f7880d 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
+@@ -39,5 +39,5 @@ int
+ g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
+ {
+ 	return nvkm_xtensa_new_(&g84_bsp, device, index,
+-				true, 0x103000, pengine);
++				device->chipset != 0x92, 0x103000, pengine);
+ }
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+index d06ad2c372bf..455da298227f 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+@@ -241,6 +241,8 @@ nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
+ 			mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
+ 		}
+ 
++		mmu->func->flush(vm);
++
+ 		nvkm_memory_del(&pgt);
+ 	}
+ }
+diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
+index 22ffcb73c185..b51adffa4841 100644
+--- a/drivers/i2c/busses/i2c-ismt.c
++++ b/drivers/i2c/busses/i2c-ismt.c
+@@ -340,12 +340,15 @@ static int ismt_process_desc(const struct ismt_desc *desc,
+ 			data->word = dma_buffer[0] | (dma_buffer[1] << 8);
+ 			break;
+ 		case I2C_SMBUS_BLOCK_DATA:
+-		case I2C_SMBUS_I2C_BLOCK_DATA:
+ 			if (desc->rxbytes != dma_buffer[0] + 1)
+ 				return -EMSGSIZE;
+ 
+ 			memcpy(data->block, dma_buffer, desc->rxbytes);
+ 			break;
++		case I2C_SMBUS_I2C_BLOCK_DATA:
++			memcpy(&data->block[1], dma_buffer, desc->rxbytes);
++			data->block[0] = desc->rxbytes;
++			break;
+ 		}
+ 		return 0;
+ 	}
+diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
+index 0ecdb47a23ab..01f767ee4546 100644
+--- a/drivers/i2c/busses/i2c-piix4.c
++++ b/drivers/i2c/busses/i2c-piix4.c
+@@ -94,6 +94,12 @@
+ #define SB800_PIIX4_PORT_IDX_ALT	0x2e
+ #define SB800_PIIX4_PORT_IDX_SEL	0x2f
+ #define SB800_PIIX4_PORT_IDX_MASK	0x06
++#define SB800_PIIX4_PORT_IDX_SHIFT	1
++
++/* On kerncz, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
++#define SB800_PIIX4_PORT_IDX_KERNCZ		0x02
++#define SB800_PIIX4_PORT_IDX_MASK_KERNCZ	0x18
++#define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ	3
+ 
+ /* insmod parameters */
+ 
+@@ -149,6 +155,8 @@ static const struct dmi_system_id piix4_dmi_ibm[] = {
+  */
+ static DEFINE_MUTEX(piix4_mutex_sb800);
+ static u8 piix4_port_sel_sb800;
++static u8 piix4_port_mask_sb800;
++static u8 piix4_port_shift_sb800;
+ static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = {
+ 	" port 0", " port 2", " port 3", " port 4"
+ };
+@@ -347,7 +355,19 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
+ 
+ 	/* Find which register is used for port selection */
+ 	if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD) {
+-		piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
++		switch (PIIX4_dev->device) {
++		case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS:
++			piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ;
++			piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ;
++			piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ;
++			break;
++		case PCI_DEVICE_ID_AMD_HUDSON2_SMBUS:
++		default:
++			piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
++			piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
++			piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
++			break;
++		}
+ 	} else {
+ 		mutex_lock(&piix4_mutex_sb800);
+ 		outb_p(SB800_PIIX4_PORT_IDX_SEL, SB800_PIIX4_SMB_IDX);
+@@ -355,6 +375,8 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
+ 		piix4_port_sel_sb800 = (port_sel & 0x01) ?
+ 				       SB800_PIIX4_PORT_IDX_ALT :
+ 				       SB800_PIIX4_PORT_IDX;
++		piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
++		piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
+ 		mutex_unlock(&piix4_mutex_sb800);
+ 	}
+ 
+@@ -616,8 +638,8 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
+ 	smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
+ 
+ 	port = adapdata->port;
+-	if ((smba_en_lo & SB800_PIIX4_PORT_IDX_MASK) != port)
+-		outb_p((smba_en_lo & ~SB800_PIIX4_PORT_IDX_MASK) | port,
++	if ((smba_en_lo & piix4_port_mask_sb800) != port)
++		outb_p((smba_en_lo & ~piix4_port_mask_sb800) | port,
+ 		       SB800_PIIX4_SMB_IDX + 1);
+ 
+ 	retval = piix4_access(adap, addr, flags, read_write,
+@@ -706,7 +728,7 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
+ 
+ 	adapdata->smba = smba;
+ 	adapdata->sb800_main = sb800_main;
+-	adapdata->port = port << 1;
++	adapdata->port = port << piix4_port_shift_sb800;
+ 
+ 	/* set up the sysfs linkage to our parent device */
+ 	adap->dev.parent = &dev->dev;
+diff --git a/drivers/iio/dummy/iio_simple_dummy_events.c b/drivers/iio/dummy/iio_simple_dummy_events.c
+index ed63ffd849f8..7ec2a0bb0807 100644
+--- a/drivers/iio/dummy/iio_simple_dummy_events.c
++++ b/drivers/iio/dummy/iio_simple_dummy_events.c
+@@ -72,6 +72,7 @@ int iio_simple_dummy_write_event_config(struct iio_dev *indio_dev,
+ 				st->event_en = state;
+ 			else
+ 				return -EINVAL;
++			break;
+ 		default:
+ 			return -EINVAL;
+ 		}
+diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
+index 157fdb4bb2e8..8c6c6178ec12 100644
+--- a/drivers/input/touchscreen/stmfts.c
++++ b/drivers/input/touchscreen/stmfts.c
+@@ -663,12 +663,10 @@ static int stmfts_probe(struct i2c_client *client,
+ 	sdata->input->open = stmfts_input_open;
+ 	sdata->input->close = stmfts_input_close;
+ 
++	input_set_capability(sdata->input, EV_ABS, ABS_MT_POSITION_X);
++	input_set_capability(sdata->input, EV_ABS, ABS_MT_POSITION_Y);
+ 	touchscreen_parse_properties(sdata->input, true, &sdata->prop);
+ 
+-	input_set_abs_params(sdata->input, ABS_MT_POSITION_X, 0,
+-						sdata->prop.max_x, 0, 0);
+-	input_set_abs_params(sdata->input, ABS_MT_POSITION_Y, 0,
+-						sdata->prop.max_y, 0, 0);
+ 	input_set_abs_params(sdata->input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
+ 	input_set_abs_params(sdata->input, ABS_MT_TOUCH_MINOR, 0, 255, 0, 0);
+ 	input_set_abs_params(sdata->input, ABS_MT_ORIENTATION, 0, 255, 0, 0);
+diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
+index d596b601ff42..8cfc5e84a129 100644
+--- a/drivers/media/cec/cec-adap.c
++++ b/drivers/media/cec/cec-adap.c
+@@ -1726,12 +1726,19 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
+ 	 */
+ 	switch (msg->msg[1]) {
+ 	case CEC_MSG_GET_CEC_VERSION:
+-	case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
+ 	case CEC_MSG_ABORT:
+ 	case CEC_MSG_GIVE_DEVICE_POWER_STATUS:
+-	case CEC_MSG_GIVE_PHYSICAL_ADDR:
+ 	case CEC_MSG_GIVE_OSD_NAME:
++		/*
++		 * These messages reply with a directed message, so ignore if
++		 * the initiator is Unregistered.
++		 */
++		if (!adap->passthrough && from_unregistered)
++			return 0;
++		/* Fall through */
++	case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
+ 	case CEC_MSG_GIVE_FEATURES:
++	case CEC_MSG_GIVE_PHYSICAL_ADDR:
+ 		/*
+ 		 * Skip processing these messages if the passthrough mode
+ 		 * is on.
+@@ -1739,7 +1746,7 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
+ 		if (adap->passthrough)
+ 			goto skip_processing;
+ 		/* Ignore if addressing is wrong */
+-		if (is_broadcast || from_unregistered)
++		if (is_broadcast)
+ 			return 0;
+ 		break;
+ 
+diff --git a/drivers/media/dvb-frontends/dib3000mc.c b/drivers/media/dvb-frontends/dib3000mc.c
+index 224283fe100a..4d086a7248e9 100644
+--- a/drivers/media/dvb-frontends/dib3000mc.c
++++ b/drivers/media/dvb-frontends/dib3000mc.c
+@@ -55,29 +55,57 @@ struct dib3000mc_state {
+ 
+ static u16 dib3000mc_read_word(struct dib3000mc_state *state, u16 reg)
+ {
+-	u8 wb[2] = { (reg >> 8) | 0x80, reg & 0xff };
+-	u8 rb[2];
+ 	struct i2c_msg msg[2] = {
+-		{ .addr = state->i2c_addr >> 1, .flags = 0,        .buf = wb, .len = 2 },
+-		{ .addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .buf = rb, .len = 2 },
++		{ .addr = state->i2c_addr >> 1, .flags = 0,        .len = 2 },
++		{ .addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .len = 2 },
+ 	};
++	u16 word;
++	u8 *b;
++
++	b = kmalloc(4, GFP_KERNEL);
++	if (!b)
++		return 0;
++
++	b[0] = (reg >> 8) | 0x80;
++	b[1] = reg;
++	b[2] = 0;
++	b[3] = 0;
++
++	msg[0].buf = b;
++	msg[1].buf = b + 2;
+ 
+ 	if (i2c_transfer(state->i2c_adap, msg, 2) != 2)
+ 		dprintk("i2c read error on %d\n",reg);
+ 
+-	return (rb[0] << 8) | rb[1];
++	word = (b[2] << 8) | b[3];
++	kfree(b);
++
++	return word;
+ }
+ 
+ static int dib3000mc_write_word(struct dib3000mc_state *state, u16 reg, u16 val)
+ {
+-	u8 b[4] = {
+-		(reg >> 8) & 0xff, reg & 0xff,
+-		(val >> 8) & 0xff, val & 0xff,
+-	};
+ 	struct i2c_msg msg = {
+-		.addr = state->i2c_addr >> 1, .flags = 0, .buf = b, .len = 4
++		.addr = state->i2c_addr >> 1, .flags = 0, .len = 4
+ 	};
+-	return i2c_transfer(state->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
++	int rc;
++	u8 *b;
++
++	b = kmalloc(4, GFP_KERNEL);
++	if (!b)
++		return -ENOMEM;
++
++	b[0] = reg >> 8;
++	b[1] = reg;
++	b[2] = val >> 8;
++	b[3] = val;
++
++	msg.buf = b;
++
++	rc = i2c_transfer(state->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
++	kfree(b);
++
++	return rc;
+ }
+ 
+ static int dib3000mc_identify(struct dib3000mc_state *state)
+diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
+index 7bec3e028bee..5553b89b804e 100644
+--- a/drivers/media/dvb-frontends/dvb-pll.c
++++ b/drivers/media/dvb-frontends/dvb-pll.c
+@@ -753,13 +753,19 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
+ 				    struct i2c_adapter *i2c,
+ 				    unsigned int pll_desc_id)
+ {
+-	u8 b1 [] = { 0 };
+-	struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD,
+-			       .buf = b1, .len = 1 };
++	u8 *b1;
++	struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD, .len = 1 };
+ 	struct dvb_pll_priv *priv = NULL;
+ 	int ret;
+ 	const struct dvb_pll_desc *desc;
+ 
++	b1 = kmalloc(1, GFP_KERNEL);
++	if (!b1)
++		return NULL;
++
++	b1[0] = 0;
++	msg.buf = b1;
++
+ 	if ((id[dvb_pll_devcount] > DVB_PLL_UNDEFINED) &&
+ 	    (id[dvb_pll_devcount] < ARRAY_SIZE(pll_list)))
+ 		pll_desc_id = id[dvb_pll_devcount];
+@@ -773,15 +779,19 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
+ 			fe->ops.i2c_gate_ctrl(fe, 1);
+ 
+ 		ret = i2c_transfer (i2c, &msg, 1);
+-		if (ret != 1)
++		if (ret != 1) {
++			kfree(b1);
+ 			return NULL;
++		}
+ 		if (fe->ops.i2c_gate_ctrl)
+ 			     fe->ops.i2c_gate_ctrl(fe, 0);
+ 	}
+ 
+ 	priv = kzalloc(sizeof(struct dvb_pll_priv), GFP_KERNEL);
+-	if (priv == NULL)
++	if (!priv) {
++		kfree(b1);
+ 		return NULL;
++	}
+ 
+ 	priv->pll_i2c_address = pll_addr;
+ 	priv->i2c = i2c;
+@@ -811,6 +821,8 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
+ 				"insmod option" : "autodetected");
+ 	}
+ 
++	kfree(b1);
++
+ 	return fe;
+ }
+ EXPORT_SYMBOL(dvb_pll_attach);
+diff --git a/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c b/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c
+index 1edf667d562a..146ae6f25cdb 100644
+--- a/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c
++++ b/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c
+@@ -172,7 +172,8 @@ u32 s5p_cec_get_status(struct s5p_cec_dev *cec)
+ {
+ 	u32 status = 0;
+ 
+-	status = readb(cec->reg + S5P_CEC_STATUS_0);
++	status = readb(cec->reg + S5P_CEC_STATUS_0) & 0xf;
++	status |= (readb(cec->reg + S5P_CEC_TX_STAT1) & 0xf) << 4;
+ 	status |= readb(cec->reg + S5P_CEC_STATUS_1) << 8;
+ 	status |= readb(cec->reg + S5P_CEC_STATUS_2) << 16;
+ 	status |= readb(cec->reg + S5P_CEC_STATUS_3) << 24;
+diff --git a/drivers/media/platform/s5p-cec/s5p_cec.c b/drivers/media/platform/s5p-cec/s5p_cec.c
+index 8e06071a7977..7d8d67e5448f 100644
+--- a/drivers/media/platform/s5p-cec/s5p_cec.c
++++ b/drivers/media/platform/s5p-cec/s5p_cec.c
+@@ -92,7 +92,10 @@ static irqreturn_t s5p_cec_irq_handler(int irq, void *priv)
+ 	dev_dbg(cec->dev, "irq received\n");
+ 
+ 	if (status & CEC_STATUS_TX_DONE) {
+-		if (status & CEC_STATUS_TX_ERROR) {
++		if (status & CEC_STATUS_TX_NACK) {
++			dev_dbg(cec->dev, "CEC_STATUS_TX_NACK set\n");
++			cec->tx = STATE_NACK;
++		} else if (status & CEC_STATUS_TX_ERROR) {
+ 			dev_dbg(cec->dev, "CEC_STATUS_TX_ERROR set\n");
+ 			cec->tx = STATE_ERROR;
+ 		} else {
+@@ -135,6 +138,12 @@ static irqreturn_t s5p_cec_irq_handler_thread(int irq, void *priv)
+ 		cec_transmit_done(cec->adap, CEC_TX_STATUS_OK, 0, 0, 0, 0);
+ 		cec->tx = STATE_IDLE;
+ 		break;
++	case STATE_NACK:
++		cec_transmit_done(cec->adap,
++			CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_NACK,
++			0, 1, 0, 0);
++		cec->tx = STATE_IDLE;
++		break;
+ 	case STATE_ERROR:
+ 		cec_transmit_done(cec->adap,
+ 			CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_ERROR,
+diff --git a/drivers/media/platform/s5p-cec/s5p_cec.h b/drivers/media/platform/s5p-cec/s5p_cec.h
+index 8bcd8dc1aeb9..86ded522ef27 100644
+--- a/drivers/media/platform/s5p-cec/s5p_cec.h
++++ b/drivers/media/platform/s5p-cec/s5p_cec.h
+@@ -35,6 +35,7 @@
+ #define CEC_STATUS_TX_TRANSFERRING	(1 << 1)
+ #define CEC_STATUS_TX_DONE		(1 << 2)
+ #define CEC_STATUS_TX_ERROR		(1 << 3)
++#define CEC_STATUS_TX_NACK		(1 << 4)
+ #define CEC_STATUS_TX_BYTES		(0xFF << 8)
+ #define CEC_STATUS_RX_RUNNING		(1 << 16)
+ #define CEC_STATUS_RX_RECEIVING		(1 << 17)
+@@ -55,6 +56,7 @@ enum cec_state {
+ 	STATE_IDLE,
+ 	STATE_BUSY,
+ 	STATE_DONE,
++	STATE_NACK,
+ 	STATE_ERROR
+ };
+ 
+diff --git a/drivers/media/tuners/mt2060.c b/drivers/media/tuners/mt2060.c
+index 2e487f9a2cc3..4983eeb39f36 100644
+--- a/drivers/media/tuners/mt2060.c
++++ b/drivers/media/tuners/mt2060.c
+@@ -38,41 +38,74 @@ MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
+ static int mt2060_readreg(struct mt2060_priv *priv, u8 reg, u8 *val)
+ {
+ 	struct i2c_msg msg[2] = {
+-		{ .addr = priv->cfg->i2c_address, .flags = 0,        .buf = &reg, .len = 1 },
+-		{ .addr = priv->cfg->i2c_address, .flags = I2C_M_RD, .buf = val,  .len = 1 },
++		{ .addr = priv->cfg->i2c_address, .flags = 0, .len = 1 },
++		{ .addr = priv->cfg->i2c_address, .flags = I2C_M_RD, .len = 1 },
+ 	};
++	int rc = 0;
++	u8 *b;
++
++	b = kmalloc(2, GFP_KERNEL);
++	if (!b)
++		return -ENOMEM;
++
++	b[0] = reg;
++	b[1] = 0;
++
++	msg[0].buf = b;
++	msg[1].buf = b + 1;
+ 
+ 	if (i2c_transfer(priv->i2c, msg, 2) != 2) {
+ 		printk(KERN_WARNING "mt2060 I2C read failed\n");
+-		return -EREMOTEIO;
++		rc = -EREMOTEIO;
+ 	}
+-	return 0;
++	*val = b[1];
++	kfree(b);
++
++	return rc;
+ }
+ 
+ // Writes a single register
+ static int mt2060_writereg(struct mt2060_priv *priv, u8 reg, u8 val)
+ {
+-	u8 buf[2] = { reg, val };
+ 	struct i2c_msg msg = {
+-		.addr = priv->cfg->i2c_address, .flags = 0, .buf = buf, .len = 2
++		.addr = priv->cfg->i2c_address, .flags = 0, .len = 2
+ 	};
++	u8 *buf;
++	int rc = 0;
++
++	buf = kmalloc(2, GFP_KERNEL);
++	if (!buf)
++		return -ENOMEM;
++
++	buf[0] = reg;
++	buf[1] = val;
++
++	msg.buf = buf;
+ 
+ 	if (i2c_transfer(priv->i2c, &msg, 1) != 1) {
+ 		printk(KERN_WARNING "mt2060 I2C write failed\n");
+-		return -EREMOTEIO;
++		rc = -EREMOTEIO;
+ 	}
+-	return 0;
++	kfree(buf);
++	return rc;
+ }
+ 
+ // Writes a set of consecutive registers
+ static int mt2060_writeregs(struct mt2060_priv *priv,u8 *buf, u8 len)
+ {
+ 	int rem, val_len;
+-	u8 xfer_buf[16];
++	u8 *xfer_buf;
++	int rc = 0;
+ 	struct i2c_msg msg = {
+-		.addr = priv->cfg->i2c_address, .flags = 0, .buf = xfer_buf
++		.addr = priv->cfg->i2c_address, .flags = 0
+ 	};
+ 
++	xfer_buf = kmalloc(16, GFP_KERNEL);
++	if (!xfer_buf)
++		return -ENOMEM;
++
++	msg.buf = xfer_buf;
++
+ 	for (rem = len - 1; rem > 0; rem -= priv->i2c_max_regs) {
+ 		val_len = min_t(int, rem, priv->i2c_max_regs);
+ 		msg.len = 1 + val_len;
+@@ -81,11 +114,13 @@ static int mt2060_writeregs(struct mt2060_priv *priv,u8 *buf, u8 len)
+ 
+ 		if (i2c_transfer(priv->i2c, &msg, 1) != 1) {
+ 			printk(KERN_WARNING "mt2060 I2C write failed (len=%i)\n", val_len);
+-			return -EREMOTEIO;
++			rc = -EREMOTEIO;
++			break;
+ 		}
+ 	}
+ 
+-	return 0;
++	kfree(xfer_buf);
++	return rc;
+ }
+ 
+ // Initialisation sequences
+diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
+index ba8a0f58fe08..b2d0ee0be8c3 100644
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -449,6 +449,8 @@ static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
+ 	int err;
+ 	u32 val;
+ 
++	intel_host->d3_retune = true;
++
+ 	err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
+ 	if (err) {
+ 		pr_debug("%s: DSM not supported, error %d\n",
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index 13f0f219d8aa..a13a4896a8bd 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -182,22 +182,23 @@
+ /* FLEXCAN hardware feature flags
+  *
+  * Below is some version info we got:
+- *    SOC   Version   IP-Version  Glitch- [TR]WRN_INT Memory err RTR re-
+- *                                Filter? connected?  detection  ception in MB
+- *   MX25  FlexCAN2  03.00.00.00     no        no         no        no
+- *   MX28  FlexCAN2  03.00.04.00    yes       yes         no        no
+- *   MX35  FlexCAN2  03.00.00.00     no        no         no        no
+- *   MX53  FlexCAN2  03.00.00.00    yes        no         no        no
+- *   MX6s  FlexCAN3  10.00.12.00    yes       yes         no       yes
+- *   VF610 FlexCAN3  ?               no       yes        yes       yes?
++ *    SOC   Version   IP-Version  Glitch- [TR]WRN_INT IRQ Err Memory err RTR re-
++ *                                Filter? connected?  Passive detection  ception in MB
++ *   MX25  FlexCAN2  03.00.00.00     no        no         ?       no        no
++ *   MX28  FlexCAN2  03.00.04.00    yes       yes        no       no        no
++ *   MX35  FlexCAN2  03.00.00.00     no        no         ?       no        no
++ *   MX53  FlexCAN2  03.00.00.00    yes        no        no       no        no
++ *   MX6s  FlexCAN3  10.00.12.00    yes       yes        no       no       yes
++ *   VF610 FlexCAN3  ?               no       yes         ?      yes       yes?
+  *
+  * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
+  */
+-#define FLEXCAN_QUIRK_BROKEN_ERR_STATE	BIT(1) /* [TR]WRN_INT not connected */
++#define FLEXCAN_QUIRK_BROKEN_WERR_STATE	BIT(1) /* [TR]WRN_INT not connected */
+ #define FLEXCAN_QUIRK_DISABLE_RXFG	BIT(2) /* Disable RX FIFO Global mask */
+ #define FLEXCAN_QUIRK_ENABLE_EACEN_RRS	BIT(3) /* Enable EACEN and RRS bit in ctrl2 */
+ #define FLEXCAN_QUIRK_DISABLE_MECR	BIT(4) /* Disable Memory error detection */
+ #define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP	BIT(5) /* Use timestamp based offloading */
++#define FLEXCAN_QUIRK_BROKEN_PERR_STATE	BIT(6) /* No interrupt for error passive */
+ 
+ /* Structure of the message buffer */
+ struct flexcan_mb {
+@@ -281,14 +282,17 @@ struct flexcan_priv {
+ };
+ 
+ static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
+-	.quirks = FLEXCAN_QUIRK_BROKEN_ERR_STATE,
++	.quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
++		FLEXCAN_QUIRK_BROKEN_PERR_STATE,
+ };
+ 
+-static const struct flexcan_devtype_data fsl_imx28_devtype_data;
++static const struct flexcan_devtype_data fsl_imx28_devtype_data = {
++	.quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE,
++};
+ 
+ static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
+ 	.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+-		FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
++		FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE,
+ };
+ 
+ static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
+@@ -335,6 +339,22 @@ static inline void flexcan_write(u32 val, void __iomem *addr)
+ }
+ #endif
+ 
++static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv)
++{
++	struct flexcan_regs __iomem *regs = priv->regs;
++	u32 reg_ctrl = (priv->reg_ctrl_default | FLEXCAN_CTRL_ERR_MSK);
++
++	flexcan_write(reg_ctrl, &regs->ctrl);
++}
++
++static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv)
++{
++	struct flexcan_regs __iomem *regs = priv->regs;
++	u32 reg_ctrl = (priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_MSK);
++
++	flexcan_write(reg_ctrl, &regs->ctrl);
++}
++
+ static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
+ {
+ 	if (!priv->reg_xceiver)
+@@ -713,6 +733,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
+ 	struct flexcan_regs __iomem *regs = priv->regs;
+ 	irqreturn_t handled = IRQ_NONE;
+ 	u32 reg_iflag1, reg_esr;
++	enum can_state last_state = priv->can.state;
+ 
+ 	reg_iflag1 = flexcan_read(&regs->iflag1);
+ 
+@@ -765,8 +786,10 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
+ 		flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
+ 	}
+ 
+-	/* state change interrupt */
+-	if (reg_esr & FLEXCAN_ESR_ERR_STATE)
++	/* state change interrupt or broken error state quirk fix is enabled */
++	if ((reg_esr & FLEXCAN_ESR_ERR_STATE) ||
++	    (priv->devtype_data->quirks & (FLEXCAN_QUIRK_BROKEN_WERR_STATE |
++	                                   FLEXCAN_QUIRK_BROKEN_PERR_STATE)))
+ 		flexcan_irq_state(dev, reg_esr);
+ 
+ 	/* bus error IRQ - handle if bus error reporting is activated */
+@@ -774,6 +797,44 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
+ 	    (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
+ 		flexcan_irq_bus_err(dev, reg_esr);
+ 
++	/* availability of error interrupt among state transitions in case
++	 * bus error reporting is de-activated and
++	 * FLEXCAN_QUIRK_BROKEN_PERR_STATE is enabled:
++	 *  +--------------------------------------------------------------+
++	 *  | +----------------------------------------------+ [stopped /  |
++	 *  | |                                              |  sleeping] -+
++	 *  +-+-> active <-> warning <-> passive -> bus off -+
++	 *        ___________^^^^^^^^^^^^_______________________________
++	 *        disabled(1)  enabled             disabled
++	 *
++	 * (1): enabled if FLEXCAN_QUIRK_BROKEN_WERR_STATE is enabled
++	 */
++	if ((last_state != priv->can.state) &&
++	    (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_PERR_STATE) &&
++	    !(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) {
++		switch (priv->can.state) {
++		case CAN_STATE_ERROR_ACTIVE:
++			if (priv->devtype_data->quirks &
++			    FLEXCAN_QUIRK_BROKEN_WERR_STATE)
++				flexcan_error_irq_enable(priv);
++			else
++				flexcan_error_irq_disable(priv);
++			break;
++
++		case CAN_STATE_ERROR_WARNING:
++			flexcan_error_irq_enable(priv);
++			break;
++
++		case CAN_STATE_ERROR_PASSIVE:
++		case CAN_STATE_BUS_OFF:
++			flexcan_error_irq_disable(priv);
++			break;
++
++		default:
++			break;
++		}
++	}
++
+ 	return handled;
+ }
+ 
+@@ -887,7 +948,7 @@ static int flexcan_chip_start(struct net_device *dev)
+ 	 * on most Flexcan cores, too. Otherwise we don't get
+ 	 * any error warning or passive interrupts.
+ 	 */
+-	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_ERR_STATE ||
++	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_WERR_STATE ||
+ 	    priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ 		reg_ctrl |= FLEXCAN_CTRL_ERR_MSK;
+ 	else
+diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
+index be928ce62d32..9fdb0f0bfa06 100644
+--- a/drivers/net/can/usb/esd_usb2.c
++++ b/drivers/net/can/usb/esd_usb2.c
+@@ -333,7 +333,7 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
+ 		}
+ 
+ 		cf->can_id = id & ESD_IDMASK;
+-		cf->can_dlc = get_can_dlc(msg->msg.rx.dlc);
++		cf->can_dlc = get_can_dlc(msg->msg.rx.dlc & ~ESD_RTR);
+ 
+ 		if (id & ESD_EXTID)
+ 			cf->can_id |= CAN_EFF_FLAG;
+diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
+index afcc1312dbaf..68ac3e88a8ce 100644
+--- a/drivers/net/can/usb/gs_usb.c
++++ b/drivers/net/can/usb/gs_usb.c
+@@ -375,6 +375,8 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
+ 
+ 		gs_free_tx_context(txc);
+ 
++		atomic_dec(&dev->active_tx_urbs);
++
+ 		netif_wake_queue(netdev);
+ 	}
+ 
+@@ -463,14 +465,6 @@ static void gs_usb_xmit_callback(struct urb *urb)
+ 			  urb->transfer_buffer_length,
+ 			  urb->transfer_buffer,
+ 			  urb->transfer_dma);
+-
+-	atomic_dec(&dev->active_tx_urbs);
+-
+-	if (!netif_device_present(netdev))
+-		return;
+-
+-	if (netif_queue_stopped(netdev))
+-		netif_wake_queue(netdev);
+ }
+ 
+ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+index 4eb1e1ce9ace..ef72baf6dd96 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+@@ -429,7 +429,8 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr,
+ 	if (code != BRCMF_E_IF && !fweh->evt_handler[code])
+ 		return;
+ 
+-	if (datalen > BRCMF_DCMD_MAXLEN)
++	if (datalen > BRCMF_DCMD_MAXLEN ||
++	    datalen + sizeof(*event_packet) > packet_len)
+ 		return;
+ 
+ 	if (in_interrupt())
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+index b3aab2fe96eb..ef685465f80a 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+@@ -14764,8 +14764,8 @@ static void wlc_phy_ipa_restore_tx_digi_filts_nphy(struct brcms_phy *pi)
+ }
+ 
+ static void
+-wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, u8 *events, u8 *dlys,
+-		       u8 len)
++wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, const u8 *events,
++		       const u8 *dlys, u8 len)
+ {
+ 	u32 t1_offset, t2_offset;
+ 	u8 ctr;
+@@ -15240,16 +15240,16 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev5(struct brcms_phy *pi)
+ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
+ {
+ 	u16 currband;
+-	s8 lna1G_gain_db_rev7[] = { 9, 14, 19, 24 };
+-	s8 *lna1_gain_db = NULL;
+-	s8 *lna1_gain_db_2 = NULL;
+-	s8 *lna2_gain_db = NULL;
+-	s8 tiaA_gain_db_rev7[] = { -9, -6, -3, 0, 3, 3, 3, 3, 3, 3 };
+-	s8 *tia_gain_db;
+-	s8 tiaA_gainbits_rev7[] = { 0, 1, 2, 3, 4, 4, 4, 4, 4, 4 };
+-	s8 *tia_gainbits;
+-	u16 rfseqA_init_gain_rev7[] = { 0x624f, 0x624f };
+-	u16 *rfseq_init_gain;
++	static const s8 lna1G_gain_db_rev7[] = { 9, 14, 19, 24 };
++	const s8 *lna1_gain_db = NULL;
++	const s8 *lna1_gain_db_2 = NULL;
++	const s8 *lna2_gain_db = NULL;
++	static const s8 tiaA_gain_db_rev7[] = { -9, -6, -3, 0, 3, 3, 3, 3, 3, 3 };
++	const s8 *tia_gain_db;
++	static const s8 tiaA_gainbits_rev7[] = { 0, 1, 2, 3, 4, 4, 4, 4, 4, 4 };
++	const s8 *tia_gainbits;
++	static const u16 rfseqA_init_gain_rev7[] = { 0x624f, 0x624f };
++	const u16 *rfseq_init_gain;
+ 	u16 init_gaincode;
+ 	u16 clip1hi_gaincode;
+ 	u16 clip1md_gaincode = 0;
+@@ -15310,10 +15310,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
+ 
+ 			if ((freq <= 5080) || (freq == 5825)) {
+ 
+-				s8 lna1A_gain_db_rev7[] = { 11, 16, 20, 24 };
+-				s8 lna1A_gain_db_2_rev7[] = {
+-					11, 17, 22, 25};
+-				s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
++				static const s8 lna1A_gain_db_rev7[] = { 11, 16, 20, 24 };
++				static const s8 lna1A_gain_db_2_rev7[] = { 11, 17, 22, 25};
++				static const s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
+ 
+ 				crsminu_th = 0x3e;
+ 				lna1_gain_db = lna1A_gain_db_rev7;
+@@ -15321,10 +15320,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
+ 				lna2_gain_db = lna2A_gain_db_rev7;
+ 			} else if ((freq >= 5500) && (freq <= 5700)) {
+ 
+-				s8 lna1A_gain_db_rev7[] = { 11, 17, 21, 25 };
+-				s8 lna1A_gain_db_2_rev7[] = {
+-					12, 18, 22, 26};
+-				s8 lna2A_gain_db_rev7[] = { 1, 8, 12, 16 };
++				static const s8 lna1A_gain_db_rev7[] = { 11, 17, 21, 25 };
++				static const s8 lna1A_gain_db_2_rev7[] = { 12, 18, 22, 26};
++				static const s8 lna2A_gain_db_rev7[] = { 1, 8, 12, 16 };
+ 
+ 				crsminu_th = 0x45;
+ 				clip1md_gaincode_B = 0x14;
+@@ -15335,10 +15333,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
+ 				lna2_gain_db = lna2A_gain_db_rev7;
+ 			} else {
+ 
+-				s8 lna1A_gain_db_rev7[] = { 12, 18, 22, 26 };
+-				s8 lna1A_gain_db_2_rev7[] = {
+-					12, 18, 22, 26};
+-				s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
++				static const s8 lna1A_gain_db_rev7[] = { 12, 18, 22, 26 };
++				static const s8 lna1A_gain_db_2_rev7[] = { 12, 18, 22, 26};
++				static const s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
+ 
+ 				crsminu_th = 0x41;
+ 				lna1_gain_db = lna1A_gain_db_rev7;
+@@ -15450,65 +15447,65 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
+ 		NPHY_RFSEQ_CMD_CLR_HIQ_DIS,
+ 		NPHY_RFSEQ_CMD_SET_HPF_BW
+ 	};
+-	u8 rfseq_updategainu_dlys[] = { 10, 30, 1 };
+-	s8 lna1G_gain_db[] = { 7, 11, 16, 23 };
+-	s8 lna1G_gain_db_rev4[] = { 8, 12, 17, 25 };
+-	s8 lna1G_gain_db_rev5[] = { 9, 13, 18, 26 };
+-	s8 lna1G_gain_db_rev6[] = { 8, 13, 18, 25 };
+-	s8 lna1G_gain_db_rev6_224B0[] = { 10, 14, 19, 27 };
+-	s8 lna1A_gain_db[] = { 7, 11, 17, 23 };
+-	s8 lna1A_gain_db_rev4[] = { 8, 12, 18, 23 };
+-	s8 lna1A_gain_db_rev5[] = { 6, 10, 16, 21 };
+-	s8 lna1A_gain_db_rev6[] = { 6, 10, 16, 21 };
+-	s8 *lna1_gain_db = NULL;
+-	s8 lna2G_gain_db[] = { -5, 6, 10, 14 };
+-	s8 lna2G_gain_db_rev5[] = { -3, 7, 11, 16 };
+-	s8 lna2G_gain_db_rev6[] = { -5, 6, 10, 14 };
+-	s8 lna2G_gain_db_rev6_224B0[] = { -5, 6, 10, 15 };
+-	s8 lna2A_gain_db[] = { -6, 2, 6, 10 };
+-	s8 lna2A_gain_db_rev4[] = { -5, 2, 6, 10 };
+-	s8 lna2A_gain_db_rev5[] = { -7, 0, 4, 8 };
+-	s8 lna2A_gain_db_rev6[] = { -7, 0, 4, 8 };
+-	s8 *lna2_gain_db = NULL;
+-	s8 tiaG_gain_db[] = {
++	static const u8 rfseq_updategainu_dlys[] = { 10, 30, 1 };
++	static const s8 lna1G_gain_db[] = { 7, 11, 16, 23 };
++	static const s8 lna1G_gain_db_rev4[] = { 8, 12, 17, 25 };
++	static const s8 lna1G_gain_db_rev5[] = { 9, 13, 18, 26 };
++	static const s8 lna1G_gain_db_rev6[] = { 8, 13, 18, 25 };
++	static const s8 lna1G_gain_db_rev6_224B0[] = { 10, 14, 19, 27 };
++	static const s8 lna1A_gain_db[] = { 7, 11, 17, 23 };
++	static const s8 lna1A_gain_db_rev4[] = { 8, 12, 18, 23 };
++	static const s8 lna1A_gain_db_rev5[] = { 6, 10, 16, 21 };
++	static const s8 lna1A_gain_db_rev6[] = { 6, 10, 16, 21 };
++	const s8 *lna1_gain_db = NULL;
++	static const s8 lna2G_gain_db[] = { -5, 6, 10, 14 };
++	static const s8 lna2G_gain_db_rev5[] = { -3, 7, 11, 16 };
++	static const s8 lna2G_gain_db_rev6[] = { -5, 6, 10, 14 };
++	static const s8 lna2G_gain_db_rev6_224B0[] = { -5, 6, 10, 15 };
++	static const s8 lna2A_gain_db[] = { -6, 2, 6, 10 };
++	static const s8 lna2A_gain_db_rev4[] = { -5, 2, 6, 10 };
++	static const s8 lna2A_gain_db_rev5[] = { -7, 0, 4, 8 };
++	static const s8 lna2A_gain_db_rev6[] = { -7, 0, 4, 8 };
++	const s8 *lna2_gain_db = NULL;
++	static const s8 tiaG_gain_db[] = {
+ 		0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A };
+-	s8 tiaA_gain_db[] = {
++	static const s8 tiaA_gain_db[] = {
+ 		0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 };
+-	s8 tiaA_gain_db_rev4[] = {
++	static const s8 tiaA_gain_db_rev4[] = {
+ 		0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
+-	s8 tiaA_gain_db_rev5[] = {
++	static const s8 tiaA_gain_db_rev5[] = {
+ 		0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
+-	s8 tiaA_gain_db_rev6[] = {
++	static const s8 tiaA_gain_db_rev6[] = {
+ 		0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
+-	s8 *tia_gain_db;
+-	s8 tiaG_gainbits[] = {
++	const s8 *tia_gain_db;
++	static const s8 tiaG_gainbits[] = {
+ 		0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 };
+-	s8 tiaA_gainbits[] = {
++	static const s8 tiaA_gainbits[] = {
+ 		0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06 };
+-	s8 tiaA_gainbits_rev4[] = {
++	static const s8 tiaA_gainbits_rev4[] = {
+ 		0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
+-	s8 tiaA_gainbits_rev5[] = {
++	static const s8 tiaA_gainbits_rev5[] = {
+ 		0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
+-	s8 tiaA_gainbits_rev6[] = {
++	static const s8 tiaA_gainbits_rev6[] = {
+ 		0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
+-	s8 *tia_gainbits;
+-	s8 lpf_gain_db[] = { 0x00, 0x06, 0x0c, 0x12, 0x12, 0x12 };
+-	s8 lpf_gainbits[] = { 0x00, 0x01, 0x02, 0x03, 0x03, 0x03 };
+-	u16 rfseqG_init_gain[] = { 0x613f, 0x613f, 0x613f, 0x613f };
+-	u16 rfseqG_init_gain_rev4[] = { 0x513f, 0x513f, 0x513f, 0x513f };
+-	u16 rfseqG_init_gain_rev5[] = { 0x413f, 0x413f, 0x413f, 0x413f };
+-	u16 rfseqG_init_gain_rev5_elna[] = {
++	const s8 *tia_gainbits;
++	static const s8 lpf_gain_db[] = { 0x00, 0x06, 0x0c, 0x12, 0x12, 0x12 };
++	static const s8 lpf_gainbits[] = { 0x00, 0x01, 0x02, 0x03, 0x03, 0x03 };
++	static const u16 rfseqG_init_gain[] = { 0x613f, 0x613f, 0x613f, 0x613f };
++	static const u16 rfseqG_init_gain_rev4[] = { 0x513f, 0x513f, 0x513f, 0x513f };
++	static const u16 rfseqG_init_gain_rev5[] = { 0x413f, 0x413f, 0x413f, 0x413f };
++	static const u16 rfseqG_init_gain_rev5_elna[] = {
+ 		0x013f, 0x013f, 0x013f, 0x013f };
+-	u16 rfseqG_init_gain_rev6[] = { 0x513f, 0x513f };
+-	u16 rfseqG_init_gain_rev6_224B0[] = { 0x413f, 0x413f };
+-	u16 rfseqG_init_gain_rev6_elna[] = { 0x113f, 0x113f };
+-	u16 rfseqA_init_gain[] = { 0x516f, 0x516f, 0x516f, 0x516f };
+-	u16 rfseqA_init_gain_rev4[] = { 0x614f, 0x614f, 0x614f, 0x614f };
+-	u16 rfseqA_init_gain_rev4_elna[] = {
++	static const u16 rfseqG_init_gain_rev6[] = { 0x513f, 0x513f };
++	static const u16 rfseqG_init_gain_rev6_224B0[] = { 0x413f, 0x413f };
++	static const u16 rfseqG_init_gain_rev6_elna[] = { 0x113f, 0x113f };
++	static const u16 rfseqA_init_gain[] = { 0x516f, 0x516f, 0x516f, 0x516f };
++	static const u16 rfseqA_init_gain_rev4[] = { 0x614f, 0x614f, 0x614f, 0x614f };
++	static const u16 rfseqA_init_gain_rev4_elna[] = {
+ 		0x314f, 0x314f, 0x314f, 0x314f };
+-	u16 rfseqA_init_gain_rev5[] = { 0x714f, 0x714f, 0x714f, 0x714f };
+-	u16 rfseqA_init_gain_rev6[] = { 0x714f, 0x714f };
+-	u16 *rfseq_init_gain;
++	static const u16 rfseqA_init_gain_rev5[] = { 0x714f, 0x714f, 0x714f, 0x714f };
++	static const u16 rfseqA_init_gain_rev6[] = { 0x714f, 0x714f };
++	const u16 *rfseq_init_gain;
+ 	u16 initG_gaincode = 0x627e;
+ 	u16 initG_gaincode_rev4 = 0x527e;
+ 	u16 initG_gaincode_rev5 = 0x427e;
+@@ -15538,10 +15535,10 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
+ 	u16 clip1mdA_gaincode_rev6 = 0x2084;
+ 	u16 clip1md_gaincode = 0;
+ 	u16 clip1loG_gaincode = 0x0074;
+-	u16 clip1loG_gaincode_rev5[] = {
++	static const u16 clip1loG_gaincode_rev5[] = {
+ 		0x0062, 0x0064, 0x006a, 0x106a, 0x106c, 0x1074, 0x107c, 0x207c
+ 	};
+-	u16 clip1loG_gaincode_rev6[] = {
++	static const u16 clip1loG_gaincode_rev6[] = {
+ 		0x106a, 0x106c, 0x1074, 0x107c, 0x007e, 0x107e, 0x207e, 0x307e
+ 	};
+ 	u16 clip1loG_gaincode_rev6_224B0 = 0x1074;
+@@ -16066,7 +16063,7 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
+ 
+ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
+ {
+-	u8 rfseq_rx2tx_events[] = {
++	static const u8 rfseq_rx2tx_events[] = {
+ 		NPHY_RFSEQ_CMD_NOP,
+ 		NPHY_RFSEQ_CMD_RXG_FBW,
+ 		NPHY_RFSEQ_CMD_TR_SWITCH,
+@@ -16076,7 +16073,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
+ 		NPHY_RFSEQ_CMD_EXT_PA
+ 	};
+ 	u8 rfseq_rx2tx_dlys[] = { 8, 6, 6, 2, 4, 60, 1 };
+-	u8 rfseq_tx2rx_events[] = {
++	static const u8 rfseq_tx2rx_events[] = {
+ 		NPHY_RFSEQ_CMD_NOP,
+ 		NPHY_RFSEQ_CMD_EXT_PA,
+ 		NPHY_RFSEQ_CMD_TX_GAIN,
+@@ -16085,8 +16082,8 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
+ 		NPHY_RFSEQ_CMD_RXG_FBW,
+ 		NPHY_RFSEQ_CMD_CLR_HIQ_DIS
+ 	};
+-	u8 rfseq_tx2rx_dlys[] = { 8, 6, 2, 4, 4, 6, 1 };
+-	u8 rfseq_tx2rx_events_rev3[] = {
++	static const u8 rfseq_tx2rx_dlys[] = { 8, 6, 2, 4, 4, 6, 1 };
++	static const u8 rfseq_tx2rx_events_rev3[] = {
+ 		NPHY_REV3_RFSEQ_CMD_EXT_PA,
+ 		NPHY_REV3_RFSEQ_CMD_INT_PA_PU,
+ 		NPHY_REV3_RFSEQ_CMD_TX_GAIN,
+@@ -16096,7 +16093,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
+ 		NPHY_REV3_RFSEQ_CMD_CLR_HIQ_DIS,
+ 		NPHY_REV3_RFSEQ_CMD_END
+ 	};
+-	u8 rfseq_tx2rx_dlys_rev3[] = { 8, 4, 2, 2, 4, 4, 6, 1 };
++	static const u8 rfseq_tx2rx_dlys_rev3[] = { 8, 4, 2, 2, 4, 4, 6, 1 };
+ 	u8 rfseq_rx2tx_events_rev3[] = {
+ 		NPHY_REV3_RFSEQ_CMD_NOP,
+ 		NPHY_REV3_RFSEQ_CMD_RXG_FBW,
+@@ -16110,7 +16107,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
+ 	};
+ 	u8 rfseq_rx2tx_dlys_rev3[] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
+ 
+-	u8 rfseq_rx2tx_events_rev3_ipa[] = {
++	static const u8 rfseq_rx2tx_events_rev3_ipa[] = {
+ 		NPHY_REV3_RFSEQ_CMD_NOP,
+ 		NPHY_REV3_RFSEQ_CMD_RXG_FBW,
+ 		NPHY_REV3_RFSEQ_CMD_TR_SWITCH,
+@@ -16121,15 +16118,15 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
+ 		NPHY_REV3_RFSEQ_CMD_INT_PA_PU,
+ 		NPHY_REV3_RFSEQ_CMD_END
+ 	};
+-	u8 rfseq_rx2tx_dlys_rev3_ipa[] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
+-	u16 rfseq_rx2tx_dacbufpu_rev7[] = { 0x10f, 0x10f };
++	static const u8 rfseq_rx2tx_dlys_rev3_ipa[] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
++	static const u16 rfseq_rx2tx_dacbufpu_rev7[] = { 0x10f, 0x10f };
+ 
+ 	s16 alpha0, alpha1, alpha2;
+ 	s16 beta0, beta1, beta2;
+ 	u32 leg_data_weights, ht_data_weights, nss1_data_weights,
+ 	    stbc_data_weights;
+ 	u8 chan_freq_range = 0;
+-	u16 dac_control = 0x0002;
++	static const u16 dac_control = 0x0002;
+ 	u16 aux_adc_vmid_rev7_core0[] = { 0x8e, 0x96, 0x96, 0x96 };
+ 	u16 aux_adc_vmid_rev7_core1[] = { 0x8f, 0x9f, 0x9f, 0x96 };
+ 	u16 aux_adc_vmid_rev4[] = { 0xa2, 0xb4, 0xb4, 0x89 };
+@@ -16139,8 +16136,8 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
+ 	u16 aux_adc_gain_rev4[] = { 0x02, 0x02, 0x02, 0x00 };
+ 	u16 aux_adc_gain_rev3[] = { 0x02, 0x02, 0x02, 0x00 };
+ 	u16 *aux_adc_gain;
+-	u16 sk_adc_vmid[] = { 0xb4, 0xb4, 0xb4, 0x24 };
+-	u16 sk_adc_gain[] = { 0x02, 0x02, 0x02, 0x02 };
++	static const u16 sk_adc_vmid[] = { 0xb4, 0xb4, 0xb4, 0x24 };
++	static const u16 sk_adc_gain[] = { 0x02, 0x02, 0x02, 0x02 };
+ 	s32 min_nvar_val = 0x18d;
+ 	s32 min_nvar_offset_6mbps = 20;
+ 	u8 pdetrange;
+@@ -16151,9 +16148,9 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
+ 	u16 rfseq_rx2tx_lpf_h_hpc_rev7 = 0x77;
+ 	u16 rfseq_tx2rx_lpf_h_hpc_rev7 = 0x77;
+ 	u16 rfseq_pktgn_lpf_h_hpc_rev7 = 0x77;
+-	u16 rfseq_htpktgn_lpf_hpc_rev7[] = { 0x77, 0x11, 0x11 };
+-	u16 rfseq_pktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
+-	u16 rfseq_cckpktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
++	static const u16 rfseq_htpktgn_lpf_hpc_rev7[] = { 0x77, 0x11, 0x11 };
++	static const u16 rfseq_pktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
++	static const u16 rfseq_cckpktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
+ 	u16 ipalvlshift_3p3_war_en = 0;
+ 	u16 rccal_bcap_val, rccal_scap_val;
+ 	u16 rccal_tx20_11b_bcap = 0;
+@@ -24291,13 +24288,13 @@ static void wlc_phy_update_txcal_ladder_nphy(struct brcms_phy *pi, u16 core)
+ 	u16 bbmult;
+ 	u16 tblentry;
+ 
+-	struct nphy_txiqcal_ladder ladder_lo[] = {
++	static const struct nphy_txiqcal_ladder ladder_lo[] = {
+ 		{3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0},
+ 		{25, 0}, {25, 1}, {25, 2}, {25, 3}, {25, 4}, {25, 5},
+ 		{25, 6}, {25, 7}, {35, 7}, {50, 7}, {71, 7}, {100, 7}
+ 	};
+ 
+-	struct nphy_txiqcal_ladder ladder_iq[] = {
++	static const struct nphy_txiqcal_ladder ladder_iq[] = {
+ 		{3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0},
+ 		{25, 0}, {35, 0}, {50, 0}, {71, 0}, {100, 0}, {100, 1},
+ 		{100, 2}, {100, 3}, {100, 4}, {100, 5}, {100, 6}, {100, 7}
+@@ -25773,67 +25770,67 @@ wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
+ 	u16 cal_gain[2];
+ 	struct nphy_iqcal_params cal_params[2];
+ 	u32 tbl_len;
+-	void *tbl_ptr;
++	const void *tbl_ptr;
+ 	bool ladder_updated[2];
+ 	u8 mphase_cal_lastphase = 0;
+ 	int bcmerror = 0;
+ 	bool phyhang_avoid_state = false;
+ 
+-	u16 tbl_tx_iqlo_cal_loft_ladder_20[] = {
++	static const u16 tbl_tx_iqlo_cal_loft_ladder_20[] = {
+ 		0x0300, 0x0500, 0x0700, 0x0900, 0x0d00, 0x1100, 0x1900, 0x1901,
+ 		0x1902,
+ 		0x1903, 0x1904, 0x1905, 0x1906, 0x1907, 0x2407, 0x3207, 0x4607,
+ 		0x6407
+ 	};
+ 
+-	u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = {
++	static const u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = {
+ 		0x0200, 0x0300, 0x0600, 0x0900, 0x0d00, 0x1100, 0x1900, 0x2400,
+ 		0x3200,
+ 		0x4600, 0x6400, 0x6401, 0x6402, 0x6403, 0x6404, 0x6405, 0x6406,
+ 		0x6407
+ 	};
+ 
+-	u16 tbl_tx_iqlo_cal_loft_ladder_40[] = {
++	static const u16 tbl_tx_iqlo_cal_loft_ladder_40[] = {
+ 		0x0200, 0x0300, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1201,
+ 		0x1202,
+ 		0x1203, 0x1204, 0x1205, 0x1206, 0x1207, 0x1907, 0x2307, 0x3207,
+ 		0x4707
+ 	};
+ 
+-	u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = {
++	static const u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = {
+ 		0x0100, 0x0200, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1900,
+ 		0x2300,
+ 		0x3200, 0x4700, 0x4701, 0x4702, 0x4703, 0x4704, 0x4705, 0x4706,
+ 		0x4707
+ 	};
+ 
+-	u16 tbl_tx_iqlo_cal_startcoefs[] = {
++	static const u16 tbl_tx_iqlo_cal_startcoefs[] = {
+ 		0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 		0x0000
+ 	};
+ 
+-	u16 tbl_tx_iqlo_cal_cmds_fullcal[] = {
++	static const u16 tbl_tx_iqlo_cal_cmds_fullcal[] = {
+ 		0x8123, 0x8264, 0x8086, 0x8245, 0x8056,
+ 		0x9123, 0x9264, 0x9086, 0x9245, 0x9056
+ 	};
+ 
+-	u16 tbl_tx_iqlo_cal_cmds_recal[] = {
++	static const u16 tbl_tx_iqlo_cal_cmds_recal[] = {
+ 		0x8101, 0x8253, 0x8053, 0x8234, 0x8034,
+ 		0x9101, 0x9253, 0x9053, 0x9234, 0x9034
+ 	};
+ 
+-	u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[] = {
++	static const u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[] = {
+ 		0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 		0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 		0x0000
+ 	};
+ 
+-	u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
++	static const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
+ 		0x8434, 0x8334, 0x8084, 0x8267, 0x8056, 0x8234,
+ 		0x9434, 0x9334, 0x9084, 0x9267, 0x9056, 0x9234
+ 	};
+ 
+-	u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = {
++	static const u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = {
+ 		0x8423, 0x8323, 0x8073, 0x8256, 0x8045, 0x8223,
+ 		0x9423, 0x9323, 0x9073, 0x9256, 0x9045, 0x9223
+ 	};
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+index 2bc6bace069c..b4302f41493c 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+@@ -1122,7 +1122,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr)
+ 	}
+ 	if (0 == tmp) {
+ 		read_addr = REG_DBI_RDATA + addr % 4;
+-		ret = rtl_read_byte(rtlpriv, read_addr);
++		ret = rtl_read_word(rtlpriv, read_addr);
+ 	}
+ 	return ret;
+ }
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index df57655779ed..5da006d81900 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -3165,6 +3165,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	    host->can_queue, base_vha->req,
+ 	    base_vha->mgmt_svr_loop_id, host->sg_tablesize);
+ 
++	INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
++
+ 	if (ha->mqenable) {
+ 		bool mq = false;
+ 		bool startit = false;
+@@ -3213,7 +3215,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	 */
+ 	qla2xxx_wake_dpc(base_vha);
+ 
+-	INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
+ 	INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error);
+ 
+ 	if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
+diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
+index 5f3d8f2339e3..4be864dbd41c 100644
+--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
++++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
+@@ -390,8 +390,7 @@ static int bcm2835_audio_open_connection(struct bcm2835_alsa_stream *alsa_stream
+ 			__func__, instance);
+ 		instance->alsa_stream = alsa_stream;
+ 		alsa_stream->instance = instance;
+-		ret = 0; // xxx todo -1;
+-		goto err_free_mem;
++		return 0;
+ 	}
+ 
+ 	/* Initialize and create a VCHI connection */
+@@ -401,16 +400,15 @@ static int bcm2835_audio_open_connection(struct bcm2835_alsa_stream *alsa_stream
+ 			LOG_ERR("%s: failed to initialise VCHI instance (ret=%d)\n",
+ 				__func__, ret);
+ 
+-			ret = -EIO;
+-			goto err_free_mem;
++			return -EIO;
+ 		}
+ 		ret = vchi_connect(NULL, 0, vchi_instance);
+ 		if (ret) {
+ 			LOG_ERR("%s: failed to connect VCHI instance (ret=%d)\n",
+ 				__func__, ret);
+ 
+-			ret = -EIO;
+-			goto err_free_mem;
++			kfree(vchi_instance);
++			return -EIO;
+ 		}
+ 		initted = 1;
+ 	}
+@@ -421,19 +419,16 @@ static int bcm2835_audio_open_connection(struct bcm2835_alsa_stream *alsa_stream
+ 	if (IS_ERR(instance)) {
+ 		LOG_ERR("%s: failed to initialize audio service\n", __func__);
+ 
+-		ret = PTR_ERR(instance);
+-		goto err_free_mem;
++		/* vchi_instance is retained for use the next time. */
++		return PTR_ERR(instance);
+ 	}
+ 
+ 	instance->alsa_stream = alsa_stream;
+ 	alsa_stream->instance = instance;
+ 
+ 	LOG_DBG(" success !\n");
+-	ret = 0;
+-err_free_mem:
+-	kfree(vchi_instance);
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ int bcm2835_audio_open(struct bcm2835_alsa_stream *alsa_stream)
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 5e056064259c..18c923a4c16e 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1832,6 +1832,9 @@ static const struct usb_device_id acm_ids[] = {
+ 	{ USB_DEVICE(0xfff0, 0x0100), /* DATECS FP-2000 */
+ 	.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ 	},
++	{ USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
++	.driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
++	},
+ 
+ 	{ USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
+ 	.driver_info = CLEAR_HALT_CONDITIONS,
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index 68b54bd88d1e..883549ee946c 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -960,10 +960,12 @@ int usb_get_bos_descriptor(struct usb_device *dev)
+ 	for (i = 0; i < num; i++) {
+ 		buffer += length;
+ 		cap = (struct usb_dev_cap_header *)buffer;
+-		length = cap->bLength;
+ 
+-		if (total_len < length)
++		if (total_len < sizeof(*cap) || total_len < cap->bLength) {
++			dev->bos->desc->bNumDeviceCaps = i;
+ 			break;
++		}
++		length = cap->bLength;
+ 		total_len -= length;
+ 
+ 		if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index 1d4dfdeb61c1..066b58cb6c98 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -1576,11 +1576,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
+ 			totlen += isopkt[u].length;
+ 		}
+ 		u *= sizeof(struct usb_iso_packet_descriptor);
+-		if (totlen <= uurb->buffer_length)
+-			uurb->buffer_length = totlen;
+-		else
+-			WARN_ONCE(1, "uurb->buffer_length is too short %d vs %d",
+-				  totlen, uurb->buffer_length);
++		uurb->buffer_length = totlen;
+ 		break;
+ 
+ 	default:
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 78c2aca5b0fc..3f44341259d8 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2710,13 +2710,16 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
+ 	if (!(portstatus & USB_PORT_STAT_CONNECTION))
+ 		return -ENOTCONN;
+ 
+-	/* bomb out completely if the connection bounced.  A USB 3.0
+-	 * connection may bounce if multiple warm resets were issued,
++	/* Retry if connect change is set but status is still connected.
++	 * A USB 3.0 connection may bounce if multiple warm resets were issued,
+ 	 * but the device may have successfully re-connected. Ignore it.
+ 	 */
+ 	if (!hub_is_superspeed(hub->hdev) &&
+-			(portchange & USB_PORT_STAT_C_CONNECTION))
+-		return -ENOTCONN;
++	    (portchange & USB_PORT_STAT_C_CONNECTION)) {
++		usb_clear_port_feature(hub->hdev, port1,
++				       USB_PORT_FEAT_C_CONNECTION);
++		return -EAGAIN;
++	}
+ 
+ 	if (!(portstatus & USB_PORT_STAT_ENABLE))
+ 		return -EBUSY;
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 82806e311202..a6aaf2f193a4 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -221,6 +221,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* Corsair Strafe RGB */
+ 	{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
+ 
++	/* MIDI keyboard WORLDE MINI */
++	{ USB_DEVICE(0x1c75, 0x0204), .driver_info =
++			USB_QUIRK_CONFIG_INTF_STRINGS },
++
+ 	/* Acer C120 LED Projector */
+ 	{ USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
+ 
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 950dee33bfcc..ad4c697cf30e 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -420,14 +420,25 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
+ 						     GFP_NOWAIT);
+ 			if (!command) {
+ 				spin_unlock_irqrestore(&xhci->lock, flags);
+-				xhci_free_command(xhci, cmd);
+-				return -ENOMEM;
++				ret = -ENOMEM;
++				goto cmd_cleanup;
++			}
++
++			ret = xhci_queue_stop_endpoint(xhci, command, slot_id,
++						       i, suspend);
++			if (ret) {
++				spin_unlock_irqrestore(&xhci->lock, flags);
++				xhci_free_command(xhci, command);
++				goto cmd_cleanup;
+ 			}
+-			xhci_queue_stop_endpoint(xhci, command, slot_id, i,
+-						 suspend);
+ 		}
+ 	}
+-	xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
++	ret = xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
++	if (ret) {
++		spin_unlock_irqrestore(&xhci->lock, flags);
++		goto cmd_cleanup;
++	}
++
+ 	xhci_ring_cmd_db(xhci);
+ 	spin_unlock_irqrestore(&xhci->lock, flags);
+ 
+@@ -439,6 +450,8 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
+ 		xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
+ 		ret = -ETIME;
+ 	}
++
++cmd_cleanup:
+ 	xhci_free_command(xhci, cmd);
+ 	return ret;
+ }
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index cc368ad2b51e..b0f42e1b91ee 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1309,6 +1309,7 @@ static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
+ void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
+ {
+ 	struct xhci_command *cur_cmd, *tmp_cmd;
++	xhci->current_cmd = NULL;
+ 	list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
+ 		xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED);
+ }
+@@ -2577,15 +2578,21 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ 				(struct xhci_generic_trb *) ep_trb);
+ 
+ 		/*
+-		 * No-op TRB should not trigger interrupts.
+-		 * If ep_trb is a no-op TRB, it means the
+-		 * corresponding TD has been cancelled. Just ignore
+-		 * the TD.
++		 * No-op TRB could trigger interrupts in a case where
++		 * a URB was killed and a STALL_ERROR happens right
++		 * after the endpoint ring stopped. Reset the halted
++		 * endpoint. Otherwise, the endpoint remains stalled
++		 * indefinitely.
+ 		 */
+ 		if (trb_is_noop(ep_trb)) {
+-			xhci_dbg(xhci,
+-				 "ep_trb is a no-op TRB. Skip it for slot %u ep %u\n",
+-				 slot_id, ep_index);
++			if (trb_comp_code == COMP_STALL_ERROR ||
++			    xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
++							      trb_comp_code))
++				xhci_cleanup_halted_endpoint(xhci, slot_id,
++							     ep_index,
++							     ep_ring->stream_id,
++							     td, ep_trb,
++							     EP_HARD_RESET);
+ 			goto cleanup;
+ 		}
+ 
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index ee198ea47f49..51535ba2bcd4 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -4805,7 +4805,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
+ 		 */
+ 		hcd->has_tt = 1;
+ 	} else {
+-		if (xhci->sbrn == 0x31) {
++		/* Some 3.1 hosts return sbrn 0x30, can't rely on sbrn alone */
++		if (xhci->sbrn == 0x31 || xhci->usb3_rhub.min_rev >= 1) {
+ 			xhci_info(xhci, "Host supports USB 3.1 Enhanced SuperSpeed\n");
+ 			hcd->speed = HCD_USB31;
+ 			hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index b67692857daf..458957e97fee 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -906,7 +906,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
+ 	 */
+ 	if (int_usb & MUSB_INTR_RESET) {
+ 		handled = IRQ_HANDLED;
+-		if (devctl & MUSB_DEVCTL_HM) {
++		if (is_host_active(musb)) {
+ 			/*
+ 			 * When BABBLE happens what we can depends on which
+ 			 * platform MUSB is running, because some platforms
+@@ -916,9 +916,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
+ 			 * drop the session.
+ 			 */
+ 			dev_err(musb->controller, "Babble\n");
+-
+-			if (is_host_active(musb))
+-				musb_recover_from_babble(musb);
++			musb_recover_from_babble(musb);
+ 		} else {
+ 			musb_dbg(musb, "BUS RESET as %s",
+ 				usb_otg_state_string(musb->xceiv->otg->state));
+@@ -1861,22 +1859,22 @@ static void musb_pm_runtime_check_session(struct musb *musb)
+ 		MUSB_DEVCTL_HR;
+ 	switch (devctl & ~s) {
+ 	case MUSB_QUIRK_B_INVALID_VBUS_91:
+-		if (musb->quirk_retries--) {
++		if (musb->quirk_retries && !musb->flush_irq_work) {
+ 			musb_dbg(musb,
+ 				 "Poll devctl on invalid vbus, assume no session");
+ 			schedule_delayed_work(&musb->irq_work,
+ 					      msecs_to_jiffies(1000));
+-
++			musb->quirk_retries--;
+ 			return;
+ 		}
+ 		/* fall through */
+ 	case MUSB_QUIRK_A_DISCONNECT_19:
+-		if (musb->quirk_retries--) {
++		if (musb->quirk_retries && !musb->flush_irq_work) {
+ 			musb_dbg(musb,
+ 				 "Poll devctl on possible host mode disconnect");
+ 			schedule_delayed_work(&musb->irq_work,
+ 					      msecs_to_jiffies(1000));
+-
++			musb->quirk_retries--;
+ 			return;
+ 		}
+ 		if (!musb->session)
+@@ -2681,8 +2679,15 @@ static int musb_suspend(struct device *dev)
+ 
+ 	musb_platform_disable(musb);
+ 	musb_disable_interrupts(musb);
++
++	musb->flush_irq_work = true;
++	while (flush_delayed_work(&musb->irq_work))
++		;
++	musb->flush_irq_work = false;
++
+ 	if (!(musb->io.quirks & MUSB_PRESERVE_SESSION))
+ 		musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
++
+ 	WARN_ON(!list_empty(&musb->pending_list));
+ 
+ 	spin_lock_irqsave(&musb->lock, flags);
+diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
+index 9f22c5b8ce37..1830a571d025 100644
+--- a/drivers/usb/musb/musb_core.h
++++ b/drivers/usb/musb/musb_core.h
+@@ -428,6 +428,8 @@ struct musb {
+ 	unsigned		test_mode:1;
+ 	unsigned		softconnect:1;
+ 
++	unsigned		flush_irq_work:1;
++
+ 	u8			address;
+ 	u8			test_mode_nr;
+ 	u16			ackpend;		/* ep0 */
+diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
+index ba255280a624..1ec0a4947b6b 100644
+--- a/drivers/usb/musb/musb_cppi41.c
++++ b/drivers/usb/musb/musb_cppi41.c
+@@ -26,15 +26,28 @@
+ 
+ #define MUSB_DMA_NUM_CHANNELS 15
+ 
++#define DA8XX_USB_MODE		0x10
++#define DA8XX_USB_AUTOREQ	0x14
++#define DA8XX_USB_TEARDOWN	0x1c
++
++#define DA8XX_DMA_NUM_CHANNELS 4
++
+ struct cppi41_dma_controller {
+ 	struct dma_controller controller;
+-	struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS];
+-	struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS];
++	struct cppi41_dma_channel *rx_channel;
++	struct cppi41_dma_channel *tx_channel;
+ 	struct hrtimer early_tx;
+ 	struct list_head early_tx_list;
+ 	u32 rx_mode;
+ 	u32 tx_mode;
+ 	u32 auto_req;
++
++	u32 tdown_reg;
++	u32 autoreq_reg;
++
++	void (*set_dma_mode)(struct cppi41_dma_channel *cppi41_channel,
++			     unsigned int mode);
++	u8 num_channels;
+ };
+ 
+ static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
+@@ -349,6 +362,32 @@ static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
+ 	}
+ }
+ 
++static void da8xx_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
++		unsigned int mode)
++{
++	struct cppi41_dma_controller *controller = cppi41_channel->controller;
++	struct musb *musb = controller->controller.musb;
++	unsigned int shift;
++	u32 port;
++	u32 new_mode;
++	u32 old_mode;
++
++	old_mode = controller->tx_mode;
++	port = cppi41_channel->port_num;
++
++	shift = (port - 1) * 4;
++	if (!cppi41_channel->is_tx)
++		shift += 16;
++	new_mode = old_mode & ~(3 << shift);
++	new_mode |= mode << shift;
++
++	if (new_mode == old_mode)
++		return;
++	controller->tx_mode = new_mode;
++	musb_writel(musb->ctrl_base, DA8XX_USB_MODE, new_mode);
++}
++
++
+ static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
+ 		unsigned mode)
+ {
+@@ -364,8 +403,8 @@ static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
+ 	if (new_mode == old_mode)
+ 		return;
+ 	controller->auto_req = new_mode;
+-	musb_writel(controller->controller.musb->ctrl_base, USB_CTRL_AUTOREQ,
+-		    new_mode);
++	musb_writel(controller->controller.musb->ctrl_base,
++		    controller->autoreq_reg, new_mode);
+ }
+ 
+ static bool cppi41_configure_channel(struct dma_channel *channel,
+@@ -373,6 +412,7 @@ static bool cppi41_configure_channel(struct dma_channel *channel,
+ 				dma_addr_t dma_addr, u32 len)
+ {
+ 	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
++	struct cppi41_dma_controller *controller = cppi41_channel->controller;
+ 	struct dma_chan *dc = cppi41_channel->dc;
+ 	struct dma_async_tx_descriptor *dma_desc;
+ 	enum dma_transfer_direction direction;
+@@ -398,7 +438,7 @@ static bool cppi41_configure_channel(struct dma_channel *channel,
+ 			musb_writel(musb->ctrl_base,
+ 				RNDIS_REG(cppi41_channel->port_num), len);
+ 			/* gen rndis */
+-			cppi41_set_dma_mode(cppi41_channel,
++			controller->set_dma_mode(cppi41_channel,
+ 					EP_MODE_DMA_GEN_RNDIS);
+ 
+ 			/* auto req */
+@@ -407,14 +447,15 @@ static bool cppi41_configure_channel(struct dma_channel *channel,
+ 		} else {
+ 			musb_writel(musb->ctrl_base,
+ 					RNDIS_REG(cppi41_channel->port_num), 0);
+-			cppi41_set_dma_mode(cppi41_channel,
++			controller->set_dma_mode(cppi41_channel,
+ 					EP_MODE_DMA_TRANSPARENT);
+ 			cppi41_set_autoreq_mode(cppi41_channel,
+ 					EP_MODE_AUTOREQ_NONE);
+ 		}
+ 	} else {
+ 		/* fallback mode */
+-		cppi41_set_dma_mode(cppi41_channel, EP_MODE_DMA_TRANSPARENT);
++		controller->set_dma_mode(cppi41_channel,
++				EP_MODE_DMA_TRANSPARENT);
+ 		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
+ 		len = min_t(u32, packet_sz, len);
+ 	}
+@@ -445,7 +486,7 @@ static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
+ 	struct cppi41_dma_channel *cppi41_channel = NULL;
+ 	u8 ch_num = hw_ep->epnum - 1;
+ 
+-	if (ch_num >= MUSB_DMA_NUM_CHANNELS)
++	if (ch_num >= controller->num_channels)
+ 		return NULL;
+ 
+ 	if (is_tx)
+@@ -581,12 +622,13 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel)
+ 
+ 	do {
+ 		if (is_tx)
+-			musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
++			musb_writel(musb->ctrl_base, controller->tdown_reg,
++				    tdbit);
+ 		ret = dmaengine_terminate_all(cppi41_channel->dc);
+ 	} while (ret == -EAGAIN);
+ 
+ 	if (is_tx) {
+-		musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
++		musb_writel(musb->ctrl_base, controller->tdown_reg, tdbit);
+ 
+ 		csr = musb_readw(epio, MUSB_TXCSR);
+ 		if (csr & MUSB_TXCSR_TXPKTRDY) {
+@@ -604,7 +646,7 @@ static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl)
+ 	struct dma_chan *dc;
+ 	int i;
+ 
+-	for (i = 0; i < MUSB_DMA_NUM_CHANNELS; i++) {
++	for (i = 0; i < ctrl->num_channels; i++) {
+ 		dc = ctrl->tx_channel[i].dc;
+ 		if (dc)
+ 			dma_release_channel(dc);
+@@ -656,7 +698,7 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
+ 			goto err;
+ 
+ 		ret = -EINVAL;
+-		if (port > MUSB_DMA_NUM_CHANNELS || !port)
++		if (port > controller->num_channels || !port)
+ 			goto err;
+ 		if (is_tx)
+ 			cppi41_channel = &controller->tx_channel[port - 1];
+@@ -697,6 +739,8 @@ void cppi41_dma_controller_destroy(struct dma_controller *c)
+ 
+ 	hrtimer_cancel(&controller->early_tx);
+ 	cppi41_dma_controller_stop(controller);
++	kfree(controller->rx_channel);
++	kfree(controller->tx_channel);
+ 	kfree(controller);
+ }
+ EXPORT_SYMBOL_GPL(cppi41_dma_controller_destroy);
+@@ -705,6 +749,7 @@ struct dma_controller *
+ cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
+ {
+ 	struct cppi41_dma_controller *controller;
++	int channel_size;
+ 	int ret = 0;
+ 
+ 	if (!musb->controller->parent->of_node) {
+@@ -727,12 +772,37 @@ cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
+ 	controller->controller.is_compatible = cppi41_is_compatible;
+ 	controller->controller.musb = musb;
+ 
++	if (musb->io.quirks & MUSB_DA8XX) {
++		controller->tdown_reg = DA8XX_USB_TEARDOWN;
++		controller->autoreq_reg = DA8XX_USB_AUTOREQ;
++		controller->set_dma_mode = da8xx_set_dma_mode;
++		controller->num_channels = DA8XX_DMA_NUM_CHANNELS;
++	} else {
++		controller->tdown_reg = USB_TDOWN;
++		controller->autoreq_reg = USB_CTRL_AUTOREQ;
++		controller->set_dma_mode = cppi41_set_dma_mode;
++		controller->num_channels = MUSB_DMA_NUM_CHANNELS;
++	}
++
++	channel_size = controller->num_channels *
++			sizeof(struct cppi41_dma_channel);
++	controller->rx_channel = kzalloc(channel_size, GFP_KERNEL);
++	if (!controller->rx_channel)
++		goto rx_channel_alloc_fail;
++	controller->tx_channel = kzalloc(channel_size, GFP_KERNEL);
++	if (!controller->tx_channel)
++		goto tx_channel_alloc_fail;
++
+ 	ret = cppi41_dma_controller_start(controller);
+ 	if (ret)
+ 		goto plat_get_fail;
+ 	return &controller->controller;
+ 
+ plat_get_fail:
++	kfree(controller->tx_channel);
++tx_channel_alloc_fail:
++	kfree(controller->rx_channel);
++rx_channel_alloc_fail:
+ 	kfree(controller);
+ kzalloc_fail:
+ 	if (ret == -EPROBE_DEFER)
+diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
+index c9a09b5bb6e5..dc353e24d53c 100644
+--- a/drivers/usb/musb/sunxi.c
++++ b/drivers/usb/musb/sunxi.c
+@@ -297,6 +297,8 @@ static int sunxi_musb_exit(struct musb *musb)
+ 	if (test_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags))
+ 		sunxi_sram_release(musb->controller->parent);
+ 
++	devm_usb_put_phy(glue->dev, glue->xceiv);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/usb/serial/metro-usb.c b/drivers/usb/serial/metro-usb.c
+index cc84da8dbb84..14511d6a7d44 100644
+--- a/drivers/usb/serial/metro-usb.c
++++ b/drivers/usb/serial/metro-usb.c
+@@ -45,6 +45,7 @@ struct metrousb_private {
+ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_BI) },
+ 	{ USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_UNI) },
++	{ USB_DEVICE_INTERFACE_CLASS(0x0c2e, 0x0730, 0xff) },	/* MS7820 */
+ 	{ }, /* Terminating entry. */
+ };
+ MODULE_DEVICE_TABLE(usb, id_table);
+diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
+index 018c588c7ac3..8e704d12a1cf 100644
+--- a/fs/crypto/keyinfo.c
++++ b/fs/crypto/keyinfo.c
+@@ -109,6 +109,11 @@ static int validate_user_key(struct fscrypt_info *crypt_info,
+ 		goto out;
+ 	}
+ 	ukp = user_key_payload_locked(keyring_key);
++	if (!ukp) {
++		/* key was revoked before we acquired its semaphore */
++		res = -EKEYREVOKED;
++		goto out;
++	}
+ 	if (ukp->datalen != sizeof(struct fscrypt_key)) {
+ 		res = -EINVAL;
+ 		goto out;
+diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
+index 9c351bf757b2..3fbc0ff79699 100644
+--- a/fs/ecryptfs/ecryptfs_kernel.h
++++ b/fs/ecryptfs/ecryptfs_kernel.h
+@@ -84,11 +84,16 @@ struct ecryptfs_page_crypt_context {
+ static inline struct ecryptfs_auth_tok *
+ ecryptfs_get_encrypted_key_payload_data(struct key *key)
+ {
+-	if (key->type == &key_type_encrypted)
+-		return (struct ecryptfs_auth_tok *)
+-			(&((struct encrypted_key_payload *)key->payload.data[0])->payload_data);
+-	else
++	struct encrypted_key_payload *payload;
++
++	if (key->type != &key_type_encrypted)
+ 		return NULL;
++
++	payload = key->payload.data[0];
++	if (!payload)
++		return ERR_PTR(-EKEYREVOKED);
++
++	return (struct ecryptfs_auth_tok *)payload->payload_data;
+ }
+ 
+ static inline struct key *ecryptfs_get_encrypted_key(char *sig)
+@@ -114,12 +119,17 @@ static inline struct ecryptfs_auth_tok *
+ ecryptfs_get_key_payload_data(struct key *key)
+ {
+ 	struct ecryptfs_auth_tok *auth_tok;
++	struct user_key_payload *ukp;
+ 
+ 	auth_tok = ecryptfs_get_encrypted_key_payload_data(key);
+-	if (!auth_tok)
+-		return (struct ecryptfs_auth_tok *)user_key_payload_locked(key)->data;
+-	else
++	if (auth_tok)
+ 		return auth_tok;
++
++	ukp = user_key_payload_locked(key);
++	if (!ukp)
++		return ERR_PTR(-EKEYREVOKED);
++
++	return (struct ecryptfs_auth_tok *)ukp->data;
+ }
+ 
+ #define ECRYPTFS_MAX_KEYSET_SIZE 1024
+diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
+index 3cf1546dca82..fa218cd64f74 100644
+--- a/fs/ecryptfs/keystore.c
++++ b/fs/ecryptfs/keystore.c
+@@ -459,7 +459,8 @@ static int ecryptfs_verify_version(u16 version)
+  * @auth_tok_key: key containing the authentication token
+  * @auth_tok: authentication token
+  *
+- * Returns zero on valid auth tok; -EINVAL otherwise
++ * Returns zero on valid auth tok; -EINVAL if the payload is invalid; or
++ * -EKEYREVOKED if the key was revoked before we acquired its semaphore.
+  */
+ static int
+ ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key,
+@@ -468,6 +469,12 @@ ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key,
+ 	int rc = 0;
+ 
+ 	(*auth_tok) = ecryptfs_get_key_payload_data(auth_tok_key);
++	if (IS_ERR(*auth_tok)) {
++		rc = PTR_ERR(*auth_tok);
++		*auth_tok = NULL;
++		goto out;
++	}
++
+ 	if (ecryptfs_verify_version((*auth_tok)->version)) {
+ 		printk(KERN_ERR "Data structure version mismatch. Userspace "
+ 		       "tools must match eCryptfs kernel module with major "
+diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
+index 67f940892ef8..5eb2e24ce790 100644
+--- a/fs/fscache/object-list.c
++++ b/fs/fscache/object-list.c
+@@ -330,6 +330,13 @@ static void fscache_objlist_config(struct fscache_objlist_data *data)
+ 	rcu_read_lock();
+ 
+ 	confkey = user_key_payload_rcu(key);
++	if (!confkey) {
++		/* key was revoked */
++		rcu_read_unlock();
++		key_put(key);
++		goto no_config;
++	}
++
+ 	buf = confkey->data;
+ 
+ 	for (len = confkey->datalen - 1; len >= 0; len--) {
+diff --git a/fs/iomap.c b/fs/iomap.c
+index 59cc98ad7577..b8a91064abc6 100644
+--- a/fs/iomap.c
++++ b/fs/iomap.c
+@@ -993,6 +993,13 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
+ 	WARN_ON_ONCE(ret);
+ 	ret = 0;
+ 
++	if (iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
++	    !inode->i_sb->s_dio_done_wq) {
++		ret = sb_init_dio_done_wq(inode->i_sb);
++		if (ret < 0)
++			goto out_free_dio;
++	}
++
+ 	inode_dio_begin(inode);
+ 
+ 	blk_start_plug(&plug);
+@@ -1015,13 +1022,6 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
+ 	if (ret < 0)
+ 		iomap_dio_set_error(dio, ret);
+ 
+-	if (ret >= 0 && iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
+-			!inode->i_sb->s_dio_done_wq) {
+-		ret = sb_init_dio_done_wq(inode->i_sb);
+-		if (ret < 0)
+-			iomap_dio_set_error(dio, ret);
+-	}
+-
+ 	if (!atomic_dec_and_test(&dio->ref)) {
+ 		if (!is_sync_kiocb(iocb))
+ 			return -EIOCBQUEUED;
+diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c
+index b008ff3250eb..df3e600835e8 100644
+--- a/fs/xfs/libxfs/xfs_ag_resv.c
++++ b/fs/xfs/libxfs/xfs_ag_resv.c
+@@ -156,7 +156,8 @@ __xfs_ag_resv_free(
+ 	trace_xfs_ag_resv_free(pag, type, 0);
+ 
+ 	resv = xfs_perag_resv(pag, type);
+-	pag->pag_mount->m_ag_max_usable += resv->ar_asked;
++	if (pag->pag_agno == 0)
++		pag->pag_mount->m_ag_max_usable += resv->ar_asked;
+ 	/*
+ 	 * AGFL blocks are always considered "free", so whatever
+ 	 * was reserved at mount time must be given back at umount.
+@@ -216,7 +217,14 @@ __xfs_ag_resv_init(
+ 		return error;
+ 	}
+ 
+-	mp->m_ag_max_usable -= ask;
++	/*
++	 * Reduce the maximum per-AG allocation length by however much we're
++	 * trying to reserve for an AG.  Since this is a filesystem-wide
++	 * counter, we only make the adjustment for AG 0.  This assumes that
++	 * there aren't any AGs hungrier for per-AG reservation than AG 0.
++	 */
++	if (pag->pag_agno == 0)
++		mp->m_ag_max_usable -= ask;
+ 
+ 	resv = xfs_perag_resv(pag, type);
+ 	resv->ar_asked = ask;
+diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
+index 744dcaec34cc..f965ce832bc0 100644
+--- a/fs/xfs/libxfs/xfs_alloc.c
++++ b/fs/xfs/libxfs/xfs_alloc.c
+@@ -1584,6 +1584,10 @@ xfs_alloc_ag_vextent_small(
+ 
+ 				bp = xfs_btree_get_bufs(args->mp, args->tp,
+ 					args->agno, fbno, 0);
++				if (!bp) {
++					error = -EFSCORRUPTED;
++					goto error0;
++				}
+ 				xfs_trans_binval(args->tp, bp);
+ 			}
+ 			args->len = 1;
+@@ -2141,6 +2145,10 @@ xfs_alloc_fix_freelist(
+ 		if (error)
+ 			goto out_agbp_relse;
+ 		bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
++		if (!bp) {
++			error = -EFSCORRUPTED;
++			goto out_agbp_relse;
++		}
+ 		xfs_trans_binval(tp, bp);
+ 	}
+ 
+diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
+index 6f2a5baded76..5c6eb19664f2 100644
+--- a/fs/xfs/libxfs/xfs_bmap.c
++++ b/fs/xfs/libxfs/xfs_bmap.c
+@@ -3860,6 +3860,17 @@ xfs_trim_extent(
+ 	}
+ }
+ 
++/* trim extent to within eof */
++void
++xfs_trim_extent_eof(
++	struct xfs_bmbt_irec	*irec,
++	struct xfs_inode	*ip)
++
++{
++	xfs_trim_extent(irec, 0, XFS_B_TO_FSB(ip->i_mount,
++					      i_size_read(VFS_I(ip))));
++}
++
+ /*
+  * Trim the returned map to the required bounds
+  */
+diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
+index 851982a5dfbc..502e0d8fb4ff 100644
+--- a/fs/xfs/libxfs/xfs_bmap.h
++++ b/fs/xfs/libxfs/xfs_bmap.h
+@@ -208,6 +208,7 @@ void	xfs_bmap_trace_exlist(struct xfs_inode *ip, xfs_extnum_t cnt,
+ 
+ void	xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
+ 		xfs_filblks_t len);
++void	xfs_trim_extent_eof(struct xfs_bmbt_irec *, struct xfs_inode *);
+ int	xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
+ void	xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
+ void	xfs_bmap_add_free(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
+diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
+index 8372e9bcd7b6..71de185735e0 100644
+--- a/fs/xfs/libxfs/xfs_log_format.h
++++ b/fs/xfs/libxfs/xfs_log_format.h
+@@ -270,6 +270,7 @@ typedef struct xfs_inode_log_format {
+ 	uint32_t		ilf_fields;	/* flags for fields logged */
+ 	uint16_t		ilf_asize;	/* size of attr d/ext/root */
+ 	uint16_t		ilf_dsize;	/* size of data/ext/root */
++	uint32_t		ilf_pad;	/* pad for 64 bit boundary */
+ 	uint64_t		ilf_ino;	/* inode number */
+ 	union {
+ 		uint32_t	ilfu_rdev;	/* rdev value for dev inode*/
+@@ -280,29 +281,17 @@ typedef struct xfs_inode_log_format {
+ 	int32_t			ilf_boffset;	/* off of inode in buffer */
+ } xfs_inode_log_format_t;
+ 
+-typedef struct xfs_inode_log_format_32 {
+-	uint16_t		ilf_type;	/* inode log item type */
+-	uint16_t		ilf_size;	/* size of this item */
+-	uint32_t		ilf_fields;	/* flags for fields logged */
+-	uint16_t		ilf_asize;	/* size of attr d/ext/root */
+-	uint16_t		ilf_dsize;	/* size of data/ext/root */
+-	uint64_t		ilf_ino;	/* inode number */
+-	union {
+-		uint32_t	ilfu_rdev;	/* rdev value for dev inode*/
+-		uuid_t		ilfu_uuid;	/* mount point value */
+-	} ilf_u;
+-	int64_t			ilf_blkno;	/* blkno of inode buffer */
+-	int32_t			ilf_len;	/* len of inode buffer */
+-	int32_t			ilf_boffset;	/* off of inode in buffer */
+-} __attribute__((packed)) xfs_inode_log_format_32_t;
+-
+-typedef struct xfs_inode_log_format_64 {
++/*
++ * Old 32 bit systems will log in this format without the 64 bit
++ * alignment padding. Recovery will detect this and convert it to the
++ * correct format.
++ */
++struct xfs_inode_log_format_32 {
+ 	uint16_t		ilf_type;	/* inode log item type */
+ 	uint16_t		ilf_size;	/* size of this item */
+ 	uint32_t		ilf_fields;	/* flags for fields logged */
+ 	uint16_t		ilf_asize;	/* size of attr d/ext/root */
+ 	uint16_t		ilf_dsize;	/* size of data/ext/root */
+-	uint32_t		ilf_pad;	/* pad for 64 bit boundary */
+ 	uint64_t		ilf_ino;	/* inode number */
+ 	union {
+ 		uint32_t	ilfu_rdev;	/* rdev value for dev inode*/
+@@ -311,7 +300,7 @@ typedef struct xfs_inode_log_format_64 {
+ 	int64_t			ilf_blkno;	/* blkno of inode buffer */
+ 	int32_t			ilf_len;	/* len of inode buffer */
+ 	int32_t			ilf_boffset;	/* off of inode in buffer */
+-} xfs_inode_log_format_64_t;
++} __attribute__((packed));
+ 
+ 
+ /*
+diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
+index 7034e17535de..3354140de07e 100644
+--- a/fs/xfs/xfs_acl.c
++++ b/fs/xfs/xfs_acl.c
+@@ -247,6 +247,8 @@ xfs_set_mode(struct inode *inode, umode_t mode)
+ int
+ xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+ {
++	umode_t mode;
++	bool set_mode = false;
+ 	int error = 0;
+ 
+ 	if (!acl)
+@@ -257,16 +259,24 @@ xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+ 		return error;
+ 
+ 	if (type == ACL_TYPE_ACCESS) {
+-		umode_t mode;
+-
+ 		error = posix_acl_update_mode(inode, &mode, &acl);
+ 		if (error)
+ 			return error;
+-		error = xfs_set_mode(inode, mode);
+-		if (error)
+-			return error;
++		set_mode = true;
+ 	}
+ 
+  set_acl:
+-	return __xfs_set_acl(inode, acl, type);
++	error =  __xfs_set_acl(inode, acl, type);
++	if (error)
++		return error;
++
++	/*
++	 * We set the mode after successfully updating the ACL xattr because the
++	 * xattr update can fail at ENOSPC and we don't want to change the mode
++	 * if the ACL update hasn't been applied.
++	 */
++	if (set_mode)
++		error = xfs_set_mode(inode, mode);
++
++	return error;
+ }
+diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
+index f9efd67f6fa1..41b767ecfe50 100644
+--- a/fs/xfs/xfs_aops.c
++++ b/fs/xfs/xfs_aops.c
+@@ -330,7 +330,8 @@ xfs_end_io(
+ 		error = xfs_reflink_end_cow(ip, offset, size);
+ 		break;
+ 	case XFS_IO_UNWRITTEN:
+-		error = xfs_iomap_write_unwritten(ip, offset, size);
++		/* writeback should never update isize */
++		error = xfs_iomap_write_unwritten(ip, offset, size, false);
+ 		break;
+ 	default:
+ 		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
+@@ -432,6 +433,19 @@ xfs_imap_valid(
+ {
+ 	offset >>= inode->i_blkbits;
+ 
++	/*
++	 * We have to make sure the cached mapping is within EOF to protect
++	 * against eofblocks trimming on file release leaving us with a stale
++	 * mapping. Otherwise, a page for a subsequent file extending buffered
++	 * write could get picked up by this writeback cycle and written to the
++	 * wrong blocks.
++	 *
++	 * Note that what we really want here is a generic mapping invalidation
++	 * mechanism to protect us from arbitrary extent modifying contexts, not
++	 * just eofblocks.
++	 */
++	xfs_trim_extent_eof(imap, XFS_I(inode));
++
+ 	return offset >= imap->br_startoff &&
+ 		offset < imap->br_startoff + imap->br_blockcount;
+ }
+@@ -721,6 +735,14 @@ xfs_vm_invalidatepage(
+ {
+ 	trace_xfs_invalidatepage(page->mapping->host, page, offset,
+ 				 length);
++
++	/*
++	 * If we are invalidating the entire page, clear the dirty state from it
++	 * so that we can check for attempts to release dirty cached pages in
++	 * xfs_vm_releasepage().
++	 */
++	if (offset == 0 && length >= PAGE_SIZE)
++		cancel_dirty_page(page);
+ 	block_invalidatepage(page, offset, length);
+ }
+ 
+@@ -1176,25 +1198,27 @@ xfs_vm_releasepage(
+ 	 * mm accommodates an old ext3 case where clean pages might not have had
+ 	 * the dirty bit cleared. Thus, it can send actual dirty pages to
+ 	 * ->releasepage() via shrink_active_list(). Conversely,
+-	 * block_invalidatepage() can send pages that are still marked dirty
+-	 * but otherwise have invalidated buffers.
++	 * block_invalidatepage() can send pages that are still marked dirty but
++	 * otherwise have invalidated buffers.
+ 	 *
+ 	 * We want to release the latter to avoid unnecessary buildup of the
+-	 * LRU, skip the former and warn if we've left any lingering
+-	 * delalloc/unwritten buffers on clean pages. Skip pages with delalloc
+-	 * or unwritten buffers and warn if the page is not dirty. Otherwise
+-	 * try to release the buffers.
++	 * LRU, so xfs_vm_invalidatepage() clears the page dirty flag on pages
++	 * that are entirely invalidated and need to be released.  Hence the
++	 * only time we should get dirty pages here is through
++	 * shrink_active_list() and so we can simply skip those now.
++	 *
++	 * warn if we've left any lingering delalloc/unwritten buffers on clean
++	 * or invalidated pages we are about to release.
+ 	 */
++	if (PageDirty(page))
++		return 0;
++
+ 	xfs_count_page_state(page, &delalloc, &unwritten);
+ 
+-	if (delalloc) {
+-		WARN_ON_ONCE(!PageDirty(page));
++	if (WARN_ON_ONCE(delalloc))
+ 		return 0;
+-	}
+-	if (unwritten) {
+-		WARN_ON_ONCE(!PageDirty(page));
++	if (WARN_ON_ONCE(unwritten))
+ 		return 0;
+-	}
+ 
+ 	return try_to_free_buffers(page);
+ }
+diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
+index be0b79d8900f..c6643004e583 100644
+--- a/fs/xfs/xfs_attr_inactive.c
++++ b/fs/xfs/xfs_attr_inactive.c
+@@ -302,6 +302,8 @@ xfs_attr3_node_inactive(
+ 						 &bp, XFS_ATTR_FORK);
+ 			if (error)
+ 				return error;
++			node = bp->b_addr;
++			btree = dp->d_ops->node_tree_p(node);
+ 			child_fsb = be32_to_cpu(btree[i + 1].before);
+ 			xfs_trans_brelse(*trans, bp);
+ 		}
+diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
+index 3e9b7a4fb8fd..799c69a72f2c 100644
+--- a/fs/xfs/xfs_bmap_util.c
++++ b/fs/xfs/xfs_bmap_util.c
+@@ -84,6 +84,7 @@ xfs_zero_extent(
+ 		GFP_NOFS, 0);
+ }
+ 
++#ifdef CONFIG_XFS_RT
+ int
+ xfs_bmap_rtalloc(
+ 	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
+@@ -190,6 +191,7 @@ xfs_bmap_rtalloc(
+ 	}
+ 	return 0;
+ }
++#endif /* CONFIG_XFS_RT */
+ 
+ /*
+  * Check if the endoff is outside the last extent. If so the caller will grow
+@@ -1459,7 +1461,19 @@ xfs_shift_file_space(
+ 		return error;
+ 
+ 	/*
+-	 * The extent shiting code works on extent granularity. So, if
++	 * Clean out anything hanging around in the cow fork now that
++	 * we've flushed all the dirty data out to disk to avoid having
++	 * CoW extents at the wrong offsets.
++	 */
++	if (xfs_is_reflink_inode(ip)) {
++		error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
++				true);
++		if (error)
++			return error;
++	}
++
++	/*
++	 * The extent shifting code works on extent granularity. So, if
+ 	 * stop_fsb is not the starting block of extent, we need to split
+ 	 * the extent at stop_fsb.
+ 	 */
+@@ -2109,11 +2123,31 @@ xfs_swap_extents(
+ 		ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
+ 		tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
+ 		tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
++	}
++
++	/* Swap the cow forks. */
++	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
++		xfs_extnum_t	extnum;
++
++		ASSERT(ip->i_cformat == XFS_DINODE_FMT_EXTENTS);
++		ASSERT(tip->i_cformat == XFS_DINODE_FMT_EXTENTS);
++
++		extnum = ip->i_cnextents;
++		ip->i_cnextents = tip->i_cnextents;
++		tip->i_cnextents = extnum;
++
+ 		cowfp = ip->i_cowfp;
+ 		ip->i_cowfp = tip->i_cowfp;
+ 		tip->i_cowfp = cowfp;
+-		xfs_inode_set_cowblocks_tag(ip);
+-		xfs_inode_set_cowblocks_tag(tip);
++
++		if (ip->i_cowfp && ip->i_cnextents)
++			xfs_inode_set_cowblocks_tag(ip);
++		else
++			xfs_inode_clear_cowblocks_tag(ip);
++		if (tip->i_cowfp && tip->i_cnextents)
++			xfs_inode_set_cowblocks_tag(tip);
++		else
++			xfs_inode_clear_cowblocks_tag(tip);
+ 	}
+ 
+ 	xfs_trans_log_inode(tp, ip,  src_log_flags);
+diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
+index 0cede1043571..fb8d0c7d1db8 100644
+--- a/fs/xfs/xfs_bmap_util.h
++++ b/fs/xfs/xfs_bmap_util.h
+@@ -28,7 +28,20 @@ struct xfs_mount;
+ struct xfs_trans;
+ struct xfs_bmalloca;
+ 
++#ifdef CONFIG_XFS_RT
+ int	xfs_bmap_rtalloc(struct xfs_bmalloca *ap);
++#else /* !CONFIG_XFS_RT */
++/*
++ * Attempts to allocate RT extents when RT is disable indicates corruption and
++ * should trigger a shutdown.
++ */
++static inline int
++xfs_bmap_rtalloc(struct xfs_bmalloca *ap)
++{
++	return -EFSCORRUPTED;
++}
++#endif /* CONFIG_XFS_RT */
++
+ int	xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
+ 		     int whichfork, int *eof);
+ int	xfs_bmap_punch_delalloc_range(struct xfs_inode *ip,
+diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
+index 2f4feb959bfb..028e50a36f95 100644
+--- a/fs/xfs/xfs_error.c
++++ b/fs/xfs/xfs_error.c
+@@ -344,7 +344,7 @@ xfs_verifier_error(
+ {
+ 	struct xfs_mount *mp = bp->b_target->bt_mount;
+ 
+-	xfs_alert(mp, "Metadata %s detected at %pF, %s block 0x%llx",
++	xfs_alert(mp, "Metadata %s detected at %pS, %s block 0x%llx",
+ 		  bp->b_error == -EFSBADCRC ? "CRC error" : "corruption",
+ 		  __return_address, bp->b_ops->name, bp->b_bn);
+ 
+diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
+index c4893e226fd8..ad5100ce8c44 100644
+--- a/fs/xfs/xfs_file.c
++++ b/fs/xfs/xfs_file.c
+@@ -58,7 +58,7 @@ xfs_zero_range(
+ 	xfs_off_t		count,
+ 	bool			*did_zero)
+ {
+-	return iomap_zero_range(VFS_I(ip), pos, count, NULL, &xfs_iomap_ops);
++	return iomap_zero_range(VFS_I(ip), pos, count, did_zero, &xfs_iomap_ops);
+ }
+ 
+ int
+@@ -432,7 +432,6 @@ xfs_dio_write_end_io(
+ 	struct inode		*inode = file_inode(iocb->ki_filp);
+ 	struct xfs_inode	*ip = XFS_I(inode);
+ 	loff_t			offset = iocb->ki_pos;
+-	bool			update_size = false;
+ 	int			error = 0;
+ 
+ 	trace_xfs_end_io_direct_write(ip, offset, size);
+@@ -443,6 +442,21 @@ xfs_dio_write_end_io(
+ 	if (size <= 0)
+ 		return size;
+ 
++	if (flags & IOMAP_DIO_COW) {
++		error = xfs_reflink_end_cow(ip, offset, size);
++		if (error)
++			return error;
++	}
++
++	/*
++	 * Unwritten conversion updates the in-core isize after extent
++	 * conversion but before updating the on-disk size. Updating isize any
++	 * earlier allows a racing dio read to find unwritten extents before
++	 * they are converted.
++	 */
++	if (flags & IOMAP_DIO_UNWRITTEN)
++		return xfs_iomap_write_unwritten(ip, offset, size, true);
++
+ 	/*
+ 	 * We need to update the in-core inode size here so that we don't end up
+ 	 * with the on-disk inode size being outside the in-core inode size. We
+@@ -457,20 +471,11 @@ xfs_dio_write_end_io(
+ 	spin_lock(&ip->i_flags_lock);
+ 	if (offset + size > i_size_read(inode)) {
+ 		i_size_write(inode, offset + size);
+-		update_size = true;
+-	}
+-	spin_unlock(&ip->i_flags_lock);
+-
+-	if (flags & IOMAP_DIO_COW) {
+-		error = xfs_reflink_end_cow(ip, offset, size);
+-		if (error)
+-			return error;
+-	}
+-
+-	if (flags & IOMAP_DIO_UNWRITTEN)
+-		error = xfs_iomap_write_unwritten(ip, offset, size);
+-	else if (update_size)
++		spin_unlock(&ip->i_flags_lock);
+ 		error = xfs_setfilesize(ip, offset, size);
++	} else {
++		spin_unlock(&ip->i_flags_lock);
++	}
+ 
+ 	return error;
+ }
+diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
+index 814ed729881d..43cfc07996a4 100644
+--- a/fs/xfs/xfs_fsmap.c
++++ b/fs/xfs/xfs_fsmap.c
+@@ -367,29 +367,6 @@ xfs_getfsmap_datadev_helper(
+ 	return xfs_getfsmap_helper(cur->bc_tp, info, rec, rec_daddr);
+ }
+ 
+-/* Transform a rtbitmap "record" into a fsmap */
+-STATIC int
+-xfs_getfsmap_rtdev_rtbitmap_helper(
+-	struct xfs_trans		*tp,
+-	struct xfs_rtalloc_rec		*rec,
+-	void				*priv)
+-{
+-	struct xfs_mount		*mp = tp->t_mountp;
+-	struct xfs_getfsmap_info	*info = priv;
+-	struct xfs_rmap_irec		irec;
+-	xfs_daddr_t			rec_daddr;
+-
+-	rec_daddr = XFS_FSB_TO_BB(mp, rec->ar_startblock);
+-
+-	irec.rm_startblock = rec->ar_startblock;
+-	irec.rm_blockcount = rec->ar_blockcount;
+-	irec.rm_owner = XFS_RMAP_OWN_NULL;	/* "free" */
+-	irec.rm_offset = 0;
+-	irec.rm_flags = 0;
+-
+-	return xfs_getfsmap_helper(tp, info, &irec, rec_daddr);
+-}
+-
+ /* Transform a bnobt irec into a fsmap */
+ STATIC int
+ xfs_getfsmap_datadev_bnobt_helper(
+@@ -475,6 +452,30 @@ xfs_getfsmap_logdev(
+ 	return xfs_getfsmap_helper(tp, info, &rmap, 0);
+ }
+ 
++#ifdef CONFIG_XFS_RT
++/* Transform a rtbitmap "record" into a fsmap */
++STATIC int
++xfs_getfsmap_rtdev_rtbitmap_helper(
++	struct xfs_trans		*tp,
++	struct xfs_rtalloc_rec		*rec,
++	void				*priv)
++{
++	struct xfs_mount		*mp = tp->t_mountp;
++	struct xfs_getfsmap_info	*info = priv;
++	struct xfs_rmap_irec		irec;
++	xfs_daddr_t			rec_daddr;
++
++	rec_daddr = XFS_FSB_TO_BB(mp, rec->ar_startblock);
++
++	irec.rm_startblock = rec->ar_startblock;
++	irec.rm_blockcount = rec->ar_blockcount;
++	irec.rm_owner = XFS_RMAP_OWN_NULL;	/* "free" */
++	irec.rm_offset = 0;
++	irec.rm_flags = 0;
++
++	return xfs_getfsmap_helper(tp, info, &irec, rec_daddr);
++}
++
+ /* Execute a getfsmap query against the realtime device. */
+ STATIC int
+ __xfs_getfsmap_rtdev(
+@@ -561,6 +562,7 @@ xfs_getfsmap_rtdev_rtbitmap(
+ 	return __xfs_getfsmap_rtdev(tp, keys, xfs_getfsmap_rtdev_rtbitmap_query,
+ 			info);
+ }
++#endif /* CONFIG_XFS_RT */
+ 
+ /* Execute a getfsmap query against the regular data device. */
+ STATIC int
+@@ -795,7 +797,15 @@ xfs_getfsmap_check_keys(
+ 	return false;
+ }
+ 
++/*
++ * There are only two devices if we didn't configure RT devices at build time.
++ */
++#ifdef CONFIG_XFS_RT
+ #define XFS_GETFSMAP_DEVS	3
++#else
++#define XFS_GETFSMAP_DEVS	2
++#endif /* CONFIG_XFS_RT */
++
+ /*
+  * Get filesystem's extents as described in head, and format for
+  * output.  Calls formatter to fill the user's buffer until all
+@@ -853,10 +863,12 @@ xfs_getfsmap(
+ 		handlers[1].dev = new_encode_dev(mp->m_logdev_targp->bt_dev);
+ 		handlers[1].fn = xfs_getfsmap_logdev;
+ 	}
++#ifdef CONFIG_XFS_RT
+ 	if (mp->m_rtdev_targp) {
+ 		handlers[2].dev = new_encode_dev(mp->m_rtdev_targp->bt_dev);
+ 		handlers[2].fn = xfs_getfsmap_rtdev_rtbitmap;
+ 	}
++#endif /* CONFIG_XFS_RT */
+ 
+ 	xfs_sort(handlers, XFS_GETFSMAP_DEVS, sizeof(struct xfs_getfsmap_dev),
+ 			xfs_getfsmap_dev_compare);
+diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
+index 97045e8dfed5..cd2e5b122b01 100644
+--- a/fs/xfs/xfs_inode.c
++++ b/fs/xfs/xfs_inode.c
+@@ -1623,10 +1623,12 @@ xfs_itruncate_extents(
+ 		goto out;
+ 
+ 	/*
+-	 * Clear the reflink flag if we truncated everything.
++	 * Clear the reflink flag if there are no data fork blocks and
++	 * there are no extents staged in the cow fork.
+ 	 */
+-	if (ip->i_d.di_nblocks == 0 && xfs_is_reflink_inode(ip)) {
+-		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
++	if (xfs_is_reflink_inode(ip) && ip->i_cnextents == 0) {
++		if (ip->i_d.di_nblocks == 0)
++			ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
+ 		xfs_inode_clear_cowblocks_tag(ip);
+ 	}
+ 
+diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
+index 6d0f74ec31e8..9bbc2d7cc8cb 100644
+--- a/fs/xfs/xfs_inode_item.c
++++ b/fs/xfs/xfs_inode_item.c
+@@ -364,6 +364,9 @@ xfs_inode_to_log_dinode(
+ 	to->di_dmstate = from->di_dmstate;
+ 	to->di_flags = from->di_flags;
+ 
++	/* log a dummy value to ensure log structure is fully initialised */
++	to->di_next_unlinked = NULLAGINO;
++
+ 	if (from->di_version == 3) {
+ 		to->di_changecount = inode->i_version;
+ 		to->di_crtime.t_sec = from->di_crtime.t_sec;
+@@ -404,6 +407,11 @@ xfs_inode_item_format_core(
+  * the second with the on-disk inode structure, and a possible third and/or
+  * fourth with the inode data/extents/b-tree root and inode attributes
+  * data/extents/b-tree root.
++ *
++ * Note: Always use the 64 bit inode log format structure so we don't
++ * leave an uninitialised hole in the format item on 64 bit systems. Log
++ * recovery on 32 bit systems handles this just fine, so there's no reason
++ * for not using an initialising the properly padded structure all the time.
+  */
+ STATIC void
+ xfs_inode_item_format(
+@@ -412,8 +420,8 @@ xfs_inode_item_format(
+ {
+ 	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
+ 	struct xfs_inode	*ip = iip->ili_inode;
+-	struct xfs_inode_log_format *ilf;
+ 	struct xfs_log_iovec	*vecp = NULL;
++	struct xfs_inode_log_format *ilf;
+ 
+ 	ASSERT(ip->i_d.di_version > 1);
+ 
+@@ -425,7 +433,17 @@ xfs_inode_item_format(
+ 	ilf->ilf_boffset = ip->i_imap.im_boffset;
+ 	ilf->ilf_fields = XFS_ILOG_CORE;
+ 	ilf->ilf_size = 2; /* format + core */
+-	xlog_finish_iovec(lv, vecp, sizeof(struct xfs_inode_log_format));
++
++	/*
++	 * make sure we don't leak uninitialised data into the log in the case
++	 * when we don't log every field in the inode.
++	 */
++	ilf->ilf_dsize = 0;
++	ilf->ilf_asize = 0;
++	ilf->ilf_pad = 0;
++	uuid_copy(&ilf->ilf_u.ilfu_uuid, &uuid_null);
++
++	xlog_finish_iovec(lv, vecp, sizeof(*ilf));
+ 
+ 	xfs_inode_item_format_core(ip, lv, &vecp);
+ 	xfs_inode_item_format_data_fork(iip, ilf, lv, &vecp);
+@@ -745,7 +763,7 @@ xfs_iflush_done(
+ 		 */
+ 		iip = INODE_ITEM(blip);
+ 		if ((iip->ili_logged && blip->li_lsn == iip->ili_flush_lsn) ||
+-		    lip->li_flags & XFS_LI_FAILED)
++		    (blip->li_flags & XFS_LI_FAILED))
+ 			need_ail++;
+ 
+ 		blip = next;
+@@ -855,44 +873,29 @@ xfs_istale_done(
+ }
+ 
+ /*
+- * convert an xfs_inode_log_format struct from either 32 or 64 bit versions
+- * (which can have different field alignments) to the native version
++ * convert an xfs_inode_log_format struct from the old 32 bit version
++ * (which can have different field alignments) to the native 64 bit version
+  */
+ int
+ xfs_inode_item_format_convert(
+-	xfs_log_iovec_t		*buf,
+-	xfs_inode_log_format_t	*in_f)
++	struct xfs_log_iovec		*buf,
++	struct xfs_inode_log_format	*in_f)
+ {
+-	if (buf->i_len == sizeof(xfs_inode_log_format_32_t)) {
+-		xfs_inode_log_format_32_t *in_f32 = buf->i_addr;
+-
+-		in_f->ilf_type = in_f32->ilf_type;
+-		in_f->ilf_size = in_f32->ilf_size;
+-		in_f->ilf_fields = in_f32->ilf_fields;
+-		in_f->ilf_asize = in_f32->ilf_asize;
+-		in_f->ilf_dsize = in_f32->ilf_dsize;
+-		in_f->ilf_ino = in_f32->ilf_ino;
+-		/* copy biggest field of ilf_u */
+-		uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f32->ilf_u.ilfu_uuid);
+-		in_f->ilf_blkno = in_f32->ilf_blkno;
+-		in_f->ilf_len = in_f32->ilf_len;
+-		in_f->ilf_boffset = in_f32->ilf_boffset;
+-		return 0;
+-	} else if (buf->i_len == sizeof(xfs_inode_log_format_64_t)){
+-		xfs_inode_log_format_64_t *in_f64 = buf->i_addr;
+-
+-		in_f->ilf_type = in_f64->ilf_type;
+-		in_f->ilf_size = in_f64->ilf_size;
+-		in_f->ilf_fields = in_f64->ilf_fields;
+-		in_f->ilf_asize = in_f64->ilf_asize;
+-		in_f->ilf_dsize = in_f64->ilf_dsize;
+-		in_f->ilf_ino = in_f64->ilf_ino;
+-		/* copy biggest field of ilf_u */
+-		uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f64->ilf_u.ilfu_uuid);
+-		in_f->ilf_blkno = in_f64->ilf_blkno;
+-		in_f->ilf_len = in_f64->ilf_len;
+-		in_f->ilf_boffset = in_f64->ilf_boffset;
+-		return 0;
+-	}
+-	return -EFSCORRUPTED;
++	struct xfs_inode_log_format_32	*in_f32 = buf->i_addr;
++
++	if (buf->i_len != sizeof(*in_f32))
++		return -EFSCORRUPTED;
++
++	in_f->ilf_type = in_f32->ilf_type;
++	in_f->ilf_size = in_f32->ilf_size;
++	in_f->ilf_fields = in_f32->ilf_fields;
++	in_f->ilf_asize = in_f32->ilf_asize;
++	in_f->ilf_dsize = in_f32->ilf_dsize;
++	in_f->ilf_ino = in_f32->ilf_ino;
++	/* copy biggest field of ilf_u */
++	uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f32->ilf_u.ilfu_uuid);
++	in_f->ilf_blkno = in_f32->ilf_blkno;
++	in_f->ilf_len = in_f32->ilf_len;
++	in_f->ilf_boffset = in_f32->ilf_boffset;
++	return 0;
+ }
+diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
+index 813394c62849..2cef389d8955 100644
+--- a/fs/xfs/xfs_iomap.c
++++ b/fs/xfs/xfs_iomap.c
+@@ -829,7 +829,8 @@ int
+ xfs_iomap_write_unwritten(
+ 	xfs_inode_t	*ip,
+ 	xfs_off_t	offset,
+-	xfs_off_t	count)
++	xfs_off_t	count,
++	bool		update_isize)
+ {
+ 	xfs_mount_t	*mp = ip->i_mount;
+ 	xfs_fileoff_t	offset_fsb;
+@@ -840,6 +841,7 @@ xfs_iomap_write_unwritten(
+ 	xfs_trans_t	*tp;
+ 	xfs_bmbt_irec_t imap;
+ 	struct xfs_defer_ops dfops;
++	struct inode	*inode = VFS_I(ip);
+ 	xfs_fsize_t	i_size;
+ 	uint		resblks;
+ 	int		error;
+@@ -899,7 +901,8 @@ xfs_iomap_write_unwritten(
+ 		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
+ 		if (i_size > offset + count)
+ 			i_size = offset + count;
+-
++		if (update_isize && i_size > i_size_read(inode))
++			i_size_write(inode, i_size);
+ 		i_size = xfs_new_eof(ip, i_size);
+ 		if (i_size) {
+ 			ip->i_d.di_size = i_size;
+diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
+index 00db3ecea084..ee535065c5d0 100644
+--- a/fs/xfs/xfs_iomap.h
++++ b/fs/xfs/xfs_iomap.h
+@@ -27,7 +27,7 @@ int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
+ 			struct xfs_bmbt_irec *, int);
+ int xfs_iomap_write_allocate(struct xfs_inode *, int, xfs_off_t,
+ 			struct xfs_bmbt_irec *);
+-int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t);
++int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t, bool);
+ 
+ void xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *,
+ 		struct xfs_bmbt_irec *);
+diff --git a/fs/xfs/xfs_ondisk.h b/fs/xfs/xfs_ondisk.h
+index 0c381d71b242..0492436a053f 100644
+--- a/fs/xfs/xfs_ondisk.h
++++ b/fs/xfs/xfs_ondisk.h
+@@ -134,7 +134,7 @@ xfs_check_ondisk_structs(void)
+ 	XFS_CHECK_STRUCT_SIZE(struct xfs_icreate_log,		28);
+ 	XFS_CHECK_STRUCT_SIZE(struct xfs_ictimestamp,		8);
+ 	XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format_32,	52);
+-	XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format_64,	56);
++	XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format,	56);
+ 	XFS_CHECK_STRUCT_SIZE(struct xfs_qoff_logformat,	20);
+ 	XFS_CHECK_STRUCT_SIZE(struct xfs_trans_header,		16);
+ }
+diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
+index 2f2dc3c09ad0..4246876df7b7 100644
+--- a/fs/xfs/xfs_pnfs.c
++++ b/fs/xfs/xfs_pnfs.c
+@@ -274,7 +274,7 @@ xfs_fs_commit_blocks(
+ 					(end - 1) >> PAGE_SHIFT);
+ 		WARN_ON_ONCE(error);
+ 
+-		error = xfs_iomap_write_unwritten(ip, start, length);
++		error = xfs_iomap_write_unwritten(ip, start, length, false);
+ 		if (error)
+ 			goto out_drop_iolock;
+ 	}
+diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
+index f45fbf0db9bb..8c8390a7f121 100644
+--- a/fs/xfs/xfs_reflink.c
++++ b/fs/xfs/xfs_reflink.c
+@@ -735,7 +735,13 @@ xfs_reflink_end_cow(
+ 	/* If there is a hole at end_fsb - 1 go to the previous extent */
+ 	if (!xfs_iext_lookup_extent(ip, ifp, end_fsb - 1, &idx, &got) ||
+ 	    got.br_startoff > end_fsb) {
+-		ASSERT(idx > 0);
++		/*
++		 * In case of racing, overlapping AIO writes no COW extents
++		 * might be left by the time I/O completes for the loser of
++		 * the race.  In that case we are done.
++		 */
++		if (idx <= 0)
++			goto out_cancel;
+ 		xfs_iext_get_extent(ifp, --idx, &got);
+ 	}
+ 
+@@ -807,6 +813,7 @@ xfs_reflink_end_cow(
+ 
+ out_defer:
+ 	xfs_defer_cancel(&dfops);
++out_cancel:
+ 	xfs_trans_cancel(tp);
+ 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ out:
+diff --git a/include/linux/key.h b/include/linux/key.h
+index e315e16b6ff8..8a15cabe928d 100644
+--- a/include/linux/key.h
++++ b/include/linux/key.h
+@@ -138,6 +138,11 @@ struct key_restriction {
+ 	struct key_type *keytype;
+ };
+ 
++enum key_state {
++	KEY_IS_UNINSTANTIATED,
++	KEY_IS_POSITIVE,		/* Positively instantiated */
++};
++
+ /*****************************************************************************/
+ /*
+  * authentication token / access credential / keyring
+@@ -169,6 +174,7 @@ struct key {
+ 						 * - may not match RCU dereferenced payload
+ 						 * - payload should contain own length
+ 						 */
++	short			state;		/* Key state (+) or rejection error (-) */
+ 
+ #ifdef KEY_DEBUGGING
+ 	unsigned		magic;
+@@ -176,18 +182,16 @@ struct key {
+ #endif
+ 
+ 	unsigned long		flags;		/* status flags (change with bitops) */
+-#define KEY_FLAG_INSTANTIATED	0	/* set if key has been instantiated */
+-#define KEY_FLAG_DEAD		1	/* set if key type has been deleted */
+-#define KEY_FLAG_REVOKED	2	/* set if key had been revoked */
+-#define KEY_FLAG_IN_QUOTA	3	/* set if key consumes quota */
+-#define KEY_FLAG_USER_CONSTRUCT	4	/* set if key is being constructed in userspace */
+-#define KEY_FLAG_NEGATIVE	5	/* set if key is negative */
+-#define KEY_FLAG_ROOT_CAN_CLEAR	6	/* set if key can be cleared by root without permission */
+-#define KEY_FLAG_INVALIDATED	7	/* set if key has been invalidated */
+-#define KEY_FLAG_BUILTIN	8	/* set if key is built in to the kernel */
+-#define KEY_FLAG_ROOT_CAN_INVAL	9	/* set if key can be invalidated by root without permission */
+-#define KEY_FLAG_KEEP		10	/* set if key should not be removed */
+-#define KEY_FLAG_UID_KEYRING	11	/* set if key is a user or user session keyring */
++#define KEY_FLAG_DEAD		0	/* set if key type has been deleted */
++#define KEY_FLAG_REVOKED	1	/* set if key had been revoked */
++#define KEY_FLAG_IN_QUOTA	2	/* set if key consumes quota */
++#define KEY_FLAG_USER_CONSTRUCT	3	/* set if key is being constructed in userspace */
++#define KEY_FLAG_ROOT_CAN_CLEAR	4	/* set if key can be cleared by root without permission */
++#define KEY_FLAG_INVALIDATED	5	/* set if key has been invalidated */
++#define KEY_FLAG_BUILTIN	6	/* set if key is built in to the kernel */
++#define KEY_FLAG_ROOT_CAN_INVAL	7	/* set if key can be invalidated by root without permission */
++#define KEY_FLAG_KEEP		8	/* set if key should not be removed */
++#define KEY_FLAG_UID_KEYRING	9	/* set if key is a user or user session keyring */
+ 
+ 	/* the key type and key description string
+ 	 * - the desc is used to match a key against search criteria
+@@ -213,7 +217,6 @@ struct key {
+ 			struct list_head name_link;
+ 			struct assoc_array keys;
+ 		};
+-		int reject_error;
+ 	};
+ 
+ 	/* This is set on a keyring to restrict the addition of a link to a key
+@@ -353,17 +356,27 @@ extern void key_set_timeout(struct key *, unsigned);
+ #define	KEY_NEED_SETATTR 0x20	/* Require permission to change attributes */
+ #define	KEY_NEED_ALL	0x3f	/* All the above permissions */
+ 
++static inline short key_read_state(const struct key *key)
++{
++	/* Barrier versus mark_key_instantiated(). */
++	return smp_load_acquire(&key->state);
++}
++
+ /**
+- * key_is_instantiated - Determine if a key has been positively instantiated
++ * key_is_positive - Determine if a key has been positively instantiated
+  * @key: The key to check.
+  *
+  * Return true if the specified key has been positively instantiated, false
+  * otherwise.
+  */
+-static inline bool key_is_instantiated(const struct key *key)
++static inline bool key_is_positive(const struct key *key)
++{
++	return key_read_state(key) == KEY_IS_POSITIVE;
++}
++
++static inline bool key_is_negative(const struct key *key)
+ {
+-	return test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
+-		!test_bit(KEY_FLAG_NEGATIVE, &key->flags);
++	return key_read_state(key) < 0;
+ }
+ 
+ #define dereference_key_rcu(KEY)					\
+diff --git a/include/linux/mbus.h b/include/linux/mbus.h
+index 0d3f14fd2621..4773145246ed 100644
+--- a/include/linux/mbus.h
++++ b/include/linux/mbus.h
+@@ -31,8 +31,8 @@ struct mbus_dram_target_info
+ 	struct mbus_dram_window {
+ 		u8	cs_index;
+ 		u8	mbus_attr;
+-		u32	base;
+-		u32	size;
++		u64	base;
++		u64	size;
+ 	} cs[4];
+ };
+ 
+diff --git a/include/sound/control.h b/include/sound/control.h
+index bd7246de58e7..a1f1152bc687 100644
+--- a/include/sound/control.h
++++ b/include/sound/control.h
+@@ -248,6 +248,9 @@ int snd_ctl_add_vmaster_hook(struct snd_kcontrol *kctl,
+ 			     void *private_data);
+ void snd_ctl_sync_vmaster(struct snd_kcontrol *kctl, bool hook_only);
+ #define snd_ctl_sync_vmaster_hook(kctl)	snd_ctl_sync_vmaster(kctl, true)
++int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl,
++				 int (*func)(struct snd_kcontrol *, void *),
++				 void *arg);
+ 
+ /*
+  * Helper functions for jack-detection controls
+diff --git a/lib/digsig.c b/lib/digsig.c
+index 03d7c63837ae..6ba6fcd92dd1 100644
+--- a/lib/digsig.c
++++ b/lib/digsig.c
+@@ -87,6 +87,12 @@ static int digsig_verify_rsa(struct key *key,
+ 	down_read(&key->sem);
+ 	ukp = user_key_payload_locked(key);
+ 
++	if (!ukp) {
++		/* key was revoked before we acquired its semaphore */
++		err = -EKEYREVOKED;
++		goto err1;
++	}
++
+ 	if (ukp->datalen < sizeof(*pkh))
+ 		goto err1;
+ 
+diff --git a/net/can/af_can.c b/net/can/af_can.c
+index 88edac0f3e36..df186cdcc2b4 100644
+--- a/net/can/af_can.c
++++ b/net/can/af_can.c
+@@ -875,9 +875,14 @@ static int can_pernet_init(struct net *net)
+ 	spin_lock_init(&net->can.can_rcvlists_lock);
+ 	net->can.can_rx_alldev_list =
+ 		kzalloc(sizeof(struct dev_rcv_lists), GFP_KERNEL);
+-
++	if (!net->can.can_rx_alldev_list)
++		goto out;
+ 	net->can.can_stats = kzalloc(sizeof(struct s_stats), GFP_KERNEL);
++	if (!net->can.can_stats)
++		goto out_free_alldev_list;
+ 	net->can.can_pstats = kzalloc(sizeof(struct s_pstats), GFP_KERNEL);
++	if (!net->can.can_pstats)
++		goto out_free_can_stats;
+ 
+ 	if (IS_ENABLED(CONFIG_PROC_FS)) {
+ 		/* the statistics are updated every second (timer triggered) */
+@@ -892,6 +897,13 @@ static int can_pernet_init(struct net *net)
+ 	}
+ 
+ 	return 0;
++
++ out_free_can_stats:
++	kfree(net->can.can_stats);
++ out_free_alldev_list:
++	kfree(net->can.can_rx_alldev_list);
++ out:
++	return -ENOMEM;
+ }
+ 
+ static void can_pernet_exit(struct net *net)
+diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
+index 8737412c7b27..e1d4d898a007 100644
+--- a/net/dns_resolver/dns_key.c
++++ b/net/dns_resolver/dns_key.c
+@@ -224,7 +224,7 @@ static int dns_resolver_match_preparse(struct key_match_data *match_data)
+ static void dns_resolver_describe(const struct key *key, struct seq_file *m)
+ {
+ 	seq_puts(m, key->description);
+-	if (key_is_instantiated(key)) {
++	if (key_is_positive(key)) {
+ 		int err = PTR_ERR(key->payload.data[dns_key_error]);
+ 
+ 		if (err)
+diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c
+index bc7fcf010a5b..446beb7ac48d 100644
+--- a/samples/trace_events/trace-events-sample.c
++++ b/samples/trace_events/trace-events-sample.c
+@@ -78,29 +78,37 @@ static int simple_thread_fn(void *arg)
+ }
+ 
+ static DEFINE_MUTEX(thread_mutex);
++static bool simple_thread_cnt;
+ 
+ int foo_bar_reg(void)
+ {
++	mutex_lock(&thread_mutex);
++	if (simple_thread_cnt++)
++		goto out;
++
+ 	pr_info("Starting thread for foo_bar_fn\n");
+ 	/*
+ 	 * We shouldn't be able to start a trace when the module is
+ 	 * unloading (there's other locks to prevent that). But
+ 	 * for consistency sake, we still take the thread_mutex.
+ 	 */
+-	mutex_lock(&thread_mutex);
+ 	simple_tsk_fn = kthread_run(simple_thread_fn, NULL, "event-sample-fn");
++ out:
+ 	mutex_unlock(&thread_mutex);
+ 	return 0;
+ }
+ 
+ void foo_bar_unreg(void)
+ {
+-	pr_info("Killing thread for foo_bar_fn\n");
+-	/* protect against module unloading */
+ 	mutex_lock(&thread_mutex);
++	if (--simple_thread_cnt)
++		goto out;
++
++	pr_info("Killing thread for foo_bar_fn\n");
+ 	if (simple_tsk_fn)
+ 		kthread_stop(simple_tsk_fn);
+ 	simple_tsk_fn = NULL;
++ out:
+ 	mutex_unlock(&thread_mutex);
+ }
+ 
+diff --git a/security/keys/big_key.c b/security/keys/big_key.c
+index 9c3b16ee1768..ad39a0bdf846 100644
+--- a/security/keys/big_key.c
++++ b/security/keys/big_key.c
+@@ -247,7 +247,7 @@ void big_key_revoke(struct key *key)
+ 
+ 	/* clear the quota */
+ 	key_payload_reserve(key, 0);
+-	if (key_is_instantiated(key) &&
++	if (key_is_positive(key) &&
+ 	    (size_t)key->payload.data[big_key_len] > BIG_KEY_FILE_THRESHOLD)
+ 		vfs_truncate(path, 0);
+ }
+@@ -279,7 +279,7 @@ void big_key_describe(const struct key *key, struct seq_file *m)
+ 
+ 	seq_puts(m, key->description);
+ 
+-	if (key_is_instantiated(key))
++	if (key_is_positive(key))
+ 		seq_printf(m, ": %zu [%s]",
+ 			   datalen,
+ 			   datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff");
+diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
+index 69855ba0d3b3..d92cbf9687c3 100644
+--- a/security/keys/encrypted-keys/encrypted.c
++++ b/security/keys/encrypted-keys/encrypted.c
+@@ -309,6 +309,13 @@ static struct key *request_user_key(const char *master_desc, const u8 **master_k
+ 
+ 	down_read(&ukey->sem);
+ 	upayload = user_key_payload_locked(ukey);
++	if (!upayload) {
++		/* key was revoked before we acquired its semaphore */
++		up_read(&ukey->sem);
++		key_put(ukey);
++		ukey = ERR_PTR(-EKEYREVOKED);
++		goto error;
++	}
+ 	*master_key = upayload->data;
+ 	*master_keylen = upayload->datalen;
+ error:
+@@ -847,7 +854,7 @@ static int encrypted_update(struct key *key, struct key_preparsed_payload *prep)
+ 	size_t datalen = prep->datalen;
+ 	int ret = 0;
+ 
+-	if (test_bit(KEY_FLAG_NEGATIVE, &key->flags))
++	if (key_is_negative(key))
+ 		return -ENOKEY;
+ 	if (datalen <= 0 || datalen > 32767 || !prep->data)
+ 		return -EINVAL;
+diff --git a/security/keys/gc.c b/security/keys/gc.c
+index 87cb260e4890..f01d48cb3de1 100644
+--- a/security/keys/gc.c
++++ b/security/keys/gc.c
+@@ -129,15 +129,15 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
+ 	while (!list_empty(keys)) {
+ 		struct key *key =
+ 			list_entry(keys->next, struct key, graveyard_link);
++		short state = key->state;
++
+ 		list_del(&key->graveyard_link);
+ 
+ 		kdebug("- %u", key->serial);
+ 		key_check(key);
+ 
+ 		/* Throw away the key data if the key is instantiated */
+-		if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
+-		    !test_bit(KEY_FLAG_NEGATIVE, &key->flags) &&
+-		    key->type->destroy)
++		if (state == KEY_IS_POSITIVE && key->type->destroy)
+ 			key->type->destroy(key);
+ 
+ 		security_key_free(key);
+@@ -151,7 +151,7 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
+ 		}
+ 
+ 		atomic_dec(&key->user->nkeys);
+-		if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
++		if (state != KEY_IS_UNINSTANTIATED)
+ 			atomic_dec(&key->user->nikeys);
+ 
+ 		key_user_put(key->user);
+diff --git a/security/keys/key.c b/security/keys/key.c
+index e5c0896c3a8f..3186c769f300 100644
+--- a/security/keys/key.c
++++ b/security/keys/key.c
+@@ -401,6 +401,18 @@ int key_payload_reserve(struct key *key, size_t datalen)
+ }
+ EXPORT_SYMBOL(key_payload_reserve);
+ 
++/*
++ * Change the key state to being instantiated.
++ */
++static void mark_key_instantiated(struct key *key, int reject_error)
++{
++	/* Commit the payload before setting the state; barrier versus
++	 * key_read_state().
++	 */
++	smp_store_release(&key->state,
++			  (reject_error < 0) ? reject_error : KEY_IS_POSITIVE);
++}
++
+ /*
+  * Instantiate a key and link it into the target keyring atomically.  Must be
+  * called with the target keyring's semaphore writelocked.  The target key's
+@@ -424,14 +436,14 @@ static int __key_instantiate_and_link(struct key *key,
+ 	mutex_lock(&key_construction_mutex);
+ 
+ 	/* can't instantiate twice */
+-	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
++	if (key->state == KEY_IS_UNINSTANTIATED) {
+ 		/* instantiate the key */
+ 		ret = key->type->instantiate(key, prep);
+ 
+ 		if (ret == 0) {
+ 			/* mark the key as being instantiated */
+ 			atomic_inc(&key->user->nikeys);
+-			set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
++			mark_key_instantiated(key, 0);
+ 
+ 			if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
+ 				awaken = 1;
+@@ -577,13 +589,10 @@ int key_reject_and_link(struct key *key,
+ 	mutex_lock(&key_construction_mutex);
+ 
+ 	/* can't instantiate twice */
+-	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
++	if (key->state == KEY_IS_UNINSTANTIATED) {
+ 		/* mark the key as being negatively instantiated */
+ 		atomic_inc(&key->user->nikeys);
+-		key->reject_error = -error;
+-		smp_wmb();
+-		set_bit(KEY_FLAG_NEGATIVE, &key->flags);
+-		set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
++		mark_key_instantiated(key, -error);
+ 		now = current_kernel_time();
+ 		key->expiry = now.tv_sec + timeout;
+ 		key_schedule_gc(key->expiry + key_gc_delay);
+@@ -752,8 +761,8 @@ static inline key_ref_t __key_update(key_ref_t key_ref,
+ 
+ 	ret = key->type->update(key, prep);
+ 	if (ret == 0)
+-		/* updating a negative key instantiates it */
+-		clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
++		/* Updating a negative key positively instantiates it */
++		mark_key_instantiated(key, 0);
+ 
+ 	up_write(&key->sem);
+ 
+@@ -936,6 +945,16 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
+ 	 */
+ 	__key_link_end(keyring, &index_key, edit);
+ 
++	key = key_ref_to_ptr(key_ref);
++	if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) {
++		ret = wait_for_key_construction(key, true);
++		if (ret < 0) {
++			key_ref_put(key_ref);
++			key_ref = ERR_PTR(ret);
++			goto error_free_prep;
++		}
++	}
++
+ 	key_ref = __key_update(key_ref, &prep);
+ 	goto error_free_prep;
+ }
+@@ -986,8 +1005,8 @@ int key_update(key_ref_t key_ref, const void *payload, size_t plen)
+ 
+ 	ret = key->type->update(key, &prep);
+ 	if (ret == 0)
+-		/* updating a negative key instantiates it */
+-		clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
++		/* Updating a negative key positively instantiates it */
++		mark_key_instantiated(key, 0);
+ 
+ 	up_write(&key->sem);
+ 
+diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
+index 6a82090c7fc1..2eb624c0aefc 100644
+--- a/security/keys/keyctl.c
++++ b/security/keys/keyctl.c
+@@ -766,10 +766,9 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
+ 
+ 	key = key_ref_to_ptr(key_ref);
+ 
+-	if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
+-		ret = -ENOKEY;
+-		goto error2;
+-	}
++	ret = key_read_state(key);
++	if (ret < 0)
++		goto error2; /* Negatively instantiated */
+ 
+ 	/* see if we can read it directly */
+ 	ret = key_permission(key_ref, KEY_NEED_READ);
+@@ -901,7 +900,7 @@ long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group)
+ 		atomic_dec(&key->user->nkeys);
+ 		atomic_inc(&newowner->nkeys);
+ 
+-		if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
++		if (key->state != KEY_IS_UNINSTANTIATED) {
+ 			atomic_dec(&key->user->nikeys);
+ 			atomic_inc(&newowner->nikeys);
+ 		}
+diff --git a/security/keys/keyring.c b/security/keys/keyring.c
+index 4fa82a8a9c0e..06173b091a74 100644
+--- a/security/keys/keyring.c
++++ b/security/keys/keyring.c
+@@ -414,7 +414,7 @@ static void keyring_describe(const struct key *keyring, struct seq_file *m)
+ 	else
+ 		seq_puts(m, "[anon]");
+ 
+-	if (key_is_instantiated(keyring)) {
++	if (key_is_positive(keyring)) {
+ 		if (keyring->keys.nr_leaves_on_tree != 0)
+ 			seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree);
+ 		else
+@@ -553,7 +553,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
+ {
+ 	struct keyring_search_context *ctx = iterator_data;
+ 	const struct key *key = keyring_ptr_to_key(object);
+-	unsigned long kflags = key->flags;
++	unsigned long kflags = READ_ONCE(key->flags);
++	short state = READ_ONCE(key->state);
+ 
+ 	kenter("{%d}", key->serial);
+ 
+@@ -597,9 +598,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
+ 
+ 	if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
+ 		/* we set a different error code if we pass a negative key */
+-		if (kflags & (1 << KEY_FLAG_NEGATIVE)) {
+-			smp_rmb();
+-			ctx->result = ERR_PTR(key->reject_error);
++		if (state < 0) {
++			ctx->result = ERR_PTR(state);
+ 			kleave(" = %d [neg]", ctx->skipped_ret);
+ 			goto skipped;
+ 		}
+diff --git a/security/keys/proc.c b/security/keys/proc.c
+index bf08d02b6646..e6aa1b257578 100644
+--- a/security/keys/proc.c
++++ b/security/keys/proc.c
+@@ -182,6 +182,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
+ 	unsigned long timo;
+ 	key_ref_t key_ref, skey_ref;
+ 	char xbuf[16];
++	short state;
+ 	int rc;
+ 
+ 	struct keyring_search_context ctx = {
+@@ -240,17 +241,19 @@ static int proc_keys_show(struct seq_file *m, void *v)
+ 			sprintf(xbuf, "%luw", timo / (60*60*24*7));
+ 	}
+ 
++	state = key_read_state(key);
++
+ #define showflag(KEY, LETTER, FLAG) \
+ 	(test_bit(FLAG,	&(KEY)->flags) ? LETTER : '-')
+ 
+ 	seq_printf(m, "%08x %c%c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ",
+ 		   key->serial,
+-		   showflag(key, 'I', KEY_FLAG_INSTANTIATED),
++		   state != KEY_IS_UNINSTANTIATED ? 'I' : '-',
+ 		   showflag(key, 'R', KEY_FLAG_REVOKED),
+ 		   showflag(key, 'D', KEY_FLAG_DEAD),
+ 		   showflag(key, 'Q', KEY_FLAG_IN_QUOTA),
+ 		   showflag(key, 'U', KEY_FLAG_USER_CONSTRUCT),
+-		   showflag(key, 'N', KEY_FLAG_NEGATIVE),
++		   state < 0 ? 'N' : '-',
+ 		   showflag(key, 'i', KEY_FLAG_INVALIDATED),
+ 		   refcount_read(&key->usage),
+ 		   xbuf,
+diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
+index 293d3598153b..740affd65ee9 100644
+--- a/security/keys/process_keys.c
++++ b/security/keys/process_keys.c
+@@ -730,7 +730,7 @@ key_ref_t lookup_user_key(key_serial_t id, unsigned long lflags,
+ 
+ 	ret = -EIO;
+ 	if (!(lflags & KEY_LOOKUP_PARTIAL) &&
+-	    !test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
++	    key_read_state(key) == KEY_IS_UNINSTANTIATED)
+ 		goto invalid_key;
+ 
+ 	/* check the permissions */
+diff --git a/security/keys/request_key.c b/security/keys/request_key.c
+index 63e63a42db3c..e8036cd0ad54 100644
+--- a/security/keys/request_key.c
++++ b/security/keys/request_key.c
+@@ -595,10 +595,9 @@ int wait_for_key_construction(struct key *key, bool intr)
+ 			  intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+ 	if (ret)
+ 		return -ERESTARTSYS;
+-	if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
+-		smp_rmb();
+-		return key->reject_error;
+-	}
++	ret = key_read_state(key);
++	if (ret < 0)
++		return ret;
+ 	return key_validate(key);
+ }
+ EXPORT_SYMBOL(wait_for_key_construction);
+diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
+index afe9d22ab361..4a740e50322b 100644
+--- a/security/keys/request_key_auth.c
++++ b/security/keys/request_key_auth.c
+@@ -73,7 +73,7 @@ static void request_key_auth_describe(const struct key *key,
+ 
+ 	seq_puts(m, "key:");
+ 	seq_puts(m, key->description);
+-	if (key_is_instantiated(key))
++	if (key_is_positive(key))
+ 		seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len);
+ }
+ 
+diff --git a/security/keys/trusted.c b/security/keys/trusted.c
+index ddfaebf60fc8..bd85315cbfeb 100644
+--- a/security/keys/trusted.c
++++ b/security/keys/trusted.c
+@@ -1066,7 +1066,7 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
+ 	char *datablob;
+ 	int ret = 0;
+ 
+-	if (test_bit(KEY_FLAG_NEGATIVE, &key->flags))
++	if (key_is_negative(key))
+ 		return -ENOKEY;
+ 	p = key->payload.data[0];
+ 	if (!p->migratable)
+diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
+index 3d8c68eba516..9f558bedba23 100644
+--- a/security/keys/user_defined.c
++++ b/security/keys/user_defined.c
+@@ -114,7 +114,7 @@ int user_update(struct key *key, struct key_preparsed_payload *prep)
+ 
+ 	/* attach the new data, displacing the old */
+ 	key->expiry = prep->expiry;
+-	if (!test_bit(KEY_FLAG_NEGATIVE, &key->flags))
++	if (key_is_positive(key))
+ 		zap = dereference_key_locked(key);
+ 	rcu_assign_keypointer(key, prep->payload.data[0]);
+ 	prep->payload.data[0] = NULL;
+@@ -162,7 +162,7 @@ EXPORT_SYMBOL_GPL(user_destroy);
+ void user_describe(const struct key *key, struct seq_file *m)
+ {
+ 	seq_puts(m, key->description);
+-	if (key_is_instantiated(key))
++	if (key_is_positive(key))
+ 		seq_printf(m, ": %u", key->datalen);
+ }
+ 
+diff --git a/sound/core/seq/seq_lock.c b/sound/core/seq/seq_lock.c
+index 0ff7926a5a69..cda64b489e42 100644
+--- a/sound/core/seq/seq_lock.c
++++ b/sound/core/seq/seq_lock.c
+@@ -23,8 +23,6 @@
+ #include <sound/core.h>
+ #include "seq_lock.h"
+ 
+-#if defined(CONFIG_SMP) || defined(CONFIG_SND_DEBUG)
+-
+ /* wait until all locks are released */
+ void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
+ {
+@@ -41,5 +39,3 @@ void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
+ 	}
+ }
+ EXPORT_SYMBOL(snd_use_lock_sync_helper);
+-
+-#endif
+diff --git a/sound/core/seq/seq_lock.h b/sound/core/seq/seq_lock.h
+index 54044bc2c9ef..ac38031c370e 100644
+--- a/sound/core/seq/seq_lock.h
++++ b/sound/core/seq/seq_lock.h
+@@ -3,8 +3,6 @@
+ 
+ #include <linux/sched.h>
+ 
+-#if defined(CONFIG_SMP) || defined(CONFIG_SND_DEBUG)
+-
+ typedef atomic_t snd_use_lock_t;
+ 
+ /* initialize lock */
+@@ -20,14 +18,4 @@ typedef atomic_t snd_use_lock_t;
+ void snd_use_lock_sync_helper(snd_use_lock_t *lock, const char *file, int line);
+ #define snd_use_lock_sync(lockp) snd_use_lock_sync_helper(lockp, __BASE_FILE__, __LINE__)
+ 
+-#else /* SMP || CONFIG_SND_DEBUG */
+-
+-typedef spinlock_t snd_use_lock_t;	/* dummy */
+-#define snd_use_lock_init(lockp) /**/
+-#define snd_use_lock_use(lockp) /**/
+-#define snd_use_lock_free(lockp) /**/
+-#define snd_use_lock_sync(lockp) /**/
+-
+-#endif /* SMP || CONFIG_SND_DEBUG */
+-
+ #endif /* __SND_SEQ_LOCK_H */
+diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c
+index 6c58e6f73a01..e43af18d4383 100644
+--- a/sound/core/vmaster.c
++++ b/sound/core/vmaster.c
+@@ -484,3 +484,34 @@ void snd_ctl_sync_vmaster(struct snd_kcontrol *kcontrol, bool hook_only)
+ 		master->hook(master->hook_private_data, master->val);
+ }
+ EXPORT_SYMBOL_GPL(snd_ctl_sync_vmaster);
++
++/**
++ * snd_ctl_apply_vmaster_slaves - Apply function to each vmaster slave
++ * @kctl: vmaster kctl element
++ * @func: function to apply
++ * @arg: optional function argument
++ *
++ * Apply the function @func to each slave kctl of the given vmaster kctl.
++ * Returns 0 if successful, or a negative error code.
++ */
++int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl,
++				 int (*func)(struct snd_kcontrol *, void *),
++				 void *arg)
++{
++	struct link_master *master;
++	struct link_slave *slave;
++	int err;
++
++	master = snd_kcontrol_chip(kctl);
++	err = master_init(master);
++	if (err < 0)
++		return err;
++	list_for_each_entry(slave, &master->slaves, list) {
++		err = func(&slave->slave, arg);
++		if (err < 0)
++			return err;
++	}
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(snd_ctl_apply_vmaster_slaves);
+diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
+index 978dc1801b3a..f6d2985b2520 100644
+--- a/sound/hda/hdac_controller.c
++++ b/sound/hda/hdac_controller.c
+@@ -284,6 +284,11 @@ int snd_hdac_bus_parse_capabilities(struct hdac_bus *bus)
+ 		dev_dbg(bus->dev, "HDA capability ID: 0x%x\n",
+ 			(cur_cap & AZX_CAP_HDR_ID_MASK) >> AZX_CAP_HDR_ID_OFF);
+ 
++		if (cur_cap == -1) {
++			dev_dbg(bus->dev, "Invalid capability reg read\n");
++			break;
++		}
++
+ 		switch ((cur_cap & AZX_CAP_HDR_ID_MASK) >> AZX_CAP_HDR_ID_OFF) {
+ 		case AZX_ML_CAP_ID:
+ 			dev_dbg(bus->dev, "Found ML capability\n");
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 821aad374a06..0bb46966d2d4 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -1803,36 +1803,6 @@ static int check_slave_present(struct hda_codec *codec,
+ 	return 1;
+ }
+ 
+-/* guess the value corresponding to 0dB */
+-static int get_kctl_0dB_offset(struct hda_codec *codec,
+-			       struct snd_kcontrol *kctl, int *step_to_check)
+-{
+-	int _tlv[4];
+-	const int *tlv = NULL;
+-	int val = -1;
+-
+-	if ((kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) &&
+-	    kctl->tlv.c == snd_hda_mixer_amp_tlv) {
+-		get_ctl_amp_tlv(kctl, _tlv);
+-		tlv = _tlv;
+-	} else if (kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_READ)
+-		tlv = kctl->tlv.p;
+-	if (tlv && tlv[0] == SNDRV_CTL_TLVT_DB_SCALE) {
+-		int step = tlv[3];
+-		step &= ~TLV_DB_SCALE_MUTE;
+-		if (!step)
+-			return -1;
+-		if (*step_to_check && *step_to_check != step) {
+-			codec_err(codec, "Mismatching dB step for vmaster slave (%d!=%d)\n",
+--				   *step_to_check, step);
+-			return -1;
+-		}
+-		*step_to_check = step;
+-		val = -tlv[2] / step;
+-	}
+-	return val;
+-}
+-
+ /* call kctl->put with the given value(s) */
+ static int put_kctl_with_value(struct snd_kcontrol *kctl, int val)
+ {
+@@ -1847,19 +1817,58 @@ static int put_kctl_with_value(struct snd_kcontrol *kctl, int val)
+ 	return 0;
+ }
+ 
+-/* initialize the slave volume with 0dB */
+-static int init_slave_0dB(struct hda_codec *codec,
+-			  void *data, struct snd_kcontrol *slave)
++struct slave_init_arg {
++	struct hda_codec *codec;
++	int step;
++};
++
++/* initialize the slave volume with 0dB via snd_ctl_apply_vmaster_slaves() */
++static int init_slave_0dB(struct snd_kcontrol *kctl, void *_arg)
+ {
+-	int offset = get_kctl_0dB_offset(codec, slave, data);
+-	if (offset > 0)
+-		put_kctl_with_value(slave, offset);
++	struct slave_init_arg *arg = _arg;
++	int _tlv[4];
++	const int *tlv = NULL;
++	int step;
++	int val;
++
++	if (kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
++		if (kctl->tlv.c != snd_hda_mixer_amp_tlv) {
++			codec_err(arg->codec,
++				  "Unexpected TLV callback for slave %s:%d\n",
++				  kctl->id.name, kctl->id.index);
++			return 0; /* ignore */
++		}
++		get_ctl_amp_tlv(kctl, _tlv);
++		tlv = _tlv;
++	} else if (kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_READ)
++		tlv = kctl->tlv.p;
++
++	if (!tlv || tlv[0] != SNDRV_CTL_TLVT_DB_SCALE)
++		return 0;
++
++	step = tlv[3];
++	step &= ~TLV_DB_SCALE_MUTE;
++	if (!step)
++		return 0;
++	if (arg->step && arg->step != step) {
++		codec_err(arg->codec,
++			  "Mismatching dB step for vmaster slave (%d!=%d)\n",
++			  arg->step, step);
++		return 0;
++	}
++
++	arg->step = step;
++	val = -tlv[2] / step;
++	if (val > 0) {
++		put_kctl_with_value(kctl, val);
++		return val;
++	}
++
+ 	return 0;
+ }
+ 
+-/* unmute the slave */
+-static int init_slave_unmute(struct hda_codec *codec,
+-			     void *data, struct snd_kcontrol *slave)
++/* unmute the slave via snd_ctl_apply_vmaster_slaves() */
++static int init_slave_unmute(struct snd_kcontrol *slave, void *_arg)
+ {
+ 	return put_kctl_with_value(slave, 1);
+ }
+@@ -1919,9 +1928,13 @@ int __snd_hda_add_vmaster(struct hda_codec *codec, char *name,
+ 	/* init with master mute & zero volume */
+ 	put_kctl_with_value(kctl, 0);
+ 	if (init_slave_vol) {
+-		int step = 0;
+-		map_slaves(codec, slaves, suffix,
+-			   tlv ? init_slave_0dB : init_slave_unmute, &step);
++		struct slave_init_arg arg = {
++			.codec = codec,
++			.step = 0,
++		};
++		snd_ctl_apply_vmaster_slaves(kctl,
++					     tlv ? init_slave_0dB : init_slave_unmute,
++					     &arg);
+ 	}
+ 
+ 	if (ctl_ret)
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 5d2a63248b1d..0e54fe490458 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1352,6 +1352,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+ 	case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */
+ 	case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */
+ 	case USB_ID(0x22d9, 0x0416): /* OPPO HA-1 */
++	case USB_ID(0x2772, 0x0230): /* Pro-Ject Pre Box S2 Digital */
+ 		if (fp->altsetting == 2)
+ 			return SNDRV_PCM_FMTBIT_DSD_U32_BE;
+ 		break;
+diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
+index 0dafba2c1e7d..bd9c6b31a504 100644
+--- a/tools/power/x86/turbostat/turbostat.c
++++ b/tools/power/x86/turbostat/turbostat.c
+@@ -92,7 +92,6 @@ unsigned int do_ring_perf_limit_reasons;
+ unsigned int crystal_hz;
+ unsigned long long tsc_hz;
+ int base_cpu;
+-int do_migrate;
+ double discover_bclk(unsigned int family, unsigned int model);
+ unsigned int has_hwp;	/* IA32_PM_ENABLE, IA32_HWP_CAPABILITIES */
+ 			/* IA32_HWP_REQUEST, IA32_HWP_STATUS */
+@@ -303,9 +302,6 @@ int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg
+ 
+ int cpu_migrate(int cpu)
+ {
+-	if (!do_migrate)
+-		return 0;
+-
+ 	CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
+ 	CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
+ 	if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1)
+@@ -5007,7 +5003,6 @@ void cmdline(int argc, char **argv)
+ 		{"hide",	required_argument,	0, 'H'},	// meh, -h taken by --help
+ 		{"Joules",	no_argument,		0, 'J'},
+ 		{"list",	no_argument,		0, 'l'},
+-		{"migrate",	no_argument,		0, 'm'},
+ 		{"out",		required_argument,	0, 'o'},
+ 		{"quiet",	no_argument,		0, 'q'},
+ 		{"show",	required_argument,	0, 's'},
+@@ -5019,7 +5014,7 @@ void cmdline(int argc, char **argv)
+ 
+ 	progname = argv[0];
+ 
+-	while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:Jmo:qST:v",
++	while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:JM:m:o:qST:v",
+ 				long_options, &option_index)) != -1) {
+ 		switch (opt) {
+ 		case 'a':
+@@ -5062,9 +5057,6 @@ void cmdline(int argc, char **argv)
+ 			list_header_only++;
+ 			quiet++;
+ 			break;
+-		case 'm':
+-			do_migrate = 1;
+-			break;
+ 		case 'o':
+ 			outf = fopen_or_die(optarg, "w");
+ 			break;


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.13 commit in: /
@ 2017-10-21 20:16 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2017-10-21 20:16 UTC (permalink / raw
  To: gentoo-commits

commit:     62163c3e2846e0b8240d82bce060059c8bd2b598
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Oct 21 20:16:08 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Oct 21 20:16:08 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=62163c3e

Linux patch 4.13.9

 0000_README             |   4 +
 1008_linux-4.13.9.patch | 903 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 907 insertions(+)

diff --git a/0000_README b/0000_README
index 37fc5da..9628e89 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch:  1007_linux-4.13.8.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.13.8
 
+Patch:  1008_linux-4.13.9.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.13.9
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1008_linux-4.13.9.patch b/1008_linux-4.13.9.patch
new file mode 100644
index 0000000..fb55923
--- /dev/null
+++ b/1008_linux-4.13.9.patch
@@ -0,0 +1,903 @@
+diff --git a/Makefile b/Makefile
+index 66ec023da822..aa0267950444 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 13
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 98b3dd8cf2bf..a7be1b4283a0 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -599,9 +599,14 @@ static const struct x86_cpu_id deadline_match[] = {
+ 
+ static void apic_check_deadline_errata(void)
+ {
+-	const struct x86_cpu_id *m = x86_match_cpu(deadline_match);
++	const struct x86_cpu_id *m;
+ 	u32 rev;
+ 
++	if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER) ||
++	    boot_cpu_has(X86_FEATURE_HYPERVISOR))
++		return;
++
++	m = x86_match_cpu(deadline_match);
+ 	if (!m)
+ 		return;
+ 
+diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
+index 3cd60f460b61..8b27211f6c50 100644
+--- a/drivers/hid/Kconfig
++++ b/drivers/hid/Kconfig
+@@ -281,6 +281,7 @@ config HID_ELECOM
+ 	Support for ELECOM devices:
+ 	  - BM084 Bluetooth Mouse
+ 	  - DEFT Trackball (Wired and wireless)
++	  - HUGE Trackball (Wired and wireless)
+ 
+ config HID_ELO
+ 	tristate "ELO USB 4000/4500 touchscreen"
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 9017dcc14502..efb3501b4123 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -2031,6 +2031,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) },
+ #endif
+ #if IS_ENABLED(CONFIG_HID_ELO)
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
+diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c
+index e2c7465df69f..54aeea57d209 100644
+--- a/drivers/hid/hid-elecom.c
++++ b/drivers/hid/hid-elecom.c
+@@ -3,6 +3,7 @@
+  *  Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com>
+  *  Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com>
+  *  Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu>
++ *  Copyright (c) 2017 Alex Manoussakis <amanou@gnu.org>
+  */
+ 
+ /*
+@@ -32,9 +33,11 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 		break;
+ 	case USB_DEVICE_ID_ELECOM_DEFT_WIRED:
+ 	case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS:
+-		/* The DEFT trackball has eight buttons, but its descriptor only
+-		 * reports five, disabling the three Fn buttons on the top of
+-		 * the mouse.
++	case USB_DEVICE_ID_ELECOM_HUGE_WIRED:
++	case USB_DEVICE_ID_ELECOM_HUGE_WIRELESS:
++		/* The DEFT/HUGE trackball has eight buttons, but its descriptor
++		 * only reports five, disabling the three Fn buttons on the top
++		 * of the mouse.
+ 		 *
+ 		 * Apply the following diff to the descriptor:
+ 		 *
+@@ -62,7 +65,7 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 		 * End Collection,                     End Collection,
+ 		 */
+ 		if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) {
+-			hid_info(hdev, "Fixing up Elecom DEFT Fn buttons\n");
++			hid_info(hdev, "Fixing up Elecom DEFT/HUGE Fn buttons\n");
+ 			rdesc[13] = 8; /* Button/Variable Report Count */
+ 			rdesc[21] = 8; /* Button/Variable Usage Maximum */
+ 			rdesc[29] = 0; /* Button/Constant Report Count */
+@@ -76,6 +79,8 @@ static const struct hid_device_id elecom_devices[] = {
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(hid, elecom_devices);
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index c9ba4c6db74c..1333ac5c6597 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -366,6 +366,8 @@
+ #define USB_DEVICE_ID_ELECOM_BM084	0x0061
+ #define USB_DEVICE_ID_ELECOM_DEFT_WIRED	0x00fe
+ #define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS	0x00ff
++#define USB_DEVICE_ID_ELECOM_HUGE_WIRED	0x010c
++#define USB_DEVICE_ID_ELECOM_HUGE_WIRELESS	0x010d
+ 
+ #define USB_VENDOR_ID_DREAM_CHEEKY	0x1d34
+ #define USB_DEVICE_ID_DREAM_CHEEKY_WN	0x0004
+diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
+index e57cc40cb768..be3fccab07fe 100644
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -177,6 +177,11 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+ 		      &vmbus_connection.chn_msg_list);
+ 	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+ 
++	if (newchannel->rescind) {
++		err = -ENODEV;
++		goto error_free_gpadl;
++	}
++
+ 	ret = vmbus_post_msg(open_msg,
+ 			     sizeof(struct vmbus_channel_open_channel), true);
+ 
+@@ -421,6 +426,11 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
+ 
+ 	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+ 
++	if (channel->rescind) {
++		ret = -ENODEV;
++		goto cleanup;
++	}
++
+ 	ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
+ 			     sizeof(*msginfo), true);
+ 	if (ret != 0)
+@@ -494,6 +504,10 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
+ 	list_add_tail(&info->msglistentry,
+ 		      &vmbus_connection.chn_msg_list);
+ 	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
++
++	if (channel->rescind)
++		goto post_msg_err;
++
+ 	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_gpadl_teardown),
+ 			     true);
+ 
+@@ -626,6 +640,7 @@ void vmbus_close(struct vmbus_channel *channel)
+ 		 */
+ 		return;
+ 	}
++	mutex_lock(&vmbus_connection.channel_mutex);
+ 	/*
+ 	 * Close all the sub-channels first and then close the
+ 	 * primary channel.
+@@ -634,16 +649,15 @@ void vmbus_close(struct vmbus_channel *channel)
+ 		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
+ 		vmbus_close_internal(cur_channel);
+ 		if (cur_channel->rescind) {
+-			mutex_lock(&vmbus_connection.channel_mutex);
+-			hv_process_channel_removal(cur_channel,
++			hv_process_channel_removal(
+ 					   cur_channel->offermsg.child_relid);
+-			mutex_unlock(&vmbus_connection.channel_mutex);
+ 		}
+ 	}
+ 	/*
+ 	 * Now close the primary.
+ 	 */
+ 	vmbus_close_internal(channel);
++	mutex_unlock(&vmbus_connection.channel_mutex);
+ }
+ EXPORT_SYMBOL_GPL(vmbus_close);
+ 
+diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
+index 037361158074..18c94ed02562 100644
+--- a/drivers/hv/channel_mgmt.c
++++ b/drivers/hv/channel_mgmt.c
+@@ -159,7 +159,7 @@ static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
+ 
+ 
+ 	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+-
++	channel->rescind = true;
+ 	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
+ 				msglistentry) {
+ 
+@@ -381,14 +381,21 @@ static void vmbus_release_relid(u32 relid)
+ 		       true);
+ }
+ 
+-void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
++void hv_process_channel_removal(u32 relid)
+ {
+ 	unsigned long flags;
+-	struct vmbus_channel *primary_channel;
++	struct vmbus_channel *primary_channel, *channel;
+ 
+-	BUG_ON(!channel->rescind);
+ 	BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
+ 
++	/*
++	 * Make sure channel is valid as we may have raced.
++	 */
++	channel = relid2channel(relid);
++	if (!channel)
++		return;
++
++	BUG_ON(!channel->rescind);
+ 	if (channel->target_cpu != get_cpu()) {
+ 		put_cpu();
+ 		smp_call_function_single(channel->target_cpu,
+@@ -451,6 +458,12 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
+ 	/* Make sure this is a new offer */
+ 	mutex_lock(&vmbus_connection.channel_mutex);
+ 
++	/*
++	 * Now that we have acquired the channel_mutex,
++	 * we can release the potentially racing rescind thread.
++	 */
++	atomic_dec(&vmbus_connection.offer_in_progress);
++
+ 	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+ 		if (!uuid_le_cmp(channel->offermsg.offer.if_type,
+ 			newchannel->offermsg.offer.if_type) &&
+@@ -481,7 +494,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
+ 			channel->num_sc++;
+ 			spin_unlock_irqrestore(&channel->lock, flags);
+ 		} else {
+-			atomic_dec(&vmbus_connection.offer_in_progress);
+ 			goto err_free_chan;
+ 		}
+ 	}
+@@ -510,7 +522,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
+ 	if (!fnew) {
+ 		if (channel->sc_creation_callback != NULL)
+ 			channel->sc_creation_callback(newchannel);
+-		atomic_dec(&vmbus_connection.offer_in_progress);
++		newchannel->probe_done = true;
+ 		return;
+ 	}
+ 
+@@ -541,7 +553,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
+ 		goto err_deq_chan;
+ 	}
+ 
+-	atomic_dec(&vmbus_connection.offer_in_progress);
++	newchannel->probe_done = true;
+ 	return;
+ 
+ err_deq_chan:
+@@ -839,7 +851,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
+ {
+ 	struct vmbus_channel_rescind_offer *rescind;
+ 	struct vmbus_channel *channel;
+-	unsigned long flags;
+ 	struct device *dev;
+ 
+ 	rescind = (struct vmbus_channel_rescind_offer *)hdr;
+@@ -878,15 +889,25 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
+ 		return;
+ 	}
+ 
+-	spin_lock_irqsave(&channel->lock, flags);
+-	channel->rescind = true;
+-	spin_unlock_irqrestore(&channel->lock, flags);
++	/*
++	 * Now wait for offer handling to complete.
++	 */
++	while (READ_ONCE(channel->probe_done) == false) {
++		/*
++		 * We wait here until any channel offer is currently
++		 * being processed.
++		 */
++		msleep(1);
++	}
+ 
+-	vmbus_rescind_cleanup(channel);
++	/*
++	 * At this point, the rescind handling can proceed safely.
++	 */
+ 
+ 	if (channel->device_obj) {
+ 		if (channel->chn_rescind_callback) {
+ 			channel->chn_rescind_callback(channel);
++			vmbus_rescind_cleanup(channel);
+ 			return;
+ 		}
+ 		/*
+@@ -895,6 +916,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
+ 		 */
+ 		dev = get_device(&channel->device_obj->device);
+ 		if (dev) {
++			vmbus_rescind_cleanup(channel);
+ 			vmbus_device_unregister(channel->device_obj);
+ 			put_device(dev);
+ 		}
+@@ -907,16 +929,16 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
+ 		 * 1. Close all sub-channels first
+ 		 * 2. Then close the primary channel.
+ 		 */
++		mutex_lock(&vmbus_connection.channel_mutex);
++		vmbus_rescind_cleanup(channel);
+ 		if (channel->state == CHANNEL_OPEN_STATE) {
+ 			/*
+ 			 * The channel is currently not open;
+ 			 * it is safe for us to cleanup the channel.
+ 			 */
+-			mutex_lock(&vmbus_connection.channel_mutex);
+-			hv_process_channel_removal(channel,
+-						channel->offermsg.child_relid);
+-			mutex_unlock(&vmbus_connection.channel_mutex);
++			hv_process_channel_removal(rescind->child_relid);
+ 		}
++		mutex_unlock(&vmbus_connection.channel_mutex);
+ 	}
+ }
+ 
+diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
+index 1f450c39a9b0..741daa6e2fc7 100644
+--- a/drivers/hv/ring_buffer.c
++++ b/drivers/hv/ring_buffer.c
+@@ -29,6 +29,7 @@
+ #include <linux/uio.h>
+ #include <linux/vmalloc.h>
+ #include <linux/slab.h>
++#include <linux/prefetch.h>
+ 
+ #include "hyperv_vmbus.h"
+ 
+@@ -94,30 +95,6 @@ hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
+ 	ring_info->ring_buffer->write_index = next_write_location;
+ }
+ 
+-/* Get the next read location for the specified ring buffer. */
+-static inline u32
+-hv_get_next_read_location(const struct hv_ring_buffer_info *ring_info)
+-{
+-	return ring_info->ring_buffer->read_index;
+-}
+-
+-/*
+- * Get the next read location + offset for the specified ring buffer.
+- * This allows the caller to skip.
+- */
+-static inline u32
+-hv_get_next_readlocation_withoffset(const struct hv_ring_buffer_info *ring_info,
+-				    u32 offset)
+-{
+-	u32 next = ring_info->ring_buffer->read_index;
+-
+-	next += offset;
+-	if (next >= ring_info->ring_datasize)
+-		next -= ring_info->ring_datasize;
+-
+-	return next;
+-}
+-
+ /* Set the next read location for the specified ring buffer. */
+ static inline void
+ hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
+@@ -141,29 +118,6 @@ hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
+ 	return (u64)ring_info->ring_buffer->write_index << 32;
+ }
+ 
+-/*
+- * Helper routine to copy to source from ring buffer.
+- * Assume there is enough room. Handles wrap-around in src case only!!
+- */
+-static u32 hv_copyfrom_ringbuffer(
+-	const struct hv_ring_buffer_info *ring_info,
+-	void				*dest,
+-	u32				destlen,
+-	u32				start_read_offset)
+-{
+-	void *ring_buffer = hv_get_ring_buffer(ring_info);
+-	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
+-
+-	memcpy(dest, ring_buffer + start_read_offset, destlen);
+-
+-	start_read_offset += destlen;
+-	if (start_read_offset >= ring_buffer_size)
+-		start_read_offset -= ring_buffer_size;
+-
+-	return start_read_offset;
+-}
+-
+-
+ /*
+  * Helper routine to copy from source to ring buffer.
+  * Assume there is enough room. Handles wrap-around in dest case only!!
+@@ -334,33 +288,22 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
+ 	return 0;
+ }
+ 
+-static inline void
+-init_cached_read_index(struct hv_ring_buffer_info *rbi)
+-{
+-	rbi->cached_read_index = rbi->ring_buffer->read_index;
+-}
+-
+ int hv_ringbuffer_read(struct vmbus_channel *channel,
+ 		       void *buffer, u32 buflen, u32 *buffer_actual_len,
+ 		       u64 *requestid, bool raw)
+ {
+-	u32 bytes_avail_toread;
+-	u32 next_read_location;
+-	u64 prev_indices = 0;
+-	struct vmpacket_descriptor desc;
+-	u32 offset;
+-	u32 packetlen;
+-	struct hv_ring_buffer_info *inring_info = &channel->inbound;
+-
+-	if (buflen <= 0)
++	struct vmpacket_descriptor *desc;
++	u32 packetlen, offset;
++
++	if (unlikely(buflen == 0))
+ 		return -EINVAL;
+ 
+ 	*buffer_actual_len = 0;
+ 	*requestid = 0;
+ 
+-	bytes_avail_toread = hv_get_bytes_to_read(inring_info);
+ 	/* Make sure there is something to read */
+-	if (bytes_avail_toread < sizeof(desc)) {
++	desc = hv_pkt_iter_first(channel);
++	if (desc == NULL) {
+ 		/*
+ 		 * No error is set when there is even no header, drivers are
+ 		 * supposed to analyze buffer_actual_len.
+@@ -368,48 +311,22 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
+ 		return 0;
+ 	}
+ 
+-	init_cached_read_index(inring_info);
+-
+-	next_read_location = hv_get_next_read_location(inring_info);
+-	next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
+-						    sizeof(desc),
+-						    next_read_location);
+-
+-	offset = raw ? 0 : (desc.offset8 << 3);
+-	packetlen = (desc.len8 << 3) - offset;
++	offset = raw ? 0 : (desc->offset8 << 3);
++	packetlen = (desc->len8 << 3) - offset;
+ 	*buffer_actual_len = packetlen;
+-	*requestid = desc.trans_id;
+-
+-	if (bytes_avail_toread < packetlen + offset)
+-		return -EAGAIN;
++	*requestid = desc->trans_id;
+ 
+-	if (packetlen > buflen)
++	if (unlikely(packetlen > buflen))
+ 		return -ENOBUFS;
+ 
+-	next_read_location =
+-		hv_get_next_readlocation_withoffset(inring_info, offset);
++	/* since ring is double mapped, only one copy is necessary */
++	memcpy(buffer, (const char *)desc + offset, packetlen);
+ 
+-	next_read_location = hv_copyfrom_ringbuffer(inring_info,
+-						buffer,
+-						packetlen,
+-						next_read_location);
++	/* Advance ring index to next packet descriptor */
++	__hv_pkt_iter_next(channel, desc);
+ 
+-	next_read_location = hv_copyfrom_ringbuffer(inring_info,
+-						&prev_indices,
+-						sizeof(u64),
+-						next_read_location);
+-
+-	/*
+-	 * Make sure all reads are done before we update the read index since
+-	 * the writer may start writing to the read area once the read index
+-	 * is updated.
+-	 */
+-	virt_mb();
+-
+-	/* Update the read index */
+-	hv_set_next_read_location(inring_info, next_read_location);
+-
+-	hv_signal_on_read(channel);
++	/* Notify host of update */
++	hv_pkt_iter_close(channel);
+ 
+ 	return 0;
+ }
+@@ -441,9 +358,6 @@ struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
+ {
+ 	struct hv_ring_buffer_info *rbi = &channel->inbound;
+ 
+-	/* set state for later hv_signal_on_read() */
+-	init_cached_read_index(rbi);
+-
+ 	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
+ 		return NULL;
+ 
+@@ -471,10 +385,7 @@ __hv_pkt_iter_next(struct vmbus_channel *channel,
+ 		rbi->priv_read_index -= dsize;
+ 
+ 	/* more data? */
+-	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
+-		return NULL;
+-	else
+-		return hv_get_ring_buffer(rbi) + rbi->priv_read_index;
++	return hv_pkt_iter_first(channel);
+ }
+ EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
+ 
+@@ -484,6 +395,7 @@ EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
+ void hv_pkt_iter_close(struct vmbus_channel *channel)
+ {
+ 	struct hv_ring_buffer_info *rbi = &channel->inbound;
++	u32 orig_write_sz = hv_get_bytes_to_write(rbi);
+ 
+ 	/*
+ 	 * Make sure all reads are done before we update the read index since
+@@ -493,6 +405,40 @@ void hv_pkt_iter_close(struct vmbus_channel *channel)
+ 	virt_rmb();
+ 	rbi->ring_buffer->read_index = rbi->priv_read_index;
+ 
+-	hv_signal_on_read(channel);
++	/*
++	 * Issue a full memory barrier before making the signaling decision.
++	 * Here is the reason for having this barrier:
++	 * If the reading of the pend_sz (in this function)
++	 * were to be reordered and read before we commit the new read
++	 * index (in the calling function)  we could
++	 * have a problem. If the host were to set the pending_sz after we
++	 * have sampled pending_sz and go to sleep before we commit the
++	 * read index, we could miss sending the interrupt. Issue a full
++	 * memory barrier to address this.
++	 */
++	virt_mb();
++
++	/* If host has disabled notifications then skip */
++	if (rbi->ring_buffer->interrupt_mask)
++		return;
++
++	if (rbi->ring_buffer->feature_bits.feat_pending_send_sz) {
++		u32 pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
++
++		/*
++		 * If there was space before we began iteration,
++		 * then host was not blocked. Also handles case where
++		 * pending_sz is zero then host has nothing pending
++		 * and does not need to be signaled.
++		 */
++		if (orig_write_sz > pending_sz)
++			return;
++
++		/* If pending write will not fit, don't give false hope. */
++		if (hv_get_bytes_to_write(rbi) < pending_sz)
++			return;
++	}
++
++	vmbus_setevent(channel);
+ }
+ EXPORT_SYMBOL_GPL(hv_pkt_iter_close);
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index ed84e96715a0..5ad627044dd1 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -768,8 +768,7 @@ static void vmbus_device_release(struct device *device)
+ 	struct vmbus_channel *channel = hv_dev->channel;
+ 
+ 	mutex_lock(&vmbus_connection.channel_mutex);
+-	hv_process_channel_removal(channel,
+-				   channel->offermsg.child_relid);
++	hv_process_channel_removal(channel->offermsg.child_relid);
+ 	mutex_unlock(&vmbus_connection.channel_mutex);
+ 	kfree(hv_dev);
+ 
+@@ -940,6 +939,9 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
+ 			if (channel->offermsg.child_relid != relid)
+ 				continue;
+ 
++			if (channel->rescind)
++				continue;
++
+ 			switch (channel->callback_mode) {
+ 			case HV_CALL_ISR:
+ 				vmbus_channel_isr(channel);
+diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
+index b7d7bbec74e0..3647085dab0a 100644
+--- a/include/linux/hyperv.h
++++ b/include/linux/hyperv.h
+@@ -127,7 +127,6 @@ struct hv_ring_buffer_info {
+ 	u32 ring_data_startoffset;
+ 	u32 priv_write_index;
+ 	u32 priv_read_index;
+-	u32 cached_read_index;
+ };
+ 
+ /*
+@@ -180,19 +179,6 @@ static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
+ 	return write;
+ }
+ 
+-static inline u32 hv_get_cached_bytes_to_write(
+-	const struct hv_ring_buffer_info *rbi)
+-{
+-	u32 read_loc, write_loc, dsize, write;
+-
+-	dsize = rbi->ring_datasize;
+-	read_loc = rbi->cached_read_index;
+-	write_loc = rbi->ring_buffer->write_index;
+-
+-	write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
+-		read_loc - write_loc;
+-	return write;
+-}
+ /*
+  * VMBUS version is 32 bit entity broken up into
+  * two 16 bit quantities: major_number. minor_number.
+@@ -895,6 +881,8 @@ struct vmbus_channel {
+ 	 */
+ 	enum hv_numa_policy affinity_policy;
+ 
++	bool probe_done;
++
+ };
+ 
+ static inline bool is_hvsock_channel(const struct vmbus_channel *c)
+@@ -1453,7 +1441,7 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
+ 				const int *srv_version, int srv_vercnt,
+ 				int *nego_fw_version, int *nego_srv_version);
+ 
+-void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
++void hv_process_channel_removal(u32 relid);
+ 
+ void vmbus_setevent(struct vmbus_channel *channel);
+ /*
+@@ -1473,55 +1461,6 @@ hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
+ 	return ring_info->ring_buffer->buffer;
+ }
+ 
+-/*
+- * To optimize the flow management on the send-side,
+- * when the sender is blocked because of lack of
+- * sufficient space in the ring buffer, potential the
+- * consumer of the ring buffer can signal the producer.
+- * This is controlled by the following parameters:
+- *
+- * 1. pending_send_sz: This is the size in bytes that the
+- *    producer is trying to send.
+- * 2. The feature bit feat_pending_send_sz set to indicate if
+- *    the consumer of the ring will signal when the ring
+- *    state transitions from being full to a state where
+- *    there is room for the producer to send the pending packet.
+- */
+-
+-static inline  void hv_signal_on_read(struct vmbus_channel *channel)
+-{
+-	u32 cur_write_sz, cached_write_sz;
+-	u32 pending_sz;
+-	struct hv_ring_buffer_info *rbi = &channel->inbound;
+-
+-	/*
+-	 * Issue a full memory barrier before making the signaling decision.
+-	 * Here is the reason for having this barrier:
+-	 * If the reading of the pend_sz (in this function)
+-	 * were to be reordered and read before we commit the new read
+-	 * index (in the calling function)  we could
+-	 * have a problem. If the host were to set the pending_sz after we
+-	 * have sampled pending_sz and go to sleep before we commit the
+-	 * read index, we could miss sending the interrupt. Issue a full
+-	 * memory barrier to address this.
+-	 */
+-	virt_mb();
+-
+-	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
+-	/* If the other end is not blocked on write don't bother. */
+-	if (pending_sz == 0)
+-		return;
+-
+-	cur_write_sz = hv_get_bytes_to_write(rbi);
+-
+-	if (cur_write_sz < pending_sz)
+-		return;
+-
+-	cached_write_sz = hv_get_cached_bytes_to_write(rbi);
+-	if (cached_write_sz < pending_sz)
+-		vmbus_setevent(channel);
+-}
+-
+ /*
+  * Mask off host interrupt callback notifications
+  */
+diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
+index 8ec6ba230bb9..6b9311631aa1 100644
+--- a/mm/page_vma_mapped.c
++++ b/mm/page_vma_mapped.c
+@@ -6,17 +6,6 @@
+ 
+ #include "internal.h"
+ 
+-static inline bool check_pmd(struct page_vma_mapped_walk *pvmw)
+-{
+-	pmd_t pmde;
+-	/*
+-	 * Make sure we don't re-load pmd between present and !trans_huge check.
+-	 * We need a consistent view.
+-	 */
+-	pmde = READ_ONCE(*pvmw->pmd);
+-	return pmd_present(pmde) && !pmd_trans_huge(pmde);
+-}
+-
+ static inline bool not_found(struct page_vma_mapped_walk *pvmw)
+ {
+ 	page_vma_mapped_walk_done(pvmw);
+@@ -106,6 +95,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
+ 	pgd_t *pgd;
+ 	p4d_t *p4d;
+ 	pud_t *pud;
++	pmd_t pmde;
+ 
+ 	/* The only possible pmd mapping has been handled on last iteration */
+ 	if (pvmw->pmd && !pvmw->pte)
+@@ -138,7 +128,13 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
+ 	if (!pud_present(*pud))
+ 		return false;
+ 	pvmw->pmd = pmd_offset(pud, pvmw->address);
+-	if (pmd_trans_huge(*pvmw->pmd)) {
++	/*
++	 * Make sure the pmd value isn't cached in a register by the
++	 * compiler and used as a stale value after we've observed a
++	 * subsequent update.
++	 */
++	pmde = READ_ONCE(*pvmw->pmd);
++	if (pmd_trans_huge(pmde)) {
+ 		pvmw->ptl = pmd_lock(mm, pvmw->pmd);
+ 		if (!pmd_present(*pvmw->pmd))
+ 			return not_found(pvmw);
+@@ -153,9 +149,8 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
+ 			spin_unlock(pvmw->ptl);
+ 			pvmw->ptl = NULL;
+ 		}
+-	} else {
+-		if (!check_pmd(pvmw))
+-			return false;
++	} else if (!pmd_present(pmde)) {
++		return false;
+ 	}
+ 	if (!map_pte(pvmw))
+ 		goto next_pte;
+diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
+index 01e779b91c8e..2e3ffc3bc483 100644
+--- a/tools/perf/util/parse-events.c
++++ b/tools/perf/util/parse-events.c
+@@ -309,10 +309,11 @@ static char *get_config_name(struct list_head *head_terms)
+ static struct perf_evsel *
+ __add_event(struct list_head *list, int *idx,
+ 	    struct perf_event_attr *attr,
+-	    char *name, struct cpu_map *cpus,
++	    char *name, struct perf_pmu *pmu,
+ 	    struct list_head *config_terms)
+ {
+ 	struct perf_evsel *evsel;
++	struct cpu_map *cpus = pmu ? pmu->cpus : NULL;
+ 
+ 	event_attr_init(attr);
+ 
+@@ -323,7 +324,7 @@ __add_event(struct list_head *list, int *idx,
+ 	(*idx)++;
+ 	evsel->cpus        = cpu_map__get(cpus);
+ 	evsel->own_cpus    = cpu_map__get(cpus);
+-	evsel->system_wide = !!cpus;
++	evsel->system_wide = pmu ? pmu->is_uncore : false;
+ 
+ 	if (name)
+ 		evsel->name = strdup(name);
+@@ -1232,7 +1233,7 @@ int parse_events_add_pmu(struct parse_events_evlist *data,
+ 
+ 	if (!head_config) {
+ 		attr.type = pmu->type;
+-		evsel = __add_event(list, &data->idx, &attr, NULL, pmu->cpus, NULL);
++		evsel = __add_event(list, &data->idx, &attr, NULL, pmu, NULL);
+ 		return evsel ? 0 : -ENOMEM;
+ 	}
+ 
+@@ -1253,7 +1254,7 @@ int parse_events_add_pmu(struct parse_events_evlist *data,
+ 		return -EINVAL;
+ 
+ 	evsel = __add_event(list, &data->idx, &attr,
+-			    get_config_name(head_config), pmu->cpus,
++			    get_config_name(head_config), pmu,
+ 			    &config_terms);
+ 	if (evsel) {
+ 		evsel->unit = info.unit;
+diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
+index ac16a9db1fb5..1c4d7b4e4fb5 100644
+--- a/tools/perf/util/pmu.c
++++ b/tools/perf/util/pmu.c
+@@ -470,17 +470,36 @@ static void pmu_read_sysfs(void)
+ 	closedir(dir);
+ }
+ 
++static struct cpu_map *__pmu_cpumask(const char *path)
++{
++	FILE *file;
++	struct cpu_map *cpus;
++
++	file = fopen(path, "r");
++	if (!file)
++		return NULL;
++
++	cpus = cpu_map__read(file);
++	fclose(file);
++	return cpus;
++}
++
++/*
++ * Uncore PMUs have a "cpumask" file under sysfs. CPU PMUs (e.g. on arm/arm64)
++ * may have a "cpus" file.
++ */
++#define CPUS_TEMPLATE_UNCORE	"%s/bus/event_source/devices/%s/cpumask"
++#define CPUS_TEMPLATE_CPU	"%s/bus/event_source/devices/%s/cpus"
++
+ static struct cpu_map *pmu_cpumask(const char *name)
+ {
+-	struct stat st;
+ 	char path[PATH_MAX];
+-	FILE *file;
+ 	struct cpu_map *cpus;
+ 	const char *sysfs = sysfs__mountpoint();
+ 	const char *templates[] = {
+-		 "%s/bus/event_source/devices/%s/cpumask",
+-		 "%s/bus/event_source/devices/%s/cpus",
+-		 NULL
++		CPUS_TEMPLATE_UNCORE,
++		CPUS_TEMPLATE_CPU,
++		NULL
+ 	};
+ 	const char **template;
+ 
+@@ -489,20 +508,25 @@ static struct cpu_map *pmu_cpumask(const char *name)
+ 
+ 	for (template = templates; *template; template++) {
+ 		snprintf(path, PATH_MAX, *template, sysfs, name);
+-		if (stat(path, &st) == 0)
+-			break;
++		cpus = __pmu_cpumask(path);
++		if (cpus)
++			return cpus;
+ 	}
+ 
+-	if (!*template)
+-		return NULL;
++	return NULL;
++}
+ 
+-	file = fopen(path, "r");
+-	if (!file)
+-		return NULL;
++static bool pmu_is_uncore(const char *name)
++{
++	char path[PATH_MAX];
++	struct cpu_map *cpus;
++	const char *sysfs = sysfs__mountpoint();
+ 
+-	cpus = cpu_map__read(file);
+-	fclose(file);
+-	return cpus;
++	snprintf(path, PATH_MAX, CPUS_TEMPLATE_UNCORE, sysfs, name);
++	cpus = __pmu_cpumask(path);
++	cpu_map__put(cpus);
++
++	return !!cpus;
+ }
+ 
+ /*
+@@ -617,6 +641,8 @@ static struct perf_pmu *pmu_lookup(const char *name)
+ 
+ 	pmu->cpus = pmu_cpumask(name);
+ 
++	pmu->is_uncore = pmu_is_uncore(name);
++
+ 	INIT_LIST_HEAD(&pmu->format);
+ 	INIT_LIST_HEAD(&pmu->aliases);
+ 	list_splice(&format, &pmu->format);
+diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
+index 389e9729331f..fe0de0502ce2 100644
+--- a/tools/perf/util/pmu.h
++++ b/tools/perf/util/pmu.h
+@@ -22,6 +22,7 @@ struct perf_pmu {
+ 	char *name;
+ 	__u32 type;
+ 	bool selectable;
++	bool is_uncore;
+ 	struct perf_event_attr *default_config;
+ 	struct cpu_map *cpus;
+ 	struct list_head format;  /* HEAD struct perf_pmu_format -> list */


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.13 commit in: /
@ 2017-10-18 13:56 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2017-10-18 13:56 UTC (permalink / raw
  To: gentoo-commits

commit:     1e4a3566e900f79b096594817ce8dc43cf3ffaff
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Oct 18 13:55:53 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Oct 18 13:55:53 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1e4a3566

Linux patch 4.13.8

 0000_README             |    4 +
 1007_linux-4.13.8.patch | 1684 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1688 insertions(+)

diff --git a/0000_README b/0000_README
index b6a0ed9..37fc5da 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,10 @@ Patch:  1006_linux-4.13.7.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.13.7
 
+Patch:  1007_linux-4.13.8.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.13.8
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1007_linux-4.13.8.patch b/1007_linux-4.13.8.patch
new file mode 100644
index 0000000..dd87679
--- /dev/null
+++ b/1007_linux-4.13.8.patch
@@ -0,0 +1,1684 @@
+diff --git a/Makefile b/Makefile
+index 0d4f1b19869d..66ec023da822 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 13
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
+index f08a7b4facb9..4f0a1a6f7589 100644
+--- a/arch/mips/math-emu/cp1emu.c
++++ b/arch/mips/math-emu/cp1emu.c
+@@ -2387,7 +2387,6 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ 					break;
+ 				default:
+ 					/* Reserved R6 ops */
+-					pr_err("Reserved MIPS R6 CMP.condn.S operation\n");
+ 					return SIGILL;
+ 				}
+ 			}
+@@ -2461,7 +2460,6 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ 					break;
+ 				default:
+ 					/* Reserved R6 ops */
+-					pr_err("Reserved MIPS R6 CMP.condn.D operation\n");
+ 					return SIGILL;
+ 				}
+ 			}
+diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
+index 3f87b96da5c4..401776f92288 100644
+--- a/arch/mips/net/ebpf_jit.c
++++ b/arch/mips/net/ebpf_jit.c
+@@ -679,7 +679,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
+ {
+ 	int src, dst, r, td, ts, mem_off, b_off;
+ 	bool need_swap, did_move, cmp_eq;
+-	unsigned int target;
++	unsigned int target = 0;
+ 	u64 t64;
+ 	s64 t64s;
+ 
+diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
+index e7636bac7372..6c98821fef5e 100644
+--- a/arch/x86/include/asm/alternative-asm.h
++++ b/arch/x86/include/asm/alternative-asm.h
+@@ -62,8 +62,10 @@
+ #define new_len2		145f-144f
+ 
+ /*
+- * max without conditionals. Idea adapted from:
++ * gas compatible max based on the idea from:
+  * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
++ *
++ * The additional "-" is needed because gas uses a "true" value of -1.
+  */
+ #define alt_max_short(a, b)	((a) ^ (((a) ^ (b)) & -(-((a) < (b)))))
+ 
+diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
+index 1b020381ab38..d4aea31eec03 100644
+--- a/arch/x86/include/asm/alternative.h
++++ b/arch/x86/include/asm/alternative.h
+@@ -103,12 +103,12 @@ static inline int alternatives_text_reserved(void *start, void *end)
+ 	alt_end_marker ":\n"
+ 
+ /*
+- * max without conditionals. Idea adapted from:
++ * gas compatible max based on the idea from:
+  * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
+  *
+- * The additional "-" is needed because gas works with s32s.
++ * The additional "-" is needed because gas uses a "true" value of -1.
+  */
+-#define alt_max_short(a, b)	"((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") - (" b ")))))"
++#define alt_max_short(a, b)	"((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") < (" b ")))))"
+ 
+ /*
+  * Pad the second replacement alternative with additional NOPs if it is
+diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
+index 86e8f0b2537b..c4fa4a85d4cb 100644
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -122,9 +122,6 @@ static bool __init check_loader_disabled_bsp(void)
+ 	bool *res = &dis_ucode_ldr;
+ #endif
+ 
+-	if (!have_cpuid_p())
+-		return *res;
+-
+ 	/*
+ 	 * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
+ 	 * completely accurate as xen pv guests don't see that CPUID bit set but
+@@ -166,24 +163,36 @@ bool get_builtin_firmware(struct cpio_data *cd, const char *name)
+ void __init load_ucode_bsp(void)
+ {
+ 	unsigned int cpuid_1_eax;
++	bool intel = true;
+ 
+-	if (check_loader_disabled_bsp())
++	if (!have_cpuid_p())
+ 		return;
+ 
+ 	cpuid_1_eax = native_cpuid_eax(1);
+ 
+ 	switch (x86_cpuid_vendor()) {
+ 	case X86_VENDOR_INTEL:
+-		if (x86_family(cpuid_1_eax) >= 6)
+-			load_ucode_intel_bsp();
++		if (x86_family(cpuid_1_eax) < 6)
++			return;
+ 		break;
++
+ 	case X86_VENDOR_AMD:
+-		if (x86_family(cpuid_1_eax) >= 0x10)
+-			load_ucode_amd_bsp(cpuid_1_eax);
++		if (x86_family(cpuid_1_eax) < 0x10)
++			return;
++		intel = false;
+ 		break;
++
+ 	default:
+-		break;
++		return;
+ 	}
++
++	if (check_loader_disabled_bsp())
++		return;
++
++	if (intel)
++		load_ucode_intel_bsp();
++	else
++		load_ucode_amd_bsp(cpuid_1_eax);
+ }
+ 
+ static bool check_loader_disabled_ap(void)
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index 7558531c1215..bd4e058c25a4 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -3934,13 +3934,6 @@ static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
+ static inline bool is_last_gpte(struct kvm_mmu *mmu,
+ 				unsigned level, unsigned gpte)
+ {
+-	/*
+-	 * PT_PAGE_TABLE_LEVEL always terminates.  The RHS has bit 7 set
+-	 * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
+-	 * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
+-	 */
+-	gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
+-
+ 	/*
+ 	 * The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
+ 	 * If it is clear, there are no large pages at this level, so clear
+@@ -3948,6 +3941,13 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu,
+ 	 */
+ 	gpte &= level - mmu->last_nonleaf_level;
+ 
++	/*
++	 * PT_PAGE_TABLE_LEVEL always terminates.  The RHS has bit 7 set
++	 * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
++	 * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
++	 */
++	gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
++
+ 	return gpte & PT_PAGE_SIZE_MASK;
+ }
+ 
+@@ -4459,6 +4459,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
+ 
+ 	update_permission_bitmask(vcpu, context, true);
+ 	update_pkru_bitmask(vcpu, context, true);
++	update_last_nonleaf_level(vcpu, context);
+ 	reset_rsvds_bits_mask_ept(vcpu, context, execonly);
+ 	reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
+ }
+diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
+index b0454c7e4cff..da06dc8c4fc4 100644
+--- a/arch/x86/kvm/paging_tmpl.h
++++ b/arch/x86/kvm/paging_tmpl.h
+@@ -334,10 +334,11 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
+ 		--walker->level;
+ 
+ 		index = PT_INDEX(addr, walker->level);
+-
+ 		table_gfn = gpte_to_gfn(pte);
+ 		offset    = index * sizeof(pt_element_t);
+ 		pte_gpa   = gfn_to_gpa(table_gfn) + offset;
++
++		BUG_ON(walker->level < 1);
+ 		walker->table_gfn[walker->level - 1] = table_gfn;
+ 		walker->pte_gpa[walker->level - 1] = pte_gpa;
+ 
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 95796e2efc38..118709e7597d 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -11013,7 +11013,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
+ 
+ 	/* Same as above - no reason to call set_cr4_guest_host_mask().  */
+ 	vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
+-	kvm_set_cr4(vcpu, vmcs12->host_cr4);
++	vmx_set_cr4(vcpu, vmcs12->host_cr4);
+ 
+ 	nested_ept_uninit_mmu_context(vcpu);
+ 
+diff --git a/block/bio.c b/block/bio.c
+index 9a63597aaacc..30f56b8b1fb2 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1235,8 +1235,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
+ 	 */
+ 	bmd->is_our_pages = map_data ? 0 : 1;
+ 	memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
+-	iov_iter_init(&bmd->iter, iter->type, bmd->iov,
+-			iter->nr_segs, iter->count);
++	bmd->iter = *iter;
++	bmd->iter.iov = bmd->iov;
+ 
+ 	ret = -ENOMEM;
+ 	bio = bio_kmalloc(gfp_mask, nr_pages);
+@@ -1327,6 +1327,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
+ 	int ret, offset;
+ 	struct iov_iter i;
+ 	struct iovec iov;
++	struct bio_vec *bvec;
+ 
+ 	iov_for_each(iov, i, *iter) {
+ 		unsigned long uaddr = (unsigned long) iov.iov_base;
+@@ -1371,7 +1372,12 @@ struct bio *bio_map_user_iov(struct request_queue *q,
+ 		ret = get_user_pages_fast(uaddr, local_nr_pages,
+ 				(iter->type & WRITE) != WRITE,
+ 				&pages[cur_page]);
+-		if (ret < local_nr_pages) {
++		if (unlikely(ret < local_nr_pages)) {
++			for (j = cur_page; j < page_limit; j++) {
++				if (!pages[j])
++					break;
++				put_page(pages[j]);
++			}
+ 			ret = -EFAULT;
+ 			goto out_unmap;
+ 		}
+@@ -1379,6 +1385,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
+ 		offset = offset_in_page(uaddr);
+ 		for (j = cur_page; j < page_limit; j++) {
+ 			unsigned int bytes = PAGE_SIZE - offset;
++			unsigned short prev_bi_vcnt = bio->bi_vcnt;
+ 
+ 			if (len <= 0)
+ 				break;
+@@ -1393,6 +1400,13 @@ struct bio *bio_map_user_iov(struct request_queue *q,
+ 					    bytes)
+ 				break;
+ 
++			/*
++			 * check if vector was merged with previous
++			 * drop page reference if needed
++			 */
++			if (bio->bi_vcnt == prev_bi_vcnt)
++				put_page(pages[j]);
++
+ 			len -= bytes;
+ 			offset = 0;
+ 		}
+@@ -1419,10 +1433,8 @@ struct bio *bio_map_user_iov(struct request_queue *q,
+ 	return bio;
+ 
+  out_unmap:
+-	for (j = 0; j < nr_pages; j++) {
+-		if (!pages[j])
+-			break;
+-		put_page(pages[j]);
++	bio_for_each_segment_all(bvec, bio, j) {
++		put_page(bvec->bv_page);
+ 	}
+  out:
+ 	kfree(pages);
+diff --git a/crypto/shash.c b/crypto/shash.c
+index 5e31c8d776df..f4161f3cfed6 100644
+--- a/crypto/shash.c
++++ b/crypto/shash.c
+@@ -275,12 +275,14 @@ static int shash_async_finup(struct ahash_request *req)
+ 
+ int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
+ {
+-	struct scatterlist *sg = req->src;
+-	unsigned int offset = sg->offset;
+ 	unsigned int nbytes = req->nbytes;
++	struct scatterlist *sg;
++	unsigned int offset;
+ 	int err;
+ 
+-	if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
++	if (nbytes &&
++	    (sg = req->src, offset = sg->offset,
++	     nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
+ 		void *data;
+ 
+ 		data = kmap_atomic(sg_page(sg));
+diff --git a/crypto/skcipher.c b/crypto/skcipher.c
+index 4faa0fd53b0c..d5692e35fab1 100644
+--- a/crypto/skcipher.c
++++ b/crypto/skcipher.c
+@@ -426,14 +426,9 @@ static int skcipher_copy_iv(struct skcipher_walk *walk)
+ 
+ static int skcipher_walk_first(struct skcipher_walk *walk)
+ {
+-	walk->nbytes = 0;
+-
+ 	if (WARN_ON_ONCE(in_irq()))
+ 		return -EDEADLK;
+ 
+-	if (unlikely(!walk->total))
+-		return 0;
+-
+ 	walk->buffer = NULL;
+ 	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
+ 		int err = skcipher_copy_iv(walk);
+@@ -452,10 +447,15 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
+ {
+ 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ 
++	walk->total = req->cryptlen;
++	walk->nbytes = 0;
++
++	if (unlikely(!walk->total))
++		return 0;
++
+ 	scatterwalk_start(&walk->in, req->src);
+ 	scatterwalk_start(&walk->out, req->dst);
+ 
+-	walk->total = req->cryptlen;
+ 	walk->iv = req->iv;
+ 	walk->oiv = req->iv;
+ 
+@@ -509,6 +509,11 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
+ 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ 	int err;
+ 
++	walk->nbytes = 0;
++
++	if (unlikely(!walk->total))
++		return 0;
++
+ 	walk->flags &= ~SKCIPHER_WALK_PHYS;
+ 
+ 	scatterwalk_start(&walk->in, req->src);
+diff --git a/drivers/base/property.c b/drivers/base/property.c
+index edf02c1b5845..5d0c26a53876 100644
+--- a/drivers/base/property.c
++++ b/drivers/base/property.c
+@@ -21,6 +21,7 @@
+ #include <linux/phy.h>
+ 
+ struct property_set {
++	struct device *dev;
+ 	struct fwnode_handle fwnode;
+ 	const struct property_entry *properties;
+ };
+@@ -855,6 +856,7 @@ static struct property_set *pset_copy_set(const struct property_set *pset)
+ void device_remove_properties(struct device *dev)
+ {
+ 	struct fwnode_handle *fwnode;
++	struct property_set *pset;
+ 
+ 	fwnode = dev_fwnode(dev);
+ 	if (!fwnode)
+@@ -864,16 +866,16 @@ void device_remove_properties(struct device *dev)
+ 	 * the pset. If there is no real firmware node (ACPI/DT) primary
+ 	 * will hold the pset.
+ 	 */
+-	if (is_pset_node(fwnode)) {
++	pset = to_pset_node(fwnode);
++	if (pset) {
+ 		set_primary_fwnode(dev, NULL);
+-		pset_free_set(to_pset_node(fwnode));
+ 	} else {
+-		fwnode = fwnode->secondary;
+-		if (!IS_ERR(fwnode) && is_pset_node(fwnode)) {
++		pset = to_pset_node(fwnode->secondary);
++		if (pset && dev == pset->dev)
+ 			set_secondary_fwnode(dev, NULL);
+-			pset_free_set(to_pset_node(fwnode));
+-		}
+ 	}
++	if (pset && dev == pset->dev)
++		pset_free_set(pset);
+ }
+ EXPORT_SYMBOL_GPL(device_remove_properties);
+ 
+@@ -903,6 +905,7 @@ int device_add_properties(struct device *dev,
+ 	p->fwnode.type = FWNODE_PDATA;
+ 	p->fwnode.ops = &pset_fwnode_ops;
+ 	set_secondary_fwnode(dev, &p->fwnode);
++	p->dev = dev;
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(device_add_properties);
+diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
+index 3879f80a4815..a7ea20e7b8e9 100644
+--- a/drivers/dma/edma.c
++++ b/drivers/dma/edma.c
+@@ -1143,11 +1143,24 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
+ 	struct edma_desc *edesc;
+ 	struct device *dev = chan->device->dev;
+ 	struct edma_chan *echan = to_edma_chan(chan);
+-	unsigned int width, pset_len;
++	unsigned int width, pset_len, array_size;
+ 
+ 	if (unlikely(!echan || !len))
+ 		return NULL;
+ 
++	/* Align the array size (acnt block) with the transfer properties */
++	switch (__ffs((src | dest | len))) {
++	case 0:
++		array_size = SZ_32K - 1;
++		break;
++	case 1:
++		array_size = SZ_32K - 2;
++		break;
++	default:
++		array_size = SZ_32K - 4;
++		break;
++	}
++
+ 	if (len < SZ_64K) {
+ 		/*
+ 		 * Transfer size less than 64K can be handled with one paRAM
+@@ -1169,7 +1182,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
+ 		 * When the full_length is multibple of 32767 one slot can be
+ 		 * used to complete the transfer.
+ 		 */
+-		width = SZ_32K - 1;
++		width = array_size;
+ 		pset_len = rounddown(len, width);
+ 		/* One slot is enough for lengths multiple of (SZ_32K -1) */
+ 		if (unlikely(pset_len == len))
+@@ -1217,7 +1230,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
+ 		}
+ 		dest += pset_len;
+ 		src += pset_len;
+-		pset_len = width = len % (SZ_32K - 1);
++		pset_len = width = len % array_size;
+ 
+ 		ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
+ 				       width, pset_len, DMA_MEM_TO_MEM);
+diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
+index 2403475a37cf..88a00d06def6 100644
+--- a/drivers/dma/ti-dma-crossbar.c
++++ b/drivers/dma/ti-dma-crossbar.c
+@@ -262,13 +262,14 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
+ 	mutex_lock(&xbar->mutex);
+ 	map->xbar_out = find_first_zero_bit(xbar->dma_inuse,
+ 					    xbar->dma_requests);
+-	mutex_unlock(&xbar->mutex);
+ 	if (map->xbar_out == xbar->dma_requests) {
++		mutex_unlock(&xbar->mutex);
+ 		dev_err(&pdev->dev, "Run out of free DMA requests\n");
+ 		kfree(map);
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+ 	set_bit(map->xbar_out, xbar->dma_inuse);
++	mutex_unlock(&xbar->mutex);
+ 
+ 	map->xbar_in = (u16)dma_spec->args[0];
+ 
+diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
+index 86d3093c6c9b..c73763959945 100644
+--- a/drivers/gpu/drm/drm_atomic_helper.c
++++ b/drivers/gpu/drm/drm_atomic_helper.c
+@@ -2756,6 +2756,7 @@ int drm_atomic_helper_resume(struct drm_device *dev,
+ 		drm_modeset_backoff(&ctx);
+ 	}
+ 
++	drm_atomic_state_put(state);
+ 	drm_modeset_drop_locks(&ctx);
+ 	drm_modeset_acquire_fini(&ctx);
+ 
+diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
+index 645488071944..f814359c86c9 100644
+--- a/drivers/gpu/drm/i915/intel_bios.c
++++ b/drivers/gpu/drm/i915/intel_bios.c
+@@ -1231,7 +1231,7 @@ static void parse_ddi_ports(struct drm_i915_private *dev_priv,
+ {
+ 	enum port port;
+ 
+-	if (!HAS_DDI(dev_priv))
++	if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+ 		return;
+ 
+ 	if (!dev_priv->vbt.child_dev_num)
+diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
+index 17c4ae7e4e7c..824042ed04f6 100644
+--- a/drivers/gpu/drm/i915/intel_color.c
++++ b/drivers/gpu/drm/i915/intel_color.c
+@@ -58,7 +58,7 @@
+ #define I9XX_CSC_COEFF_1_0		\
+ 	((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8))
+ 
+-static bool crtc_state_is_legacy(struct drm_crtc_state *state)
++static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state)
+ {
+ 	return !state->degamma_lut &&
+ 		!state->ctm &&
+@@ -245,7 +245,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
+ 	}
+ 
+ 	mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0);
+-	if (!crtc_state_is_legacy(state)) {
++	if (!crtc_state_is_legacy_gamma(state)) {
+ 		mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
+ 			(state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
+ 	}
+@@ -426,7 +426,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
+ 	struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
+ 	enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
+ 
+-	if (crtc_state_is_legacy(state)) {
++	if (crtc_state_is_legacy_gamma(state)) {
+ 		haswell_load_luts(state);
+ 		return;
+ 	}
+@@ -486,7 +486,7 @@ static void glk_load_luts(struct drm_crtc_state *state)
+ 
+ 	glk_load_degamma_lut(state);
+ 
+-	if (crtc_state_is_legacy(state)) {
++	if (crtc_state_is_legacy_gamma(state)) {
+ 		haswell_load_luts(state);
+ 		return;
+ 	}
+@@ -508,7 +508,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
+ 	uint32_t i, lut_size;
+ 	uint32_t word0, word1;
+ 
+-	if (crtc_state_is_legacy(state)) {
++	if (crtc_state_is_legacy_gamma(state)) {
+ 		/* Turn off degamma/gamma on CGM block. */
+ 		I915_WRITE(CGM_PIPE_MODE(pipe),
+ 			   (state->ctm ? CGM_PIPE_MODE_CSC : 0));
+@@ -589,12 +589,10 @@ int intel_color_check(struct drm_crtc *crtc,
+ 		return 0;
+ 
+ 	/*
+-	 * We also allow no degamma lut and a gamma lut at the legacy
++	 * We also allow no degamma lut/ctm and a gamma lut at the legacy
+ 	 * size (256 entries).
+ 	 */
+-	if (!crtc_state->degamma_lut &&
+-	    crtc_state->gamma_lut &&
+-	    crtc_state->gamma_lut->length == LEGACY_LUT_LENGTH)
++	if (crtc_state_is_legacy_gamma(crtc_state))
+ 		return 0;
+ 
+ 	return -EINVAL;
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index cc484b56eeaa..20b458551157 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -10059,13 +10059,10 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
+ {
+ 	struct drm_i915_private *dev_priv = to_i915(dev);
+ 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+-	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
++	enum transcoder cpu_transcoder;
+ 	struct drm_display_mode *mode;
+ 	struct intel_crtc_state *pipe_config;
+-	int htot = I915_READ(HTOTAL(cpu_transcoder));
+-	int hsync = I915_READ(HSYNC(cpu_transcoder));
+-	int vtot = I915_READ(VTOTAL(cpu_transcoder));
+-	int vsync = I915_READ(VSYNC(cpu_transcoder));
++	u32 htot, hsync, vtot, vsync;
+ 	enum pipe pipe = intel_crtc->pipe;
+ 
+ 	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+@@ -10093,6 +10090,13 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
+ 	i9xx_crtc_clock_get(intel_crtc, pipe_config);
+ 
+ 	mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
++
++	cpu_transcoder = pipe_config->cpu_transcoder;
++	htot = I915_READ(HTOTAL(cpu_transcoder));
++	hsync = I915_READ(HSYNC(cpu_transcoder));
++	vtot = I915_READ(VTOTAL(cpu_transcoder));
++	vsync = I915_READ(VSYNC(cpu_transcoder));
++
+ 	mode->hdisplay = (htot & 0xffff) + 1;
+ 	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
+ 	mode->hsync_start = (hsync & 0xffff) + 1;
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index 64fa774c855b..61c313e21a91 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -2263,8 +2263,8 @@ static void edp_panel_off(struct intel_dp *intel_dp)
+ 	I915_WRITE(pp_ctrl_reg, pp);
+ 	POSTING_READ(pp_ctrl_reg);
+ 
+-	intel_dp->panel_power_off_time = ktime_get_boottime();
+ 	wait_panel_off(intel_dp);
++	intel_dp->panel_power_off_time = ktime_get_boottime();
+ 
+ 	/* We got a reference when we enabled the VDD. */
+ 	intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
+diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
+index c008847e0b20..3f11b02f9857 100644
+--- a/drivers/hid/usbhid/hid-core.c
++++ b/drivers/hid/usbhid/hid-core.c
+@@ -975,6 +975,8 @@ static int usbhid_parse(struct hid_device *hid)
+ 	unsigned int rsize = 0;
+ 	char *rdesc;
+ 	int ret, n;
++	int num_descriptors;
++	size_t offset = offsetof(struct hid_descriptor, desc);
+ 
+ 	quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
+ 			le16_to_cpu(dev->descriptor.idProduct));
+@@ -997,10 +999,18 @@ static int usbhid_parse(struct hid_device *hid)
+ 		return -ENODEV;
+ 	}
+ 
++	if (hdesc->bLength < sizeof(struct hid_descriptor)) {
++		dbg_hid("hid descriptor is too short\n");
++		return -EINVAL;
++	}
++
+ 	hid->version = le16_to_cpu(hdesc->bcdHID);
+ 	hid->country = hdesc->bCountryCode;
+ 
+-	for (n = 0; n < hdesc->bNumDescriptors; n++)
++	num_descriptors = min_t(int, hdesc->bNumDescriptors,
++	       (hdesc->bLength - offset) / sizeof(struct hid_class_descriptor));
++
++	for (n = 0; n < num_descriptors; n++)
+ 		if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT)
+ 			rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength);
+ 
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 354cbd6392cd..632643939147 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -3262,6 +3262,7 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
+ 	mutex_unlock(&domain->api_lock);
+ 
+ 	domain_flush_tlb_pde(domain);
++	domain_flush_complete(domain);
+ 
+ 	return unmap_size;
+ }
+diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
+index bac33311f55a..1d37a4782c78 100644
+--- a/drivers/misc/mei/pci-me.c
++++ b/drivers/misc/mei/pci-me.c
+@@ -222,12 +222,15 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
+ 
+ 	/*
+-	* For not wake-able HW runtime pm framework
+-	* can't be used on pci device level.
+-	* Use domain runtime pm callbacks instead.
+-	*/
+-	if (!pci_dev_run_wake(pdev))
+-		mei_me_set_pm_domain(dev);
++	 * ME maps runtime suspend/resume to D0i states,
++	 * hence we need to go around native PCI runtime service which
++	 * eventually brings the device into D3cold/hot state,
++	 * but the mei device cannot wake up from D3 unlike from D0i3.
++	 * To get around the PCI device native runtime pm,
++	 * ME uses runtime pm domain handlers which take precedence
++	 * over the driver's pm handlers.
++	 */
++	mei_me_set_pm_domain(dev);
+ 
+ 	if (mei_pg_is_enabled(dev))
+ 		pm_runtime_put_noidle(&pdev->dev);
+@@ -267,8 +270,7 @@ static void mei_me_shutdown(struct pci_dev *pdev)
+ 	dev_dbg(&pdev->dev, "shutdown\n");
+ 	mei_stop(dev);
+ 
+-	if (!pci_dev_run_wake(pdev))
+-		mei_me_unset_pm_domain(dev);
++	mei_me_unset_pm_domain(dev);
+ 
+ 	mei_disable_interrupts(dev);
+ 	free_irq(pdev->irq, dev);
+@@ -296,8 +298,7 @@ static void mei_me_remove(struct pci_dev *pdev)
+ 	dev_dbg(&pdev->dev, "stop\n");
+ 	mei_stop(dev);
+ 
+-	if (!pci_dev_run_wake(pdev))
+-		mei_me_unset_pm_domain(dev);
++	mei_me_unset_pm_domain(dev);
+ 
+ 	mei_disable_interrupts(dev);
+ 
+diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
+index e38a5f144373..0566f9bfa7de 100644
+--- a/drivers/misc/mei/pci-txe.c
++++ b/drivers/misc/mei/pci-txe.c
+@@ -144,12 +144,14 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
+ 
+ 	/*
+-	* For not wake-able HW runtime pm framework
+-	* can't be used on pci device level.
+-	* Use domain runtime pm callbacks instead.
+-	*/
+-	if (!pci_dev_run_wake(pdev))
+-		mei_txe_set_pm_domain(dev);
++	 * TXE maps runtime suspend/resume to own power gating states,
++	 * hence we need to go around native PCI runtime service which
++	 * eventually brings the device into D3cold/hot state.
++	 * But the TXE device cannot wake up from D3 unlike from own
++	 * power gating. To get around PCI device native runtime pm,
++	 * TXE uses runtime pm domain handlers which take precedence.
++	 */
++	mei_txe_set_pm_domain(dev);
+ 
+ 	pm_runtime_put_noidle(&pdev->dev);
+ 
+@@ -186,8 +188,7 @@ static void mei_txe_shutdown(struct pci_dev *pdev)
+ 	dev_dbg(&pdev->dev, "shutdown\n");
+ 	mei_stop(dev);
+ 
+-	if (!pci_dev_run_wake(pdev))
+-		mei_txe_unset_pm_domain(dev);
++	mei_txe_unset_pm_domain(dev);
+ 
+ 	mei_disable_interrupts(dev);
+ 	free_irq(pdev->irq, dev);
+@@ -215,8 +216,7 @@ static void mei_txe_remove(struct pci_dev *pdev)
+ 
+ 	mei_stop(dev);
+ 
+-	if (!pci_dev_run_wake(pdev))
+-		mei_txe_unset_pm_domain(dev);
++	mei_txe_unset_pm_domain(dev);
+ 
+ 	mei_disable_interrupts(dev);
+ 	free_irq(pdev->irq, dev);
+@@ -318,15 +318,7 @@ static int mei_txe_pm_runtime_suspend(struct device *device)
+ 	else
+ 		ret = -EAGAIN;
+ 
+-	/*
+-	 * If everything is okay we're about to enter PCI low
+-	 * power state (D3) therefor we need to disable the
+-	 * interrupts towards host.
+-	 * However if device is not wakeable we do not enter
+-	 * D-low state and we need to keep the interrupt kicking
+-	 */
+-	if (!ret && pci_dev_run_wake(pdev))
+-		mei_disable_interrupts(dev);
++	/* keep irq on we are staying in D0 */
+ 
+ 	dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret);
+ 
+diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
+index 5fb9b620ac78..20f1d048582f 100644
+--- a/drivers/pci/host/pci-aardvark.c
++++ b/drivers/pci/host/pci-aardvark.c
+@@ -936,6 +936,8 @@ static int advk_pcie_probe(struct platform_device *pdev)
+ 	bridge->sysdata = pcie;
+ 	bridge->busnr = 0;
+ 	bridge->ops = &advk_pcie_ops;
++	bridge->map_irq = of_irq_parse_and_map_pci;
++	bridge->swizzle_irq = pci_common_swizzle;
+ 
+ 	ret = pci_scan_root_bus_bridge(bridge);
+ 	if (ret < 0) {
+diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
+index b3722b7709df..1d1d87e8bcbf 100644
+--- a/drivers/pci/host/pci-tegra.c
++++ b/drivers/pci/host/pci-tegra.c
+@@ -233,6 +233,7 @@ struct tegra_msi {
+ 	struct msi_controller chip;
+ 	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
+ 	struct irq_domain *domain;
++	unsigned long pages;
+ 	struct mutex lock;
+ 	u64 phys;
+ 	int irq;
+@@ -1529,22 +1530,9 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
+ 		goto err;
+ 	}
+ 
+-	/*
+-	 * The PCI host bridge on Tegra contains some logic that intercepts
+-	 * MSI writes, which means that the MSI target address doesn't have
+-	 * to point to actual physical memory. Rather than allocating one 4
+-	 * KiB page of system memory that's never used, we can simply pick
+-	 * an arbitrary address within an area reserved for system memory
+-	 * in the FPCI address map.
+-	 *
+-	 * However, in order to avoid confusion, we pick an address that
+-	 * doesn't map to physical memory. The FPCI address map reserves a
+-	 * 1012 GiB region for system memory and memory-mapped I/O. Since
+-	 * none of the Tegra SoCs that contain this PCI host bridge can
+-	 * address more than 16 GiB of system memory, the last 4 KiB of
+-	 * these 1012 GiB is a good candidate.
+-	 */
+-	msi->phys = 0xfcfffff000;
++	/* setup AFI/FPCI range */
++	msi->pages = __get_free_pages(GFP_KERNEL, 0);
++	msi->phys = virt_to_phys((void *)msi->pages);
+ 
+ 	afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
+ 	afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
+@@ -1596,6 +1584,8 @@ static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
+ 	afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
+ 	afi_writel(pcie, 0, AFI_MSI_EN_VEC7);
+ 
++	free_pages(msi->pages, 0);
++
+ 	if (msi->irq > 0)
+ 		free_irq(msi->irq, pcie);
+ 
+diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
+index e14b46c7b37f..8c054f5ccc11 100644
+--- a/drivers/pinctrl/Kconfig
++++ b/drivers/pinctrl/Kconfig
+@@ -100,6 +100,7 @@ config PINCTRL_AMD
+ 	tristate "AMD GPIO pin control"
+ 	depends on GPIOLIB
+ 	select GPIOLIB_IRQCHIP
++	select PINMUX
+ 	select PINCONF
+ 	select GENERIC_PINCONF
+ 	help
+diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
+index d0e5d6ee882c..e2c1988cd7c0 100644
+--- a/drivers/ras/cec.c
++++ b/drivers/ras/cec.c
+@@ -523,7 +523,7 @@ int __init parse_cec_param(char *str)
+ 	if (*str == '=')
+ 		str++;
+ 
+-	if (!strncmp(str, "cec_disable", 7))
++	if (!strcmp(str, "cec_disable"))
+ 		ce_arr.disabled = 1;
+ 	else
+ 		return 0;
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index dd74c99d6ce1..5d061b3d8224 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -2026,6 +2026,8 @@ static DEVICE_ATTR_RO(suspended);
+ static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver)
+ {
+ 	struct usb_composite_dev	*cdev = get_gadget_data(gadget);
++	struct usb_gadget_strings	*gstr = cdev->driver->strings[0];
++	struct usb_string		*dev_str = gstr->strings;
+ 
+ 	/* composite_disconnect() must already have been called
+ 	 * by the underlying peripheral controller driver!
+@@ -2045,6 +2047,9 @@ static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver)
+ 
+ 	composite_dev_cleanup(cdev);
+ 
++	if (dev_str[USB_GADGET_MANUFACTURER_IDX].s == cdev->def_manufacturer)
++		dev_str[USB_GADGET_MANUFACTURER_IDX].s = "";
++
+ 	kfree(cdev->def_manufacturer);
+ 	kfree(cdev);
+ 	set_gadget_data(gadget, NULL);
+diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
+index a22a892de7b7..aeb9f3c40521 100644
+--- a/drivers/usb/gadget/configfs.c
++++ b/drivers/usb/gadget/configfs.c
+@@ -1143,11 +1143,12 @@ static struct configfs_attribute *interf_grp_attrs[] = {
+ 	NULL
+ };
+ 
+-int usb_os_desc_prepare_interf_dir(struct config_group *parent,
+-				   int n_interf,
+-				   struct usb_os_desc **desc,
+-				   char **names,
+-				   struct module *owner)
++struct config_group *usb_os_desc_prepare_interf_dir(
++		struct config_group *parent,
++		int n_interf,
++		struct usb_os_desc **desc,
++		char **names,
++		struct module *owner)
+ {
+ 	struct config_group *os_desc_group;
+ 	struct config_item_type *os_desc_type, *interface_type;
+@@ -1159,7 +1160,7 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent,
+ 
+ 	char *vlabuf = kzalloc(vla_group_size(data_chunk), GFP_KERNEL);
+ 	if (!vlabuf)
+-		return -ENOMEM;
++		return ERR_PTR(-ENOMEM);
+ 
+ 	os_desc_group = vla_ptr(vlabuf, data_chunk, os_desc_group);
+ 	os_desc_type = vla_ptr(vlabuf, data_chunk, os_desc_type);
+@@ -1184,7 +1185,7 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent,
+ 		configfs_add_default_group(&d->group, os_desc_group);
+ 	}
+ 
+-	return 0;
++	return os_desc_group;
+ }
+ EXPORT_SYMBOL(usb_os_desc_prepare_interf_dir);
+ 
+diff --git a/drivers/usb/gadget/configfs.h b/drivers/usb/gadget/configfs.h
+index 36c468c4f5e9..540d5e92ed22 100644
+--- a/drivers/usb/gadget/configfs.h
++++ b/drivers/usb/gadget/configfs.h
+@@ -5,11 +5,12 @@
+ 
+ void unregister_gadget_item(struct config_item *item);
+ 
+-int usb_os_desc_prepare_interf_dir(struct config_group *parent,
+-				   int n_interf,
+-				   struct usb_os_desc **desc,
+-				   char **names,
+-				   struct module *owner);
++struct config_group *usb_os_desc_prepare_interf_dir(
++		struct config_group *parent,
++		int n_interf,
++		struct usb_os_desc **desc,
++		char **names,
++		struct module *owner);
+ 
+ static inline struct usb_os_desc *to_usb_os_desc(struct config_item *item)
+ {
+diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
+index 16562e461121..ba00cdb809d6 100644
+--- a/drivers/usb/gadget/function/f_rndis.c
++++ b/drivers/usb/gadget/function/f_rndis.c
+@@ -892,6 +892,7 @@ static void rndis_free_inst(struct usb_function_instance *f)
+ 			free_netdev(opts->net);
+ 	}
+ 
++	kfree(opts->rndis_interf_group);	/* single VLA chunk */
+ 	kfree(opts);
+ }
+ 
+@@ -900,6 +901,7 @@ static struct usb_function_instance *rndis_alloc_inst(void)
+ 	struct f_rndis_opts *opts;
+ 	struct usb_os_desc *descs[1];
+ 	char *names[1];
++	struct config_group *rndis_interf_group;
+ 
+ 	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ 	if (!opts)
+@@ -920,8 +922,14 @@ static struct usb_function_instance *rndis_alloc_inst(void)
+ 	names[0] = "rndis";
+ 	config_group_init_type_name(&opts->func_inst.group, "",
+ 				    &rndis_func_type);
+-	usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs,
+-				       names, THIS_MODULE);
++	rndis_interf_group =
++		usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs,
++					       names, THIS_MODULE);
++	if (IS_ERR(rndis_interf_group)) {
++		rndis_free_inst(&opts->func_inst);
++		return ERR_CAST(rndis_interf_group);
++	}
++	opts->rndis_interf_group = rndis_interf_group;
+ 
+ 	return &opts->func_inst;
+ }
+diff --git a/drivers/usb/gadget/function/u_rndis.h b/drivers/usb/gadget/function/u_rndis.h
+index 4eafd5050545..4e2ad04fe8d6 100644
+--- a/drivers/usb/gadget/function/u_rndis.h
++++ b/drivers/usb/gadget/function/u_rndis.h
+@@ -26,6 +26,7 @@ struct f_rndis_opts {
+ 	bool				bound;
+ 	bool				borrowed_net;
+ 
++	struct config_group		*rndis_interf_group;
+ 	struct usb_os_desc		rndis_os_desc;
+ 	char				rndis_ext_compat_id[16];
+ 
+diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
+index 374f85f612d9..1c6cfdf0457e 100644
+--- a/drivers/usb/gadget/udc/dummy_hcd.c
++++ b/drivers/usb/gadget/udc/dummy_hcd.c
+@@ -420,6 +420,7 @@ static void set_link_state_by_speed(struct dummy_hcd *dum_hcd)
+ static void set_link_state(struct dummy_hcd *dum_hcd)
+ {
+ 	struct dummy *dum = dum_hcd->dum;
++	unsigned int power_bit;
+ 
+ 	dum_hcd->active = 0;
+ 	if (dum->pullup)
+@@ -430,17 +431,19 @@ static void set_link_state(struct dummy_hcd *dum_hcd)
+ 			return;
+ 
+ 	set_link_state_by_speed(dum_hcd);
++	power_bit = (dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3 ?
++			USB_SS_PORT_STAT_POWER : USB_PORT_STAT_POWER);
+ 
+ 	if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) == 0 ||
+ 	     dum_hcd->active)
+ 		dum_hcd->resuming = 0;
+ 
+ 	/* Currently !connected or in reset */
+-	if ((dum_hcd->port_status & USB_PORT_STAT_CONNECTION) == 0 ||
++	if ((dum_hcd->port_status & power_bit) == 0 ||
+ 			(dum_hcd->port_status & USB_PORT_STAT_RESET) != 0) {
+-		unsigned disconnect = USB_PORT_STAT_CONNECTION &
++		unsigned int disconnect = power_bit &
+ 				dum_hcd->old_status & (~dum_hcd->port_status);
+-		unsigned reset = USB_PORT_STAT_RESET &
++		unsigned int reset = USB_PORT_STAT_RESET &
+ 				(~dum_hcd->old_status) & dum_hcd->port_status;
+ 
+ 		/* Report reset and disconnect events to the driver */
+diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
+index 68f26904c316..50285b01da92 100644
+--- a/drivers/usb/renesas_usbhs/fifo.c
++++ b/drivers/usb/renesas_usbhs/fifo.c
+@@ -857,9 +857,9 @@ static void xfer_work(struct work_struct *work)
+ 		fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);
+ 
+ 	usbhs_pipe_running(pipe, 1);
+-	usbhsf_dma_start(pipe, fifo);
+ 	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
+ 	dma_async_issue_pending(chan);
++	usbhsf_dma_start(pipe, fifo);
+ 	usbhs_pipe_enable(pipe);
+ 
+ xfer_work_end:
+diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
+index fdf89800ebc3..43a862a90a77 100644
+--- a/drivers/usb/serial/console.c
++++ b/drivers/usb/serial/console.c
+@@ -186,6 +186,7 @@ static int usb_console_setup(struct console *co, char *options)
+ 	tty_kref_put(tty);
+  reset_open_count:
+ 	port->port.count = 0;
++	info->port = NULL;
+ 	usb_autopm_put_interface(serial->interface);
+  error_get_interface:
+ 	usb_serial_put(serial);
+@@ -265,7 +266,7 @@ static struct console usbcons = {
+ 
+ void usb_serial_console_disconnect(struct usb_serial *serial)
+ {
+-	if (serial->port[0] == usbcons_info.port) {
++	if (serial->port[0] && serial->port[0] == usbcons_info.port) {
+ 		usb_serial_console_exit();
+ 		usb_serial_put(serial);
+ 	}
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 2d945c9f975c..412f812522ee 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -177,6 +177,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
+ 	{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+ 	{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
++	{ USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */
+ 	{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
+ 	{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
+ 	{ USB_DEVICE(0x1901, 0x0194) },	/* GE Healthcare Remote Alarm Box */
+@@ -352,6 +353,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
+ #define CP210X_PARTNUM_CP2104	0x04
+ #define CP210X_PARTNUM_CP2105	0x05
+ #define CP210X_PARTNUM_CP2108	0x08
++#define CP210X_PARTNUM_UNKNOWN	0xFF
+ 
+ /* CP210X_GET_COMM_STATUS returns these 0x13 bytes */
+ struct cp210x_comm_status {
+@@ -1491,8 +1493,11 @@ static int cp210x_attach(struct usb_serial *serial)
+ 	result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST,
+ 					  CP210X_GET_PARTNUM, &priv->partnum,
+ 					  sizeof(priv->partnum));
+-	if (result < 0)
+-		goto err_free_priv;
++	if (result < 0) {
++		dev_warn(&serial->interface->dev,
++			 "querying part number failed\n");
++		priv->partnum = CP210X_PARTNUM_UNKNOWN;
++	}
+ 
+ 	usb_set_serial_data(serial, priv);
+ 
+@@ -1505,10 +1510,6 @@ static int cp210x_attach(struct usb_serial *serial)
+ 	}
+ 
+ 	return 0;
+-err_free_priv:
+-	kfree(priv);
+-
+-	return result;
+ }
+ 
+ static void cp210x_disconnect(struct usb_serial *serial)
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 1cec03799cdf..49d1b2d4606d 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1015,6 +1015,8 @@ static const struct usb_device_id id_table_combined[] = {
+ 	{ USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
+ 	{ USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID),
+ 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
++	{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
++	{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
+ 	{ }					/* Terminating entry */
+ };
+ 
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 4fcf1cecb6d7..f9d15bd62785 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -609,6 +609,13 @@
+ #define ADI_GNICE_PID		0xF000
+ #define ADI_GNICEPLUS_PID	0xF001
+ 
++/*
++ * Cypress WICED USB UART
++ */
++#define CYPRESS_VID			0x04B4
++#define CYPRESS_WICED_BT_USB_PID	0x009B
++#define CYPRESS_WICED_WL_USB_PID	0xF900
++
+ /*
+  * Microchip Technology, Inc.
+  *
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 2a9944326210..db3d34c2c82e 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -522,6 +522,7 @@ static void option_instat_callback(struct urb *urb);
+ 
+ /* TP-LINK Incorporated products */
+ #define TPLINK_VENDOR_ID			0x2357
++#define TPLINK_PRODUCT_LTE			0x000D
+ #define TPLINK_PRODUCT_MA180			0x0201
+ 
+ /* Changhong products */
+@@ -2011,6 +2012,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
+ 	{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) },
+ 	{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 0x00) },	/* TP-Link LTE Module */
+ 	{ USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
+ 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE(TPLINK_VENDOR_ID, 0x9000),					/* TP-Link MA260 */
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index ebc0beea69d6..eb9928963a53 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -174,6 +174,10 @@ static const struct usb_device_id id_table[] = {
+ 	{DEVICE_SWI(0x413c, 0x81b3)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
+ 	{DEVICE_SWI(0x413c, 0x81b5)},	/* Dell Wireless 5811e QDL */
+ 	{DEVICE_SWI(0x413c, 0x81b6)},	/* Dell Wireless 5811e QDL */
++	{DEVICE_SWI(0x413c, 0x81cf)},   /* Dell Wireless 5819 */
++	{DEVICE_SWI(0x413c, 0x81d0)},   /* Dell Wireless 5819 */
++	{DEVICE_SWI(0x413c, 0x81d1)},   /* Dell Wireless 5818 */
++	{DEVICE_SWI(0x413c, 0x81d2)},   /* Dell Wireless 5818 */
+ 
+ 	/* Huawei devices */
+ 	{DEVICE_HWI(0x03f0, 0x581d)},	/* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index 9941dc8342df..3fbe75bdd257 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -716,10 +716,12 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
+ 
+ 	set_page_writeback(page);
+ 	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, true);
+-	if (result)
++	if (result) {
+ 		end_page_writeback(page);
+-	else
++	} else {
++		clean_page_buffers(page);
+ 		unlock_page(page);
++	}
+ 	blk_queue_exit(bdev->bd_queue);
+ 	return result;
+ }
+diff --git a/fs/direct-io.c b/fs/direct-io.c
+index 08cf27811e5a..ad379f082d83 100644
+--- a/fs/direct-io.c
++++ b/fs/direct-io.c
+@@ -838,7 +838,8 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
+ 	 */
+ 	if (sdio->boundary) {
+ 		ret = dio_send_cur_page(dio, sdio, map_bh);
+-		dio_bio_submit(dio, sdio);
++		if (sdio->bio)
++			dio_bio_submit(dio, sdio);
+ 		put_page(sdio->cur_page);
+ 		sdio->cur_page = NULL;
+ 	}
+diff --git a/fs/mpage.c b/fs/mpage.c
+index 2e4c41ccb5c9..9feb169fbd5c 100644
+--- a/fs/mpage.c
++++ b/fs/mpage.c
+@@ -468,6 +468,16 @@ static void clean_buffers(struct page *page, unsigned first_unmapped)
+ 		try_to_free_buffers(page);
+ }
+ 
++/*
++ * For situations where we want to clean all buffers attached to a page.
++ * We don't need to calculate how many buffers are attached to the page,
++ * we just need to specify a number larger than the maximum number of buffers.
++ */
++void clean_page_buffers(struct page *page)
++{
++	clean_buffers(page, ~0U);
++}
++
+ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
+ 		      void *data)
+ {
+@@ -605,10 +615,8 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
+ 	if (bio == NULL) {
+ 		if (first_unmapped == blocks_per_page) {
+ 			if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9),
+-								page, wbc)) {
+-				clean_buffers(page, first_unmapped);
++								page, wbc))
+ 				goto out;
+-			}
+ 		}
+ 		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
+ 				BIO_MAX_PAGES, GFP_NOFS|__GFP_HIGH);
+diff --git a/fs/nfs/client.c b/fs/nfs/client.c
+index efebe6cf4378..22880ef6d8dd 100644
+--- a/fs/nfs/client.c
++++ b/fs/nfs/client.c
+@@ -218,7 +218,6 @@ static void nfs_cb_idr_remove_locked(struct nfs_client *clp)
+ static void pnfs_init_server(struct nfs_server *server)
+ {
+ 	rpc_init_wait_queue(&server->roc_rpcwaitq, "pNFS ROC");
+-	rpc_init_wait_queue(&server->uoc_rpcwaitq, "NFS UOC");
+ }
+ 
+ #else
+@@ -888,6 +887,7 @@ struct nfs_server *nfs_alloc_server(void)
+ 	ida_init(&server->openowner_id);
+ 	ida_init(&server->lockowner_id);
+ 	pnfs_init_server(server);
++	rpc_init_wait_queue(&server->uoc_rpcwaitq, "NFS UOC");
+ 
+ 	return server;
+ }
+diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
+index 44c638b7876c..508126eb49f9 100644
+--- a/fs/nfs/filelayout/filelayout.c
++++ b/fs/nfs/filelayout/filelayout.c
+@@ -745,7 +745,8 @@ filelayout_free_lseg(struct pnfs_layout_segment *lseg)
+ 	struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
+ 
+ 	dprintk("--> %s\n", __func__);
+-	nfs4_fl_put_deviceid(fl->dsaddr);
++	if (fl->dsaddr != NULL)
++		nfs4_fl_put_deviceid(fl->dsaddr);
+ 	/* This assumes a single RW lseg */
+ 	if (lseg->pls_range.iomode == IOMODE_RW) {
+ 		struct nfs4_filelayout *flo;
+diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
+index c8dae555eccf..446b24cac67d 100644
+--- a/include/linux/buffer_head.h
++++ b/include/linux/buffer_head.h
+@@ -232,6 +232,7 @@ int generic_write_end(struct file *, struct address_space *,
+ 				loff_t, unsigned, unsigned,
+ 				struct page *, void *);
+ void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
++void clean_page_buffers(struct page *page);
+ int cont_write_begin(struct file *, struct address_space *, loff_t,
+ 			unsigned, unsigned, struct page **, void **,
+ 			get_block_t *, loff_t *);
+diff --git a/include/sound/seq_virmidi.h b/include/sound/seq_virmidi.h
+index a03acd0d398a..695257ae64ac 100644
+--- a/include/sound/seq_virmidi.h
++++ b/include/sound/seq_virmidi.h
+@@ -60,6 +60,7 @@ struct snd_virmidi_dev {
+ 	int port;			/* created/attached port */
+ 	unsigned int flags;		/* SNDRV_VIRMIDI_* */
+ 	rwlock_t filelist_lock;
++	struct rw_semaphore filelist_sem;
+ 	struct list_head filelist;
+ };
+ 
+diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
+index 75a70a267029..406fc428d580 100644
+--- a/kernel/irq/chip.c
++++ b/kernel/irq/chip.c
+@@ -265,8 +265,8 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force)
+ 			irq_setup_affinity(desc);
+ 			break;
+ 		case IRQ_STARTUP_MANAGED:
++			irq_do_set_affinity(d, aff, false);
+ 			ret = __irq_startup(desc);
+-			irq_set_affinity_locked(d, aff, false);
+ 			break;
+ 		case IRQ_STARTUP_ABORT:
+ 			return 0;
+diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
+index 638eb9c83d9f..9eb09aef0313 100644
+--- a/kernel/irq/cpuhotplug.c
++++ b/kernel/irq/cpuhotplug.c
+@@ -18,8 +18,34 @@
+ static inline bool irq_needs_fixup(struct irq_data *d)
+ {
+ 	const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
++	unsigned int cpu = smp_processor_id();
+ 
+-	return cpumask_test_cpu(smp_processor_id(), m);
++#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
++	/*
++	 * The cpumask_empty() check is a workaround for interrupt chips,
++	 * which do not implement effective affinity, but the architecture has
++	 * enabled the config switch. Use the general affinity mask instead.
++	 */
++	if (cpumask_empty(m))
++		m = irq_data_get_affinity_mask(d);
++
++	/*
++	 * Sanity check. If the mask is not empty when excluding the outgoing
++	 * CPU then it must contain at least one online CPU. The outgoing CPU
++	 * has been removed from the online mask already.
++	 */
++	if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
++	    cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) {
++		/*
++		 * If this happens then there was a missed IRQ fixup at some
++		 * point. Warn about it and enforce fixup.
++		 */
++		pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n",
++			cpumask_pr_args(m), d->irq, cpu);
++		return true;
++	}
++#endif
++	return cpumask_test_cpu(cpu, m);
+ }
+ 
+ static bool migrate_one_irq(struct irq_desc *desc)
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 1d1a5b945ab4..70dc8da8737f 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -175,6 +175,9 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
+ 	struct irq_chip *chip = irq_data_get_irq_chip(data);
+ 	int ret;
+ 
++	if (!chip || !chip->irq_set_affinity)
++		return -EINVAL;
++
+ 	ret = chip->irq_set_affinity(data, mask, force);
+ 	switch (ret) {
+ 	case IRQ_SET_MASK_OK:
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index a47e3894c775..ceacc6e01904 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -1697,11 +1697,6 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
+ 	for (i = 0; i < area->nr_pages; i++) {
+ 		struct page *page;
+ 
+-		if (fatal_signal_pending(current)) {
+-			area->nr_pages = i;
+-			goto fail_no_warn;
+-		}
+-
+ 		if (node == NUMA_NO_NODE)
+ 			page = alloc_page(alloc_mask|highmem_mask);
+ 		else
+@@ -1725,7 +1720,6 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
+ 	warn_alloc(gfp_mask, NULL,
+ 			  "vmalloc: allocation failure, allocated %ld of %ld bytes",
+ 			  (area->nr_pages*PAGE_SIZE), area->size);
+-fail_no_warn:
+ 	vfree(area->addr);
+ 	return NULL;
+ }
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index ea2d0ae85bd3..6c9cba2166d9 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -1259,6 +1259,7 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg)
+ 	struct snd_seq_port_info *info = arg;
+ 	struct snd_seq_client_port *port;
+ 	struct snd_seq_port_callback *callback;
++	int port_idx;
+ 
+ 	/* it is not allowed to create the port for an another client */
+ 	if (info->addr.client != client->number)
+@@ -1269,7 +1270,9 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg)
+ 		return -ENOMEM;
+ 
+ 	if (client->type == USER_CLIENT && info->kernel) {
+-		snd_seq_delete_port(client, port->addr.port);
++		port_idx = port->addr.port;
++		snd_seq_port_unlock(port);
++		snd_seq_delete_port(client, port_idx);
+ 		return -EINVAL;
+ 	}
+ 	if (client->type == KERNEL_CLIENT) {
+@@ -1290,6 +1293,7 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg)
+ 
+ 	snd_seq_set_port_info(port, info);
+ 	snd_seq_system_client_ev_port_start(port->addr.client, port->addr.port);
++	snd_seq_port_unlock(port);
+ 
+ 	return 0;
+ }
+diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
+index 0a7020c82bfc..d21ece9f8d73 100644
+--- a/sound/core/seq/seq_ports.c
++++ b/sound/core/seq/seq_ports.c
+@@ -122,7 +122,9 @@ static void port_subs_info_init(struct snd_seq_port_subs_info *grp)
+ }
+ 
+ 
+-/* create a port, port number is returned (-1 on failure) */
++/* create a port, port number is returned (-1 on failure);
++ * the caller needs to unref the port via snd_seq_port_unlock() appropriately
++ */
+ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
+ 						int port)
+ {
+@@ -151,6 +153,7 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
+ 	snd_use_lock_init(&new_port->use_lock);
+ 	port_subs_info_init(&new_port->c_src);
+ 	port_subs_info_init(&new_port->c_dest);
++	snd_use_lock_use(&new_port->use_lock);
+ 
+ 	num = port >= 0 ? port : 0;
+ 	mutex_lock(&client->ports_mutex);
+@@ -165,9 +168,9 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
+ 	list_add_tail(&new_port->list, &p->list);
+ 	client->num_ports++;
+ 	new_port->addr.port = num;	/* store the port number in the port */
++	sprintf(new_port->name, "port-%d", num);
+ 	write_unlock_irqrestore(&client->ports_lock, flags);
+ 	mutex_unlock(&client->ports_mutex);
+-	sprintf(new_port->name, "port-%d", num);
+ 
+ 	return new_port;
+ }
+diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
+index 8d93a4021c78..f48a4cd24ffc 100644
+--- a/sound/core/seq/seq_virmidi.c
++++ b/sound/core/seq/seq_virmidi.c
+@@ -77,13 +77,17 @@ static void snd_virmidi_init_event(struct snd_virmidi *vmidi,
+  * decode input event and put to read buffer of each opened file
+  */
+ static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
+-					 struct snd_seq_event *ev)
++					 struct snd_seq_event *ev,
++					 bool atomic)
+ {
+ 	struct snd_virmidi *vmidi;
+ 	unsigned char msg[4];
+ 	int len;
+ 
+-	read_lock(&rdev->filelist_lock);
++	if (atomic)
++		read_lock(&rdev->filelist_lock);
++	else
++		down_read(&rdev->filelist_sem);
+ 	list_for_each_entry(vmidi, &rdev->filelist, list) {
+ 		if (!vmidi->trigger)
+ 			continue;
+@@ -97,7 +101,10 @@ static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
+ 				snd_rawmidi_receive(vmidi->substream, msg, len);
+ 		}
+ 	}
+-	read_unlock(&rdev->filelist_lock);
++	if (atomic)
++		read_unlock(&rdev->filelist_lock);
++	else
++		up_read(&rdev->filelist_sem);
+ 
+ 	return 0;
+ }
+@@ -115,7 +122,7 @@ int snd_virmidi_receive(struct snd_rawmidi *rmidi, struct snd_seq_event *ev)
+ 	struct snd_virmidi_dev *rdev;
+ 
+ 	rdev = rmidi->private_data;
+-	return snd_virmidi_dev_receive_event(rdev, ev);
++	return snd_virmidi_dev_receive_event(rdev, ev, true);
+ }
+ #endif  /*  0  */
+ 
+@@ -130,7 +137,7 @@ static int snd_virmidi_event_input(struct snd_seq_event *ev, int direct,
+ 	rdev = private_data;
+ 	if (!(rdev->flags & SNDRV_VIRMIDI_USE))
+ 		return 0; /* ignored */
+-	return snd_virmidi_dev_receive_event(rdev, ev);
++	return snd_virmidi_dev_receive_event(rdev, ev, atomic);
+ }
+ 
+ /*
+@@ -209,7 +216,6 @@ static int snd_virmidi_input_open(struct snd_rawmidi_substream *substream)
+ 	struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
+ 	struct snd_rawmidi_runtime *runtime = substream->runtime;
+ 	struct snd_virmidi *vmidi;
+-	unsigned long flags;
+ 
+ 	vmidi = kzalloc(sizeof(*vmidi), GFP_KERNEL);
+ 	if (vmidi == NULL)
+@@ -223,9 +229,11 @@ static int snd_virmidi_input_open(struct snd_rawmidi_substream *substream)
+ 	vmidi->client = rdev->client;
+ 	vmidi->port = rdev->port;	
+ 	runtime->private_data = vmidi;
+-	write_lock_irqsave(&rdev->filelist_lock, flags);
++	down_write(&rdev->filelist_sem);
++	write_lock_irq(&rdev->filelist_lock);
+ 	list_add_tail(&vmidi->list, &rdev->filelist);
+-	write_unlock_irqrestore(&rdev->filelist_lock, flags);
++	write_unlock_irq(&rdev->filelist_lock);
++	up_write(&rdev->filelist_sem);
+ 	vmidi->rdev = rdev;
+ 	return 0;
+ }
+@@ -264,9 +272,11 @@ static int snd_virmidi_input_close(struct snd_rawmidi_substream *substream)
+ 	struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
+ 	struct snd_virmidi *vmidi = substream->runtime->private_data;
+ 
++	down_write(&rdev->filelist_sem);
+ 	write_lock_irq(&rdev->filelist_lock);
+ 	list_del(&vmidi->list);
+ 	write_unlock_irq(&rdev->filelist_lock);
++	up_write(&rdev->filelist_sem);
+ 	snd_midi_event_free(vmidi->parser);
+ 	substream->runtime->private_data = NULL;
+ 	kfree(vmidi);
+@@ -520,6 +530,7 @@ int snd_virmidi_new(struct snd_card *card, int device, struct snd_rawmidi **rrmi
+ 	rdev->rmidi = rmidi;
+ 	rdev->device = device;
+ 	rdev->client = -1;
++	init_rwsem(&rdev->filelist_sem);
+ 	rwlock_init(&rdev->filelist_lock);
+ 	INIT_LIST_HEAD(&rdev->filelist);
+ 	rdev->seq_mode = SNDRV_VIRMIDI_SEQ_DISPATCH;
+diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c
+index b871ba407e4e..4458190149d1 100644
+--- a/sound/usb/caiaq/device.c
++++ b/sound/usb/caiaq/device.c
+@@ -469,10 +469,12 @@ static int init_card(struct snd_usb_caiaqdev *cdev)
+ 
+ 	err = snd_usb_caiaq_send_command(cdev, EP1_CMD_GET_DEVICE_INFO, NULL, 0);
+ 	if (err)
+-		return err;
++		goto err_kill_urb;
+ 
+-	if (!wait_event_timeout(cdev->ep1_wait_queue, cdev->spec_received, HZ))
+-		return -ENODEV;
++	if (!wait_event_timeout(cdev->ep1_wait_queue, cdev->spec_received, HZ)) {
++		err = -ENODEV;
++		goto err_kill_urb;
++	}
+ 
+ 	usb_string(usb_dev, usb_dev->descriptor.iManufacturer,
+ 		   cdev->vendor_name, CAIAQ_USB_STR_LEN);
+@@ -507,6 +509,10 @@ static int init_card(struct snd_usb_caiaqdev *cdev)
+ 
+ 	setup_card(cdev);
+ 	return 0;
++
++ err_kill_urb:
++	usb_kill_urb(&cdev->ep1_in_urb);
++	return err;
+ }
+ 
+ static int snd_probe(struct usb_interface *intf,
+diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
+index 0ff5a7d2e19f..c8f723c3a033 100644
+--- a/sound/usb/line6/driver.c
++++ b/sound/usb/line6/driver.c
+@@ -779,9 +779,10 @@ int line6_probe(struct usb_interface *interface,
+ 	return 0;
+ 
+  error:
+-	if (line6->disconnect)
+-		line6->disconnect(line6);
+-	snd_card_free(card);
++	/* we can call disconnect callback here because no close-sync is
++	 * needed yet at this point
++	 */
++	line6_disconnect(interface);
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(line6_probe);
+diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c
+index 956f847a96e4..451007c27743 100644
+--- a/sound/usb/line6/podhd.c
++++ b/sound/usb/line6/podhd.c
+@@ -301,7 +301,8 @@ static void podhd_disconnect(struct usb_line6 *line6)
+ 
+ 		intf = usb_ifnum_to_if(line6->usbdev,
+ 					pod->line6.properties->ctrl_if);
+-		usb_driver_release_interface(&podhd_driver, intf);
++		if (intf)
++			usb_driver_release_interface(&podhd_driver, intf);
+ 	}
+ }
+ 
+@@ -317,6 +318,9 @@ static int podhd_init(struct usb_line6 *line6,
+ 
+ 	line6->disconnect = podhd_disconnect;
+ 
++	init_timer(&pod->startup_timer);
++	INIT_WORK(&pod->startup_work, podhd_startup_workqueue);
++
+ 	if (pod->line6.properties->capabilities & LINE6_CAP_CONTROL) {
+ 		/* claim the data interface */
+ 		intf = usb_ifnum_to_if(line6->usbdev,
+@@ -358,8 +362,6 @@ static int podhd_init(struct usb_line6 *line6,
+ 	}
+ 
+ 	/* init device and delay registering */
+-	init_timer(&pod->startup_timer);
+-	INIT_WORK(&pod->startup_work, podhd_startup_workqueue);
+ 	podhd_startup(pod);
+ 	return 0;
+ }
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index e630813c5008..a08e90566edc 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -2228,6 +2228,9 @@ static int parse_audio_unit(struct mixer_build *state, int unitid)
+ 
+ static void snd_usb_mixer_free(struct usb_mixer_interface *mixer)
+ {
++	/* kill pending URBs */
++	snd_usb_mixer_disconnect(mixer);
++
+ 	kfree(mixer->id_elems);
+ 	if (mixer->urb) {
+ 		kfree(mixer->urb->transfer_buffer);
+@@ -2578,8 +2581,13 @@ int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif,
+ 
+ void snd_usb_mixer_disconnect(struct usb_mixer_interface *mixer)
+ {
+-	usb_kill_urb(mixer->urb);
+-	usb_kill_urb(mixer->rc_urb);
++	if (mixer->disconnected)
++		return;
++	if (mixer->urb)
++		usb_kill_urb(mixer->urb);
++	if (mixer->rc_urb)
++		usb_kill_urb(mixer->rc_urb);
++	mixer->disconnected = true;
+ }
+ 
+ #ifdef CONFIG_PM
+diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
+index 2b4b067646ab..545d99b09706 100644
+--- a/sound/usb/mixer.h
++++ b/sound/usb/mixer.h
+@@ -22,6 +22,8 @@ struct usb_mixer_interface {
+ 	struct urb *rc_urb;
+ 	struct usb_ctrlrequest *rc_setup_packet;
+ 	u8 rc_buffer[6];
++
++	bool disconnected;
+ };
+ 
+ #define MAX_CHANNELS	16	/* max logical channels */
+diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
+index 83cdc0a61fd6..88a484c273e8 100644
+--- a/tools/perf/builtin-script.c
++++ b/tools/perf/builtin-script.c
+@@ -578,7 +578,7 @@ static void print_sample_brstack(struct perf_sample *sample,
+ 			thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, to, &alt);
+ 		}
+ 
+-		printf("0x%"PRIx64, from);
++		printf(" 0x%"PRIx64, from);
+ 		if (PRINT_FIELD(DSO)) {
+ 			printf("(");
+ 			map__fprintf_dsoname(alf.map, stdout);
+@@ -673,7 +673,7 @@ static void print_sample_brstackoff(struct perf_sample *sample,
+ 		if (alt.map && !alt.map->dso->adjust_symbols)
+ 			to = map__map_ip(alt.map, to);
+ 
+-		printf("0x%"PRIx64, from);
++		printf(" 0x%"PRIx64, from);
+ 		if (PRINT_FIELD(DSO)) {
+ 			printf("(");
+ 			map__fprintf_dsoname(alf.map, stdout);


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.13 commit in: /
@ 2017-10-14 14:28 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2017-10-14 14:28 UTC (permalink / raw
  To: gentoo-commits

commit:     45a997db178a0677381de56d0e6dedc59e79cbce
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Oct 14 14:28:02 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Oct 14 14:28:02 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=45a997db

Linux patch 4.13.7

 0000_README             |  4 ++
 1006_linux-4.13.7.patch | 99 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 103 insertions(+)

diff --git a/0000_README b/0000_README
index 92f54b8..b6a0ed9 100644
--- a/0000_README
+++ b/0000_README
@@ -67,6 +67,10 @@ Patch:  1005_linux-4.13.6.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.13.6
 
+Patch:  1006_linux-4.13.7.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.13.7
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1006_linux-4.13.7.patch b/1006_linux-4.13.7.patch
new file mode 100644
index 0000000..038d91d
--- /dev/null
+++ b/1006_linux-4.13.7.patch
@@ -0,0 +1,99 @@
+diff --git a/Documentation/watchdog/watchdog-parameters.txt b/Documentation/watchdog/watchdog-parameters.txt
+index b3526365ea8e..6f9d7b418917 100644
+--- a/Documentation/watchdog/watchdog-parameters.txt
++++ b/Documentation/watchdog/watchdog-parameters.txt
+@@ -117,7 +117,7 @@ nowayout: Watchdog cannot be stopped once started
+ -------------------------------------------------
+ iTCO_wdt:
+ heartbeat: Watchdog heartbeat in seconds.
+-	(5<=heartbeat<=74 (TCO v1) or 1226 (TCO v2), default=30)
++	(2<heartbeat<39 (TCO v1) or 613 (TCO v2), default=30)
+ nowayout: Watchdog cannot be stopped once started
+ 	(default=kernel config parameter)
+ -------------------------------------------------
+diff --git a/Makefile b/Makefile
+index 9e1af1af327b..0d4f1b19869d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 13
+-SUBLEVEL = 6
++SUBLEVEL = 7
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
+index c4f65873bfa4..347f0389b089 100644
+--- a/drivers/watchdog/iTCO_wdt.c
++++ b/drivers/watchdog/iTCO_wdt.c
+@@ -306,15 +306,16 @@ static int iTCO_wdt_ping(struct watchdog_device *wd_dev)
+ 
+ 	iTCO_vendor_pre_keepalive(p->smi_res, wd_dev->timeout);
+ 
+-	/* Reset the timeout status bit so that the timer
+-	 * needs to count down twice again before rebooting */
+-	outw(0x0008, TCO1_STS(p));	/* write 1 to clear bit */
+-
+ 	/* Reload the timer by writing to the TCO Timer Counter register */
+-	if (p->iTCO_version >= 2)
++	if (p->iTCO_version >= 2) {
+ 		outw(0x01, TCO_RLD(p));
+-	else if (p->iTCO_version == 1)
++	} else if (p->iTCO_version == 1) {
++		/* Reset the timeout status bit so that the timer
++		 * needs to count down twice again before rebooting */
++		outw(0x0008, TCO1_STS(p));	/* write 1 to clear bit */
++
+ 		outb(0x01, TCO_RLD(p));
++	}
+ 
+ 	spin_unlock(&p->io_lock);
+ 	return 0;
+@@ -327,8 +328,11 @@ static int iTCO_wdt_set_timeout(struct watchdog_device *wd_dev, unsigned int t)
+ 	unsigned char val8;
+ 	unsigned int tmrval;
+ 
+-	/* The timer counts down twice before rebooting */
+-	tmrval = seconds_to_ticks(p, t) / 2;
++	tmrval = seconds_to_ticks(p, t);
++
++	/* For TCO v1 the timer counts down twice before rebooting */
++	if (p->iTCO_version == 1)
++		tmrval /= 2;
+ 
+ 	/* from the specs: */
+ 	/* "Values of 0h-3h are ignored and should not be attempted" */
+@@ -381,8 +385,6 @@ static unsigned int iTCO_wdt_get_timeleft(struct watchdog_device *wd_dev)
+ 		spin_lock(&p->io_lock);
+ 		val16 = inw(TCO_RLD(p));
+ 		val16 &= 0x3ff;
+-		if (!(inw(TCO1_STS(p)) & 0x0008))
+-			val16 += (inw(TCOv2_TMR(p)) & 0x3ff);
+ 		spin_unlock(&p->io_lock);
+ 
+ 		time_left = ticks_to_seconds(p, val16);
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 6d31fc5ba50d..135b36985f8a 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -1611,6 +1611,9 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
+ 	if (!infop)
+ 		return err;
+ 
++	if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop)))
++		goto Efault;
++
+ 	user_access_begin();
+ 	unsafe_put_user(signo, &infop->si_signo, Efault);
+ 	unsafe_put_user(0, &infop->si_errno, Efault);
+@@ -1736,6 +1739,9 @@ COMPAT_SYSCALL_DEFINE5(waitid,
+ 	if (!infop)
+ 		return err;
+ 
++	if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop)))
++		goto Efault;
++
+ 	user_access_begin();
+ 	unsafe_put_user(signo, &infop->si_signo, Efault);
+ 	unsafe_put_user(0, &infop->si_errno, Efault);


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.13 commit in: /
@ 2017-10-12 12:55 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2017-10-12 12:55 UTC (permalink / raw
  To: gentoo-commits

commit:     d6f163b8ca634fb14f52aec8aa90846f0a96d460
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Oct 12 12:55:43 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Oct 12 12:55:43 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d6f163b8

Linux patch 4.13.6

 0000_README             |    4 +
 1005_linux-4.13.6.patch | 5660 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5664 insertions(+)

diff --git a/0000_README b/0000_README
index 382473c..92f54b8 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch:  1004_linux-4.13.5.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.13.5
 
+Patch:  1005_linux-4.13.6.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.13.6
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1005_linux-4.13.6.patch b/1005_linux-4.13.6.patch
new file mode 100644
index 0000000..1953e4a
--- /dev/null
+++ b/1005_linux-4.13.6.patch
@@ -0,0 +1,5660 @@
+diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt
+index 36f528a7fdd6..8caa60734647 100644
+--- a/Documentation/filesystems/overlayfs.txt
++++ b/Documentation/filesystems/overlayfs.txt
+@@ -210,8 +210,11 @@ path as another overlay mount and it may use a lower layer path that is
+ beneath or above the path of another overlay lower layer path.
+ 
+ Using an upper layer path and/or a workdir path that are already used by
+-another overlay mount is not allowed and will fail with EBUSY.  Using
++another overlay mount is not allowed and may fail with EBUSY.  Using
+ partially overlapping paths is not allowed but will not fail with EBUSY.
++If files are accessed from two overlayfs mounts which share or overlap the
++upper layer and/or workdir path the behavior of the overlay is undefined,
++though it will not result in a crash or deadlock.
+ 
+ Mounting an overlay using an upper layer path, where the upper layer path
+ was previously used by another mounted overlay in combination with a
+diff --git a/Makefile b/Makefile
+index 189f1a748e4c..9e1af1af327b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 13
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
+index 4d360713ed12..30d48ecf46e0 100644
+--- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
++++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
+@@ -254,7 +254,7 @@
+ 
+ 			ap_syscon: system-controller@6f4000 {
+ 				compatible = "syscon", "simple-mfd";
+-				reg = <0x6f4000 0x1000>;
++				reg = <0x6f4000 0x2000>;
+ 
+ 				ap_clk: clock {
+ 					compatible = "marvell,ap806-clock";
+@@ -265,7 +265,7 @@
+ 					compatible = "marvell,ap806-pinctrl";
+ 				};
+ 
+-				ap_gpio: gpio {
++				ap_gpio: gpio@1040 {
+ 					compatible = "marvell,armada-8k-gpio";
+ 					offset = <0x1040>;
+ 					ngpios = <20>;
+diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
+index f0e6d717885b..d06fbe4cd38d 100644
+--- a/arch/arm64/kernel/armv8_deprecated.c
++++ b/arch/arm64/kernel/armv8_deprecated.c
+@@ -649,4 +649,4 @@ static int __init armv8_deprecated_init(void)
+ 	return 0;
+ }
+ 
+-late_initcall(armv8_deprecated_init);
++core_initcall(armv8_deprecated_init);
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 9f9e0064c8c1..276eecab6cea 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -1294,4 +1294,4 @@ static int __init enable_mrs_emulation(void)
+ 	return 0;
+ }
+ 
+-late_initcall(enable_mrs_emulation);
++core_initcall(enable_mrs_emulation);
+diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
+index 1df770e8cbe0..7275fed271af 100644
+--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
++++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
+@@ -102,10 +102,10 @@ static void cpufeatures_flush_tlb(void)
+ 	case PVR_POWER8:
+ 	case PVR_POWER8E:
+ 	case PVR_POWER8NVL:
+-		__flush_tlb_power8(POWER8_TLB_SETS);
++		__flush_tlb_power8(TLB_INVAL_SCOPE_GLOBAL);
+ 		break;
+ 	case PVR_POWER9:
+-		__flush_tlb_power9(POWER9_TLB_SETS_HASH);
++		__flush_tlb_power9(TLB_INVAL_SCOPE_GLOBAL);
+ 		break;
+ 	default:
+ 		pr_err("unknown CPU version for boot TLB flush\n");
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index f14f3c04ec7e..d9dfdf7ede45 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -734,7 +734,29 @@ EXC_REAL(program_check, 0x700, 0x100)
+ EXC_VIRT(program_check, 0x4700, 0x100, 0x700)
+ TRAMP_KVM(PACA_EXGEN, 0x700)
+ EXC_COMMON_BEGIN(program_check_common)
+-	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
++	/*
++	 * It's possible to receive a TM Bad Thing type program check with
++	 * userspace register values (in particular r1), but with SRR1 reporting
++	 * that we came from the kernel. Normally that would confuse the bad
++	 * stack logic, and we would report a bad kernel stack pointer. Instead
++	 * we switch to the emergency stack if we're taking a TM Bad Thing from
++	 * the kernel.
++	 */
++	li	r10,MSR_PR		/* Build a mask of MSR_PR ..	*/
++	oris	r10,r10,0x200000@h	/* .. and SRR1_PROGTM		*/
++	and	r10,r10,r12		/* Mask SRR1 with that.		*/
++	srdi	r10,r10,8		/* Shift it so we can compare	*/
++	cmpldi	r10,(0x200000 >> 8)	/* .. with an immediate.	*/
++	bne 1f				/* If != go to normal path.	*/
++
++	/* SRR1 had PR=0 and SRR1_PROGTM=1, so use the emergency stack	*/
++	andi.	r10,r12,MSR_PR;		/* Set CR0 correctly for label	*/
++					/* 3 in EXCEPTION_PROLOG_COMMON	*/
++	mr	r10,r1			/* Save r1			*/
++	ld	r1,PACAEMERGSP(r13)	/* Use emergency stack		*/
++	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame		*/
++	b 3f				/* Jump into the macro !!	*/
++1:	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
+ 	bl	save_nvgprs
+ 	RECONCILE_IRQ_STATE(r10, r11)
+ 	addi	r3,r1,STACK_FRAME_OVERHEAD
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index c83c115858c1..b2c002993d78 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -452,9 +452,20 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
+ 	if (MSR_TM_RESV(msr))
+ 		return -EINVAL;
+ 
+-	/* pull in MSR TM from user context */
++	/* pull in MSR TS bits from user context */
+ 	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
+ 
++	/*
++	 * Ensure that TM is enabled in regs->msr before we leave the signal
++	 * handler. It could be the case that (a) user disabled the TM bit
++	 * through the manipulation of the MSR bits in uc_mcontext or (b) the
++	 * TM bit was disabled because a sufficient number of context switches
++	 * happened whilst in the signal handler and load_tm overflowed,
++	 * disabling the TM bit. In either case we can end up with an illegal
++	 * TM state leading to a TM Bad Thing when we return to userspace.
++	 */
++	regs->msr |= MSR_TM;
++
+ 	/* pull in MSR LE from user context */
+ 	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+ 
+diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
+index 13304622ab1c..bf457843e032 100644
+--- a/arch/powerpc/kvm/book3s_xive.c
++++ b/arch/powerpc/kvm/book3s_xive.c
+@@ -622,7 +622,7 @@ int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
+ 		return -EINVAL;
+ 	state = &sb->irq_state[idx];
+ 	arch_spin_lock(&sb->lock);
+-	*server = state->guest_server;
++	*server = state->act_server;
+ 	*priority = state->guest_priority;
+ 	arch_spin_unlock(&sb->lock);
+ 
+@@ -1331,7 +1331,7 @@ static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
+ 	xive->saved_src_count++;
+ 
+ 	/* Convert saved state into something compatible with xics */
+-	val = state->guest_server;
++	val = state->act_server;
+ 	prio = state->saved_scan_prio;
+ 
+ 	if (prio == MASKED) {
+@@ -1507,7 +1507,6 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
+ 	/* First convert prio and mark interrupt as untargetted */
+ 	act_prio = xive_prio_from_guest(guest_prio);
+ 	state->act_priority = MASKED;
+-	state->guest_server = server;
+ 
+ 	/*
+ 	 * We need to drop the lock due to the mutex below. Hopefully
+diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h
+index 5938f7644dc1..6ba63f8e8a61 100644
+--- a/arch/powerpc/kvm/book3s_xive.h
++++ b/arch/powerpc/kvm/book3s_xive.h
+@@ -35,7 +35,6 @@ struct kvmppc_xive_irq_state {
+ 	struct xive_irq_data *pt_data;	/* XIVE Pass-through associated data */
+ 
+ 	/* Targetting as set by guest */
+-	u32 guest_server;		/* Current guest selected target */
+ 	u8 guest_priority;		/* Guest set priority */
+ 	u8 saved_priority;		/* Saved priority when masking */
+ 
+diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
+index 897aa1400eb8..bbb73aa0eb8f 100644
+--- a/arch/powerpc/platforms/powernv/setup.c
++++ b/arch/powerpc/platforms/powernv/setup.c
+@@ -272,7 +272,15 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
+ #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
+ static unsigned long pnv_memory_block_size(void)
+ {
+-	return 256UL * 1024 * 1024;
++	/*
++	 * We map the kernel linear region with 1GB large pages on radix. For
++	 * memory hot unplug to work our memory block size must be at least
++	 * this size.
++	 */
++	if (radix_enabled())
++		return 1UL * 1024 * 1024 * 1024;
++	else
++		return 256UL * 1024 * 1024;
+ }
+ #endif
+ 
+diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
+index bc62e7cbf1b1..59ad3d132353 100644
+--- a/arch/x86/include/asm/kvm_para.h
++++ b/arch/x86/include/asm/kvm_para.h
+@@ -88,7 +88,7 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
+ bool kvm_para_available(void);
+ unsigned int kvm_arch_para_features(void);
+ void __init kvm_guest_init(void);
+-void kvm_async_pf_task_wait(u32 token);
++void kvm_async_pf_task_wait(u32 token, int interrupt_kernel);
+ void kvm_async_pf_task_wake(u32 token);
+ u32 kvm_read_and_reset_pf_reason(void);
+ extern void kvm_disable_steal_time(void);
+@@ -103,7 +103,7 @@ static inline void kvm_spinlock_init(void)
+ 
+ #else /* CONFIG_KVM_GUEST */
+ #define kvm_guest_init() do {} while (0)
+-#define kvm_async_pf_task_wait(T) do {} while(0)
++#define kvm_async_pf_task_wait(T, I) do {} while(0)
+ #define kvm_async_pf_task_wake(T) do {} while(0)
+ 
+ static inline bool kvm_para_available(void)
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index 58590a698a1a..e5e4306e4546 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -117,7 +117,11 @@ static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
+ 	return NULL;
+ }
+ 
+-void kvm_async_pf_task_wait(u32 token)
++/*
++ * @interrupt_kernel: Is this called from a routine which interrupts the kernel
++ * 		      (other than user space)?
++ */
++void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
+ {
+ 	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
+ 	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
+@@ -140,8 +144,10 @@ void kvm_async_pf_task_wait(u32 token)
+ 
+ 	n.token = token;
+ 	n.cpu = smp_processor_id();
+-	n.halted = is_idle_task(current) || preempt_count() > 1 ||
+-		   rcu_preempt_depth();
++	n.halted = is_idle_task(current) ||
++		   (IS_ENABLED(CONFIG_PREEMPT_COUNT)
++		    ? preempt_count() > 1 || rcu_preempt_depth()
++		    : interrupt_kernel);
+ 	init_swait_queue_head(&n.wq);
+ 	hlist_add_head(&n.link, &b->list);
+ 	raw_spin_unlock(&b->lock);
+@@ -269,7 +275,7 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
+ 	case KVM_PV_REASON_PAGE_NOT_PRESENT:
+ 		/* page is swapped out by the host. */
+ 		prev_state = exception_enter();
+-		kvm_async_pf_task_wait((u32)read_cr2());
++		kvm_async_pf_task_wait((u32)read_cr2(), !user_mode(regs));
+ 		exception_exit(prev_state);
+ 		break;
+ 	case KVM_PV_REASON_PAGE_READY:
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index 56e68dfac974..7558531c1215 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -3799,7 +3799,7 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
+ 	case KVM_PV_REASON_PAGE_NOT_PRESENT:
+ 		vcpu->arch.apf.host_apf_reason = 0;
+ 		local_irq_disable();
+-		kvm_async_pf_task_wait(fault_address);
++		kvm_async_pf_task_wait(fault_address, 0);
+ 		local_irq_enable();
+ 		break;
+ 	case KVM_PV_REASON_PAGE_READY:
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index e1324f280e06..4d50ced94686 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -282,9 +282,9 @@ static void emit_bpf_tail_call(u8 **pprog)
+ 	/* if (index >= array->map.max_entries)
+ 	 *   goto out;
+ 	 */
+-	EMIT4(0x48, 0x8B, 0x46,                   /* mov rax, qword ptr [rsi + 16] */
++	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
++	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
+ 	      offsetof(struct bpf_array, map.max_entries));
+-	EMIT3(0x48, 0x39, 0xD0);                  /* cmp rax, rdx */
+ #define OFFSET1 47 /* number of bytes to jump */
+ 	EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
+ 	label1 = cnt;
+diff --git a/block/bsg-lib.c b/block/bsg-lib.c
+index c587c71d78af..82ddfcd23939 100644
+--- a/block/bsg-lib.c
++++ b/block/bsg-lib.c
+@@ -207,20 +207,34 @@ static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp)
+ 	struct bsg_job *job = blk_mq_rq_to_pdu(req);
+ 	struct scsi_request *sreq = &job->sreq;
+ 
++	/* called right after the request is allocated for the request_queue */
++
++	sreq->sense = kzalloc(SCSI_SENSE_BUFFERSIZE, gfp);
++	if (!sreq->sense)
++		return -ENOMEM;
++
++	return 0;
++}
++
++static void bsg_initialize_rq(struct request *req)
++{
++	struct bsg_job *job = blk_mq_rq_to_pdu(req);
++	struct scsi_request *sreq = &job->sreq;
++	void *sense = sreq->sense;
++
++	/* called right before the request is given to the request_queue user */
++
+ 	memset(job, 0, sizeof(*job));
+ 
+ 	scsi_req_init(sreq);
++
++	sreq->sense = sense;
+ 	sreq->sense_len = SCSI_SENSE_BUFFERSIZE;
+-	sreq->sense = kzalloc(sreq->sense_len, gfp);
+-	if (!sreq->sense)
+-		return -ENOMEM;
+ 
+ 	job->req = req;
+-	job->reply = sreq->sense;
++	job->reply = sense;
+ 	job->reply_len = sreq->sense_len;
+ 	job->dd_data = job + 1;
+-
+-	return 0;
+ }
+ 
+ static void bsg_exit_rq(struct request_queue *q, struct request *req)
+@@ -250,6 +264,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, char *name,
+ 	q->cmd_size = sizeof(struct bsg_job) + dd_job_size;
+ 	q->init_rq_fn = bsg_init_rq;
+ 	q->exit_rq_fn = bsg_exit_rq;
++	q->initialize_rq_fn = bsg_initialize_rq;
+ 	q->request_fn = bsg_request_fn;
+ 
+ 	ret = blk_init_allocated_queue(q);
+diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
+index cfeb049a01ef..642afd88870b 100644
+--- a/drivers/auxdisplay/charlcd.c
++++ b/drivers/auxdisplay/charlcd.c
+@@ -647,18 +647,25 @@ static ssize_t charlcd_write(struct file *file, const char __user *buf,
+ static int charlcd_open(struct inode *inode, struct file *file)
+ {
+ 	struct charlcd_priv *priv = to_priv(the_charlcd);
++	int ret;
+ 
++	ret = -EBUSY;
+ 	if (!atomic_dec_and_test(&charlcd_available))
+-		return -EBUSY;	/* open only once at a time */
++		goto fail;	/* open only once at a time */
+ 
++	ret = -EPERM;
+ 	if (file->f_mode & FMODE_READ)	/* device is write-only */
+-		return -EPERM;
++		goto fail;
+ 
+ 	if (priv->must_clear) {
+ 		charlcd_clear_display(&priv->lcd);
+ 		priv->must_clear = false;
+ 	}
+ 	return nonseekable_open(inode, file);
++
++ fail:
++	atomic_inc(&charlcd_available);
++	return ret;
+ }
+ 
+ static int charlcd_release(struct inode *inode, struct file *file)
+diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
+index 7a8b8fb2f572..c54c20700d37 100644
+--- a/drivers/auxdisplay/panel.c
++++ b/drivers/auxdisplay/panel.c
+@@ -1105,14 +1105,21 @@ static ssize_t keypad_read(struct file *file,
+ 
+ static int keypad_open(struct inode *inode, struct file *file)
+ {
++	int ret;
++
++	ret = -EBUSY;
+ 	if (!atomic_dec_and_test(&keypad_available))
+-		return -EBUSY;	/* open only once at a time */
++		goto fail;	/* open only once at a time */
+ 
++	ret = -EPERM;
+ 	if (file->f_mode & FMODE_WRITE)	/* device is read-only */
+-		return -EPERM;
++		goto fail;
+ 
+ 	keypad_buflen = 0;	/* flush the buffer on opening */
+ 	return 0;
++ fail:
++	atomic_inc(&keypad_available);
++	return ret;
+ }
+ 
+ static int keypad_release(struct inode *inode, struct file *file)
+diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
+index d1c33a85059e..df8945d7f009 100644
+--- a/drivers/base/arch_topology.c
++++ b/drivers/base/arch_topology.c
+@@ -160,12 +160,12 @@ int __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
+ }
+ 
+ #ifdef CONFIG_CPU_FREQ
+-static cpumask_var_t cpus_to_visit;
+-static bool cap_parsing_done;
+-static void parsing_done_workfn(struct work_struct *work);
+-static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
++static bool cap_parsing_done __initdata;
++static cpumask_var_t cpus_to_visit __initdata;
++static void __init parsing_done_workfn(struct work_struct *work);
++static __initdata DECLARE_WORK(parsing_done_work, parsing_done_workfn);
+ 
+-static int
++static int __init
+ init_cpu_capacity_callback(struct notifier_block *nb,
+ 			   unsigned long val,
+ 			   void *data)
+@@ -200,7 +200,7 @@ init_cpu_capacity_callback(struct notifier_block *nb,
+ 	return 0;
+ }
+ 
+-static struct notifier_block init_cpu_capacity_notifier = {
++static struct notifier_block init_cpu_capacity_notifier __initdata = {
+ 	.notifier_call = init_cpu_capacity_callback,
+ };
+ 
+@@ -226,7 +226,7 @@ static int __init register_cpufreq_notifier(void)
+ }
+ core_initcall(register_cpufreq_notifier);
+ 
+-static void parsing_done_workfn(struct work_struct *work)
++static void __init parsing_done_workfn(struct work_struct *work)
+ {
+ 	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
+ 					 CPUFREQ_POLICY_NOTIFIER);
+diff --git a/drivers/base/platform.c b/drivers/base/platform.c
+index d1bd99271066..9045c5f3734e 100644
+--- a/drivers/base/platform.c
++++ b/drivers/base/platform.c
+@@ -868,7 +868,8 @@ static ssize_t driver_override_store(struct device *dev,
+ 	struct platform_device *pdev = to_platform_device(dev);
+ 	char *driver_override, *old, *cp;
+ 
+-	if (count > PATH_MAX)
++	/* We need to keep extra room for a newline */
++	if (count >= (PAGE_SIZE - 1))
+ 		return -EINVAL;
+ 
+ 	driver_override = kstrndup(buf, count, GFP_KERNEL);
+diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
+index e40b77583c47..d8d3cb67b402 100644
+--- a/drivers/clk/samsung/clk-exynos4.c
++++ b/drivers/clk/samsung/clk-exynos4.c
+@@ -294,6 +294,18 @@ static const struct samsung_clk_reg_dump src_mask_suspend_e4210[] = {
+ #define PLL_ENABLED	(1 << 31)
+ #define PLL_LOCKED	(1 << 29)
+ 
++static void exynos4_clk_enable_pll(u32 reg)
++{
++	u32 pll_con = readl(reg_base + reg);
++	pll_con |= PLL_ENABLED;
++	writel(pll_con, reg_base + reg);
++
++	while (!(pll_con & PLL_LOCKED)) {
++		cpu_relax();
++		pll_con = readl(reg_base + reg);
++	}
++}
++
+ static void exynos4_clk_wait_for_pll(u32 reg)
+ {
+ 	u32 pll_con;
+@@ -315,6 +327,9 @@ static int exynos4_clk_suspend(void)
+ 	samsung_clk_save(reg_base, exynos4_save_pll,
+ 				ARRAY_SIZE(exynos4_clk_pll_regs));
+ 
++	exynos4_clk_enable_pll(EPLL_CON0);
++	exynos4_clk_enable_pll(VPLL_CON0);
++
+ 	if (exynos4_soc == EXYNOS4210) {
+ 		samsung_clk_save(reg_base, exynos4_save_soc,
+ 					ARRAY_SIZE(exynos4210_clk_save));
+diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
+index d805b6e6fe71..27743be5b768 100644
+--- a/drivers/gpu/drm/i915/intel_audio.c
++++ b/drivers/gpu/drm/i915/intel_audio.c
+@@ -606,11 +606,6 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder,
+ 			 connector->encoder->base.id,
+ 			 connector->encoder->name);
+ 
+-	/* ELD Conn_Type */
+-	connector->eld[5] &= ~(3 << 2);
+-	if (intel_crtc_has_dp_encoder(crtc_state))
+-		connector->eld[5] |= (1 << 2);
+-
+ 	connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
+ 
+ 	if (dev_priv->display.audio_codec_enable)
+diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
+index 7ea7fd1e8856..645488071944 100644
+--- a/drivers/gpu/drm/i915/intel_bios.c
++++ b/drivers/gpu/drm/i915/intel_bios.c
+@@ -1163,6 +1163,13 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
+ 	is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
+ 	is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
+ 
++	if (port == PORT_A && is_dvi) {
++		DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
++			      is_hdmi ? "/HDMI" : "");
++		is_dvi = false;
++		is_hdmi = false;
++	}
++
+ 	info->supports_dvi = is_dvi;
+ 	info->supports_hdmi = is_hdmi;
+ 	info->supports_dp = is_dp;
+diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
+index 951e834dd274..28a778b785ac 100644
+--- a/drivers/gpu/drm/i915/intel_modes.c
++++ b/drivers/gpu/drm/i915/intel_modes.c
+@@ -30,6 +30,21 @@
+ #include "intel_drv.h"
+ #include "i915_drv.h"
+ 
++static void intel_connector_update_eld_conn_type(struct drm_connector *connector)
++{
++	u8 conn_type;
++
++	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
++	    connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
++		conn_type = DRM_ELD_CONN_TYPE_DP;
++	} else {
++		conn_type = DRM_ELD_CONN_TYPE_HDMI;
++	}
++
++	connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] &= ~DRM_ELD_CONN_TYPE_MASK;
++	connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= conn_type;
++}
++
+ /**
+  * intel_connector_update_modes - update connector from edid
+  * @connector: DRM connector device to use
+@@ -44,6 +59,8 @@ int intel_connector_update_modes(struct drm_connector *connector,
+ 	ret = drm_add_edid_modes(connector, edid);
+ 	drm_edid_to_eld(connector, edid);
+ 
++	intel_connector_update_eld_conn_type(connector);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
+index 5b40c2614599..ef241d66562e 100644
+--- a/drivers/hid/hid-rmi.c
++++ b/drivers/hid/hid-rmi.c
+@@ -436,17 +436,24 @@ static int rmi_post_resume(struct hid_device *hdev)
+ 	if (!(data->device_flags & RMI_DEVICE))
+ 		return 0;
+ 
+-	ret = rmi_reset_attn_mode(hdev);
++	/* Make sure the HID device is ready to receive events */
++	ret = hid_hw_open(hdev);
+ 	if (ret)
+ 		return ret;
+ 
++	ret = rmi_reset_attn_mode(hdev);
++	if (ret)
++		goto out;
++
+ 	ret = rmi_driver_resume(rmi_dev, false);
+ 	if (ret) {
+ 		hid_warn(hdev, "Failed to resume device: %d\n", ret);
+-		return ret;
++		goto out;
+ 	}
+ 
+-	return 0;
++out:
++	hid_hw_close(hdev);
++	return ret;
+ }
+ #endif /* CONFIG_PM */
+ 
+diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
+index 046f692fd0a2..364150435c62 100644
+--- a/drivers/hid/i2c-hid/i2c-hid.c
++++ b/drivers/hid/i2c-hid/i2c-hid.c
+@@ -543,7 +543,8 @@ static int i2c_hid_alloc_buffers(struct i2c_hid *ihid, size_t report_size)
+ {
+ 	/* the worst case is computed from the set_report command with a
+ 	 * reportID > 15 and the maximum report length */
+-	int args_len = sizeof(__u8) + /* optional ReportID byte */
++	int args_len = sizeof(__u8) + /* ReportID */
++		       sizeof(__u8) + /* optional ReportID byte */
+ 		       sizeof(__u16) + /* data register */
+ 		       sizeof(__u16) + /* size of the report */
+ 		       report_size; /* report */
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index 838c1ebfffa9..a805ee2989cb 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -668,8 +668,10 @@ static struct wacom_hdev_data *wacom_get_hdev_data(struct hid_device *hdev)
+ 
+ 	/* Try to find an already-probed interface from the same device */
+ 	list_for_each_entry(data, &wacom_udev_list, list) {
+-		if (compare_device_paths(hdev, data->dev, '/'))
++		if (compare_device_paths(hdev, data->dev, '/')) {
++			kref_get(&data->kref);
+ 			return data;
++		}
+ 	}
+ 
+ 	/* Fallback to finding devices that appear to be "siblings" */
+@@ -766,6 +768,9 @@ static int wacom_led_control(struct wacom *wacom)
+ 	if (!wacom->led.groups)
+ 		return -ENOTSUPP;
+ 
++	if (wacom->wacom_wac.features.type == REMOTE)
++		return -ENOTSUPP;
++
+ 	if (wacom->wacom_wac.pid) { /* wireless connected */
+ 		report_id = WAC_CMD_WL_LED_CONTROL;
+ 		buf_size = 13;
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index bb17d7bbefd3..aa692e28b2cd 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -567,8 +567,8 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
+ 				keys = data[9] & 0x07;
+ 			}
+ 		} else {
+-			buttons = ((data[6] & 0x10) << 10) |
+-			          ((data[5] & 0x10) << 9)  |
++			buttons = ((data[6] & 0x10) << 5)  |
++			          ((data[5] & 0x10) << 4)  |
+ 			          ((data[6] & 0x0F) << 4)  |
+ 			          (data[5] & 0x0F);
+ 		}
+@@ -1227,11 +1227,17 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
+ 			continue;
+ 
+ 		if (range) {
++			/* Fix rotation alignment: userspace expects zero at left */
++			int16_t rotation = (int16_t)get_unaligned_le16(&frame[9]);
++			rotation += 1800/4;
++			if (rotation > 899)
++				rotation -= 1800;
++
+ 			input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1]));
+ 			input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3]));
+-			input_report_abs(pen_input, ABS_TILT_X, frame[7]);
+-			input_report_abs(pen_input, ABS_TILT_Y, frame[8]);
+-			input_report_abs(pen_input, ABS_Z, get_unaligned_le16(&frame[9]));
++			input_report_abs(pen_input, ABS_TILT_X, (char)frame[7]);
++			input_report_abs(pen_input, ABS_TILT_Y, (char)frame[8]);
++			input_report_abs(pen_input, ABS_Z, rotation);
+ 			input_report_abs(pen_input, ABS_WHEEL, get_unaligned_le16(&frame[11]));
+ 		}
+ 		input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
+@@ -1319,12 +1325,19 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
+ 	unsigned char *data = wacom->data;
+ 
+ 	int buttons = (data[282] << 1) | ((data[281] >> 6) & 0x01);
+-	int ring = data[285];
+-	int prox = buttons | (ring & 0x80);
++	int ring = data[285] & 0x7F;
++	bool ringstatus = data[285] & 0x80;
++	bool prox = buttons || ringstatus;
++
++	/* Fix touchring data: userspace expects 0 at left and increasing clockwise */
++	ring = 71 - ring;
++	ring += 3*72/16;
++	if (ring > 71)
++		ring -= 72;
+ 
+ 	wacom_report_numbered_buttons(pad_input, 9, buttons);
+ 
+-	input_report_abs(pad_input, ABS_WHEEL, (ring & 0x80) ? (ring & 0x7f) : 0);
++	input_report_abs(pad_input, ABS_WHEEL, ringstatus ? ring : 0);
+ 
+ 	input_report_key(pad_input, wacom->tool[1], prox ? 1 : 0);
+ 	input_report_abs(pad_input, ABS_MISC, prox ? PAD_DEVICE_ID : 0);
+@@ -1616,6 +1629,20 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
+ 	return 0;
+ }
+ 
++static int wacom_offset_rotation(struct input_dev *input, struct hid_usage *usage,
++				 int value, int num, int denom)
++{
++	struct input_absinfo *abs = &input->absinfo[usage->code];
++	int range = (abs->maximum - abs->minimum + 1);
++
++	value += num*range/denom;
++	if (value > abs->maximum)
++		value -= range;
++	else if (value < abs->minimum)
++		value += range;
++	return value;
++}
++
+ int wacom_equivalent_usage(int usage)
+ {
+ 	if ((usage & HID_USAGE_PAGE) == WACOM_HID_UP_WACOMDIGITIZER) {
+@@ -1898,6 +1925,7 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
+ 	unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
+ 	int i;
+ 	bool is_touch_on = value;
++	bool do_report = false;
+ 
+ 	/*
+ 	 * Avoid reporting this event and setting inrange_state if this usage
+@@ -1912,6 +1940,29 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
+ 	}
+ 
+ 	switch (equivalent_usage) {
++	case WACOM_HID_WD_TOUCHRING:
++		/*
++		 * Userspace expects touchrings to increase in value with
++		 * clockwise gestures and have their zero point at the
++		 * tablet's left. HID events "should" be clockwise-
++		 * increasing and zero at top, though the MobileStudio
++		 * Pro and 2nd-gen Intuos Pro don't do this...
++		 */
++		if (hdev->vendor == 0x56a &&
++		    (hdev->product == 0x34d || hdev->product == 0x34e ||  /* MobileStudio Pro */
++		     hdev->product == 0x357 || hdev->product == 0x358)) { /* Intuos Pro 2 */
++			value = (field->logical_maximum - value);
++
++			if (hdev->product == 0x357 || hdev->product == 0x358)
++				value = wacom_offset_rotation(input, usage, value, 3, 16);
++			else if (hdev->product == 0x34d || hdev->product == 0x34e)
++				value = wacom_offset_rotation(input, usage, value, 1, 2);
++		}
++		else {
++			value = wacom_offset_rotation(input, usage, value, 1, 4);
++		}
++		do_report = true;
++		break;
+ 	case WACOM_HID_WD_TOUCHRINGSTATUS:
+ 		if (!value)
+ 			input_event(input, usage->type, usage->code, 0);
+@@ -1945,10 +1996,14 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
+ 					 value, i);
+ 		 /* fall through*/
+ 	default:
++		do_report = true;
++		break;
++	}
++
++	if (do_report) {
+ 		input_event(input, usage->type, usage->code, value);
+ 		if (value)
+ 			wacom_wac->hid_data.pad_input_event_flag = true;
+-		break;
+ 	}
+ }
+ 
+@@ -2086,22 +2141,34 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
+ 		wacom_wac->hid_data.tipswitch |= value;
+ 		return;
+ 	case HID_DG_TOOLSERIALNUMBER:
+-		wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL);
+-		wacom_wac->serial[0] |= (__u32)value;
++		if (value) {
++			wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL);
++			wacom_wac->serial[0] |= (__u32)value;
++		}
+ 		return;
++	case HID_DG_TWIST:
++		/*
++		 * Userspace expects pen twist to have its zero point when
++		 * the buttons/finger is on the tablet's left. HID values
++		 * are zero when buttons are toward the top.
++		 */
++		value = wacom_offset_rotation(input, usage, value, 1, 4);
++		break;
+ 	case WACOM_HID_WD_SENSE:
+ 		wacom_wac->hid_data.sense_state = value;
+ 		return;
+ 	case WACOM_HID_WD_SERIALHI:
+-		wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF);
+-		wacom_wac->serial[0] |= ((__u64)value) << 32;
+-		/*
+-		 * Non-USI EMR devices may contain additional tool type
+-		 * information here. See WACOM_HID_WD_TOOLTYPE case for
+-		 * more details.
+-		 */
+-		if (value >> 20 == 1) {
+-			wacom_wac->id[0] |= value & 0xFFFFF;
++		if (value) {
++			wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF);
++			wacom_wac->serial[0] |= ((__u64)value) << 32;
++			/*
++			 * Non-USI EMR devices may contain additional tool type
++			 * information here. See WACOM_HID_WD_TOOLTYPE case for
++			 * more details.
++			 */
++			if (value >> 20 == 1) {
++				wacom_wac->id[0] |= value & 0xFFFFF;
++			}
+ 		}
+ 		return;
+ 	case WACOM_HID_WD_TOOLTYPE:
+@@ -2205,7 +2272,7 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
+ 		input_report_key(input, wacom_wac->tool[0], prox);
+ 		if (wacom_wac->serial[0]) {
+ 			input_event(input, EV_MSC, MSC_SERIAL, wacom_wac->serial[0]);
+-			input_report_abs(input, ABS_MISC, id);
++			input_report_abs(input, ABS_MISC, prox ? id : 0);
+ 		}
+ 
+ 		wacom_wac->hid_data.tipswitch = false;
+@@ -2216,6 +2283,7 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
+ 	if (!prox) {
+ 		wacom_wac->tool[0] = 0;
+ 		wacom_wac->id[0] = 0;
++		wacom_wac->serial[0] = 0;
+ 	}
+ }
+ 
+diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
+index 4bbb8dea4727..037361158074 100644
+--- a/drivers/hv/channel_mgmt.c
++++ b/drivers/hv/channel_mgmt.c
+@@ -922,14 +922,10 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
+ 
+ void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
+ {
+-	mutex_lock(&vmbus_connection.channel_mutex);
+-
+ 	BUG_ON(!is_hvsock_channel(channel));
+ 
+ 	channel->rescind = true;
+ 	vmbus_device_unregister(channel->device_obj);
+-
+-	mutex_unlock(&vmbus_connection.channel_mutex);
+ }
+ EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
+ 
+diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
+index daa75bd41f86..2364281d8593 100644
+--- a/drivers/hv/hv_fcopy.c
++++ b/drivers/hv/hv_fcopy.c
+@@ -170,6 +170,10 @@ static void fcopy_send_data(struct work_struct *dummy)
+ 		out_src = smsg_out;
+ 		break;
+ 
++	case WRITE_TO_FILE:
++		out_src = fcopy_transaction.fcopy_msg;
++		out_len = sizeof(struct hv_do_fcopy);
++		break;
+ 	default:
+ 		out_src = fcopy_transaction.fcopy_msg;
+ 		out_len = fcopy_transaction.recv_len;
+diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
+index da40df2ff27d..ed6262be3643 100644
+--- a/drivers/hwtracing/intel_th/pci.c
++++ b/drivers/hwtracing/intel_th/pci.c
+@@ -90,6 +90,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
+ 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x19e1),
+ 		.driver_data = (kernel_ulong_t)0,
+ 	},
++	{
++		/* Lewisburg PCH */
++		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa1a6),
++		.driver_data = (kernel_ulong_t)0,
++	},
+ 	{
+ 		/* Gemini Lake */
+ 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
+diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
+index 0e731143f6a4..08b8305fee44 100644
+--- a/drivers/hwtracing/stm/core.c
++++ b/drivers/hwtracing/stm/core.c
+@@ -1119,7 +1119,7 @@ void stm_source_unregister_device(struct stm_source_data *data)
+ 
+ 	stm_source_link_drop(src);
+ 
+-	device_destroy(&stm_source_class, src->dev.devt);
++	device_unregister(&src->dev);
+ }
+ EXPORT_SYMBOL_GPL(stm_source_unregister_device);
+ 
+diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
+index e6706a09e100..47c3d7f32900 100644
+--- a/drivers/iio/adc/ad7793.c
++++ b/drivers/iio/adc/ad7793.c
+@@ -257,7 +257,7 @@ static int ad7793_setup(struct iio_dev *indio_dev,
+ 	unsigned int vref_mv)
+ {
+ 	struct ad7793_state *st = iio_priv(indio_dev);
+-	int i, ret = -1;
++	int i, ret;
+ 	unsigned long long scale_uv;
+ 	u32 id;
+ 
+@@ -266,7 +266,7 @@ static int ad7793_setup(struct iio_dev *indio_dev,
+ 		return ret;
+ 
+ 	/* reset the serial interface */
+-	ret = spi_write(st->sd.spi, (u8 *)&ret, sizeof(ret));
++	ret = ad_sd_reset(&st->sd, 32);
+ 	if (ret < 0)
+ 		goto out;
+ 	usleep_range(500, 2000); /* Wait for at least 500us */
+diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
+index d10bd0c97233..22c4c17cd996 100644
+--- a/drivers/iio/adc/ad_sigma_delta.c
++++ b/drivers/iio/adc/ad_sigma_delta.c
+@@ -177,6 +177,34 @@ int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta,
+ }
+ EXPORT_SYMBOL_GPL(ad_sd_read_reg);
+ 
++/**
++ * ad_sd_reset() - Reset the serial interface
++ *
++ * @sigma_delta: The sigma delta device
++ * @reset_length: Number of SCLKs with DIN = 1
++ *
++ * Returns 0 on success, an error code otherwise.
++ **/
++int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
++	unsigned int reset_length)
++{
++	uint8_t *buf;
++	unsigned int size;
++	int ret;
++
++	size = DIV_ROUND_UP(reset_length, 8);
++	buf = kcalloc(size, sizeof(*buf), GFP_KERNEL);
++	if (!buf)
++		return -ENOMEM;
++
++	memset(buf, 0xff, size);
++	ret = spi_write(sigma_delta->spi, buf, size);
++	kfree(buf);
++
++	return ret;
++}
++EXPORT_SYMBOL_GPL(ad_sd_reset);
++
+ static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
+ 	unsigned int mode, unsigned int channel)
+ {
+diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
+index 634717ae12f3..071dd23a33d9 100644
+--- a/drivers/iio/adc/mcp320x.c
++++ b/drivers/iio/adc/mcp320x.c
+@@ -17,6 +17,8 @@
+  * MCP3204
+  * MCP3208
+  * ------------
++ * 13 bit converter
++ * MCP3301
+  *
+  * Datasheet can be found here:
+  * http://ww1.microchip.com/downloads/en/DeviceDoc/21293C.pdf  mcp3001
+@@ -96,7 +98,7 @@ static int mcp320x_channel_to_tx_data(int device_index,
+ }
+ 
+ static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel,
+-				  bool differential, int device_index)
++				  bool differential, int device_index, int *val)
+ {
+ 	int ret;
+ 
+@@ -117,19 +119,25 @@ static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel,
+ 
+ 	switch (device_index) {
+ 	case mcp3001:
+-		return (adc->rx_buf[0] << 5 | adc->rx_buf[1] >> 3);
++		*val = (adc->rx_buf[0] << 5 | adc->rx_buf[1] >> 3);
++		return 0;
+ 	case mcp3002:
+ 	case mcp3004:
+ 	case mcp3008:
+-		return (adc->rx_buf[0] << 2 | adc->rx_buf[1] >> 6);
++		*val = (adc->rx_buf[0] << 2 | adc->rx_buf[1] >> 6);
++		return 0;
+ 	case mcp3201:
+-		return (adc->rx_buf[0] << 7 | adc->rx_buf[1] >> 1);
++		*val = (adc->rx_buf[0] << 7 | adc->rx_buf[1] >> 1);
++		return 0;
+ 	case mcp3202:
+ 	case mcp3204:
+ 	case mcp3208:
+-		return (adc->rx_buf[0] << 4 | adc->rx_buf[1] >> 4);
++		*val = (adc->rx_buf[0] << 4 | adc->rx_buf[1] >> 4);
++		return 0;
+ 	case mcp3301:
+-		return sign_extend32((adc->rx_buf[0] & 0x1f) << 8 | adc->rx_buf[1], 12);
++		*val = sign_extend32((adc->rx_buf[0] & 0x1f) << 8
++				    | adc->rx_buf[1], 12);
++		return 0;
+ 	default:
+ 		return -EINVAL;
+ 	}
+@@ -150,12 +158,10 @@ static int mcp320x_read_raw(struct iio_dev *indio_dev,
+ 	switch (mask) {
+ 	case IIO_CHAN_INFO_RAW:
+ 		ret = mcp320x_adc_conversion(adc, channel->address,
+-			channel->differential, device_index);
+-
++			channel->differential, device_index, val);
+ 		if (ret < 0)
+ 			goto out;
+ 
+-		*val = ret;
+ 		ret = IIO_VAL_INT;
+ 		break;
+ 
+@@ -312,6 +318,7 @@ static int mcp320x_probe(struct spi_device *spi)
+ 	indio_dev->name = spi_get_device_id(spi)->name;
+ 	indio_dev->modes = INDIO_DIRECT_MODE;
+ 	indio_dev->info = &mcp320x_info;
++	spi_set_drvdata(spi, indio_dev);
+ 
+ 	chip_info = &mcp320x_chip_infos[spi_get_device_id(spi)->driver_data];
+ 	indio_dev->channels = chip_info->channels;
+diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
+index 5bfcc1f13105..10e1d8328461 100644
+--- a/drivers/iio/adc/stm32-adc.c
++++ b/drivers/iio/adc/stm32-adc.c
+@@ -1543,7 +1543,7 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev)
+ 
+ 	num_channels = of_property_count_u32_elems(node, "st,adc-channels");
+ 	if (num_channels < 0 ||
+-	    num_channels >= adc_info->max_channels) {
++	    num_channels > adc_info->max_channels) {
+ 		dev_err(&indio_dev->dev, "Bad st,adc-channels?\n");
+ 		return num_channels < 0 ? num_channels : -EINVAL;
+ 	}
+diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c
+index bd3d37fc2144..0c86fbb3033e 100644
+--- a/drivers/iio/adc/twl4030-madc.c
++++ b/drivers/iio/adc/twl4030-madc.c
+@@ -887,8 +887,10 @@ static int twl4030_madc_probe(struct platform_device *pdev)
+ 
+ 	/* Enable 3v1 bias regulator for MADC[3:6] */
+ 	madc->usb3v1 = devm_regulator_get(madc->dev, "vusb3v1");
+-	if (IS_ERR(madc->usb3v1))
+-		return -ENODEV;
++	if (IS_ERR(madc->usb3v1)) {
++		ret = -ENODEV;
++		goto err_i2c;
++	}
+ 
+ 	ret = regulator_enable(madc->usb3v1);
+ 	if (ret)
+@@ -897,11 +899,13 @@ static int twl4030_madc_probe(struct platform_device *pdev)
+ 	ret = iio_device_register(iio_dev);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "could not register iio device\n");
+-		goto err_i2c;
++		goto err_usb3v1;
+ 	}
+ 
+ 	return 0;
+ 
++err_usb3v1:
++	regulator_disable(madc->usb3v1);
+ err_i2c:
+ 	twl4030_madc_set_current_generator(madc, 0, 0);
+ err_current_generator:
+diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
+index 17ec4cee51dc..a47428b4d31b 100644
+--- a/drivers/iio/industrialio-core.c
++++ b/drivers/iio/industrialio-core.c
+@@ -310,8 +310,10 @@ static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf,
+ 	ret = indio_dev->info->debugfs_reg_access(indio_dev,
+ 						  indio_dev->cached_reg_addr,
+ 						  0, &val);
+-	if (ret)
++	if (ret) {
+ 		dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__);
++		return ret;
++	}
+ 
+ 	len = snprintf(buf, sizeof(buf), "0x%X\n", val);
+ 
+diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
+index 0d2ea3ee371b..8f26428804a2 100644
+--- a/drivers/iio/pressure/bmp280-core.c
++++ b/drivers/iio/pressure/bmp280-core.c
+@@ -573,7 +573,7 @@ static int bmp280_chip_config(struct bmp280_data *data)
+ 	u8 osrs = BMP280_OSRS_TEMP_X(data->oversampling_temp + 1) |
+ 		  BMP280_OSRS_PRESS_X(data->oversampling_press + 1);
+ 
+-	ret = regmap_update_bits(data->regmap, BMP280_REG_CTRL_MEAS,
++	ret = regmap_write_bits(data->regmap, BMP280_REG_CTRL_MEAS,
+ 				 BMP280_OSRS_TEMP_MASK |
+ 				 BMP280_OSRS_PRESS_MASK |
+ 				 BMP280_MODE_MASK,
+diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c
+index 25ad6abfee22..ea128bd82a28 100644
+--- a/drivers/iio/trigger/stm32-timer-trigger.c
++++ b/drivers/iio/trigger/stm32-timer-trigger.c
+@@ -138,6 +138,7 @@ static void stm32_timer_stop(struct stm32_timer_trigger *priv)
+ 		clk_disable(priv->clk);
+ 
+ 	/* Stop timer */
++	regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
+ 	regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
+ 	regmap_write(priv->regmap, TIM_PSC, 0);
+ 	regmap_write(priv->regmap, TIM_ARR, 0);
+@@ -679,8 +680,9 @@ static ssize_t stm32_count_set_preset(struct iio_dev *indio_dev,
+ 	if (ret)
+ 		return ret;
+ 
++	/* TIMx_ARR register shouldn't be buffered (ARPE=0) */
++	regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
+ 	regmap_write(priv->regmap, TIM_ARR, preset);
+-	regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, TIM_CR1_ARPE);
+ 
+ 	return len;
+ }
+diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
+index 6c44609fd83a..cd2b3c69771a 100644
+--- a/drivers/isdn/i4l/isdn_ppp.c
++++ b/drivers/isdn/i4l/isdn_ppp.c
+@@ -825,7 +825,6 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
+ 	isdn_net_local *lp;
+ 	struct ippp_struct *is;
+ 	int proto;
+-	unsigned char protobuf[4];
+ 
+ 	is = file->private_data;
+ 
+@@ -839,24 +838,28 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
+ 	if (!lp)
+ 		printk(KERN_DEBUG "isdn_ppp_write: lp == NULL\n");
+ 	else {
+-		/*
+-		 * Don't reset huptimer for
+-		 * LCP packets. (Echo requests).
+-		 */
+-		if (copy_from_user(protobuf, buf, 4))
+-			return -EFAULT;
+-		proto = PPP_PROTOCOL(protobuf);
+-		if (proto != PPP_LCP)
+-			lp->huptimer = 0;
++		if (lp->isdn_device < 0 || lp->isdn_channel < 0) {
++			unsigned char protobuf[4];
++			/*
++			 * Don't reset huptimer for
++			 * LCP packets. (Echo requests).
++			 */
++			if (copy_from_user(protobuf, buf, 4))
++				return -EFAULT;
++
++			proto = PPP_PROTOCOL(protobuf);
++			if (proto != PPP_LCP)
++				lp->huptimer = 0;
+ 
+-		if (lp->isdn_device < 0 || lp->isdn_channel < 0)
+ 			return 0;
++		}
+ 
+ 		if ((dev->drv[lp->isdn_device]->flags & DRV_FLAG_RUNNING) &&
+ 		    lp->dialstate == 0 &&
+ 		    (lp->flags & ISDN_NET_CONNECTED)) {
+ 			unsigned short hl;
+ 			struct sk_buff *skb;
++			unsigned char *cpy_buf;
+ 			/*
+ 			 * we need to reserve enough space in front of
+ 			 * sk_buff. old call to dev_alloc_skb only reserved
+@@ -869,11 +872,21 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
+ 				return count;
+ 			}
+ 			skb_reserve(skb, hl);
+-			if (copy_from_user(skb_put(skb, count), buf, count))
++			cpy_buf = skb_put(skb, count);
++			if (copy_from_user(cpy_buf, buf, count))
+ 			{
+ 				kfree_skb(skb);
+ 				return -EFAULT;
+ 			}
++
++			/*
++			 * Don't reset huptimer for
++			 * LCP packets. (Echo requests).
++			 */
++			proto = PPP_PROTOCOL(cpy_buf);
++			if (proto != PPP_LCP)
++				lp->huptimer = 0;
++
+ 			if (is->debug & 0x40) {
+ 				printk(KERN_DEBUG "ppp xmit: len %d\n", (int) skb->len);
+ 				isdn_ppp_frame_log("xmit", skb->data, skb->len, 32, is->unit, lp->ppp_slot);
+diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
+index 24eddbdf2ab4..203144762f36 100644
+--- a/drivers/md/dm-core.h
++++ b/drivers/md/dm-core.h
+@@ -149,5 +149,6 @@ static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen
+ 
+ extern atomic_t dm_global_event_nr;
+ extern wait_queue_head_t dm_global_eventq;
++void dm_issue_global_event(void);
+ 
+ #endif
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index cdf6b1e12460..e3dd64a12f55 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -2470,6 +2470,7 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key
+ 		kfree(cipher_api);
+ 		return ret;
+ 	}
++	kfree(cipher_api);
+ 
+ 	return 0;
+ bad_mem:
+@@ -2588,6 +2589,10 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
+ 				ti->error = "Invalid feature value for sector_size";
+ 				return -EINVAL;
+ 			}
++			if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) {
++				ti->error = "Device size is not multiple of sector_size feature";
++				return -EINVAL;
++			}
+ 			cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
+ 		} else if (!strcasecmp(opt_string, "iv_large_sectors"))
+ 			set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index e06f0ef7d2ec..e9f9884b66a8 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -477,9 +477,13 @@ static int remove_all(struct file *filp, struct dm_ioctl *param, size_t param_si
+  * Round up the ptr to an 8-byte boundary.
+  */
+ #define ALIGN_MASK 7
++static inline size_t align_val(size_t val)
++{
++	return (val + ALIGN_MASK) & ~ALIGN_MASK;
++}
+ static inline void *align_ptr(void *ptr)
+ {
+-	return (void *) (((size_t) (ptr + ALIGN_MASK)) & ~ALIGN_MASK);
++	return (void *)align_val((size_t)ptr);
+ }
+ 
+ /*
+@@ -505,7 +509,7 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
+ 	struct hash_cell *hc;
+ 	size_t len, needed = 0;
+ 	struct gendisk *disk;
+-	struct dm_name_list *nl, *old_nl = NULL;
++	struct dm_name_list *orig_nl, *nl, *old_nl = NULL;
+ 	uint32_t *event_nr;
+ 
+ 	down_write(&_hash_lock);
+@@ -516,17 +520,15 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
+ 	 */
+ 	for (i = 0; i < NUM_BUCKETS; i++) {
+ 		list_for_each_entry (hc, _name_buckets + i, name_list) {
+-			needed += sizeof(struct dm_name_list);
+-			needed += strlen(hc->name) + 1;
+-			needed += ALIGN_MASK;
+-			needed += (sizeof(uint32_t) + ALIGN_MASK) & ~ALIGN_MASK;
++			needed += align_val(offsetof(struct dm_name_list, name) + strlen(hc->name) + 1);
++			needed += align_val(sizeof(uint32_t));
+ 		}
+ 	}
+ 
+ 	/*
+ 	 * Grab our output buffer.
+ 	 */
+-	nl = get_result_buffer(param, param_size, &len);
++	nl = orig_nl = get_result_buffer(param, param_size, &len);
+ 	if (len < needed) {
+ 		param->flags |= DM_BUFFER_FULL_FLAG;
+ 		goto out;
+@@ -549,11 +551,16 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
+ 			strcpy(nl->name, hc->name);
+ 
+ 			old_nl = nl;
+-			event_nr = align_ptr(((void *) (nl + 1)) + strlen(hc->name) + 1);
++			event_nr = align_ptr(nl->name + strlen(hc->name) + 1);
+ 			*event_nr = dm_get_event_nr(hc->md);
+ 			nl = align_ptr(event_nr + 1);
+ 		}
+ 	}
++	/*
++	 * If mismatch happens, security may be compromised due to buffer
++	 * overflow, so it's better to crash.
++	 */
++	BUG_ON((char *)nl - (char *)orig_nl != needed);
+ 
+  out:
+ 	up_write(&_hash_lock);
+@@ -1621,7 +1628,8 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para
+  * which has a variable size, is not used by the function processing
+  * the ioctl.
+  */
+-#define IOCTL_FLAGS_NO_PARAMS	1
++#define IOCTL_FLAGS_NO_PARAMS		1
++#define IOCTL_FLAGS_ISSUE_GLOBAL_EVENT	2
+ 
+ /*-----------------------------------------------------------------
+  * Implementation of open/close/ioctl on the special char
+@@ -1635,12 +1643,12 @@ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags)
+ 		ioctl_fn fn;
+ 	} _ioctls[] = {
+ 		{DM_VERSION_CMD, 0, NULL}, /* version is dealt with elsewhere */
+-		{DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS, remove_all},
++		{DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, remove_all},
+ 		{DM_LIST_DEVICES_CMD, 0, list_devices},
+ 
+-		{DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_create},
+-		{DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_remove},
+-		{DM_DEV_RENAME_CMD, 0, dev_rename},
++		{DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_create},
++		{DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_remove},
++		{DM_DEV_RENAME_CMD, IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_rename},
+ 		{DM_DEV_SUSPEND_CMD, IOCTL_FLAGS_NO_PARAMS, dev_suspend},
+ 		{DM_DEV_STATUS_CMD, IOCTL_FLAGS_NO_PARAMS, dev_status},
+ 		{DM_DEV_WAIT_CMD, 0, dev_wait},
+@@ -1869,6 +1877,9 @@ static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *us
+ 	    unlikely(ioctl_flags & IOCTL_FLAGS_NO_PARAMS))
+ 		DMERR("ioctl %d tried to output some data but has IOCTL_FLAGS_NO_PARAMS set", cmd);
+ 
++	if (!r && ioctl_flags & IOCTL_FLAGS_ISSUE_GLOBAL_EVENT)
++		dm_issue_global_event();
++
+ 	/*
+ 	 * Copy the results back to userland.
+ 	 */
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 825eaffc24da..eed539a4eec2 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -52,6 +52,12 @@ static struct workqueue_struct *deferred_remove_workqueue;
+ atomic_t dm_global_event_nr = ATOMIC_INIT(0);
+ DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);
+ 
++void dm_issue_global_event(void)
++{
++	atomic_inc(&dm_global_event_nr);
++	wake_up(&dm_global_eventq);
++}
++
+ /*
+  * One of these is allocated per bio.
+  */
+@@ -1865,9 +1871,8 @@ static void event_callback(void *context)
+ 	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
+ 
+ 	atomic_inc(&md->event_nr);
+-	atomic_inc(&dm_global_event_nr);
+ 	wake_up(&md->eventq);
+-	wake_up(&dm_global_eventq);
++	dm_issue_global_event();
+ }
+ 
+ /*
+@@ -2283,6 +2288,7 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
+ 	}
+ 
+ 	map = __bind(md, table, &limits);
++	dm_issue_global_event();
+ 
+ out:
+ 	mutex_unlock(&md->suspend_lock);
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index 2bae69e39544..b64be0ba1222 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -1286,6 +1286,23 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
+ 	return err;
+ }
+ 
++static void mmc_select_driver_type(struct mmc_card *card)
++{
++	int card_drv_type, drive_strength, drv_type;
++
++	card_drv_type = card->ext_csd.raw_driver_strength |
++			mmc_driver_type_mask(0);
++
++	drive_strength = mmc_select_drive_strength(card,
++						   card->ext_csd.hs200_max_dtr,
++						   card_drv_type, &drv_type);
++
++	card->drive_strength = drive_strength;
++
++	if (drv_type)
++		mmc_set_driver_type(card->host, drv_type);
++}
++
+ static int mmc_select_hs400es(struct mmc_card *card)
+ {
+ 	struct mmc_host *host = card->host;
+@@ -1341,6 +1358,8 @@ static int mmc_select_hs400es(struct mmc_card *card)
+ 		goto out_err;
+ 	}
+ 
++	mmc_select_driver_type(card);
++
+ 	/* Switch card to HS400 */
+ 	val = EXT_CSD_TIMING_HS400 |
+ 	      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
+@@ -1374,23 +1393,6 @@ static int mmc_select_hs400es(struct mmc_card *card)
+ 	return err;
+ }
+ 
+-static void mmc_select_driver_type(struct mmc_card *card)
+-{
+-	int card_drv_type, drive_strength, drv_type;
+-
+-	card_drv_type = card->ext_csd.raw_driver_strength |
+-			mmc_driver_type_mask(0);
+-
+-	drive_strength = mmc_select_drive_strength(card,
+-						   card->ext_csd.hs200_max_dtr,
+-						   card_drv_type, &drv_type);
+-
+-	card->drive_strength = drive_strength;
+-
+-	if (drv_type)
+-		mmc_set_driver_type(card->host, drv_type);
+-}
+-
+ /*
+  * For device supporting HS200 mode, the following sequence
+  * should be done before executing the tuning process.
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index fc63992ab0e0..c99dc59d729b 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -4289,7 +4289,7 @@ static int bond_check_params(struct bond_params *params)
+ 	int bond_mode	= BOND_MODE_ROUNDROBIN;
+ 	int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
+ 	int lacp_fast = 0;
+-	int tlb_dynamic_lb = 0;
++	int tlb_dynamic_lb;
+ 
+ 	/* Convert string parameters. */
+ 	if (mode) {
+@@ -4601,16 +4601,13 @@ static int bond_check_params(struct bond_params *params)
+ 	}
+ 	ad_user_port_key = valptr->value;
+ 
+-	if ((bond_mode == BOND_MODE_TLB) || (bond_mode == BOND_MODE_ALB)) {
+-		bond_opt_initstr(&newval, "default");
+-		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB),
+-					&newval);
+-		if (!valptr) {
+-			pr_err("Error: No tlb_dynamic_lb default value");
+-			return -EINVAL;
+-		}
+-		tlb_dynamic_lb = valptr->value;
++	bond_opt_initstr(&newval, "default");
++	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB), &newval);
++	if (!valptr) {
++		pr_err("Error: No tlb_dynamic_lb default value");
++		return -EINVAL;
+ 	}
++	tlb_dynamic_lb = valptr->value;
+ 
+ 	if (lp_interval == 0) {
+ 		pr_warn("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n",
+diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
+index a12d603d41c6..5931aa2fe997 100644
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -754,6 +754,9 @@ static int bond_option_mode_set(struct bonding *bond,
+ 			   bond->params.miimon);
+ 	}
+ 
++	if (newval->value == BOND_MODE_ALB)
++		bond->params.tlb_dynamic_lb = 1;
++
+ 	/* don't cache arp_validate between modes */
+ 	bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
+ 	bond->params.mode = newval->value;
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 5bcdd33101b0..c75271c722a7 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -1184,6 +1184,10 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
+ 	};
+ 	int i, err;
+ 
++	/* DSA and CPU ports have to be members of multiple vlans */
++	if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
++		return 0;
++
+ 	if (!vid_begin)
+ 		return -EOPNOTSUPP;
+ 
+@@ -4015,7 +4019,9 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
+ 	if (chip->irq > 0) {
+ 		if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_INT))
+ 			mv88e6xxx_g2_irq_free(chip);
++		mutex_lock(&chip->reg_lock);
+ 		mv88e6xxx_g1_irq_free(chip);
++		mutex_unlock(&chip->reg_lock);
+ 	}
+ }
+ 
+diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
+index 91b1a558f37d..248888328232 100644
+--- a/drivers/net/ethernet/ibm/emac/mal.c
++++ b/drivers/net/ethernet/ibm/emac/mal.c
+@@ -402,7 +402,7 @@ static int mal_poll(struct napi_struct *napi, int budget)
+ 	unsigned long flags;
+ 
+ 	MAL_DBG2(mal, "poll(%d)" NL, budget);
+- again:
++
+ 	/* Process TX skbs */
+ 	list_for_each(l, &mal->poll_list) {
+ 		struct mal_commac *mc =
+@@ -451,7 +451,6 @@ static int mal_poll(struct napi_struct *napi, int budget)
+ 			spin_lock_irqsave(&mal->lock, flags);
+ 			mal_disable_eob_irq(mal);
+ 			spin_unlock_irqrestore(&mal->lock, flags);
+-			goto again;
+ 		}
+ 		mc->ops->poll_tx(mc->dev);
+ 	}
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+index 85298051a3e4..145e392ab849 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+@@ -572,12 +572,13 @@ void mlx5_rdma_netdev_free(struct net_device *netdev)
+ {
+ 	struct mlx5e_priv          *priv    = mlx5i_epriv(netdev);
+ 	const struct mlx5e_profile *profile = priv->profile;
++	struct mlx5_core_dev       *mdev    = priv->mdev;
+ 
+ 	mlx5e_detach_netdev(priv);
+ 	profile->cleanup(priv);
+ 	destroy_workqueue(priv->wq);
+ 	free_netdev(netdev);
+ 
+-	mlx5e_destroy_mdev_resources(priv->mdev);
++	mlx5e_destroy_mdev_resources(mdev);
+ }
+ EXPORT_SYMBOL(mlx5_rdma_netdev_free);
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+index c6a3e61b53bd..73390f90b581 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -572,15 +572,14 @@ static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
+ }
+ 
+ static struct mlxsw_sp_span_entry *
+-mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
++mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+ {
+-	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ 	int i;
+ 
+ 	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ 		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+ 
+-		if (curr->used && curr->local_port == port->local_port)
++		if (curr->used && curr->local_port == local_port)
+ 			return curr;
+ 	}
+ 	return NULL;
+@@ -591,7 +590,8 @@ static struct mlxsw_sp_span_entry
+ {
+ 	struct mlxsw_sp_span_entry *span_entry;
+ 
+-	span_entry = mlxsw_sp_span_entry_find(port);
++	span_entry = mlxsw_sp_span_entry_find(port->mlxsw_sp,
++					      port->local_port);
+ 	if (span_entry) {
+ 		/* Already exists, just take a reference */
+ 		span_entry->ref_count++;
+@@ -780,12 +780,13 @@ static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
+ }
+ 
+ static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
+-					struct mlxsw_sp_port *to,
++					u8 destination_port,
+ 					enum mlxsw_sp_span_type type)
+ {
+ 	struct mlxsw_sp_span_entry *span_entry;
+ 
+-	span_entry = mlxsw_sp_span_entry_find(to);
++	span_entry = mlxsw_sp_span_entry_find(from->mlxsw_sp,
++					      destination_port);
+ 	if (!span_entry) {
+ 		netdev_err(from->dev, "no span entry found\n");
+ 		return;
+@@ -1560,14 +1561,12 @@ static void
+ mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
+ 				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
+ {
+-	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ 	enum mlxsw_sp_span_type span_type;
+-	struct mlxsw_sp_port *to_port;
+ 
+-	to_port = mlxsw_sp->ports[mirror->to_local_port];
+ 	span_type = mirror->ingress ?
+ 			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
+-	mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
++	mlxsw_sp_span_mirror_remove(mlxsw_sp_port, mirror->to_local_port,
++				    span_type);
+ }
+ 
+ static int
+@@ -2519,7 +2518,9 @@ static int mlxsw_sp_flash_device(struct net_device *dev,
+ 	return err;
+ }
+ 
+-#define MLXSW_SP_QSFP_I2C_ADDR 0x50
++#define MLXSW_SP_I2C_ADDR_LOW 0x50
++#define MLXSW_SP_I2C_ADDR_HIGH 0x51
++#define MLXSW_SP_EEPROM_PAGE_LENGTH 256
+ 
+ static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port,
+ 					u16 offset, u16 size, void *data,
+@@ -2528,12 +2529,25 @@ static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port,
+ 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ 	char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE];
+ 	char mcia_pl[MLXSW_REG_MCIA_LEN];
++	u16 i2c_addr;
+ 	int status;
+ 	int err;
+ 
+ 	size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE);
++
++	if (offset < MLXSW_SP_EEPROM_PAGE_LENGTH &&
++	    offset + size > MLXSW_SP_EEPROM_PAGE_LENGTH)
++		/* Cross pages read, read until offset 256 in low page */
++		size = MLXSW_SP_EEPROM_PAGE_LENGTH - offset;
++
++	i2c_addr = MLXSW_SP_I2C_ADDR_LOW;
++	if (offset >= MLXSW_SP_EEPROM_PAGE_LENGTH) {
++		i2c_addr = MLXSW_SP_I2C_ADDR_HIGH;
++		offset -= MLXSW_SP_EEPROM_PAGE_LENGTH;
++	}
++
+ 	mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module,
+-			    0, 0, offset, size, MLXSW_SP_QSFP_I2C_ADDR);
++			    0, 0, offset, size, i2c_addr);
+ 
+ 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl);
+ 	if (err)
+diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+index bcd4708b3745..97f18cdc9516 100644
+--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
++++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+@@ -876,7 +876,8 @@ static void emac_mac_rx_descs_refill(struct emac_adapter *adpt,
+ 
+ 		curr_rxbuf->dma_addr =
+ 			dma_map_single(adpt->netdev->dev.parent, skb->data,
+-				       curr_rxbuf->length, DMA_FROM_DEVICE);
++				       adpt->rxbuf_size, DMA_FROM_DEVICE);
++
+ 		ret = dma_mapping_error(adpt->netdev->dev.parent,
+ 					curr_rxbuf->dma_addr);
+ 		if (ret) {
+diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
+index ca22f2898664..d24b47b8e0b2 100644
+--- a/drivers/net/ethernet/realtek/8139too.c
++++ b/drivers/net/ethernet/realtek/8139too.c
+@@ -2135,11 +2135,12 @@ static int rtl8139_poll(struct napi_struct *napi, int budget)
+ 	if (likely(RTL_R16(IntrStatus) & RxAckBits))
+ 		work_done += rtl8139_rx(dev, tp, budget);
+ 
+-	if (work_done < budget && napi_complete_done(napi, work_done)) {
++	if (work_done < budget) {
+ 		unsigned long flags;
+ 
+ 		spin_lock_irqsave(&tp->lock, flags);
+-		RTL_W16_F(IntrMask, rtl8139_intr_mask);
++		if (napi_complete_done(napi, work_done))
++			RTL_W16_F(IntrMask, rtl8139_intr_mask);
+ 		spin_unlock_irqrestore(&tp->lock, flags);
+ 	}
+ 	spin_unlock(&tp->rx_lock);
+diff --git a/drivers/net/ethernet/rocker/rocker_tlv.h b/drivers/net/ethernet/rocker/rocker_tlv.h
+index a63ef82e7c72..dfae3c9d57c6 100644
+--- a/drivers/net/ethernet/rocker/rocker_tlv.h
++++ b/drivers/net/ethernet/rocker/rocker_tlv.h
+@@ -139,40 +139,52 @@ rocker_tlv_start(struct rocker_desc_info *desc_info)
+ int rocker_tlv_put(struct rocker_desc_info *desc_info,
+ 		   int attrtype, int attrlen, const void *data);
+ 
+-static inline int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
+-				    int attrtype, u8 value)
++static inline int
++rocker_tlv_put_u8(struct rocker_desc_info *desc_info, int attrtype, u8 value)
+ {
+-	return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value);
++	u8 tmp = value; /* work around GCC PR81715 */
++
++	return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &tmp);
+ }
+ 
+-static inline int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
+-				     int attrtype, u16 value)
++static inline int
++rocker_tlv_put_u16(struct rocker_desc_info *desc_info, int attrtype, u16 value)
+ {
+-	return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value);
++	u16 tmp = value;
++
++	return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &tmp);
+ }
+ 
+-static inline int rocker_tlv_put_be16(struct rocker_desc_info *desc_info,
+-				      int attrtype, __be16 value)
++static inline int
++rocker_tlv_put_be16(struct rocker_desc_info *desc_info, int attrtype, __be16 value)
+ {
+-	return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value);
++	__be16 tmp = value;
++
++	return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &tmp);
+ }
+ 
+-static inline int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
+-				     int attrtype, u32 value)
++static inline int
++rocker_tlv_put_u32(struct rocker_desc_info *desc_info, int attrtype, u32 value)
+ {
+-	return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value);
++	u32 tmp = value;
++
++	return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &tmp);
+ }
+ 
+-static inline int rocker_tlv_put_be32(struct rocker_desc_info *desc_info,
+-				      int attrtype, __be32 value)
++static inline int
++rocker_tlv_put_be32(struct rocker_desc_info *desc_info, int attrtype, __be32 value)
+ {
+-	return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value);
++	__be32 tmp = value;
++
++	return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &tmp);
+ }
+ 
+-static inline int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
+-				     int attrtype, u64 value)
++static inline int
++rocker_tlv_put_u64(struct rocker_desc_info *desc_info, int attrtype, u64 value)
+ {
+-	return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value);
++	u64 tmp = value;
++
++	return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &tmp);
+ }
+ 
+ static inline struct rocker_tlv *
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+index a366b3747eeb..8a280b48e3a9 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+@@ -315,6 +315,7 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
+ 		{ .compatible = "allwinner,sun8i-h3-emac" },
+ 		{ .compatible = "allwinner,sun8i-v3s-emac" },
+ 		{ .compatible = "allwinner,sun50i-a64-emac" },
++		{},
+ 	};
+ 
+ 	/* If phy-handle property is passed from DT, use it as the PHY */
+diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
+index d15dd3938ba8..2e5150b0b8d5 100644
+--- a/drivers/net/phy/xilinx_gmii2rgmii.c
++++ b/drivers/net/phy/xilinx_gmii2rgmii.c
+@@ -44,7 +44,7 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev)
+ 	priv->phy_drv->read_status(phydev);
+ 
+ 	val = mdiobus_read(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG);
+-	val &= XILINX_GMII2RGMII_SPEED_MASK;
++	val &= ~XILINX_GMII2RGMII_SPEED_MASK;
+ 
+ 	if (phydev->speed == SPEED_1000)
+ 		val |= BMCR_SPEED1000;
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 0a2c0a42283f..cb1f7747adad 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1298,11 +1298,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ 	switch (tun->flags & TUN_TYPE_MASK) {
+ 	case IFF_TUN:
+ 		if (tun->flags & IFF_NO_PI) {
+-			switch (skb->data[0] & 0xf0) {
+-			case 0x40:
++			u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;
++
++			switch (ip_version) {
++			case 4:
+ 				pi.proto = htons(ETH_P_IP);
+ 				break;
+-			case 0x60:
++			case 6:
+ 				pi.proto = htons(ETH_P_IPV6);
+ 				break;
+ 			default:
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+index 7e689c86d565..f27d6fe4d5c0 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+@@ -980,7 +980,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
+ 
+ 	eth_broadcast_addr(params_le->bssid);
+ 	params_le->bss_type = DOT11_BSSTYPE_ANY;
+-	params_le->scan_type = 0;
++	params_le->scan_type = BRCMF_SCANTYPE_ACTIVE;
+ 	params_le->channel_num = 0;
+ 	params_le->nprobes = cpu_to_le32(-1);
+ 	params_le->active_time = cpu_to_le32(-1);
+@@ -988,12 +988,9 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
+ 	params_le->home_time = cpu_to_le32(-1);
+ 	memset(&params_le->ssid_le, 0, sizeof(params_le->ssid_le));
+ 
+-	/* if request is null exit so it will be all channel broadcast scan */
+-	if (!request)
+-		return;
+-
+ 	n_ssids = request->n_ssids;
+ 	n_channels = request->n_channels;
++
+ 	/* Copy channel array if applicable */
+ 	brcmf_dbg(SCAN, "### List of channelspecs to scan ### %d\n",
+ 		  n_channels);
+@@ -1030,16 +1027,8 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
+ 			ptr += sizeof(ssid_le);
+ 		}
+ 	} else {
+-		brcmf_dbg(SCAN, "Broadcast scan %p\n", request->ssids);
+-		if ((request->ssids) && request->ssids->ssid_len) {
+-			brcmf_dbg(SCAN, "SSID %s len=%d\n",
+-				  params_le->ssid_le.SSID,
+-				  request->ssids->ssid_len);
+-			params_le->ssid_le.SSID_len =
+-				cpu_to_le32(request->ssids->ssid_len);
+-			memcpy(&params_le->ssid_le.SSID, request->ssids->ssid,
+-				request->ssids->ssid_len);
+-		}
++		brcmf_dbg(SCAN, "Performing passive scan\n");
++		params_le->scan_type = BRCMF_SCANTYPE_PASSIVE;
+ 	}
+ 	/* Adding mask to channel numbers */
+ 	params_le->channel_num =
+@@ -3162,6 +3151,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
+ 	struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+ 	s32 status;
+ 	struct brcmf_escan_result_le *escan_result_le;
++	u32 escan_buflen;
+ 	struct brcmf_bss_info_le *bss_info_le;
+ 	struct brcmf_bss_info_le *bss = NULL;
+ 	u32 bi_length;
+@@ -3181,11 +3171,23 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
+ 
+ 	if (status == BRCMF_E_STATUS_PARTIAL) {
+ 		brcmf_dbg(SCAN, "ESCAN Partial result\n");
++		if (e->datalen < sizeof(*escan_result_le)) {
++			brcmf_err("invalid event data length\n");
++			goto exit;
++		}
+ 		escan_result_le = (struct brcmf_escan_result_le *) data;
+ 		if (!escan_result_le) {
+ 			brcmf_err("Invalid escan result (NULL pointer)\n");
+ 			goto exit;
+ 		}
++		escan_buflen = le32_to_cpu(escan_result_le->buflen);
++		if (escan_buflen > BRCMF_ESCAN_BUF_SIZE ||
++		    escan_buflen > e->datalen ||
++		    escan_buflen < sizeof(*escan_result_le)) {
++			brcmf_err("Invalid escan buffer length: %d\n",
++				  escan_buflen);
++			goto exit;
++		}
+ 		if (le16_to_cpu(escan_result_le->bss_count) != 1) {
+ 			brcmf_err("Invalid bss_count %d: ignoring\n",
+ 				  escan_result_le->bss_count);
+@@ -3202,9 +3204,8 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
+ 		}
+ 
+ 		bi_length = le32_to_cpu(bss_info_le->length);
+-		if (bi_length != (le32_to_cpu(escan_result_le->buflen) -
+-					WL_ESCAN_RESULTS_FIXED_SIZE)) {
+-			brcmf_err("Invalid bss_info length %d: ignoring\n",
++		if (bi_length != escan_buflen -	WL_ESCAN_RESULTS_FIXED_SIZE) {
++			brcmf_err("Ignoring invalid bss_info length: %d\n",
+ 				  bi_length);
+ 			goto exit;
+ 		}
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+index 8391989b1882..e0d22fedb2b4 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+@@ -45,6 +45,11 @@
+ #define BRCMF_SCAN_PARAMS_COUNT_MASK	0x0000ffff
+ #define BRCMF_SCAN_PARAMS_NSSID_SHIFT	16
+ 
++/* scan type definitions */
++#define BRCMF_SCANTYPE_DEFAULT		0xFF
++#define BRCMF_SCANTYPE_ACTIVE		0
++#define BRCMF_SCANTYPE_PASSIVE		1
++
+ #define BRCMF_WSEC_MAX_PSK_LEN		32
+ #define	BRCMF_WSEC_PASSPHRASE		BIT(0)
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index ce901be5fba8..f0132c492a79 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -1589,6 +1589,11 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
+ 	struct iwl_mvm_mc_iter_data *data = _data;
+ 	struct iwl_mvm *mvm = data->mvm;
+ 	struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
++	struct iwl_host_cmd hcmd = {
++		.id = MCAST_FILTER_CMD,
++		.flags = CMD_ASYNC,
++		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
++	};
+ 	int ret, len;
+ 
+ 	/* if we don't have free ports, mcast frames will be dropped */
+@@ -1603,7 +1608,10 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
+ 	memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
+ 	len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
+ 
+-	ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
++	hcmd.len[0] = len;
++	hcmd.data[0] = cmd;
++
++	ret = iwl_mvm_send_cmd(mvm, &hcmd);
+ 	if (ret)
+ 		IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
+ }
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index cdf4c0e471b9..ba41b660b259 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -93,7 +93,7 @@ struct nvme_dev {
+ 	struct mutex shutdown_lock;
+ 	bool subsystem;
+ 	void __iomem *cmb;
+-	dma_addr_t cmb_dma_addr;
++	pci_bus_addr_t cmb_bus_addr;
+ 	u64 cmb_size;
+ 	u32 cmbsz;
+ 	u32 cmbloc;
+@@ -1218,7 +1218,7 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
+ 	if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
+ 		unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
+ 						      dev->ctrl.page_size);
+-		nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset;
++		nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
+ 		nvmeq->sq_cmds_io = dev->cmb + offset;
+ 	} else {
+ 		nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
+@@ -1517,7 +1517,7 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
+ 	resource_size_t bar_size;
+ 	struct pci_dev *pdev = to_pci_dev(dev->dev);
+ 	void __iomem *cmb;
+-	dma_addr_t dma_addr;
++	int bar;
+ 
+ 	dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
+ 	if (!(NVME_CMB_SZ(dev->cmbsz)))
+@@ -1530,7 +1530,8 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
+ 	szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
+ 	size = szu * NVME_CMB_SZ(dev->cmbsz);
+ 	offset = szu * NVME_CMB_OFST(dev->cmbloc);
+-	bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc));
++	bar = NVME_CMB_BIR(dev->cmbloc);
++	bar_size = pci_resource_len(pdev, bar);
+ 
+ 	if (offset > bar_size)
+ 		return NULL;
+@@ -1543,12 +1544,11 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
+ 	if (size > bar_size - offset)
+ 		size = bar_size - offset;
+ 
+-	dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset;
+-	cmb = ioremap_wc(dma_addr, size);
++	cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
+ 	if (!cmb)
+ 		return NULL;
+ 
+-	dev->cmb_dma_addr = dma_addr;
++	dev->cmb_bus_addr = pci_bus_address(pdev, bar) + offset;
+ 	dev->cmb_size = size;
+ 	return cmb;
+ }
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index fd88dabd599d..9f1d53e18956 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -956,6 +956,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
+ 	if (*bflags & BLIST_NO_DIF)
+ 		sdev->no_dif = 1;
+ 
++	if (*bflags & BLIST_UNMAP_LIMIT_WS)
++		sdev->unmap_limit_for_ws = 1;
++
+ 	sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
+ 
+ 	if (*bflags & BLIST_TRY_VPD_PAGES)
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index e2647f2d4430..b93d92572c01 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -715,13 +715,21 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
+ 		break;
+ 
+ 	case SD_LBP_WS16:
+-		max_blocks = min_not_zero(sdkp->max_ws_blocks,
+-					  (u32)SD_MAX_WS16_BLOCKS);
++		if (sdkp->device->unmap_limit_for_ws)
++			max_blocks = sdkp->max_unmap_blocks;
++		else
++			max_blocks = sdkp->max_ws_blocks;
++
++		max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS);
+ 		break;
+ 
+ 	case SD_LBP_WS10:
+-		max_blocks = min_not_zero(sdkp->max_ws_blocks,
+-					  (u32)SD_MAX_WS10_BLOCKS);
++		if (sdkp->device->unmap_limit_for_ws)
++			max_blocks = sdkp->max_unmap_blocks;
++		else
++			max_blocks = sdkp->max_ws_blocks;
++
++		max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS);
+ 		break;
+ 
+ 	case SD_LBP_ZERO:
+@@ -3101,8 +3109,6 @@ static int sd_revalidate_disk(struct gendisk *disk)
+ 		sd_read_security(sdkp, buffer);
+ 	}
+ 
+-	sdkp->first_scan = 0;
+-
+ 	/*
+ 	 * We now have all cache related info, determine how we deal
+ 	 * with flush requests.
+@@ -3117,7 +3123,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
+ 	q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
+ 
+ 	/*
+-	 * Use the device's preferred I/O size for reads and writes
++	 * Determine the device's preferred I/O size for reads and writes
+ 	 * unless the reported value is unreasonably small, large, or
+ 	 * garbage.
+ 	 */
+@@ -3131,8 +3137,19 @@ static int sd_revalidate_disk(struct gendisk *disk)
+ 		rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
+ 				      (sector_t)BLK_DEF_MAX_SECTORS);
+ 
+-	/* Combine with controller limits */
+-	q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
++	/* Do not exceed controller limit */
++	rw_max = min(rw_max, queue_max_hw_sectors(q));
++
++	/*
++	 * Only update max_sectors if previously unset or if the current value
++	 * exceeds the capabilities of the hardware.
++	 */
++	if (sdkp->first_scan ||
++	    q->limits.max_sectors > q->limits.max_dev_sectors ||
++	    q->limits.max_sectors > q->limits.max_hw_sectors)
++		q->limits.max_sectors = rw_max;
++
++	sdkp->first_scan = 0;
+ 
+ 	set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
+ 	sd_config_write_same(sdkp);
+diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
+index d11c6de9c777..6150d2780e22 100644
+--- a/drivers/staging/iio/adc/ad7192.c
++++ b/drivers/staging/iio/adc/ad7192.c
+@@ -223,11 +223,9 @@ static int ad7192_setup(struct ad7192_state *st,
+ 	struct iio_dev *indio_dev = spi_get_drvdata(st->sd.spi);
+ 	unsigned long long scale_uv;
+ 	int i, ret, id;
+-	u8 ones[6];
+ 
+ 	/* reset the serial interface */
+-	memset(&ones, 0xFF, 6);
+-	ret = spi_write(st->sd.spi, &ones, 6);
++	ret = ad_sd_reset(&st->sd, 48);
+ 	if (ret < 0)
+ 		goto out;
+ 	usleep_range(500, 1000); /* Wait for at least 500us */
+diff --git a/drivers/staging/media/imx/imx-media-of.c b/drivers/staging/media/imx/imx-media-of.c
+index b026fe66467c..90e7e702a411 100644
+--- a/drivers/staging/media/imx/imx-media-of.c
++++ b/drivers/staging/media/imx/imx-media-of.c
+@@ -167,7 +167,7 @@ of_parse_subdev(struct imx_media_dev *imxmd, struct device_node *sd_np,
+ 		of_parse_sensor(imxmd, imxsd, sd_np);
+ 
+ 	for (i = 0; i < num_pads; i++) {
+-		struct device_node *epnode = NULL, *port, *remote_np;
++		struct device_node *epnode = NULL, *port, *remote_np = NULL;
+ 		struct imx_media_subdev *remote_imxsd;
+ 		struct imx_media_pad *pad;
+ 		int remote_pad;
+diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+index 0159ca4407d8..be08849175ea 100644
+--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+@@ -612,18 +612,20 @@ free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
+ 			if (head_bytes > actual)
+ 				head_bytes = actual;
+ 
+-			memcpy((char *)page_address(pages[0]) +
++			memcpy((char *)kmap(pages[0]) +
+ 				pagelist->offset,
+ 				fragments,
+ 				head_bytes);
++			kunmap(pages[0]);
+ 		}
+ 		if ((actual >= 0) && (head_bytes < actual) &&
+ 			(tail_bytes != 0)) {
+-			memcpy((char *)page_address(pages[num_pages - 1]) +
++			memcpy((char *)kmap(pages[num_pages - 1]) +
+ 				((pagelist->offset + actual) &
+ 				(PAGE_SIZE - 1) & ~(g_cache_line_size - 1)),
+ 				fragments + g_cache_line_size,
+ 				tail_bytes);
++			kunmap(pages[num_pages - 1]);
+ 		}
+ 
+ 		down(&g_free_fragments_mutex);
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index 8f972247b1c1..6499391695b7 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -194,8 +194,10 @@ static void wdm_in_callback(struct urb *urb)
+ 	/*
+ 	 * only set a new error if there is no previous error.
+ 	 * Errors are only cleared during read/open
++	 * Avoid propagating -EPIPE (stall) to userspace since it is
++	 * better handled as an empty read
+ 	 */
+-	if (desc->rerr  == 0)
++	if (desc->rerr == 0 && status != -EPIPE)
+ 		desc->rerr = status;
+ 
+ 	if (length + desc->length > desc->wMaxCommand) {
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index 4be52c602e9b..68b54bd88d1e 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -643,15 +643,23 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
+ 
+ 		} else if (header->bDescriptorType ==
+ 				USB_DT_INTERFACE_ASSOCIATION) {
++			struct usb_interface_assoc_descriptor *d;
++
++			d = (struct usb_interface_assoc_descriptor *)header;
++			if (d->bLength < USB_DT_INTERFACE_ASSOCIATION_SIZE) {
++				dev_warn(ddev,
++					 "config %d has an invalid interface association descriptor of length %d, skipping\n",
++					 cfgno, d->bLength);
++				continue;
++			}
++
+ 			if (iad_num == USB_MAXIADS) {
+ 				dev_warn(ddev, "found more Interface "
+ 					       "Association Descriptors "
+ 					       "than allocated for in "
+ 					       "configuration %d\n", cfgno);
+ 			} else {
+-				config->intf_assoc[iad_num] =
+-					(struct usb_interface_assoc_descriptor
+-					*)header;
++				config->intf_assoc[iad_num] = d;
+ 				iad_num++;
+ 			}
+ 
+@@ -852,7 +860,7 @@ int usb_get_configuration(struct usb_device *dev)
+ 		}
+ 
+ 		if (dev->quirks & USB_QUIRK_DELAY_INIT)
+-			msleep(100);
++			msleep(200);
+ 
+ 		result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno,
+ 		    bigbuffer, length);
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index 0ff0feddfd1f..1d4dfdeb61c1 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -140,6 +140,9 @@ module_param(usbfs_memory_mb, uint, 0644);
+ MODULE_PARM_DESC(usbfs_memory_mb,
+ 		"maximum MB allowed for usbfs buffers (0 = no limit)");
+ 
++/* Hard limit, necessary to avoid arithmetic overflow */
++#define USBFS_XFER_MAX         (UINT_MAX / 2 - 1000000)
++
+ static atomic64_t usbfs_memory_usage;	/* Total memory currently allocated */
+ 
+ /* Check whether it's okay to allocate more memory for a transfer */
+@@ -1460,6 +1463,8 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
+ 				USBDEVFS_URB_ZERO_PACKET |
+ 				USBDEVFS_URB_NO_INTERRUPT))
+ 		return -EINVAL;
++	if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX)
++		return -EINVAL;
+ 	if (uurb->buffer_length > 0 && !uurb->buffer)
+ 		return -EINVAL;
+ 	if (!(uurb->type == USBDEVFS_URB_TYPE_CONTROL &&
+@@ -1571,7 +1576,11 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
+ 			totlen += isopkt[u].length;
+ 		}
+ 		u *= sizeof(struct usb_iso_packet_descriptor);
+-		uurb->buffer_length = totlen;
++		if (totlen <= uurb->buffer_length)
++			uurb->buffer_length = totlen;
++		else
++			WARN_ONCE(1, "uurb->buffer_length is too short %d vs %d",
++				  totlen, uurb->buffer_length);
+ 		break;
+ 
+ 	default:
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 822f8c50e423..78c2aca5b0fc 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -4825,7 +4825,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
+ 			goto loop;
+ 
+ 		if (udev->quirks & USB_QUIRK_DELAY_INIT)
+-			msleep(1000);
++			msleep(2000);
+ 
+ 		/* consecutive bus-powered hubs aren't reliable; they can
+ 		 * violate the voltage drop budget.  if the new child has
+diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
+index 4c38ea41ae96..371a07d874a3 100644
+--- a/drivers/usb/core/message.c
++++ b/drivers/usb/core/message.c
+@@ -2069,6 +2069,10 @@ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
+ 			elength = 1;
+ 			goto next_desc;
+ 		}
++		if ((buflen < elength) || (elength < 3)) {
++			dev_err(&intf->dev, "invalid descriptor buffer length\n");
++			break;
++		}
+ 		if (buffer[1] != USB_DT_CS_INTERFACE) {
+ 			dev_err(&intf->dev, "skipping garbage\n");
+ 			goto next_desc;
+diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
+index 827e376bfa97..75e6cb044eb2 100644
+--- a/drivers/usb/dwc3/ep0.c
++++ b/drivers/usb/dwc3/ep0.c
+@@ -990,6 +990,8 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
+ 					 DWC3_TRBCTL_CONTROL_DATA,
+ 					 true);
+ 
++		req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1];
++
+ 		/* Now prepare one extra TRB to align transfer size */
+ 		dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr,
+ 					 maxpacket - rem,
+@@ -1015,6 +1017,8 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
+ 					 DWC3_TRBCTL_CONTROL_DATA,
+ 					 true);
+ 
++		req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1];
++
+ 		/* Now prepare one extra TRB to align transfer size */
+ 		dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr,
+ 					 0, DWC3_TRBCTL_CONTROL_DATA,
+@@ -1029,6 +1033,9 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
+ 		dwc3_ep0_prepare_one_trb(dep, req->request.dma,
+ 				req->request.length, DWC3_TRBCTL_CONTROL_DATA,
+ 				false);
++
++		req->trb = &dwc->ep0_trb[dep->trb_enqueue];
++
+ 		ret = dwc3_ep0_start_trans(dep);
+ 	}
+ 
+diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
+index f95bddd6513f..daf3a07e3ffb 100644
+--- a/drivers/usb/gadget/function/f_mass_storage.c
++++ b/drivers/usb/gadget/function/f_mass_storage.c
+@@ -307,8 +307,6 @@ struct fsg_common {
+ 	struct completion	thread_notifier;
+ 	struct task_struct	*thread_task;
+ 
+-	/* Callback functions. */
+-	const struct fsg_operations	*ops;
+ 	/* Gadget's private data. */
+ 	void			*private_data;
+ 
+@@ -2440,6 +2438,7 @@ static void handle_exception(struct fsg_common *common)
+ static int fsg_main_thread(void *common_)
+ {
+ 	struct fsg_common	*common = common_;
++	int			i;
+ 
+ 	/*
+ 	 * Allow the thread to be killed by a signal, but set the signal mask
+@@ -2485,21 +2484,16 @@ static int fsg_main_thread(void *common_)
+ 	common->thread_task = NULL;
+ 	spin_unlock_irq(&common->lock);
+ 
+-	if (!common->ops || !common->ops->thread_exits
+-	 || common->ops->thread_exits(common) < 0) {
+-		int i;
++	/* Eject media from all LUNs */
+ 
+-		down_write(&common->filesem);
+-		for (i = 0; i < ARRAY_SIZE(common->luns); i++) {
+-			struct fsg_lun *curlun = common->luns[i];
+-			if (!curlun || !fsg_lun_is_open(curlun))
+-				continue;
++	down_write(&common->filesem);
++	for (i = 0; i < ARRAY_SIZE(common->luns); i++) {
++		struct fsg_lun *curlun = common->luns[i];
+ 
++		if (curlun && fsg_lun_is_open(curlun))
+ 			fsg_lun_close(curlun);
+-			curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
+-		}
+-		up_write(&common->filesem);
+ 	}
++	up_write(&common->filesem);
+ 
+ 	/* Let fsg_unbind() know the thread has exited */
+ 	complete_and_exit(&common->thread_notifier, 0);
+@@ -2690,13 +2684,6 @@ void fsg_common_remove_luns(struct fsg_common *common)
+ }
+ EXPORT_SYMBOL_GPL(fsg_common_remove_luns);
+ 
+-void fsg_common_set_ops(struct fsg_common *common,
+-			const struct fsg_operations *ops)
+-{
+-	common->ops = ops;
+-}
+-EXPORT_SYMBOL_GPL(fsg_common_set_ops);
+-
+ void fsg_common_free_buffers(struct fsg_common *common)
+ {
+ 	_fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
+diff --git a/drivers/usb/gadget/function/f_mass_storage.h b/drivers/usb/gadget/function/f_mass_storage.h
+index d3902313b8ac..dc05ca0c4359 100644
+--- a/drivers/usb/gadget/function/f_mass_storage.h
++++ b/drivers/usb/gadget/function/f_mass_storage.h
+@@ -60,17 +60,6 @@ struct fsg_module_parameters {
+ struct fsg_common;
+ 
+ /* FSF callback functions */
+-struct fsg_operations {
+-	/*
+-	 * Callback function to call when thread exits.  If no
+-	 * callback is set or it returns value lower then zero MSF
+-	 * will force eject all LUNs it operates on (including those
+-	 * marked as non-removable or with prevent_medium_removal flag
+-	 * set).
+-	 */
+-	int (*thread_exits)(struct fsg_common *common);
+-};
+-
+ struct fsg_lun_opts {
+ 	struct config_group group;
+ 	struct fsg_lun *lun;
+@@ -142,9 +131,6 @@ void fsg_common_remove_lun(struct fsg_lun *lun);
+ 
+ void fsg_common_remove_luns(struct fsg_common *common);
+ 
+-void fsg_common_set_ops(struct fsg_common *common,
+-			const struct fsg_operations *ops);
+-
+ int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg,
+ 			  unsigned int id, const char *name,
+ 			  const char **name_pfx);
+diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
+index 684900fcfe24..5c28bee327e1 100644
+--- a/drivers/usb/gadget/legacy/inode.c
++++ b/drivers/usb/gadget/legacy/inode.c
+@@ -28,7 +28,7 @@
+ #include <linux/aio.h>
+ #include <linux/uio.h>
+ #include <linux/refcount.h>
+-
++#include <linux/delay.h>
+ #include <linux/device.h>
+ #include <linux/moduleparam.h>
+ 
+@@ -116,6 +116,7 @@ enum ep0_state {
+ struct dev_data {
+ 	spinlock_t			lock;
+ 	refcount_t			count;
++	int				udc_usage;
+ 	enum ep0_state			state;		/* P: lock */
+ 	struct usb_gadgetfs_event	event [N_EVENT];
+ 	unsigned			ev_next;
+@@ -513,9 +514,9 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
+ 		INIT_WORK(&priv->work, ep_user_copy_worker);
+ 		schedule_work(&priv->work);
+ 	}
+-	spin_unlock(&epdata->dev->lock);
+ 
+ 	usb_ep_free_request(ep, req);
++	spin_unlock(&epdata->dev->lock);
+ 	put_ep(epdata);
+ }
+ 
+@@ -939,9 +940,11 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
+ 			struct usb_request	*req = dev->req;
+ 
+ 			if ((retval = setup_req (ep, req, 0)) == 0) {
++				++dev->udc_usage;
+ 				spin_unlock_irq (&dev->lock);
+ 				retval = usb_ep_queue (ep, req, GFP_KERNEL);
+ 				spin_lock_irq (&dev->lock);
++				--dev->udc_usage;
+ 			}
+ 			dev->state = STATE_DEV_CONNECTED;
+ 
+@@ -983,11 +986,14 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
+ 				retval = -EIO;
+ 			else {
+ 				len = min (len, (size_t)dev->req->actual);
+-// FIXME don't call this with the spinlock held ...
++				++dev->udc_usage;
++				spin_unlock_irq(&dev->lock);
+ 				if (copy_to_user (buf, dev->req->buf, len))
+ 					retval = -EFAULT;
+ 				else
+ 					retval = len;
++				spin_lock_irq(&dev->lock);
++				--dev->udc_usage;
+ 				clean_req (dev->gadget->ep0, dev->req);
+ 				/* NOTE userspace can't yet choose to stall */
+ 			}
+@@ -1131,6 +1137,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ 			retval = setup_req (dev->gadget->ep0, dev->req, len);
+ 			if (retval == 0) {
+ 				dev->state = STATE_DEV_CONNECTED;
++				++dev->udc_usage;
+ 				spin_unlock_irq (&dev->lock);
+ 				if (copy_from_user (dev->req->buf, buf, len))
+ 					retval = -EFAULT;
+@@ -1142,6 +1149,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ 						GFP_KERNEL);
+ 				}
+ 				spin_lock_irq(&dev->lock);
++				--dev->udc_usage;
+ 				if (retval < 0) {
+ 					clean_req (dev->gadget->ep0, dev->req);
+ 				} else
+@@ -1243,9 +1251,21 @@ static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
+ 	struct usb_gadget	*gadget = dev->gadget;
+ 	long ret = -ENOTTY;
+ 
+-	if (gadget->ops->ioctl)
++	spin_lock_irq(&dev->lock);
++	if (dev->state == STATE_DEV_OPENED ||
++			dev->state == STATE_DEV_UNBOUND) {
++		/* Not bound to a UDC */
++	} else if (gadget->ops->ioctl) {
++		++dev->udc_usage;
++		spin_unlock_irq(&dev->lock);
++
+ 		ret = gadget->ops->ioctl (gadget, code, value);
+ 
++		spin_lock_irq(&dev->lock);
++		--dev->udc_usage;
++	}
++	spin_unlock_irq(&dev->lock);
++
+ 	return ret;
+ }
+ 
+@@ -1463,10 +1483,12 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+ 				if (value < 0)
+ 					break;
+ 
++				++dev->udc_usage;
+ 				spin_unlock (&dev->lock);
+ 				value = usb_ep_queue (gadget->ep0, dev->req,
+ 							GFP_KERNEL);
+ 				spin_lock (&dev->lock);
++				--dev->udc_usage;
+ 				if (value < 0) {
+ 					clean_req (gadget->ep0, dev->req);
+ 					break;
+@@ -1490,8 +1512,12 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+ 		req->length = value;
+ 		req->zero = value < w_length;
+ 
++		++dev->udc_usage;
+ 		spin_unlock (&dev->lock);
+ 		value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
++		spin_lock(&dev->lock);
++		--dev->udc_usage;
++		spin_unlock(&dev->lock);
+ 		if (value < 0) {
+ 			DBG (dev, "ep_queue --> %d\n", value);
+ 			req->status = 0;
+@@ -1518,21 +1544,24 @@ static void destroy_ep_files (struct dev_data *dev)
+ 		/* break link to FS */
+ 		ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles);
+ 		list_del_init (&ep->epfiles);
++		spin_unlock_irq (&dev->lock);
++
+ 		dentry = ep->dentry;
+ 		ep->dentry = NULL;
+ 		parent = d_inode(dentry->d_parent);
+ 
+ 		/* break link to controller */
++		mutex_lock(&ep->lock);
+ 		if (ep->state == STATE_EP_ENABLED)
+ 			(void) usb_ep_disable (ep->ep);
+ 		ep->state = STATE_EP_UNBOUND;
+ 		usb_ep_free_request (ep->ep, ep->req);
+ 		ep->ep = NULL;
++		mutex_unlock(&ep->lock);
++
+ 		wake_up (&ep->wait);
+ 		put_ep (ep);
+ 
+-		spin_unlock_irq (&dev->lock);
+-
+ 		/* break link to dcache */
+ 		inode_lock(parent);
+ 		d_delete (dentry);
+@@ -1603,6 +1632,11 @@ gadgetfs_unbind (struct usb_gadget *gadget)
+ 
+ 	spin_lock_irq (&dev->lock);
+ 	dev->state = STATE_DEV_UNBOUND;
++	while (dev->udc_usage > 0) {
++		spin_unlock_irq(&dev->lock);
++		usleep_range(1000, 2000);
++		spin_lock_irq(&dev->lock);
++	}
+ 	spin_unlock_irq (&dev->lock);
+ 
+ 	destroy_ep_files (dev);
+diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c
+index e99ab57ee3e5..fcba59782f26 100644
+--- a/drivers/usb/gadget/legacy/mass_storage.c
++++ b/drivers/usb/gadget/legacy/mass_storage.c
+@@ -107,15 +107,6 @@ static unsigned int fsg_num_buffers = CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS;
+ 
+ FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
+ 
+-static unsigned long msg_registered;
+-static void msg_cleanup(void);
+-
+-static int msg_thread_exits(struct fsg_common *common)
+-{
+-	msg_cleanup();
+-	return 0;
+-}
+-
+ static int msg_do_config(struct usb_configuration *c)
+ {
+ 	struct fsg_opts *opts;
+@@ -154,9 +145,6 @@ static struct usb_configuration msg_config_driver = {
+ 
+ static int msg_bind(struct usb_composite_dev *cdev)
+ {
+-	static const struct fsg_operations ops = {
+-		.thread_exits = msg_thread_exits,
+-	};
+ 	struct fsg_opts *opts;
+ 	struct fsg_config config;
+ 	int status;
+@@ -173,8 +161,6 @@ static int msg_bind(struct usb_composite_dev *cdev)
+ 	if (status)
+ 		goto fail;
+ 
+-	fsg_common_set_ops(opts->common, &ops);
+-
+ 	status = fsg_common_set_cdev(opts->common, cdev, config.can_stall);
+ 	if (status)
+ 		goto fail_set_cdev;
+@@ -256,18 +242,12 @@ MODULE_LICENSE("GPL");
+ 
+ static int __init msg_init(void)
+ {
+-	int ret;
+-
+-	ret = usb_composite_probe(&msg_driver);
+-	set_bit(0, &msg_registered);
+-
+-	return ret;
++	return usb_composite_probe(&msg_driver);
+ }
+ module_init(msg_init);
+ 
+-static void msg_cleanup(void)
++static void __exit msg_cleanup(void)
+ {
+-	if (test_and_clear_bit(0, &msg_registered))
+-		usb_composite_unregister(&msg_driver);
++	usb_composite_unregister(&msg_driver);
+ }
+ module_exit(msg_cleanup);
+diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
+index 98d71400f8a1..a884c022df7a 100644
+--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
++++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
+@@ -29,6 +29,8 @@
+ #include <linux/of_gpio.h>
+ 
+ #include "atmel_usba_udc.h"
++#define USBA_VBUS_IRQFLAGS (IRQF_ONESHOT \
++			   | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING)
+ 
+ #ifdef CONFIG_USB_GADGET_DEBUG_FS
+ #include <linux/debugfs.h>
+@@ -2361,7 +2363,7 @@ static int usba_udc_probe(struct platform_device *pdev)
+ 					IRQ_NOAUTOEN);
+ 			ret = devm_request_threaded_irq(&pdev->dev,
+ 					gpio_to_irq(udc->vbus_pin), NULL,
+-					usba_vbus_irq_thread, IRQF_ONESHOT,
++					usba_vbus_irq_thread, USBA_VBUS_IRQFLAGS,
+ 					"atmel_usba_udc", udc);
+ 			if (ret) {
+ 				udc->vbus_pin = -ENODEV;
+diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
+index e6f04eee95c4..63c5fe6f7bd4 100644
+--- a/drivers/usb/gadget/udc/core.c
++++ b/drivers/usb/gadget/udc/core.c
+@@ -1314,8 +1314,7 @@ static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *dri
+ 	udc->dev.driver = &driver->driver;
+ 	udc->gadget->dev.driver = &driver->driver;
+ 
+-	if (driver->max_speed < udc->gadget->max_speed)
+-		usb_gadget_udc_set_speed(udc, driver->max_speed);
++	usb_gadget_udc_set_speed(udc, driver->max_speed);
+ 
+ 	ret = driver->bind(udc->gadget, driver);
+ 	if (ret)
+diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
+index 3c3760315910..374f85f612d9 100644
+--- a/drivers/usb/gadget/udc/dummy_hcd.c
++++ b/drivers/usb/gadget/udc/dummy_hcd.c
+@@ -237,6 +237,8 @@ struct dummy_hcd {
+ 
+ 	struct usb_device		*udev;
+ 	struct list_head		urbp_list;
++	struct urbp			*next_frame_urbp;
++
+ 	u32				stream_en_ep;
+ 	u8				num_stream[30 / 2];
+ 
+@@ -253,11 +255,13 @@ struct dummy {
+ 	 */
+ 	struct dummy_ep			ep[DUMMY_ENDPOINTS];
+ 	int				address;
++	int				callback_usage;
+ 	struct usb_gadget		gadget;
+ 	struct usb_gadget_driver	*driver;
+ 	struct dummy_request		fifo_req;
+ 	u8				fifo_buf[FIFO_SIZE];
+ 	u16				devstatus;
++	unsigned			ints_enabled:1;
+ 	unsigned			udc_suspended:1;
+ 	unsigned			pullup:1;
+ 
+@@ -440,18 +444,27 @@ static void set_link_state(struct dummy_hcd *dum_hcd)
+ 				(~dum_hcd->old_status) & dum_hcd->port_status;
+ 
+ 		/* Report reset and disconnect events to the driver */
+-		if (dum->driver && (disconnect || reset)) {
++		if (dum->ints_enabled && (disconnect || reset)) {
+ 			stop_activity(dum);
++			++dum->callback_usage;
++			spin_unlock(&dum->lock);
+ 			if (reset)
+ 				usb_gadget_udc_reset(&dum->gadget, dum->driver);
+ 			else
+ 				dum->driver->disconnect(&dum->gadget);
++			spin_lock(&dum->lock);
++			--dum->callback_usage;
+ 		}
+-	} else if (dum_hcd->active != dum_hcd->old_active) {
++	} else if (dum_hcd->active != dum_hcd->old_active &&
++			dum->ints_enabled) {
++		++dum->callback_usage;
++		spin_unlock(&dum->lock);
+ 		if (dum_hcd->old_active && dum->driver->suspend)
+ 			dum->driver->suspend(&dum->gadget);
+ 		else if (!dum_hcd->old_active &&  dum->driver->resume)
+ 			dum->driver->resume(&dum->gadget);
++		spin_lock(&dum->lock);
++		--dum->callback_usage;
+ 	}
+ 
+ 	dum_hcd->old_status = dum_hcd->port_status;
+@@ -972,8 +985,11 @@ static int dummy_udc_start(struct usb_gadget *g,
+ 	 * can't enumerate without help from the driver we're binding.
+ 	 */
+ 
++	spin_lock_irq(&dum->lock);
+ 	dum->devstatus = 0;
+ 	dum->driver = driver;
++	dum->ints_enabled = 1;
++	spin_unlock_irq(&dum->lock);
+ 
+ 	return 0;
+ }
+@@ -984,6 +1000,16 @@ static int dummy_udc_stop(struct usb_gadget *g)
+ 	struct dummy		*dum = dum_hcd->dum;
+ 
+ 	spin_lock_irq(&dum->lock);
++	dum->ints_enabled = 0;
++	stop_activity(dum);
++
++	/* emulate synchronize_irq(): wait for callbacks to finish */
++	while (dum->callback_usage > 0) {
++		spin_unlock_irq(&dum->lock);
++		usleep_range(1000, 2000);
++		spin_lock_irq(&dum->lock);
++	}
++
+ 	dum->driver = NULL;
+ 	spin_unlock_irq(&dum->lock);
+ 
+@@ -1037,7 +1063,12 @@ static int dummy_udc_probe(struct platform_device *pdev)
+ 	memzero_explicit(&dum->gadget, sizeof(struct usb_gadget));
+ 	dum->gadget.name = gadget_name;
+ 	dum->gadget.ops = &dummy_ops;
+-	dum->gadget.max_speed = USB_SPEED_SUPER;
++	if (mod_data.is_super_speed)
++		dum->gadget.max_speed = USB_SPEED_SUPER;
++	else if (mod_data.is_high_speed)
++		dum->gadget.max_speed = USB_SPEED_HIGH;
++	else
++		dum->gadget.max_speed = USB_SPEED_FULL;
+ 
+ 	dum->gadget.dev.parent = &pdev->dev;
+ 	init_dummy_udc_hw(dum);
+@@ -1246,6 +1277,8 @@ static int dummy_urb_enqueue(
+ 
+ 	list_add_tail(&urbp->urbp_list, &dum_hcd->urbp_list);
+ 	urb->hcpriv = urbp;
++	if (!dum_hcd->next_frame_urbp)
++		dum_hcd->next_frame_urbp = urbp;
+ 	if (usb_pipetype(urb->pipe) == PIPE_CONTROL)
+ 		urb->error_count = 1;		/* mark as a new urb */
+ 
+@@ -1521,6 +1554,8 @@ static struct dummy_ep *find_endpoint(struct dummy *dum, u8 address)
+ 	if (!is_active((dum->gadget.speed == USB_SPEED_SUPER ?
+ 			dum->ss_hcd : dum->hs_hcd)))
+ 		return NULL;
++	if (!dum->ints_enabled)
++		return NULL;
+ 	if ((address & ~USB_DIR_IN) == 0)
+ 		return &dum->ep[0];
+ 	for (i = 1; i < DUMMY_ENDPOINTS; i++) {
+@@ -1762,6 +1797,7 @@ static void dummy_timer(unsigned long _dum_hcd)
+ 		spin_unlock_irqrestore(&dum->lock, flags);
+ 		return;
+ 	}
++	dum_hcd->next_frame_urbp = NULL;
+ 
+ 	for (i = 0; i < DUMMY_ENDPOINTS; i++) {
+ 		if (!ep_info[i].name)
+@@ -1778,6 +1814,10 @@ static void dummy_timer(unsigned long _dum_hcd)
+ 		int			type;
+ 		int			status = -EINPROGRESS;
+ 
++		/* stop when we reach URBs queued after the timer interrupt */
++		if (urbp == dum_hcd->next_frame_urbp)
++			break;
++
+ 		urb = urbp->urb;
+ 		if (urb->unlinked)
+ 			goto return_urb;
+@@ -1857,10 +1897,12 @@ static void dummy_timer(unsigned long _dum_hcd)
+ 			 * until setup() returns; no reentrancy issues etc.
+ 			 */
+ 			if (value > 0) {
++				++dum->callback_usage;
+ 				spin_unlock(&dum->lock);
+ 				value = dum->driver->setup(&dum->gadget,
+ 						&setup);
+ 				spin_lock(&dum->lock);
++				--dum->callback_usage;
+ 
+ 				if (value >= 0) {
+ 					/* no delays (max 64KB data stage) */
+@@ -2561,8 +2603,6 @@ static struct hc_driver dummy_hcd = {
+ 	.product_desc =		"Dummy host controller",
+ 	.hcd_priv_size =	sizeof(struct dummy_hcd),
+ 
+-	.flags =		HCD_USB3 | HCD_SHARED,
+-
+ 	.reset =		dummy_setup,
+ 	.start =		dummy_start,
+ 	.stop =			dummy_stop,
+@@ -2591,8 +2631,12 @@ static int dummy_hcd_probe(struct platform_device *pdev)
+ 	dev_info(&pdev->dev, "%s, driver " DRIVER_VERSION "\n", driver_desc);
+ 	dum = *((void **)dev_get_platdata(&pdev->dev));
+ 
+-	if (!mod_data.is_super_speed)
++	if (mod_data.is_super_speed)
++		dummy_hcd.flags = HCD_USB3 | HCD_SHARED;
++	else if (mod_data.is_high_speed)
+ 		dummy_hcd.flags = HCD_USB2;
++	else
++		dummy_hcd.flags = HCD_USB11;
+ 	hs_hcd = usb_create_hcd(&dummy_hcd, &pdev->dev, dev_name(&pdev->dev));
+ 	if (!hs_hcd)
+ 		return -ENOMEM;
+diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
+index e1de8fe599a3..89ce1eddfe77 100644
+--- a/drivers/usb/gadget/udc/renesas_usb3.c
++++ b/drivers/usb/gadget/udc/renesas_usb3.c
+@@ -1032,7 +1032,7 @@ static int usb3_write_pipe(struct renesas_usb3_ep *usb3_ep,
+ 			usb3_ep->ep.maxpacket);
+ 	u8 *buf = usb3_req->req.buf + usb3_req->req.actual;
+ 	u32 tmp = 0;
+-	bool is_last;
++	bool is_last = !len ? true : false;
+ 
+ 	if (usb3_wait_pipe_status(usb3_ep, PX_STA_BUFSTS) < 0)
+ 		return -EBUSY;
+@@ -1053,7 +1053,8 @@ static int usb3_write_pipe(struct renesas_usb3_ep *usb3_ep,
+ 		usb3_write(usb3, tmp, fifo_reg);
+ 	}
+ 
+-	is_last = usb3_is_transfer_complete(usb3_ep, usb3_req);
++	if (!is_last)
++		is_last = usb3_is_transfer_complete(usb3_ep, usb3_req);
+ 	/* Send the data */
+ 	usb3_set_px_con_send(usb3_ep, len, is_last);
+ 
+@@ -1144,7 +1145,8 @@ static void usb3_start_pipe0(struct renesas_usb3_ep *usb3_ep,
+ 		usb3_set_p0_con_for_ctrl_read_data(usb3);
+ 	} else {
+ 		usb3_clear_bit(usb3, P0_MOD_DIR, USB3_P0_MOD);
+-		usb3_set_p0_con_for_ctrl_write_data(usb3);
++		if (usb3_req->req.length)
++			usb3_set_p0_con_for_ctrl_write_data(usb3);
+ 	}
+ 
+ 	usb3_p0_xfer(usb3_ep, usb3_req);
+@@ -2047,7 +2049,16 @@ static u32 usb3_calc_ramarea(int ram_size)
+ static u32 usb3_calc_rammap_val(struct renesas_usb3_ep *usb3_ep,
+ 				const struct usb_endpoint_descriptor *desc)
+ {
+-	return usb3_ep->rammap_val | PN_RAMMAP_MPKT(usb_endpoint_maxp(desc));
++	int i;
++	const u32 max_packet_array[] = {8, 16, 32, 64, 512};
++	u32 mpkt = PN_RAMMAP_MPKT(1024);
++
++	for (i = 0; i < ARRAY_SIZE(max_packet_array); i++) {
++		if (usb_endpoint_maxp(desc) <= max_packet_array[i])
++			mpkt = PN_RAMMAP_MPKT(max_packet_array[i]);
++	}
++
++	return usb3_ep->rammap_val | mpkt;
+ }
+ 
+ static int usb3_enable_pipe_n(struct renesas_usb3_ep *usb3_ep,
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index 658d9d1f9ea3..6dda3623a276 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -447,7 +447,7 @@ static int usb_asmedia_wait_write(struct pci_dev *pdev)
+ 		if ((value & ASMT_CONTROL_WRITE_BIT) == 0)
+ 			return 0;
+ 
+-		usleep_range(40, 60);
++		udelay(50);
+ 	}
+ 
+ 	dev_warn(&pdev->dev, "%s: check_write_ready timeout", __func__);
+@@ -1022,7 +1022,7 @@ EXPORT_SYMBOL_GPL(usb_disable_xhci_ports);
+  *
+  * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
+  * It signals to the BIOS that the OS wants control of the host controller,
+- * and then waits 5 seconds for the BIOS to hand over control.
++ * and then waits 1 second for the BIOS to hand over control.
+  * If we timeout, assume the BIOS is broken and take control anyway.
+  */
+ static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
+@@ -1069,9 +1069,9 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
+ 	if (val & XHCI_HC_BIOS_OWNED) {
+ 		writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
+ 
+-		/* Wait for 5 seconds with 10 microsecond polling interval */
++		/* Wait for 1 second with 10 microsecond polling interval */
+ 		timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
+-				0, 5000, 10);
++				0, 1000000, 10);
+ 
+ 		/* Assume a buggy BIOS and take HC ownership anyway */
+ 		if (timeout) {
+@@ -1100,7 +1100,7 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
+ 	 * operational or runtime registers.  Wait 5 seconds and no more.
+ 	 */
+ 	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
+-			5000, 10);
++			5000000, 10);
+ 	/* Assume a buggy HC and start HC initialization anyway */
+ 	if (timeout) {
+ 		val = readl(op_reg_base + XHCI_STS_OFFSET);
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 00721e8807ab..950dee33bfcc 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -112,7 +112,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
+ 
+ 	/* If PSI table exists, add the custom speed attributes from it */
+ 	if (usb3_1 && xhci->usb3_rhub.psi_count) {
+-		u32 ssp_cap_base, bm_attrib, psi;
++		u32 ssp_cap_base, bm_attrib, psi, psi_mant, psi_exp;
+ 		int offset;
+ 
+ 		ssp_cap_base = USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE;
+@@ -139,6 +139,15 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
+ 		for (i = 0; i < xhci->usb3_rhub.psi_count; i++) {
+ 			psi = xhci->usb3_rhub.psi[i];
+ 			psi &= ~USB_SSP_SUBLINK_SPEED_RSVD;
++			psi_exp = XHCI_EXT_PORT_PSIE(psi);
++			psi_mant = XHCI_EXT_PORT_PSIM(psi);
++
++			/* Shift to Gbps and set SSP Link BIT(14) if 10Gpbs */
++			for (; psi_exp < 3; psi_exp++)
++				psi_mant /= 1000;
++			if (psi_mant >= 10)
++				psi |= BIT(14);
++
+ 			if ((psi & PLT_MASK) == PLT_SYM) {
+ 			/* Symmetric, create SSA RX and TX from one PSI entry */
+ 				put_unaligned_le32(psi, &buf[offset]);
+@@ -1473,9 +1482,6 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
+ 				t2 |= PORT_WKOC_E | PORT_WKCONN_E;
+ 				t2 &= ~PORT_WKDISC_E;
+ 			}
+-			if ((xhci->quirks & XHCI_U2_DISABLE_WAKE) &&
+-			    (hcd->speed < HCD_USB3))
+-				t2 &= ~PORT_WAKE_BITS;
+ 		} else
+ 			t2 &= ~PORT_WAKE_BITS;
+ 
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 8071c8fdd15e..76f392954733 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -54,11 +54,6 @@
+ #define PCI_DEVICE_ID_INTEL_APL_XHCI			0x5aa8
+ #define PCI_DEVICE_ID_INTEL_DNV_XHCI			0x19d0
+ 
+-#define PCI_DEVICE_ID_AMD_PROMONTORYA_4			0x43b9
+-#define PCI_DEVICE_ID_AMD_PROMONTORYA_3			0x43ba
+-#define PCI_DEVICE_ID_AMD_PROMONTORYA_2			0x43bb
+-#define PCI_DEVICE_ID_AMD_PROMONTORYA_1			0x43bc
+-
+ #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI		0x1142
+ 
+ static const char hcd_name[] = "xhci_hcd";
+@@ -142,13 +137,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 	if (pdev->vendor == PCI_VENDOR_ID_AMD)
+ 		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ 
+-	if ((pdev->vendor == PCI_VENDOR_ID_AMD) &&
+-		((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) ||
+-		(pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_3) ||
+-		(pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2) ||
+-		(pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1)))
+-		xhci->quirks |= XHCI_U2_DISABLE_WAKE;
+-
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
+ 		xhci->quirks |= XHCI_LPM_SUPPORT;
+ 		xhci->quirks |= XHCI_INTEL_HOST;
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index c04144b25a67..208740771ff9 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -186,14 +186,18 @@ static int xhci_plat_probe(struct platform_device *pdev)
+ 	 * 2. xhci_plat is child of a device from firmware (dwc3-plat)
+ 	 * 3. xhci_plat is grandchild of a pci device (dwc3-pci)
+ 	 */
+-	sysdev = &pdev->dev;
+-	if (sysdev->parent && !sysdev->of_node && sysdev->parent->of_node)
+-		sysdev = sysdev->parent;
++	for (sysdev = &pdev->dev; sysdev; sysdev = sysdev->parent) {
++		if (is_of_node(sysdev->fwnode) ||
++			is_acpi_device_node(sysdev->fwnode))
++			break;
+ #ifdef CONFIG_PCI
+-	else if (sysdev->parent && sysdev->parent->parent &&
+-		 sysdev->parent->parent->bus == &pci_bus_type)
+-		sysdev = sysdev->parent->parent;
++		else if (sysdev->bus == &pci_bus_type)
++			break;
+ #endif
++	}
++
++	if (!sysdev)
++		sysdev = &pdev->dev;
+ 
+ 	/* Try to set 64-bit DMA first */
+ 	if (WARN_ON(!sysdev->dma_mask))
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index b2ff1ff1a02f..ee198ea47f49 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -1703,7 +1703,8 @@ static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
+ 	if (xhci->quirks & XHCI_MTK_HOST) {
+ 		ret = xhci_mtk_add_ep_quirk(hcd, udev, ep);
+ 		if (ret < 0) {
+-			xhci_free_endpoint_ring(xhci, virt_dev, ep_index);
++			xhci_ring_free(xhci, virt_dev->eps[ep_index].new_ring);
++			virt_dev->eps[ep_index].new_ring = NULL;
+ 			return ret;
+ 		}
+ 	}
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index e3e935291ed6..d7420bb9f2e2 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -728,6 +728,8 @@ struct xhci_ep_ctx {
+ #define EP_MAXPSTREAMS(p)	(((p) << 10) & EP_MAXPSTREAMS_MASK)
+ /* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */
+ #define	EP_HAS_LSA		(1 << 15)
++/* hosts with LEC=1 use bits 31:24 as ESIT high bits. */
++#define CTX_TO_MAX_ESIT_PAYLOAD_HI(p)	(((p) >> 24) & 0xff)
+ 
+ /* ep_info2 bitmasks */
+ /*
+@@ -1674,7 +1676,7 @@ struct xhci_bus_state {
+ 
+ static inline unsigned int hcd_index(struct usb_hcd *hcd)
+ {
+-	if (hcd->speed == HCD_USB3)
++	if (hcd->speed >= HCD_USB3)
+ 		return 0;
+ 	else
+ 		return 1;
+@@ -1819,7 +1821,7 @@ struct xhci_hcd {
+ /* For controller with a broken Port Disable implementation */
+ #define XHCI_BROKEN_PORT_PED	(1 << 25)
+ #define XHCI_LIMIT_ENDPOINT_INTERVAL_7	(1 << 26)
+-#define XHCI_U2_DISABLE_WAKE	(1 << 27)
++/* Reserved. It was XHCI_U2_DISABLE_WAKE */
+ #define XHCI_ASMEDIA_MODIFY_FLOWCONTROL	(1 << 28)
+ 
+ 	unsigned int		num_active_eps;
+@@ -2452,8 +2454,8 @@ static inline const char *xhci_decode_ep_context(u32 info, u32 info2, u64 deq,
+ 	u8 lsa;
+ 	u8 hid;
+ 
+-	esit = EP_MAX_ESIT_PAYLOAD_HI(info) << 16 |
+-		EP_MAX_ESIT_PAYLOAD_LO(tx_info);
++	esit = CTX_TO_MAX_ESIT_PAYLOAD_HI(info) << 16 |
++		CTX_TO_MAX_ESIT_PAYLOAD(tx_info);
+ 
+ 	ep_state = info & EP_STATE_MASK;
+ 	max_pstr = info & EP_MAXPSTREAMS_MASK;
+diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
+index d1af831f43eb..68f26904c316 100644
+--- a/drivers/usb/renesas_usbhs/fifo.c
++++ b/drivers/usb/renesas_usbhs/fifo.c
+@@ -282,11 +282,26 @@ static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
+ 			      struct usbhs_fifo *fifo)
+ {
+ 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
++	int ret = 0;
+ 
+-	if (!usbhs_pipe_is_dcp(pipe))
+-		usbhsf_fifo_barrier(priv, fifo);
++	if (!usbhs_pipe_is_dcp(pipe)) {
++		/*
++		 * This driver checks the pipe condition first to avoid -EBUSY
++		 * from usbhsf_fifo_barrier() with about 10 msec delay in
++		 * the interrupt handler if the pipe is RX direction and empty.
++		 */
++		if (usbhs_pipe_is_dir_in(pipe))
++			ret = usbhs_pipe_is_accessible(pipe);
++		if (!ret)
++			ret = usbhsf_fifo_barrier(priv, fifo);
++	}
+ 
+-	usbhs_write(priv, fifo->ctr, BCLR);
++	/*
++	 * if non-DCP pipe, this driver should set BCLR when
++	 * usbhsf_fifo_barrier() returns 0.
++	 */
++	if (!ret)
++		usbhs_write(priv, fifo->ctr, BCLR);
+ }
+ 
+ static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv,
+diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
+index 1a59f335b063..a3ccb899df60 100644
+--- a/drivers/usb/storage/transport.c
++++ b/drivers/usb/storage/transport.c
+@@ -834,13 +834,25 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
+ 			if (result == USB_STOR_TRANSPORT_GOOD) {
+ 				srb->result = SAM_STAT_GOOD;
+ 				srb->sense_buffer[0] = 0x0;
++			}
++
++			/*
++			 * ATA-passthru commands use sense data to report
++			 * the command completion status, and often devices
++			 * return Check Condition status when nothing is
++			 * wrong.
++			 */
++			else if (srb->cmnd[0] == ATA_16 ||
++					srb->cmnd[0] == ATA_12) {
++				/* leave the data alone */
++			}
+ 
+ 			/*
+ 			 * If there was a problem, report an unspecified
+ 			 * hardware error to prevent the higher layers from
+ 			 * entering an infinite retry loop.
+ 			 */
+-			} else {
++			else {
+ 				srb->result = DID_ERROR << 16;
+ 				if ((sshdr.response_code & 0x72) == 0x72)
+ 					srb->sense_buffer[1] = HARDWARE_ERROR;
+diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
+index f58caa9e6a27..a155cd02bce2 100644
+--- a/drivers/usb/storage/uas-detect.h
++++ b/drivers/usb/storage/uas-detect.h
+@@ -9,7 +9,8 @@ static int uas_is_interface(struct usb_host_interface *intf)
+ 		intf->desc.bInterfaceProtocol == USB_PR_UAS);
+ }
+ 
+-static int uas_find_uas_alt_setting(struct usb_interface *intf)
++static struct usb_host_interface *uas_find_uas_alt_setting(
++		struct usb_interface *intf)
+ {
+ 	int i;
+ 
+@@ -17,10 +18,10 @@ static int uas_find_uas_alt_setting(struct usb_interface *intf)
+ 		struct usb_host_interface *alt = &intf->altsetting[i];
+ 
+ 		if (uas_is_interface(alt))
+-			return alt->desc.bAlternateSetting;
++			return alt;
+ 	}
+ 
+-	return -ENODEV;
++	return NULL;
+ }
+ 
+ static int uas_find_endpoints(struct usb_host_interface *alt,
+@@ -58,14 +59,14 @@ static int uas_use_uas_driver(struct usb_interface *intf,
+ 	struct usb_device *udev = interface_to_usbdev(intf);
+ 	struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+ 	unsigned long flags = id->driver_info;
+-	int r, alt;
+-
++	struct usb_host_interface *alt;
++	int r;
+ 
+ 	alt = uas_find_uas_alt_setting(intf);
+-	if (alt < 0)
++	if (!alt)
+ 		return 0;
+ 
+-	r = uas_find_endpoints(&intf->altsetting[alt], eps);
++	r = uas_find_endpoints(alt, eps);
+ 	if (r < 0)
+ 		return 0;
+ 
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index 5ef014ba6ae8..9876af4ab64e 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -873,14 +873,14 @@ MODULE_DEVICE_TABLE(usb, uas_usb_ids);
+ static int uas_switch_interface(struct usb_device *udev,
+ 				struct usb_interface *intf)
+ {
+-	int alt;
++	struct usb_host_interface *alt;
+ 
+ 	alt = uas_find_uas_alt_setting(intf);
+-	if (alt < 0)
+-		return alt;
++	if (!alt)
++		return -ENODEV;
+ 
+-	return usb_set_interface(udev,
+-			intf->altsetting[0].desc.bInterfaceNumber, alt);
++	return usb_set_interface(udev, alt->desc.bInterfaceNumber,
++			alt->desc.bAlternateSetting);
+ }
+ 
+ static int uas_configure_endpoints(struct uas_dev_info *devinfo)
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 5a70c33ef0e0..eb06d88b41d6 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -1459,6 +1459,13 @@ UNUSUAL_DEV( 0x0bc2, 0x3010, 0x0000, 0x0000,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_SANE_SENSE ),
+ 
++/* Reported by Kris Lindgren <kris.lindgren@gmail.com> */
++UNUSUAL_DEV( 0x0bc2, 0x3332, 0x0000, 0x9999,
++		"Seagate",
++		"External",
++		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++		US_FL_NO_WP_DETECT ),
++
+ UNUSUAL_DEV(  0x0d49, 0x7310, 0x0000, 0x9999,
+ 		"Maxtor",
+ 		"USB to SATA",
+diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
+index 35a1e777b449..9a53912bdfe9 100644
+--- a/drivers/uwb/hwa-rc.c
++++ b/drivers/uwb/hwa-rc.c
+@@ -825,6 +825,8 @@ static int hwarc_probe(struct usb_interface *iface,
+ 
+ 	if (iface->cur_altsetting->desc.bNumEndpoints < 1)
+ 		return -ENODEV;
++	if (!usb_endpoint_xfer_int(&iface->cur_altsetting->endpoint[0].desc))
++		return -ENODEV;
+ 
+ 	result = -ENOMEM;
+ 	uwb_rc = uwb_rc_alloc();
+diff --git a/drivers/uwb/uwbd.c b/drivers/uwb/uwbd.c
+index 01c20a260a8b..39dd4ef53c77 100644
+--- a/drivers/uwb/uwbd.c
++++ b/drivers/uwb/uwbd.c
+@@ -302,18 +302,22 @@ static int uwbd(void *param)
+ /** Start the UWB daemon */
+ void uwbd_start(struct uwb_rc *rc)
+ {
+-	rc->uwbd.task = kthread_run(uwbd, rc, "uwbd");
+-	if (rc->uwbd.task == NULL)
++	struct task_struct *task = kthread_run(uwbd, rc, "uwbd");
++	if (IS_ERR(task)) {
++		rc->uwbd.task = NULL;
+ 		printk(KERN_ERR "UWB: Cannot start management daemon; "
+ 		       "UWB won't work\n");
+-	else
++	} else {
++		rc->uwbd.task = task;
+ 		rc->uwbd.pid = rc->uwbd.task->pid;
++	}
+ }
+ 
+ /* Stop the UWB daemon and free any unprocessed events */
+ void uwbd_stop(struct uwb_rc *rc)
+ {
+-	kthread_stop(rc->uwbd.task);
++	if (rc->uwbd.task)
++		kthread_stop(rc->uwbd.task);
+ 	uwbd_flush(rc);
+ }
+ 
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 3f3eb7b17cac..806eb85343fb 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -723,7 +723,7 @@ struct btrfs_delayed_root;
+  * Indicate that a whole-filesystem exclusive operation is running
+  * (device replace, resize, device add/delete, balance)
+  */
+-#define BTRFS_FS_EXCL_OP			14
++#define BTRFS_FS_EXCL_OP			16
+ 
+ struct btrfs_fs_info {
+ 	u8 fsid[BTRFS_FSID_SIZE];
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 0aff9b278c19..4aa3d4c27dfe 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -2799,7 +2799,7 @@ static int submit_extent_page(int op, int op_flags, struct extent_io_tree *tree,
+ 		}
+ 	}
+ 
+-	bio = btrfs_bio_alloc(bdev, sector << 9);
++	bio = btrfs_bio_alloc(bdev, (u64)sector << 9);
+ 	bio_add_page(bio, page, page_size, offset);
+ 	bio->bi_end_io = end_io_func;
+ 	bio->bi_private = tree;
+diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
+index acb6f97deb97..196a07a87179 100644
+--- a/fs/overlayfs/copy_up.c
++++ b/fs/overlayfs/copy_up.c
+@@ -561,10 +561,8 @@ static int ovl_do_copy_up(struct ovl_copy_up_ctx *c)
+ 		c->tmpfile = true;
+ 		err = ovl_copy_up_locked(c);
+ 	} else {
+-		err = -EIO;
+-		if (lock_rename(c->workdir, c->destdir) != NULL) {
+-			pr_err("overlayfs: failed to lock workdir+upperdir\n");
+-		} else {
++		err = ovl_lock_rename_workdir(c->workdir, c->destdir);
++		if (!err) {
+ 			err = ovl_copy_up_locked(c);
+ 			unlock_rename(c->workdir, c->destdir);
+ 		}
+diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
+index 48b70e6490f3..9b97b35b39c8 100644
+--- a/fs/overlayfs/dir.c
++++ b/fs/overlayfs/dir.c
+@@ -216,26 +216,6 @@ static int ovl_create_upper(struct dentry *dentry, struct inode *inode,
+ 	return err;
+ }
+ 
+-static int ovl_lock_rename_workdir(struct dentry *workdir,
+-				   struct dentry *upperdir)
+-{
+-	/* Workdir should not be the same as upperdir */
+-	if (workdir == upperdir)
+-		goto err;
+-
+-	/* Workdir should not be subdir of upperdir and vice versa */
+-	if (lock_rename(workdir, upperdir) != NULL)
+-		goto err_unlock;
+-
+-	return 0;
+-
+-err_unlock:
+-	unlock_rename(workdir, upperdir);
+-err:
+-	pr_err("overlayfs: failed to lock workdir+upperdir\n");
+-	return -EIO;
+-}
+-
+ static struct dentry *ovl_clear_empty(struct dentry *dentry,
+ 				      struct list_head *list)
+ {
+diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
+index 8aef2b304b2d..9deec68075dc 100644
+--- a/fs/overlayfs/namei.c
++++ b/fs/overlayfs/namei.c
+@@ -506,6 +506,7 @@ static struct dentry *ovl_lookup_index(struct dentry *dentry,
+ 
+ 	index = lookup_one_len_unlocked(name.name, ofs->indexdir, name.len);
+ 	if (IS_ERR(index)) {
++		err = PTR_ERR(index);
+ 		pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%*s, err=%i);\n"
+ 				    "overlayfs: mount with '-o index=off' to disable inodes index.\n",
+ 				    d_inode(origin)->i_ino, name.len, name.name,
+diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
+index e927a62c97ae..f57f47742f5f 100644
+--- a/fs/overlayfs/overlayfs.h
++++ b/fs/overlayfs/overlayfs.h
+@@ -234,6 +234,7 @@ bool ovl_inuse_trylock(struct dentry *dentry);
+ void ovl_inuse_unlock(struct dentry *dentry);
+ int ovl_nlink_start(struct dentry *dentry, bool *locked);
+ void ovl_nlink_end(struct dentry *dentry, bool locked);
++int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir);
+ 
+ static inline bool ovl_is_impuredir(struct dentry *dentry)
+ {
+diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
+index 878a750986dd..25d9b5adcd42 100644
+--- a/fs/overlayfs/ovl_entry.h
++++ b/fs/overlayfs/ovl_entry.h
+@@ -37,6 +37,9 @@ struct ovl_fs {
+ 	bool noxattr;
+ 	/* sb common to all layers */
+ 	struct super_block *same_sb;
++	/* Did we take the inuse lock? */
++	bool upperdir_locked;
++	bool workdir_locked;
+ };
+ 
+ /* private information held for every overlayfs dentry */
+diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
+index 3d424a51cabb..74f7ead442f0 100644
+--- a/fs/overlayfs/readdir.c
++++ b/fs/overlayfs/readdir.c
+@@ -672,6 +672,7 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt,
+ 			 struct path *lowerstack, unsigned int numlower)
+ {
+ 	int err;
++	struct dentry *index = NULL;
+ 	struct inode *dir = dentry->d_inode;
+ 	struct path path = { .mnt = mnt, .dentry = dentry };
+ 	LIST_HEAD(list);
+@@ -690,8 +691,6 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt,
+ 
+ 	inode_lock_nested(dir, I_MUTEX_PARENT);
+ 	list_for_each_entry(p, &list, l_node) {
+-		struct dentry *index;
+-
+ 		if (p->name[0] == '.') {
+ 			if (p->len == 1)
+ 				continue;
+@@ -701,6 +700,7 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt,
+ 		index = lookup_one_len(p->name, dentry, p->len);
+ 		if (IS_ERR(index)) {
+ 			err = PTR_ERR(index);
++			index = NULL;
+ 			break;
+ 		}
+ 		err = ovl_verify_index(index, lowerstack, numlower);
+@@ -712,7 +712,9 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt,
+ 				break;
+ 		}
+ 		dput(index);
++		index = NULL;
+ 	}
++	dput(index);
+ 	inode_unlock(dir);
+ out:
+ 	ovl_cache_free(&list);
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index d86e89f97201..a1464905c1ea 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -210,9 +210,10 @@ static void ovl_put_super(struct super_block *sb)
+ 
+ 	dput(ufs->indexdir);
+ 	dput(ufs->workdir);
+-	ovl_inuse_unlock(ufs->workbasedir);
++	if (ufs->workdir_locked)
++		ovl_inuse_unlock(ufs->workbasedir);
+ 	dput(ufs->workbasedir);
+-	if (ufs->upper_mnt)
++	if (ufs->upper_mnt && ufs->upperdir_locked)
+ 		ovl_inuse_unlock(ufs->upper_mnt->mnt_root);
+ 	mntput(ufs->upper_mnt);
+ 	for (i = 0; i < ufs->numlower; i++)
+@@ -880,9 +881,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
+ 			goto out_put_upperpath;
+ 
+ 		err = -EBUSY;
+-		if (!ovl_inuse_trylock(upperpath.dentry)) {
+-			pr_err("overlayfs: upperdir is in-use by another mount\n");
++		if (ovl_inuse_trylock(upperpath.dentry)) {
++			ufs->upperdir_locked = true;
++		} else if (ufs->config.index) {
++			pr_err("overlayfs: upperdir is in-use by another mount, mount with '-o index=off' to override exclusive upperdir protection.\n");
+ 			goto out_put_upperpath;
++		} else {
++			pr_warn("overlayfs: upperdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
+ 		}
+ 
+ 		err = ovl_mount_dir(ufs->config.workdir, &workpath);
+@@ -900,9 +905,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
+ 		}
+ 
+ 		err = -EBUSY;
+-		if (!ovl_inuse_trylock(workpath.dentry)) {
+-			pr_err("overlayfs: workdir is in-use by another mount\n");
++		if (ovl_inuse_trylock(workpath.dentry)) {
++			ufs->workdir_locked = true;
++		} else if (ufs->config.index) {
++			pr_err("overlayfs: workdir is in-use by another mount, mount with '-o index=off' to override exclusive workdir protection.\n");
+ 			goto out_put_workpath;
++		} else {
++			pr_warn("overlayfs: workdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
+ 		}
+ 
+ 		ufs->workbasedir = workpath.dentry;
+@@ -1155,11 +1164,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
+ out_free_lowertmp:
+ 	kfree(lowertmp);
+ out_unlock_workdentry:
+-	ovl_inuse_unlock(workpath.dentry);
++	if (ufs->workdir_locked)
++		ovl_inuse_unlock(workpath.dentry);
+ out_put_workpath:
+ 	path_put(&workpath);
+ out_unlock_upperdentry:
+-	ovl_inuse_unlock(upperpath.dentry);
++	if (ufs->upperdir_locked)
++		ovl_inuse_unlock(upperpath.dentry);
+ out_put_upperpath:
+ 	path_put(&upperpath);
+ out_free_config:
+diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
+index f46ad75dc96a..8bff64f14190 100644
+--- a/fs/overlayfs/util.c
++++ b/fs/overlayfs/util.c
+@@ -418,7 +418,7 @@ void ovl_inuse_unlock(struct dentry *dentry)
+ 	}
+ }
+ 
+-/* Called must hold OVL_I(inode)->oi_lock */
++/* Caller must hold OVL_I(inode)->lock */
+ static void ovl_cleanup_index(struct dentry *dentry)
+ {
+ 	struct inode *dir = ovl_indexdir(dentry->d_sb)->d_inode;
+@@ -457,6 +457,9 @@ static void ovl_cleanup_index(struct dentry *dentry)
+ 	err = PTR_ERR(index);
+ 	if (!IS_ERR(index))
+ 		err = ovl_cleanup(dir, index);
++	else
++		index = NULL;
++
+ 	inode_unlock(dir);
+ 	if (err)
+ 		goto fail;
+@@ -545,3 +548,22 @@ void ovl_nlink_end(struct dentry *dentry, bool locked)
+ 		mutex_unlock(&OVL_I(d_inode(dentry))->lock);
+ 	}
+ }
++
++int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir)
++{
++	/* Workdir should not be the same as upperdir */
++	if (workdir == upperdir)
++		goto err;
++
++	/* Workdir should not be subdir of upperdir and vice versa */
++	if (lock_rename(workdir, upperdir) != NULL)
++		goto err_unlock;
++
++	return 0;
++
++err_unlock:
++	unlock_rename(workdir, upperdir);
++err:
++	pr_err("overlayfs: failed to lock workdir+upperdir\n");
++	return -EIO;
++}
+diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
+index b0d5897bc4e6..be795bf20147 100644
+--- a/fs/userfaultfd.c
++++ b/fs/userfaultfd.c
+@@ -566,6 +566,12 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
+ 			break;
+ 		if (ACCESS_ONCE(ctx->released) ||
+ 		    fatal_signal_pending(current)) {
++			/*
++			 * &ewq->wq may be queued in fork_event, but
++			 * __remove_wait_queue ignores the head
++			 * parameter. It would be a problem if it
++			 * didn't.
++			 */
+ 			__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
+ 			if (ewq->msg.event == UFFD_EVENT_FORK) {
+ 				struct userfaultfd_ctx *new;
+@@ -1039,6 +1045,12 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
+ 					(unsigned long)
+ 					uwq->msg.arg.reserved.reserved1;
+ 				list_move(&uwq->wq.entry, &fork_event);
++				/*
++				 * fork_nctx can be freed as soon as
++				 * we drop the lock, unless we take a
++				 * reference on it.
++				 */
++				userfaultfd_ctx_get(fork_nctx);
+ 				spin_unlock(&ctx->event_wqh.lock);
+ 				ret = 0;
+ 				break;
+@@ -1069,19 +1081,53 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
+ 
+ 	if (!ret && msg->event == UFFD_EVENT_FORK) {
+ 		ret = resolve_userfault_fork(ctx, fork_nctx, msg);
++		spin_lock(&ctx->event_wqh.lock);
++		if (!list_empty(&fork_event)) {
++			/*
++			 * The fork thread didn't abort, so we can
++			 * drop the temporary refcount.
++			 */
++			userfaultfd_ctx_put(fork_nctx);
++
++			uwq = list_first_entry(&fork_event,
++					       typeof(*uwq),
++					       wq.entry);
++			/*
++			 * If fork_event list wasn't empty and in turn
++			 * the event wasn't already released by fork
++			 * (the event is allocated on fork kernel
++			 * stack), put the event back to its place in
++			 * the event_wq. fork_event head will be freed
++			 * as soon as we return so the event cannot
++			 * stay queued there no matter the current
++			 * "ret" value.
++			 */
++			list_del(&uwq->wq.entry);
++			__add_wait_queue(&ctx->event_wqh, &uwq->wq);
+ 
+-		if (!ret) {
+-			spin_lock(&ctx->event_wqh.lock);
+-			if (!list_empty(&fork_event)) {
+-				uwq = list_first_entry(&fork_event,
+-						       typeof(*uwq),
+-						       wq.entry);
+-				list_del(&uwq->wq.entry);
+-				__add_wait_queue(&ctx->event_wqh, &uwq->wq);
++			/*
++			 * Leave the event in the waitqueue and report
++			 * error to userland if we failed to resolve
++			 * the userfault fork.
++			 */
++			if (likely(!ret))
+ 				userfaultfd_event_complete(ctx, uwq);
+-			}
+-			spin_unlock(&ctx->event_wqh.lock);
++		} else {
++			/*
++			 * Here the fork thread aborted and the
++			 * refcount from the fork thread on fork_nctx
++			 * has already been released. We still hold
++			 * the reference we took before releasing the
++			 * lock above. If resolve_userfault_fork
++			 * failed we've to drop it because the
++			 * fork_nctx has to be freed in such case. If
++			 * it succeeded we'll hold it because the new
++			 * uffd references it.
++			 */
++			if (ret)
++				userfaultfd_ctx_put(fork_nctx);
+ 		}
++		spin_unlock(&ctx->event_wqh.lock);
+ 	}
+ 
+ 	return ret;
+diff --git a/fs/xattr.c b/fs/xattr.c
+index 464c94bf65f9..5441a6d95396 100644
+--- a/fs/xattr.c
++++ b/fs/xattr.c
+@@ -249,7 +249,7 @@ xattr_getsecurity(struct inode *inode, const char *name, void *value,
+ 	}
+ 	memcpy(value, buffer, len);
+ out:
+-	security_release_secctx(buffer, len);
++	kfree(buffer);
+ out_noalloc:
+ 	return len;
+ }
+diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
+index 0504ef8f3aa3..976f8ac26665 100644
+--- a/include/asm-generic/percpu.h
++++ b/include/asm-generic/percpu.h
+@@ -115,15 +115,35 @@ do {									\
+ 	(__ret);							\
+ })
+ 
+-#define this_cpu_generic_read(pcp)					\
++#define __this_cpu_generic_read_nopreempt(pcp)				\
+ ({									\
+ 	typeof(pcp) __ret;						\
+ 	preempt_disable_notrace();					\
+-	__ret = raw_cpu_generic_read(pcp);				\
++	__ret = READ_ONCE(*raw_cpu_ptr(&(pcp)));			\
+ 	preempt_enable_notrace();					\
+ 	__ret;								\
+ })
+ 
++#define __this_cpu_generic_read_noirq(pcp)				\
++({									\
++	typeof(pcp) __ret;						\
++	unsigned long __flags;						\
++	raw_local_irq_save(__flags);					\
++	__ret = raw_cpu_generic_read(pcp);				\
++	raw_local_irq_restore(__flags);					\
++	__ret;								\
++})
++
++#define this_cpu_generic_read(pcp)					\
++({									\
++	typeof(pcp) __ret;						\
++	if (__native_word(pcp))						\
++		__ret = __this_cpu_generic_read_nopreempt(pcp);		\
++	else								\
++		__ret = __this_cpu_generic_read_noirq(pcp);		\
++	__ret;								\
++})
++
+ #define this_cpu_generic_to_op(pcp, val, op)				\
+ do {									\
+ 	unsigned long __flags;						\
+diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
+index 5ba430cc9a87..1fc7abd28b0b 100644
+--- a/include/linux/iio/adc/ad_sigma_delta.h
++++ b/include/linux/iio/adc/ad_sigma_delta.h
+@@ -111,6 +111,9 @@ int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
+ int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
+ 	unsigned int size, unsigned int *val);
+ 
++int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
++	unsigned int reset_length);
++
+ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
+ 	const struct iio_chan_spec *chan, int *val);
+ int ad_sd_calibrate_all(struct ad_sigma_delta *sigma_delta,
+diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
+index 7b2e31b1745a..6866e8126982 100644
+--- a/include/linux/mmu_notifier.h
++++ b/include/linux/mmu_notifier.h
+@@ -400,6 +400,11 @@ extern void mmu_notifier_synchronize(void);
+ 
+ #else /* CONFIG_MMU_NOTIFIER */
+ 
++static inline int mm_has_notifiers(struct mm_struct *mm)
++{
++	return 0;
++}
++
+ static inline void mmu_notifier_release(struct mm_struct *mm)
+ {
+ }
+diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
+index 5012b524283d..60248d644b6f 100644
+--- a/include/linux/trace_events.h
++++ b/include/linux/trace_events.h
+@@ -277,6 +277,7 @@ struct trace_event_call {
+ 	int				perf_refcount;
+ 	struct hlist_head __percpu	*perf_events;
+ 	struct bpf_prog			*prog;
++	struct perf_event		*bpf_prog_owner;
+ 
+ 	int	(*perf_perm)(struct trace_event_call *,
+ 			     struct perf_event *);
+diff --git a/include/net/netlink.h b/include/net/netlink.h
+index ef8e6c3a80a6..4c72c7866da5 100644
+--- a/include/net/netlink.h
++++ b/include/net/netlink.h
+@@ -768,7 +768,10 @@ static inline int nla_parse_nested(struct nlattr *tb[], int maxtype,
+  */
+ static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
+ {
+-	return nla_put(skb, attrtype, sizeof(u8), &value);
++	/* temporary variables to work around GCC PR81715 with asan-stack=1 */
++	u8 tmp = value;
++
++	return nla_put(skb, attrtype, sizeof(u8), &tmp);
+ }
+ 
+ /**
+@@ -779,7 +782,9 @@ static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
+  */
+ static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
+ {
+-	return nla_put(skb, attrtype, sizeof(u16), &value);
++	u16 tmp = value;
++
++	return nla_put(skb, attrtype, sizeof(u16), &tmp);
+ }
+ 
+ /**
+@@ -790,7 +795,9 @@ static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
+  */
+ static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
+ {
+-	return nla_put(skb, attrtype, sizeof(__be16), &value);
++	__be16 tmp = value;
++
++	return nla_put(skb, attrtype, sizeof(__be16), &tmp);
+ }
+ 
+ /**
+@@ -801,7 +808,9 @@ static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
+  */
+ static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
+ {
+-	return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, value);
++	__be16 tmp = value;
++
++	return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
+ }
+ 
+ /**
+@@ -812,7 +821,9 @@ static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
+  */
+ static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
+ {
+-	return nla_put(skb, attrtype, sizeof(__le16), &value);
++	__le16 tmp = value;
++
++	return nla_put(skb, attrtype, sizeof(__le16), &tmp);
+ }
+ 
+ /**
+@@ -823,7 +834,9 @@ static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
+  */
+ static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
+ {
+-	return nla_put(skb, attrtype, sizeof(u32), &value);
++	u32 tmp = value;
++
++	return nla_put(skb, attrtype, sizeof(u32), &tmp);
+ }
+ 
+ /**
+@@ -834,7 +847,9 @@ static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
+  */
+ static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
+ {
+-	return nla_put(skb, attrtype, sizeof(__be32), &value);
++	__be32 tmp = value;
++
++	return nla_put(skb, attrtype, sizeof(__be32), &tmp);
+ }
+ 
+ /**
+@@ -845,7 +860,9 @@ static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
+  */
+ static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
+ {
+-	return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, value);
++	__be32 tmp = value;
++
++	return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
+ }
+ 
+ /**
+@@ -856,7 +873,9 @@ static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
+  */
+ static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
+ {
+-	return nla_put(skb, attrtype, sizeof(__le32), &value);
++	__le32 tmp = value;
++
++	return nla_put(skb, attrtype, sizeof(__le32), &tmp);
+ }
+ 
+ /**
+@@ -869,7 +888,9 @@ static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
+ static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype,
+ 				    u64 value, int padattr)
+ {
+-	return nla_put_64bit(skb, attrtype, sizeof(u64), &value, padattr);
++	u64 tmp = value;
++
++	return nla_put_64bit(skb, attrtype, sizeof(u64), &tmp, padattr);
+ }
+ 
+ /**
+@@ -882,7 +903,9 @@ static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype,
+ static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value,
+ 			       int padattr)
+ {
+-	return nla_put_64bit(skb, attrtype, sizeof(__be64), &value, padattr);
++	__be64 tmp = value;
++
++	return nla_put_64bit(skb, attrtype, sizeof(__be64), &tmp, padattr);
+ }
+ 
+ /**
+@@ -895,7 +918,9 @@ static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value,
+ static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value,
+ 				int padattr)
+ {
+-	return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value,
++	__be64 tmp = value;
++
++	return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, tmp,
+ 			    padattr);
+ }
+ 
+@@ -909,7 +934,9 @@ static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value,
+ static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value,
+ 			       int padattr)
+ {
+-	return nla_put_64bit(skb, attrtype, sizeof(__le64), &value, padattr);
++	__le64 tmp = value;
++
++	return nla_put_64bit(skb, attrtype, sizeof(__le64), &tmp, padattr);
+ }
+ 
+ /**
+@@ -920,7 +947,9 @@ static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value,
+  */
+ static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
+ {
+-	return nla_put(skb, attrtype, sizeof(s8), &value);
++	s8 tmp = value;
++
++	return nla_put(skb, attrtype, sizeof(s8), &tmp);
+ }
+ 
+ /**
+@@ -931,7 +960,9 @@ static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
+  */
+ static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
+ {
+-	return nla_put(skb, attrtype, sizeof(s16), &value);
++	s16 tmp = value;
++
++	return nla_put(skb, attrtype, sizeof(s16), &tmp);
+ }
+ 
+ /**
+@@ -942,7 +973,9 @@ static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
+  */
+ static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
+ {
+-	return nla_put(skb, attrtype, sizeof(s32), &value);
++	s32 tmp = value;
++
++	return nla_put(skb, attrtype, sizeof(s32), &tmp);
+ }
+ 
+ /**
+@@ -955,7 +988,9 @@ static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
+ static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value,
+ 			      int padattr)
+ {
+-	return nla_put_64bit(skb, attrtype, sizeof(s64), &value, padattr);
++	s64 tmp = value;
++
++	return nla_put_64bit(skb, attrtype, sizeof(s64), &tmp, padattr);
+ }
+ 
+ /**
+@@ -1005,7 +1040,9 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
+ static inline int nla_put_in_addr(struct sk_buff *skb, int attrtype,
+ 				  __be32 addr)
+ {
+-	return nla_put_be32(skb, attrtype, addr);
++	__be32 tmp = addr;
++
++	return nla_put_be32(skb, attrtype, tmp);
+ }
+ 
+ /**
+diff --git a/include/net/protocol.h b/include/net/protocol.h
+index 65ba335b0e7e..4fc75f7ae23b 100644
+--- a/include/net/protocol.h
++++ b/include/net/protocol.h
+@@ -39,8 +39,8 @@
+ 
+ /* This is used to register protocols. */
+ struct net_protocol {
+-	void			(*early_demux)(struct sk_buff *skb);
+-	void                    (*early_demux_handler)(struct sk_buff *skb);
++	int			(*early_demux)(struct sk_buff *skb);
++	int			(*early_demux_handler)(struct sk_buff *skb);
+ 	int			(*handler)(struct sk_buff *skb);
+ 	void			(*err_handler)(struct sk_buff *skb, u32 info);
+ 	unsigned int		no_policy:1,
+diff --git a/include/net/route.h b/include/net/route.h
+index cb0a76d9dde1..58458966e31e 100644
+--- a/include/net/route.h
++++ b/include/net/route.h
+@@ -175,7 +175,9 @@ static inline struct rtable *ip_route_output_gre(struct net *net, struct flowi4
+ 	fl4->fl4_gre_key = gre_key;
+ 	return ip_route_output_key(net, fl4);
+ }
+-
++int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
++			  u8 tos, struct net_device *dev,
++			  struct in_device *in_dev, u32 *itag);
+ int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src,
+ 			 u8 tos, struct net_device *devin);
+ int ip_route_input_rcu(struct sk_buff *skb, __be32 dst, __be32 src,
+diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
+index 1060494ac230..b8c86ec1a8f5 100644
+--- a/include/net/sctp/ulpevent.h
++++ b/include/net/sctp/ulpevent.h
+@@ -153,8 +153,12 @@ __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event);
+ static inline int sctp_ulpevent_type_enabled(__u16 sn_type,
+ 					     struct sctp_event_subscribe *mask)
+ {
++	int offset = sn_type - SCTP_SN_TYPE_BASE;
+ 	char *amask = (char *) mask;
+-	return amask[sn_type - SCTP_SN_TYPE_BASE];
++
++	if (offset >= sizeof(struct sctp_event_subscribe))
++		return 0;
++	return amask[offset];
+ }
+ 
+ /* Given an event subscription, is this event enabled? */
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index f642a39f9eee..48978125947b 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -347,7 +347,7 @@ void tcp_v4_err(struct sk_buff *skb, u32);
+ 
+ void tcp_shutdown(struct sock *sk, int how);
+ 
+-void tcp_v4_early_demux(struct sk_buff *skb);
++int tcp_v4_early_demux(struct sk_buff *skb);
+ int tcp_v4_rcv(struct sk_buff *skb);
+ 
+ int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
+diff --git a/include/net/udp.h b/include/net/udp.h
+index 626c2d8a70c5..1e6b2476d427 100644
+--- a/include/net/udp.h
++++ b/include/net/udp.h
+@@ -259,7 +259,7 @@ static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
+ 	return __skb_recv_udp(sk, flags, noblock, &peeked, &off, err);
+ }
+ 
+-void udp_v4_early_demux(struct sk_buff *skb);
++int udp_v4_early_demux(struct sk_buff *skb);
+ bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
+ int udp_get_port(struct sock *sk, unsigned short snum,
+ 		 int (*saddr_cmp)(const struct sock *,
+diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
+index 0979a5f3b69a..6ff1bab23679 100644
+--- a/include/scsi/scsi_device.h
++++ b/include/scsi/scsi_device.h
+@@ -182,6 +182,7 @@ struct scsi_device {
+ 	unsigned no_dif:1;	/* T10 PI (DIF) should be disabled */
+ 	unsigned broken_fua:1;		/* Don't set FUA bit */
+ 	unsigned lun_in_cdb:1;		/* Store LUN bits in CDB[1] */
++	unsigned unmap_limit_for_ws:1;	/* Use the UNMAP limit for WRITE SAME */
+ 
+ 	atomic_t disk_events_disable_depth; /* disable depth for disk events */
+ 
+diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
+index 9592570e092a..36b03013d629 100644
+--- a/include/scsi/scsi_devinfo.h
++++ b/include/scsi/scsi_devinfo.h
+@@ -29,5 +29,6 @@
+ #define BLIST_TRY_VPD_PAGES	0x10000000 /* Attempt to read VPD pages */
+ #define BLIST_NO_RSOC		0x20000000 /* don't try to issue RSOC */
+ #define BLIST_MAX_1024		0x40000000 /* maximum 1024 sector cdb length */
++#define BLIST_UNMAP_LIMIT_WS	0x80000000 /* Use UNMAP limit for WRITE SAME */
+ 
+ #endif
+diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
+index e99e3e6f8b37..f0add86219f0 100644
+--- a/include/uapi/linux/bpf.h
++++ b/include/uapi/linux/bpf.h
+@@ -294,7 +294,7 @@ union bpf_attr {
+  *     jump into another BPF program
+  *     @ctx: context pointer passed to next program
+  *     @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY
+- *     @index: index inside array that selects specific program to run
++ *     @index: 32-bit index inside array that selects specific program to run
+  *     Return: 0 on success or negative error
+  *
+  * int bpf_clone_redirect(skb, ifindex, flags)
+diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
+index 412c06a624c8..ccaea525340b 100644
+--- a/include/uapi/linux/dm-ioctl.h
++++ b/include/uapi/linux/dm-ioctl.h
+@@ -269,9 +269,9 @@ enum {
+ #define DM_DEV_SET_GEOMETRY	_IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
+ 
+ #define DM_VERSION_MAJOR	4
+-#define DM_VERSION_MINOR	36
++#define DM_VERSION_MINOR	37
+ #define DM_VERSION_PATCHLEVEL	0
+-#define DM_VERSION_EXTRA	"-ioctl (2017-06-09)"
++#define DM_VERSION_EXTRA	"-ioctl (2017-09-20)"
+ 
+ /* Status bits */
+ #define DM_READONLY_FLAG	(1 << 0) /* In/Out */
+diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
+index ce1169af39d7..2a5d63040a0b 100644
+--- a/include/uapi/linux/usb/ch9.h
++++ b/include/uapi/linux/usb/ch9.h
+@@ -780,6 +780,7 @@ struct usb_interface_assoc_descriptor {
+ 	__u8  iFunction;
+ } __attribute__ ((packed));
+ 
++#define USB_DT_INTERFACE_ASSOCIATION_SIZE	8
+ 
+ /*-------------------------------------------------------------------------*/
+ 
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index ad5f55922a13..9a1bed1f3029 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -1010,7 +1010,7 @@ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
+ 		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
+ 		struct bpf_array *array = container_of(map, struct bpf_array, map);
+ 		struct bpf_prog *prog;
+-		u64 index = BPF_R3;
++		u32 index = BPF_R3;
+ 
+ 		if (unlikely(index >= array->map.max_entries))
+ 			goto out;
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 6c772adabad2..1939d91da1f8 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -144,15 +144,17 @@ static int bpf_map_alloc_id(struct bpf_map *map)
+ 
+ static void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
+ {
++	unsigned long flags;
++
+ 	if (do_idr_lock)
+-		spin_lock_bh(&map_idr_lock);
++		spin_lock_irqsave(&map_idr_lock, flags);
+ 	else
+ 		__acquire(&map_idr_lock);
+ 
+ 	idr_remove(&map_idr, map->id);
+ 
+ 	if (do_idr_lock)
+-		spin_unlock_bh(&map_idr_lock);
++		spin_unlock_irqrestore(&map_idr_lock, flags);
+ 	else
+ 		__release(&map_idr_lock);
+ }
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 664d93972373..3940019b9740 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1978,7 +1978,8 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
+ 			}
+ 		} else {
+ 			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
+-			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64)) {
++			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
++			    BPF_CLASS(insn->code) == BPF_ALU64) {
+ 				verbose("BPF_END uses reserved fields\n");
+ 				return -EINVAL;
+ 			}
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index df2e0f14a95d..6d60aafbe8c1 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -2168,6 +2168,14 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx)
+ 		list_del_init(&cset->mg_node);
+ 	}
+ 	spin_unlock_irq(&css_set_lock);
++
++	/*
++	 * Re-initialize the cgroup_taskset structure in case it is reused
++	 * again in another cgroup_migrate_add_task()/cgroup_migrate_execute()
++	 * iteration.
++	 */
++	tset->nr_tasks = 0;
++	tset->csets    = &tset->src_csets;
+ 	return ret;
+ }
+ 
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 03ac9c8b02fb..7242a6e1ec76 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -8121,6 +8121,7 @@ static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
+ 		}
+ 	}
+ 	event->tp_event->prog = prog;
++	event->tp_event->bpf_prog_owner = event;
+ 
+ 	return 0;
+ }
+@@ -8135,7 +8136,7 @@ static void perf_event_free_bpf_prog(struct perf_event *event)
+ 		return;
+ 
+ 	prog = event->tp_event->prog;
+-	if (prog) {
++	if (prog && event->tp_event->bpf_prog_owner == event) {
+ 		event->tp_event->prog = NULL;
+ 		bpf_prog_put(prog);
+ 	}
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 725819569fa7..2ee3e3345ff3 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -4954,9 +4954,6 @@ static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
+ static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
+ static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
+ 
+-static unsigned long save_global_trampoline;
+-static unsigned long save_global_flags;
+-
+ static int __init set_graph_function(char *str)
+ {
+ 	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
+@@ -6756,17 +6753,6 @@ void unregister_ftrace_graph(void)
+ 	unregister_pm_notifier(&ftrace_suspend_notifier);
+ 	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
+ 
+-#ifdef CONFIG_DYNAMIC_FTRACE
+-	/*
+-	 * Function graph does not allocate the trampoline, but
+-	 * other global_ops do. We need to reset the ALLOC_TRAMP flag
+-	 * if one was used.
+-	 */
+-	global_ops.trampoline = save_global_trampoline;
+-	if (save_global_flags & FTRACE_OPS_FL_ALLOC_TRAMP)
+-		global_ops.flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
+-#endif
+-
+  out:
+ 	mutex_unlock(&ftrace_lock);
+ }
+diff --git a/lib/ratelimit.c b/lib/ratelimit.c
+index 08f8043cac61..d01f47135239 100644
+--- a/lib/ratelimit.c
++++ b/lib/ratelimit.c
+@@ -48,7 +48,9 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
+ 	if (time_is_before_jiffies(rs->begin + rs->interval)) {
+ 		if (rs->missed) {
+ 			if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
+-				pr_warn("%s: %d callbacks suppressed\n", func, rs->missed);
++				printk_deferred(KERN_WARNING
++						"%s: %d callbacks suppressed\n",
++						func, rs->missed);
+ 				rs->missed = 0;
+ 			}
+ 		}
+diff --git a/mm/ksm.c b/mm/ksm.c
+index db20f8436bc3..86f0db3d6cdb 100644
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -1990,6 +1990,7 @@ static void stable_tree_append(struct rmap_item *rmap_item,
+  */
+ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
+ {
++	struct mm_struct *mm = rmap_item->mm;
+ 	struct rmap_item *tree_rmap_item;
+ 	struct page *tree_page = NULL;
+ 	struct stable_node *stable_node;
+@@ -2062,9 +2063,11 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
+ 	if (ksm_use_zero_pages && (checksum == zero_checksum)) {
+ 		struct vm_area_struct *vma;
+ 
+-		vma = find_mergeable_vma(rmap_item->mm, rmap_item->address);
++		down_read(&mm->mmap_sem);
++		vma = find_mergeable_vma(mm, rmap_item->address);
+ 		err = try_to_merge_one_page(vma, page,
+ 					    ZERO_PAGE(rmap_item->address));
++		up_read(&mm->mmap_sem);
+ 		/*
+ 		 * In case of failure, the page was not really empty, so we
+ 		 * need to continue. Otherwise we're done.
+diff --git a/mm/madvise.c b/mm/madvise.c
+index 4d7d1e5ddba9..4edca1d86339 100644
+--- a/mm/madvise.c
++++ b/mm/madvise.c
+@@ -614,18 +614,26 @@ static int madvise_inject_error(int behavior,
+ {
+ 	struct page *page;
+ 	struct zone *zone;
++	unsigned int order;
+ 
+ 	if (!capable(CAP_SYS_ADMIN))
+ 		return -EPERM;
+ 
+-	for (; start < end; start += PAGE_SIZE <<
+-				compound_order(compound_head(page))) {
++
++	for (; start < end; start += PAGE_SIZE << order) {
+ 		int ret;
+ 
+ 		ret = get_user_pages_fast(start, 1, 0, &page);
+ 		if (ret != 1)
+ 			return ret;
+ 
++		/*
++		 * When soft offlining hugepages, after migrating the page
++		 * we dissolve it, therefore in the second loop "page" will
++		 * no longer be a compound page, and order will be 0.
++		 */
++		order = compound_order(compound_head(page));
++
+ 		if (PageHWPoison(page)) {
+ 			put_page(page);
+ 			continue;
+diff --git a/mm/oom_kill.c b/mm/oom_kill.c
+index 9e8b4f030c1c..5f6a52903770 100644
+--- a/mm/oom_kill.c
++++ b/mm/oom_kill.c
+@@ -40,6 +40,7 @@
+ #include <linux/ratelimit.h>
+ #include <linux/kthread.h>
+ #include <linux/init.h>
++#include <linux/mmu_notifier.h>
+ 
+ #include <asm/tlb.h>
+ #include "internal.h"
+@@ -494,6 +495,21 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
+ 		goto unlock_oom;
+ 	}
+ 
++	/*
++	 * If the mm has notifiers then we would need to invalidate them around
++	 * unmap_page_range and that is risky because notifiers can sleep and
++	 * what they do is basically undeterministic.  So let's have a short
++	 * sleep to give the oom victim some more time.
++	 * TODO: we really want to get rid of this ugly hack and make sure that
++	 * notifiers cannot block for unbounded amount of time and add
++	 * mmu_notifier_invalidate_range_{start,end} around unmap_page_range
++	 */
++	if (mm_has_notifiers(mm)) {
++		up_read(&mm->mmap_sem);
++		schedule_timeout_idle(HZ);
++		goto unlock_oom;
++	}
++
+ 	/*
+ 	 * increase mm_users only after we know we will reap something so
+ 	 * that the mmput_async is called only when we have reaped something
+diff --git a/mm/rodata_test.c b/mm/rodata_test.c
+index 6bb4deb12e78..d908c8769b48 100644
+--- a/mm/rodata_test.c
++++ b/mm/rodata_test.c
+@@ -14,7 +14,7 @@
+ #include <linux/uaccess.h>
+ #include <asm/sections.h>
+ 
+-const int rodata_test_data = 0xC3;
++static const int rodata_test_data = 0xC3;
+ 
+ void rodata_test(void)
+ {
+diff --git a/mm/swap.c b/mm/swap.c
+index 60b1d2a75852..ea84f04d75a4 100644
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -575,7 +575,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
+ 			    void *arg)
+ {
+ 	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
+-	    !PageUnevictable(page)) {
++	    !PageSwapCache(page) && !PageUnevictable(page)) {
+ 		bool active = PageActive(page);
+ 
+ 		del_page_from_lru_list(page, lruvec,
+@@ -665,7 +665,7 @@ void deactivate_file_page(struct page *page)
+ void mark_page_lazyfree(struct page *page)
+ {
+ 	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
+-	    !PageUnevictable(page)) {
++	    !PageSwapCache(page) && !PageUnevictable(page)) {
+ 		struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);
+ 
+ 		get_page(page);
+diff --git a/mm/swap_state.c b/mm/swap_state.c
+index b68c93014f50..fe9309ba948c 100644
+--- a/mm/swap_state.c
++++ b/mm/swap_state.c
+@@ -219,6 +219,17 @@ int add_to_swap(struct page *page)
+ 		 * clear SWAP_HAS_CACHE flag.
+ 		 */
+ 		goto fail;
++	/*
++	 * Normally the page will be dirtied in unmap because its pte should be
++	 * dirty. A special case is MADV_FREE page. The page'e pte could have
++	 * dirty bit cleared but the page's SwapBacked bit is still set because
++	 * clearing the dirty bit and SwapBacked bit has no lock protected. For
++	 * such page, unmap will not set dirty bit for it, so page reclaim will
++	 * not write the page out. This can cause data corruption when the page
++	 * is swap in later. Always setting the dirty bit for the page solves
++	 * the problem.
++	 */
++	set_page_dirty(page);
+ 
+ 	return 1;
+ 
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 86b4b0a79e7a..6fa30a4c60ef 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4408,6 +4408,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
+ 		__skb_pull(skb, off);
+ 	else if (off < 0)
+ 		__skb_push(skb, -off);
++	skb->mac_header += off;
+ 
+ 	switch (act) {
+ 	case XDP_TX:
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 169974998c76..18d591f1ae5a 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -975,10 +975,14 @@ static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
+ 
+ bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
+ {
+-	bool ret = __sk_filter_charge(sk, fp);
+-	if (ret)
+-		refcount_inc(&fp->refcnt);
+-	return ret;
++	if (!refcount_inc_not_zero(&fp->refcnt))
++		return false;
++
++	if (!__sk_filter_charge(sk, fp)) {
++		sk_filter_release(fp);
++		return false;
++	}
++	return true;
+ }
+ 
+ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
+diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
+index 0385dece1f6f..7c1ffd6f9501 100644
+--- a/net/core/gen_estimator.c
++++ b/net/core/gen_estimator.c
+@@ -83,10 +83,10 @@ static void est_timer(unsigned long arg)
+ 	u64 rate, brate;
+ 
+ 	est_fetch_counters(est, &b);
+-	brate = (b.bytes - est->last_bytes) << (8 - est->ewma_log);
++	brate = (b.bytes - est->last_bytes) << (10 - est->ewma_log - est->intvl_log);
+ 	brate -= (est->avbps >> est->ewma_log);
+ 
+-	rate = (u64)(b.packets - est->last_packets) << (8 - est->ewma_log);
++	rate = (u64)(b.packets - est->last_packets) << (10 - est->ewma_log - est->intvl_log);
+ 	rate -= (est->avpps >> est->ewma_log);
+ 
+ 	write_seqcount_begin(&est->seq);
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 9201e3621351..e07c8847c6cf 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -3867,6 +3867,9 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
+ 		return -EMSGSIZE;
+ 
+ 	ifsm = nlmsg_data(nlh);
++	ifsm->family = PF_UNSPEC;
++	ifsm->pad1 = 0;
++	ifsm->pad2 = 0;
+ 	ifsm->ifindex = dev->ifindex;
+ 	ifsm->filter_mask = filter_mask;
+ 
+diff --git a/net/core/sock.c b/net/core/sock.c
+index ac2a404c73eb..0967da925022 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1646,6 +1646,8 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
+ 
+ 		sock_copy(newsk, sk);
+ 
++		newsk->sk_prot_creator = sk->sk_prot;
++
+ 		/* SANITY */
+ 		if (likely(newsk->sk_net_refcnt))
+ 			get_net(sock_net(newsk));
+@@ -1673,13 +1675,16 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
+ 
+ 		sock_reset_flag(newsk, SOCK_DONE);
+ 
+-		filter = rcu_dereference_protected(newsk->sk_filter, 1);
++		rcu_read_lock();
++		filter = rcu_dereference(sk->sk_filter);
+ 		if (filter != NULL)
+ 			/* though it's an empty new sock, the charging may fail
+ 			 * if sysctl_optmem_max was changed between creation of
+ 			 * original socket and cloning
+ 			 */
+ 			is_charged = sk_filter_charge(newsk, filter);
++		RCU_INIT_POINTER(newsk->sk_filter, filter);
++		rcu_read_unlock();
+ 
+ 		if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
+ 			/* We need to make sure that we don't uncharge the new
+diff --git a/net/dsa/slave.c b/net/dsa/slave.c
+index 9507bd38cf04..07677540129a 100644
+--- a/net/dsa/slave.c
++++ b/net/dsa/slave.c
+@@ -1180,26 +1180,32 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
+ 	p->old_duplex = -1;
+ 
+ 	ds->ports[port].netdev = slave_dev;
+-	ret = register_netdev(slave_dev);
+-	if (ret) {
+-		netdev_err(master, "error %d registering interface %s\n",
+-			   ret, slave_dev->name);
+-		ds->ports[port].netdev = NULL;
+-		free_netdev(slave_dev);
+-		return ret;
+-	}
+ 
+ 	netif_carrier_off(slave_dev);
+ 
+ 	ret = dsa_slave_phy_setup(p, slave_dev);
+ 	if (ret) {
+ 		netdev_err(master, "error %d setting up slave phy\n", ret);
+-		unregister_netdev(slave_dev);
+-		free_netdev(slave_dev);
+-		return ret;
++		goto out_free;
++	}
++
++	ret = register_netdev(slave_dev);
++	if (ret) {
++		netdev_err(master, "error %d registering interface %s\n",
++			   ret, slave_dev->name);
++		goto out_phy;
+ 	}
+ 
+ 	return 0;
++
++out_phy:
++	phy_disconnect(p->phy);
++	if (of_phy_is_fixed_link(p->dp->dn))
++		of_phy_deregister_fixed_link(p->dp->dn);
++out_free:
++	free_netdev(slave_dev);
++	ds->ports[port].netdev = NULL;
++	return ret;
+ }
+ 
+ void dsa_slave_destroy(struct net_device *slave_dev)
+diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
+index fa2dc8f692c6..57fc13c6ab2b 100644
+--- a/net/ipv4/ip_input.c
++++ b/net/ipv4/ip_input.c
+@@ -311,9 +311,10 @@ static inline bool ip_rcv_options(struct sk_buff *skb)
+ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
+ 	const struct iphdr *iph = ip_hdr(skb);
+-	struct rtable *rt;
++	int (*edemux)(struct sk_buff *skb);
+ 	struct net_device *dev = skb->dev;
+-	void (*edemux)(struct sk_buff *skb);
++	struct rtable *rt;
++	int err;
+ 
+ 	/* if ingress device is enslaved to an L3 master device pass the
+ 	 * skb to its handler for processing
+@@ -331,7 +332,9 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 
+ 		ipprot = rcu_dereference(inet_protos[protocol]);
+ 		if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) {
+-			edemux(skb);
++			err = edemux(skb);
++			if (unlikely(err))
++				goto drop_error;
+ 			/* must reload iph, skb->head might have changed */
+ 			iph = ip_hdr(skb);
+ 		}
+@@ -342,13 +345,10 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 	 *	how the packet travels inside Linux networking.
+ 	 */
+ 	if (!skb_valid_dst(skb)) {
+-		int err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
+-					       iph->tos, dev);
+-		if (unlikely(err)) {
+-			if (err == -EXDEV)
+-				__NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
+-			goto drop;
+-		}
++		err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
++					   iph->tos, dev);
++		if (unlikely(err))
++			goto drop_error;
+ 	}
+ 
+ #ifdef CONFIG_IP_ROUTE_CLASSID
+@@ -399,6 +399,11 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+ drop:
+ 	kfree_skb(skb);
+ 	return NET_RX_DROP;
++
++drop_error:
++	if (err == -EXDEV)
++		__NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
++	goto drop;
+ }
+ 
+ /*
+diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
+index 0192c255e508..74bd46c5bda7 100644
+--- a/net/ipv4/ip_vti.c
++++ b/net/ipv4/ip_vti.c
+@@ -168,6 +168,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
+ 	struct ip_tunnel_parm *parms = &tunnel->parms;
+ 	struct dst_entry *dst = skb_dst(skb);
+ 	struct net_device *tdev;	/* Device to other host */
++	int pkt_len = skb->len;
+ 	int err;
+ 	int mtu;
+ 
+@@ -229,7 +230,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
+ 
+ 	err = dst_output(tunnel->net, skb->sk, skb);
+ 	if (net_xmit_eval(err) == 0)
+-		err = skb->len;
++		err = pkt_len;
+ 	iptunnel_xmit_stats(dev, err);
+ 	return NETDEV_TX_OK;
+ 
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 2331de20ca50..c5aa25be7108 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1520,43 +1520,56 @@ struct rtable *rt_dst_alloc(struct net_device *dev,
+ EXPORT_SYMBOL(rt_dst_alloc);
+ 
+ /* called in rcu_read_lock() section */
+-static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+-				u8 tos, struct net_device *dev, int our)
++int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
++			  u8 tos, struct net_device *dev,
++			  struct in_device *in_dev, u32 *itag)
+ {
+-	struct rtable *rth;
+-	struct in_device *in_dev = __in_dev_get_rcu(dev);
+-	unsigned int flags = RTCF_MULTICAST;
+-	u32 itag = 0;
+ 	int err;
+ 
+ 	/* Primary sanity checks. */
+-
+ 	if (!in_dev)
+ 		return -EINVAL;
+ 
+ 	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
+ 	    skb->protocol != htons(ETH_P_IP))
+-		goto e_inval;
++		return -EINVAL;
+ 
+ 	if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
+-		goto e_inval;
++		return -EINVAL;
+ 
+ 	if (ipv4_is_zeronet(saddr)) {
+ 		if (!ipv4_is_local_multicast(daddr))
+-			goto e_inval;
++			return -EINVAL;
+ 	} else {
+ 		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
+-					  in_dev, &itag);
++					  in_dev, itag);
+ 		if (err < 0)
+-			goto e_err;
++			return err;
+ 	}
++	return 0;
++}
++
++/* called in rcu_read_lock() section */
++static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
++			     u8 tos, struct net_device *dev, int our)
++{
++	struct in_device *in_dev = __in_dev_get_rcu(dev);
++	unsigned int flags = RTCF_MULTICAST;
++	struct rtable *rth;
++	u32 itag = 0;
++	int err;
++
++	err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);
++	if (err)
++		return err;
++
+ 	if (our)
+ 		flags |= RTCF_LOCAL;
+ 
+ 	rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
+ 			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
+ 	if (!rth)
+-		goto e_nobufs;
++		return -ENOBUFS;
+ 
+ #ifdef CONFIG_IP_ROUTE_CLASSID
+ 	rth->dst.tclassid = itag;
+@@ -1572,13 +1585,6 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ 
+ 	skb_dst_set(skb, &rth->dst);
+ 	return 0;
+-
+-e_nobufs:
+-	return -ENOBUFS;
+-e_inval:
+-	return -EINVAL;
+-e_err:
+-	return err;
+ }
+ 
+ 
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 21022db7a2a6..b1441bc8192f 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1504,23 +1504,23 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
+ }
+ EXPORT_SYMBOL(tcp_v4_do_rcv);
+ 
+-void tcp_v4_early_demux(struct sk_buff *skb)
++int tcp_v4_early_demux(struct sk_buff *skb)
+ {
+ 	const struct iphdr *iph;
+ 	const struct tcphdr *th;
+ 	struct sock *sk;
+ 
+ 	if (skb->pkt_type != PACKET_HOST)
+-		return;
++		return 0;
+ 
+ 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
+-		return;
++		return 0;
+ 
+ 	iph = ip_hdr(skb);
+ 	th = tcp_hdr(skb);
+ 
+ 	if (th->doff < sizeof(struct tcphdr) / 4)
+-		return;
++		return 0;
+ 
+ 	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
+ 				       iph->saddr, th->source,
+@@ -1539,6 +1539,7 @@ void tcp_v4_early_demux(struct sk_buff *skb)
+ 				skb_dst_set_noref(skb, dst);
+ 		}
+ 	}
++	return 0;
+ }
+ 
+ /* Packet is added to VJ-style prequeue for processing in process
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index b7661a68d498..40f7c8ee9ba6 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -991,6 +991,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
+ 	struct tcp_skb_cb *tcb;
+ 	struct tcp_out_options opts;
+ 	unsigned int tcp_options_size, tcp_header_size;
++	struct sk_buff *oskb = NULL;
+ 	struct tcp_md5sig_key *md5;
+ 	struct tcphdr *th;
+ 	int err;
+@@ -998,12 +999,10 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
+ 	BUG_ON(!skb || !tcp_skb_pcount(skb));
+ 	tp = tcp_sk(sk);
+ 
+-	skb->skb_mstamp = tp->tcp_mstamp;
+ 	if (clone_it) {
+ 		TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
+ 			- tp->snd_una;
+-		tcp_rate_skb_sent(sk, skb);
+-
++		oskb = skb;
+ 		if (unlikely(skb_cloned(skb)))
+ 			skb = pskb_copy(skb, gfp_mask);
+ 		else
+@@ -1011,6 +1010,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
+ 		if (unlikely(!skb))
+ 			return -ENOBUFS;
+ 	}
++	skb->skb_mstamp = tp->tcp_mstamp;
+ 
+ 	inet = inet_sk(sk);
+ 	tcb = TCP_SKB_CB(skb);
+@@ -1122,12 +1122,15 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
+ 
+ 	err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
+ 
+-	if (likely(err <= 0))
+-		return err;
+-
+-	tcp_enter_cwr(sk);
+-
+-	return net_xmit_eval(err);
++	if (unlikely(err > 0)) {
++		tcp_enter_cwr(sk);
++		err = net_xmit_eval(err);
++	}
++	if (!err && oskb) {
++		oskb->skb_mstamp = tp->tcp_mstamp;
++		tcp_rate_skb_sent(sk, oskb);
++	}
++	return err;
+ }
+ 
+ /* This routine just queues the buffer for sending.
+@@ -2866,10 +2869,11 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
+ 		     skb_headroom(skb) >= 0xFFFF)) {
+ 		struct sk_buff *nskb;
+ 
+-		skb->skb_mstamp = tp->tcp_mstamp;
+ 		nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
+ 		err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
+ 			     -ENOBUFS;
++		if (!err)
++			skb->skb_mstamp = tp->tcp_mstamp;
+ 	} else {
+ 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
+ 	}
+@@ -3416,6 +3420,10 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
+ 		goto done;
+ 	}
+ 
++	/* data was not sent, this is our new send_head */
++	sk->sk_send_head = syn_data;
++	tp->packets_out -= tcp_skb_pcount(syn_data);
++
+ fallback:
+ 	/* Send a regular SYN with Fast Open cookie request option */
+ 	if (fo->cookie.len > 0)
+@@ -3468,6 +3476,11 @@ int tcp_connect(struct sock *sk)
+ 	 */
+ 	tp->snd_nxt = tp->write_seq;
+ 	tp->pushed_seq = tp->write_seq;
++	buff = tcp_send_head(sk);
++	if (unlikely(buff)) {
++		tp->snd_nxt	= TCP_SKB_CB(buff)->seq;
++		tp->pushed_seq	= TCP_SKB_CB(buff)->seq;
++	}
+ 	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
+ 
+ 	/* Timer for repeating the SYN until an answer. */
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 979e4d8526ba..84861d71face 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -2217,9 +2217,10 @@ static struct sock *__udp4_lib_demux_lookup(struct net *net,
+ 	return NULL;
+ }
+ 
+-void udp_v4_early_demux(struct sk_buff *skb)
++int udp_v4_early_demux(struct sk_buff *skb)
+ {
+ 	struct net *net = dev_net(skb->dev);
++	struct in_device *in_dev = NULL;
+ 	const struct iphdr *iph;
+ 	const struct udphdr *uh;
+ 	struct sock *sk = NULL;
+@@ -2229,25 +2230,21 @@ void udp_v4_early_demux(struct sk_buff *skb)
+ 
+ 	/* validate the packet */
+ 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
+-		return;
++		return 0;
+ 
+ 	iph = ip_hdr(skb);
+ 	uh = udp_hdr(skb);
+ 
+-	if (skb->pkt_type == PACKET_BROADCAST ||
+-	    skb->pkt_type == PACKET_MULTICAST) {
+-		struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
++	if (skb->pkt_type == PACKET_MULTICAST) {
++		in_dev = __in_dev_get_rcu(skb->dev);
+ 
+ 		if (!in_dev)
+-			return;
++			return 0;
+ 
+-		/* we are supposed to accept bcast packets */
+-		if (skb->pkt_type == PACKET_MULTICAST) {
+-			ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
+-					       iph->protocol);
+-			if (!ours)
+-				return;
+-		}
++		ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
++				       iph->protocol);
++		if (!ours)
++			return 0;
+ 
+ 		sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
+ 						   uh->source, iph->saddr, dif);
+@@ -2257,7 +2254,7 @@ void udp_v4_early_demux(struct sk_buff *skb)
+ 	}
+ 
+ 	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
+-		return;
++		return 0;
+ 
+ 	skb->sk = sk;
+ 	skb->destructor = sock_efree;
+@@ -2266,12 +2263,23 @@ void udp_v4_early_demux(struct sk_buff *skb)
+ 	if (dst)
+ 		dst = dst_check(dst, 0);
+ 	if (dst) {
++		u32 itag = 0;
++
+ 		/* set noref for now.
+ 		 * any place which wants to hold dst has to call
+ 		 * dst_hold_safe()
+ 		 */
+ 		skb_dst_set_noref(skb, dst);
++
++		/* for unconnected multicast sockets we need to validate
++		 * the source on each packet
++		 */
++		if (!inet_sk(sk)->inet_daddr && in_dev)
++			return ip_mc_validate_source(skb, iph->daddr,
++						     iph->saddr, iph->tos,
++						     skb->dev, in_dev, &itag);
+ 	}
++	return 0;
+ }
+ 
+ int udp_rcv(struct sk_buff *skb)
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 936e9ab4dda5..ba757c28a301 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -4982,9 +4982,10 @@ static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
+ 
+ 	/* Don't send DELADDR notification for TENTATIVE address,
+ 	 * since NEWADDR notification is sent only after removing
+-	 * TENTATIVE flag.
++	 * TENTATIVE flag, if DAD has not failed.
+ 	 */
+-	if (ifa->flags & IFA_F_TENTATIVE && event == RTM_DELADDR)
++	if (ifa->flags & IFA_F_TENTATIVE && !(ifa->flags & IFA_F_DADFAILED) &&
++	    event == RTM_DELADDR)
+ 		return;
+ 
+ 	skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index b7a72d409334..1602b491b281 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -940,24 +940,25 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
+ }
+ 
+ static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
+-			unsigned short type,
+-			const void *daddr, const void *saddr, unsigned int len)
++			 unsigned short type, const void *daddr,
++			 const void *saddr, unsigned int len)
+ {
+ 	struct ip6_tnl *t = netdev_priv(dev);
+-	struct ipv6hdr *ipv6h = skb_push(skb, t->hlen);
+-	__be16 *p = (__be16 *)(ipv6h+1);
++	struct ipv6hdr *ipv6h;
++	__be16 *p;
+ 
+-	ip6_flow_hdr(ipv6h, 0,
+-		     ip6_make_flowlabel(dev_net(dev), skb,
+-					t->fl.u.ip6.flowlabel, true,
+-					&t->fl.u.ip6));
++	ipv6h = skb_push(skb, t->hlen + sizeof(*ipv6h));
++	ip6_flow_hdr(ipv6h, 0, ip6_make_flowlabel(dev_net(dev), skb,
++						  t->fl.u.ip6.flowlabel,
++						  true, &t->fl.u.ip6));
+ 	ipv6h->hop_limit = t->parms.hop_limit;
+ 	ipv6h->nexthdr = NEXTHDR_GRE;
+ 	ipv6h->saddr = t->parms.laddr;
+ 	ipv6h->daddr = t->parms.raddr;
+ 
+-	p[0]		= t->parms.o_flags;
+-	p[1]		= htons(type);
++	p = (__be16 *)(ipv6h + 1);
++	p[0] = t->parms.o_flags;
++	p[1] = htons(type);
+ 
+ 	/*
+ 	 *	Set the source hardware address.
+@@ -1310,6 +1311,7 @@ static void ip6gre_tap_setup(struct net_device *dev)
+ 	dev->features |= NETIF_F_NETNS_LOCAL;
+ 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
++	netif_keep_dst(dev);
+ }
+ 
+ static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 3a0ba2ae4b0f..4425b4411bb9 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1043,6 +1043,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
+ 	struct dst_entry *dst = NULL, *ndst = NULL;
+ 	struct net_device *tdev;
+ 	int mtu;
++	unsigned int eth_hlen = t->dev->type == ARPHRD_ETHER ? ETH_HLEN : 0;
+ 	unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
+ 	unsigned int max_headroom = psh_hlen;
+ 	bool use_cache = false;
+@@ -1124,7 +1125,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
+ 				     t->parms.name);
+ 		goto tx_err_dst_release;
+ 	}
+-	mtu = dst_mtu(dst) - psh_hlen - t->tun_hlen;
++	mtu = dst_mtu(dst) - eth_hlen - psh_hlen - t->tun_hlen;
+ 	if (encap_limit >= 0) {
+ 		max_headroom += 8;
+ 		mtu -= 8;
+@@ -1133,7 +1134,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
+ 		mtu = IPV6_MIN_MTU;
+ 	if (skb_dst(skb) && !t->parms.collect_md)
+ 		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+-	if (skb->len - t->tun_hlen > mtu && !skb_is_gso(skb)) {
++	if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
+ 		*pmtu = mtu;
+ 		err = -EMSGSIZE;
+ 		goto tx_err_dst_release;
+@@ -2258,6 +2259,9 @@ static int __init ip6_tunnel_init(void)
+ {
+ 	int  err;
+ 
++	if (!ipv6_mod_enabled())
++		return -EOPNOTSUPP;
++
+ 	err = register_pernet_device(&ip6_tnl_net_ops);
+ 	if (err < 0)
+ 		goto out_pernet;
+diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
+index 486c2305f53c..e3e3ea655464 100644
+--- a/net/ipv6/ip6_vti.c
++++ b/net/ipv6/ip6_vti.c
+@@ -445,6 +445,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
+ 	struct dst_entry *dst = skb_dst(skb);
+ 	struct net_device *tdev;
+ 	struct xfrm_state *x;
++	int pkt_len = skb->len;
+ 	int err = -1;
+ 	int mtu;
+ 
+@@ -502,7 +503,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
+ 		struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
+ 
+ 		u64_stats_update_begin(&tstats->syncp);
+-		tstats->tx_bytes += skb->len;
++		tstats->tx_bytes += pkt_len;
+ 		tstats->tx_packets++;
+ 		u64_stats_update_end(&tstats->syncp);
+ 	} else {
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index d6886228e1d0..30b4d55e88f3 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1011,6 +1011,7 @@ static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
+ 		 */
+ 		offset = skb_transport_offset(skb);
+ 		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
++		csum = skb->csum;
+ 
+ 		skb->ip_summed = CHECKSUM_NONE;
+ 
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index 90165a6874bc..525c66f1121a 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -1665,14 +1665,12 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
+ 
+ /* This function is used by the netlink TUNNEL_DELETE command.
+  */
+-int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
++void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
+ {
+-	l2tp_tunnel_inc_refcount(tunnel);
+-	if (false == queue_work(l2tp_wq, &tunnel->del_work)) {
+-		l2tp_tunnel_dec_refcount(tunnel);
+-		return 1;
++	if (!test_and_set_bit(0, &tunnel->dead)) {
++		l2tp_tunnel_inc_refcount(tunnel);
++		queue_work(l2tp_wq, &tunnel->del_work);
+ 	}
+-	return 0;
+ }
+ EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
+ 
+diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
+index 9101297f27ad..7c5a51f62afc 100644
+--- a/net/l2tp/l2tp_core.h
++++ b/net/l2tp/l2tp_core.h
+@@ -160,6 +160,9 @@ struct l2tp_tunnel_cfg {
+ 
+ struct l2tp_tunnel {
+ 	int			magic;		/* Should be L2TP_TUNNEL_MAGIC */
++
++	unsigned long		dead;
++
+ 	struct rcu_head rcu;
+ 	rwlock_t		hlist_lock;	/* protect session_hlist */
+ 	struct hlist_head	session_hlist[L2TP_HASH_SIZE];
+@@ -248,7 +251,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id,
+ 		       u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
+ 		       struct l2tp_tunnel **tunnelp);
+ void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
+-int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
++void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
+ struct l2tp_session *l2tp_session_create(int priv_size,
+ 					 struct l2tp_tunnel *tunnel,
+ 					 u32 session_id, u32 peer_session_id,
+diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
+index 4de2ec94b08c..cf456720930c 100644
+--- a/net/l2tp/l2tp_eth.c
++++ b/net/l2tp/l2tp_eth.c
+@@ -44,7 +44,6 @@ struct l2tp_eth {
+ 	struct net_device	*dev;
+ 	struct sock		*tunnel_sock;
+ 	struct l2tp_session	*session;
+-	struct list_head	list;
+ 	atomic_long_t		tx_bytes;
+ 	atomic_long_t		tx_packets;
+ 	atomic_long_t		tx_dropped;
+@@ -58,17 +57,6 @@ struct l2tp_eth_sess {
+ 	struct net_device	*dev;
+ };
+ 
+-/* per-net private data for this module */
+-static unsigned int l2tp_eth_net_id;
+-struct l2tp_eth_net {
+-	struct list_head l2tp_eth_dev_list;
+-	spinlock_t l2tp_eth_lock;
+-};
+-
+-static inline struct l2tp_eth_net *l2tp_eth_pernet(struct net *net)
+-{
+-	return net_generic(net, l2tp_eth_net_id);
+-}
+ 
+ static int l2tp_eth_dev_init(struct net_device *dev)
+ {
+@@ -84,12 +72,6 @@ static int l2tp_eth_dev_init(struct net_device *dev)
+ 
+ static void l2tp_eth_dev_uninit(struct net_device *dev)
+ {
+-	struct l2tp_eth *priv = netdev_priv(dev);
+-	struct l2tp_eth_net *pn = l2tp_eth_pernet(dev_net(dev));
+-
+-	spin_lock(&pn->l2tp_eth_lock);
+-	list_del_init(&priv->list);
+-	spin_unlock(&pn->l2tp_eth_lock);
+ 	dev_put(dev);
+ }
+ 
+@@ -272,7 +254,6 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
+ 	struct l2tp_eth *priv;
+ 	struct l2tp_eth_sess *spriv;
+ 	int rc;
+-	struct l2tp_eth_net *pn;
+ 
+ 	tunnel = l2tp_tunnel_find(net, tunnel_id);
+ 	if (!tunnel) {
+@@ -310,7 +291,6 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
+ 	priv = netdev_priv(dev);
+ 	priv->dev = dev;
+ 	priv->session = session;
+-	INIT_LIST_HEAD(&priv->list);
+ 
+ 	priv->tunnel_sock = tunnel->sock;
+ 	session->recv_skb = l2tp_eth_dev_recv;
+@@ -331,10 +311,6 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
+ 	strlcpy(session->ifname, dev->name, IFNAMSIZ);
+ 
+ 	dev_hold(dev);
+-	pn = l2tp_eth_pernet(dev_net(dev));
+-	spin_lock(&pn->l2tp_eth_lock);
+-	list_add(&priv->list, &pn->l2tp_eth_dev_list);
+-	spin_unlock(&pn->l2tp_eth_lock);
+ 
+ 	return 0;
+ 
+@@ -347,22 +323,6 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
+ 	return rc;
+ }
+ 
+-static __net_init int l2tp_eth_init_net(struct net *net)
+-{
+-	struct l2tp_eth_net *pn = net_generic(net, l2tp_eth_net_id);
+-
+-	INIT_LIST_HEAD(&pn->l2tp_eth_dev_list);
+-	spin_lock_init(&pn->l2tp_eth_lock);
+-
+-	return 0;
+-}
+-
+-static struct pernet_operations l2tp_eth_net_ops = {
+-	.init = l2tp_eth_init_net,
+-	.id   = &l2tp_eth_net_id,
+-	.size = sizeof(struct l2tp_eth_net),
+-};
+-
+ 
+ static const struct l2tp_nl_cmd_ops l2tp_eth_nl_cmd_ops = {
+ 	.session_create	= l2tp_eth_create,
+@@ -376,25 +336,18 @@ static int __init l2tp_eth_init(void)
+ 
+ 	err = l2tp_nl_register_ops(L2TP_PWTYPE_ETH, &l2tp_eth_nl_cmd_ops);
+ 	if (err)
+-		goto out;
+-
+-	err = register_pernet_device(&l2tp_eth_net_ops);
+-	if (err)
+-		goto out_unreg;
++		goto err;
+ 
+ 	pr_info("L2TP ethernet pseudowire support (L2TPv3)\n");
+ 
+ 	return 0;
+ 
+-out_unreg:
+-	l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
+-out:
++err:
+ 	return err;
+ }
+ 
+ static void __exit l2tp_eth_exit(void)
+ {
+-	unregister_pernet_device(&l2tp_eth_net_ops);
+ 	l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
+ }
+ 
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 5acee49db90b..7e794ad50cb0 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -2262,10 +2262,13 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ 
+ 	mutex_unlock(nlk->cb_mutex);
+ 
++	ret = 0;
+ 	if (cb->start)
+-		cb->start(cb);
++		ret = cb->start(cb);
++
++	if (!ret)
++		ret = netlink_dump(sk);
+ 
+-	ret = netlink_dump(sk);
+ 	sock_put(sk);
+ 
+ 	if (ret)
+diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
+index 6b44fe405282..294444bb075c 100644
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -1126,7 +1126,8 @@ static int ovs_nla_init_match_and_action(struct net *net,
+ 		if (!a[OVS_FLOW_ATTR_KEY]) {
+ 			OVS_NLERR(log,
+ 				  "Flow key attribute not present in set flow.");
+-			return -EINVAL;
++			error = -EINVAL;
++			goto error;
+ 		}
+ 
+ 		*acts = get_flow_actions(net, a[OVS_FLOW_ATTR_ACTIONS], key,
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 1c61af9af67d..29d7b7e5b128 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1686,10 +1686,6 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
+ 
+ 	mutex_lock(&fanout_mutex);
+ 
+-	err = -EINVAL;
+-	if (!po->running)
+-		goto out;
+-
+ 	err = -EALREADY;
+ 	if (po->fanout)
+ 		goto out;
+@@ -1751,7 +1747,10 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
+ 		list_add(&match->list, &fanout_list);
+ 	}
+ 	err = -EINVAL;
+-	if (match->type == type &&
++
++	spin_lock(&po->bind_lock);
++	if (po->running &&
++	    match->type == type &&
+ 	    match->prot_hook.type == po->prot_hook.type &&
+ 	    match->prot_hook.dev == po->prot_hook.dev) {
+ 		err = -ENOSPC;
+@@ -1763,6 +1762,13 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
+ 			err = 0;
+ 		}
+ 	}
++	spin_unlock(&po->bind_lock);
++
++	if (err && !refcount_read(&match->sk_ref)) {
++		list_del(&match->list);
++		kfree(match);
++	}
++
+ out:
+ 	if (err && rollover) {
+ 		kfree(rollover);
+@@ -2836,6 +2842,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ 	struct virtio_net_hdr vnet_hdr = { 0 };
+ 	int offset = 0;
+ 	struct packet_sock *po = pkt_sk(sk);
++	bool has_vnet_hdr = false;
+ 	int hlen, tlen, linear;
+ 	int extra_len = 0;
+ 
+@@ -2879,6 +2886,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ 		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
+ 		if (err)
+ 			goto out_unlock;
++		has_vnet_hdr = true;
+ 	}
+ 
+ 	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
+@@ -2937,7 +2945,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ 	skb->priority = sk->sk_priority;
+ 	skb->mark = sockc.mark;
+ 
+-	if (po->has_vnet_hdr) {
++	if (has_vnet_hdr) {
+ 		err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
+ 		if (err)
+ 			goto out_free;
+@@ -3065,13 +3073,15 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
+ 	int ret = 0;
+ 	bool unlisted = false;
+ 
+-	if (po->fanout)
+-		return -EINVAL;
+-
+ 	lock_sock(sk);
+ 	spin_lock(&po->bind_lock);
+ 	rcu_read_lock();
+ 
++	if (po->fanout) {
++		ret = -EINVAL;
++		goto out_unlock;
++	}
++
+ 	if (name) {
+ 		dev = dev_get_by_name_rcu(sock_net(sk), name);
+ 		if (!dev) {
+diff --git a/net/sched/act_api.c b/net/sched/act_api.c
+index f2e9ed34a963..0c5dbb172437 100644
+--- a/net/sched/act_api.c
++++ b/net/sched/act_api.c
+@@ -174,7 +174,7 @@ static int tcf_del_walker(struct tcf_hashinfo *hinfo, struct sk_buff *skb,
+ 		hlist_for_each_entry_safe(p, n, head, tcfa_head) {
+ 			ret = __tcf_hash_release(p, false, true);
+ 			if (ret == ACT_P_DELETED) {
+-				module_put(p->ops->owner);
++				module_put(ops->owner);
+ 				n_i++;
+ 			} else if (ret < 0)
+ 				goto nla_put_failure;
+@@ -506,13 +506,15 @@ EXPORT_SYMBOL(tcf_action_exec);
+ 
+ int tcf_action_destroy(struct list_head *actions, int bind)
+ {
++	const struct tc_action_ops *ops;
+ 	struct tc_action *a, *tmp;
+ 	int ret = 0;
+ 
+ 	list_for_each_entry_safe(a, tmp, actions, list) {
++		ops = a->ops;
+ 		ret = __tcf_hash_release(a, bind, true);
+ 		if (ret == ACT_P_DELETED)
+-			module_put(a->ops->owner);
++			module_put(ops->owner);
+ 		else if (ret < 0)
+ 			return ret;
+ 	}
+diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
+index 9dc26c32cf32..d720f9376add 100644
+--- a/net/sched/cls_matchall.c
++++ b/net/sched/cls_matchall.c
+@@ -32,6 +32,7 @@ static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+ 	if (tc_skip_sw(head->flags))
+ 		return -1;
+ 
++	*res = head->res;
+ 	return tcf_exts_exec(skb, &head->exts, res);
+ }
+ 
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index 4ba6da5fb254..ff49421ee48a 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -681,6 +681,7 @@ void qdisc_reset(struct Qdisc *qdisc)
+ 		qdisc->gso_skb = NULL;
+ 	}
+ 	qdisc->q.qlen = 0;
++	qdisc->qstats.backlog = 0;
+ }
+ EXPORT_SYMBOL(qdisc_reset);
+ 
+diff --git a/net/tipc/msg.c b/net/tipc/msg.c
+index 6ef379f004ac..121e59a1d0e7 100644
+--- a/net/tipc/msg.c
++++ b/net/tipc/msg.c
+@@ -551,7 +551,7 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
+ 		return false;
+ 	if (msg_errcode(msg))
+ 		return false;
+-	*err = -TIPC_ERR_NO_NAME;
++	*err = TIPC_ERR_NO_NAME;
+ 	if (skb_linearize(skb))
+ 		return false;
+ 	msg = buf_msg(skb);
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 750ba5d24a49..359b1f34c805 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -549,6 +549,14 @@ nl80211_nan_srf_policy[NL80211_NAN_SRF_ATTR_MAX + 1] = {
+ 	[NL80211_NAN_SRF_MAC_ADDRS] = { .type = NLA_NESTED },
+ };
+ 
++/* policy for packet pattern attributes */
++static const struct nla_policy
++nl80211_packet_pattern_policy[MAX_NL80211_PKTPAT + 1] = {
++	[NL80211_PKTPAT_MASK] = { .type = NLA_BINARY, },
++	[NL80211_PKTPAT_PATTERN] = { .type = NLA_BINARY, },
++	[NL80211_PKTPAT_OFFSET] = { .type = NLA_U32 },
++};
++
+ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
+ 				     struct netlink_callback *cb,
+ 				     struct cfg80211_registered_device **rdev,
+@@ -10529,7 +10537,8 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
+ 			u8 *mask_pat;
+ 
+ 			nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
+-					 NULL, info->extack);
++					 nl80211_packet_pattern_policy,
++					 info->extack);
+ 			err = -EINVAL;
+ 			if (!pat_tb[NL80211_PKTPAT_MASK] ||
+ 			    !pat_tb[NL80211_PKTPAT_PATTERN])
+@@ -10778,7 +10787,8 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev,
+ 			    rem) {
+ 		u8 *mask_pat;
+ 
+-		nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, NULL, NULL);
++		nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
++				 nl80211_packet_pattern_policy, NULL);
+ 		if (!pat_tb[NL80211_PKTPAT_MASK] ||
+ 		    !pat_tb[NL80211_PKTPAT_PATTERN])
+ 			return -EINVAL;
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index 463af86812c7..a9e89177a346 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -1499,7 +1499,7 @@ static int smack_inode_removexattr(struct dentry *dentry, const char *name)
+  * @inode: the object
+  * @name: attribute name
+  * @buffer: where to put the result
+- * @alloc: unused
++ * @alloc: duplicate memory
+  *
+  * Returns the size of the attribute or an error code
+  */
+@@ -1512,43 +1512,38 @@ static int smack_inode_getsecurity(struct inode *inode,
+ 	struct super_block *sbp;
+ 	struct inode *ip = (struct inode *)inode;
+ 	struct smack_known *isp;
+-	int ilen;
+-	int rc = 0;
+ 
+-	if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) {
++	if (strcmp(name, XATTR_SMACK_SUFFIX) == 0)
+ 		isp = smk_of_inode(inode);
+-		ilen = strlen(isp->smk_known);
+-		*buffer = isp->smk_known;
+-		return ilen;
+-	}
++	else {
++		/*
++		 * The rest of the Smack xattrs are only on sockets.
++		 */
++		sbp = ip->i_sb;
++		if (sbp->s_magic != SOCKFS_MAGIC)
++			return -EOPNOTSUPP;
+ 
+-	/*
+-	 * The rest of the Smack xattrs are only on sockets.
+-	 */
+-	sbp = ip->i_sb;
+-	if (sbp->s_magic != SOCKFS_MAGIC)
+-		return -EOPNOTSUPP;
++		sock = SOCKET_I(ip);
++		if (sock == NULL || sock->sk == NULL)
++			return -EOPNOTSUPP;
+ 
+-	sock = SOCKET_I(ip);
+-	if (sock == NULL || sock->sk == NULL)
+-		return -EOPNOTSUPP;
+-
+-	ssp = sock->sk->sk_security;
++		ssp = sock->sk->sk_security;
+ 
+-	if (strcmp(name, XATTR_SMACK_IPIN) == 0)
+-		isp = ssp->smk_in;
+-	else if (strcmp(name, XATTR_SMACK_IPOUT) == 0)
+-		isp = ssp->smk_out;
+-	else
+-		return -EOPNOTSUPP;
++		if (strcmp(name, XATTR_SMACK_IPIN) == 0)
++			isp = ssp->smk_in;
++		else if (strcmp(name, XATTR_SMACK_IPOUT) == 0)
++			isp = ssp->smk_out;
++		else
++			return -EOPNOTSUPP;
++	}
+ 
+-	ilen = strlen(isp->smk_known);
+-	if (rc == 0) {
+-		*buffer = isp->smk_known;
+-		rc = ilen;
++	if (alloc) {
++		*buffer = kstrdup(isp->smk_known, GFP_KERNEL);
++		if (*buffer == NULL)
++			return -ENOMEM;
+ 	}
+ 
+-	return rc;
++	return strlen(isp->smk_known);
+ }
+ 
+ 
+diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
+index fec1dfdb14ad..4490a699030b 100644
+--- a/sound/core/compress_offload.c
++++ b/sound/core/compress_offload.c
+@@ -948,14 +948,13 @@ static const struct file_operations snd_compr_file_ops = {
+ static int snd_compress_dev_register(struct snd_device *device)
+ {
+ 	int ret = -EINVAL;
+-	char str[16];
+ 	struct snd_compr *compr;
+ 
+ 	if (snd_BUG_ON(!device || !device->device_data))
+ 		return -EBADFD;
+ 	compr = device->device_data;
+ 
+-	pr_debug("reg %s for device %s, direction %d\n", str, compr->name,
++	pr_debug("reg device %s, direction %d\n", compr->name,
+ 			compr->direction);
+ 	/* register compressed device */
+ 	ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS,
+diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
+index d15ecf9febbf..e54f5f549e38 100644
+--- a/sound/pci/echoaudio/echoaudio.c
++++ b/sound/pci/echoaudio/echoaudio.c
+@@ -1272,11 +1272,11 @@ static int snd_echo_mixer_info(struct snd_kcontrol *kcontrol,
+ 
+ 	chip = snd_kcontrol_chip(kcontrol);
+ 	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
++	uinfo->count = 1;
+ 	uinfo->value.integer.min = ECHOGAIN_MINOUT;
+ 	uinfo->value.integer.max = ECHOGAIN_MAXOUT;
+ 	uinfo->dimen.d[0] = num_busses_out(chip);
+ 	uinfo->dimen.d[1] = num_busses_in(chip);
+-	uinfo->count = uinfo->dimen.d[0] * uinfo->dimen.d[1];
+ 	return 0;
+ }
+ 
+@@ -1344,11 +1344,11 @@ static int snd_echo_vmixer_info(struct snd_kcontrol *kcontrol,
+ 
+ 	chip = snd_kcontrol_chip(kcontrol);
+ 	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
++	uinfo->count = 1;
+ 	uinfo->value.integer.min = ECHOGAIN_MINOUT;
+ 	uinfo->value.integer.max = ECHOGAIN_MAXOUT;
+ 	uinfo->dimen.d[0] = num_busses_out(chip);
+ 	uinfo->dimen.d[1] = num_pipes_out(chip);
+-	uinfo->count = uinfo->dimen.d[0] * uinfo->dimen.d[1];
+ 	return 0;
+ }
+ 
+@@ -1728,6 +1728,7 @@ static int snd_echo_vumeters_info(struct snd_kcontrol *kcontrol,
+ 				  struct snd_ctl_elem_info *uinfo)
+ {
+ 	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
++	uinfo->count = 96;
+ 	uinfo->value.integer.min = ECHOGAIN_MINOUT;
+ 	uinfo->value.integer.max = 0;
+ #ifdef ECHOCARD_HAS_VMIXER
+@@ -1737,7 +1738,6 @@ static int snd_echo_vumeters_info(struct snd_kcontrol *kcontrol,
+ #endif
+ 	uinfo->dimen.d[1] = 16;	/* 16 channels */
+ 	uinfo->dimen.d[2] = 2;	/* 0=level, 1=peak */
+-	uinfo->count = uinfo->dimen.d[0] * uinfo->dimen.d[1] * uinfo->dimen.d[2];
+ 	return 0;
+ }
+ 
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index 6640277a725b..383facf2dc11 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -221,6 +221,7 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
+ 	struct usb_interface_descriptor *altsd;
+ 	void *control_header;
+ 	int i, protocol;
++	int rest_bytes;
+ 
+ 	/* find audiocontrol interface */
+ 	host_iface = &usb_ifnum_to_if(dev, ctrlif)->altsetting[0];
+@@ -235,6 +236,15 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
+ 		return -EINVAL;
+ 	}
+ 
++	rest_bytes = (void *)(host_iface->extra + host_iface->extralen) -
++		control_header;
++
++	/* just to be sure -- this shouldn't hit at all */
++	if (rest_bytes <= 0) {
++		dev_err(&dev->dev, "invalid control header\n");
++		return -EINVAL;
++	}
++
+ 	switch (protocol) {
+ 	default:
+ 		dev_warn(&dev->dev,
+@@ -245,11 +255,21 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
+ 	case UAC_VERSION_1: {
+ 		struct uac1_ac_header_descriptor *h1 = control_header;
+ 
++		if (rest_bytes < sizeof(*h1)) {
++			dev_err(&dev->dev, "too short v1 buffer descriptor\n");
++			return -EINVAL;
++		}
++
+ 		if (!h1->bInCollection) {
+ 			dev_info(&dev->dev, "skipping empty audio interface (v1)\n");
+ 			return -EINVAL;
+ 		}
+ 
++		if (rest_bytes < h1->bLength) {
++			dev_err(&dev->dev, "invalid buffer length (v1)\n");
++			return -EINVAL;
++		}
++
+ 		if (h1->bLength < sizeof(*h1) + h1->bInCollection) {
+ 			dev_err(&dev->dev, "invalid UAC_HEADER (v1)\n");
+ 			return -EINVAL;
+diff --git a/sound/usb/usx2y/usb_stream.c b/sound/usb/usx2y/usb_stream.c
+index bf618e1500ac..e7b934f4d837 100644
+--- a/sound/usb/usx2y/usb_stream.c
++++ b/sound/usb/usx2y/usb_stream.c
+@@ -191,7 +191,8 @@ struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk,
+ 	}
+ 
+ 	pg = get_order(read_size);
+-	sk->s = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO, pg);
++	sk->s = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO|
++					  __GFP_NOWARN, pg);
+ 	if (!sk->s) {
+ 		snd_printk(KERN_WARNING "couldn't __get_free_pages()\n");
+ 		goto out;
+@@ -211,7 +212,8 @@ struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk,
+ 	pg = get_order(write_size);
+ 
+ 	sk->write_page =
+-		(void *)__get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO, pg);
++		(void *)__get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO|
++					 __GFP_NOWARN, pg);
+ 	if (!sk->write_page) {
+ 		snd_printk(KERN_WARNING "couldn't __get_free_pages()\n");
+ 		usb_stream_free(sk);
+diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
+index d3ed7324105e..48c145eeeaf7 100644
+--- a/tools/testing/selftests/bpf/test_verifier.c
++++ b/tools/testing/selftests/bpf/test_verifier.c
+@@ -6009,6 +6009,22 @@ static struct bpf_test tests[] = {
+ 		.result = REJECT,
+ 		.result_unpriv = REJECT,
+ 	},
++	{
++		"invalid 64-bit BPF_END",
++		.insns = {
++			BPF_MOV32_IMM(BPF_REG_0, 0),
++			{
++				.code  = BPF_ALU64 | BPF_END | BPF_TO_LE,
++				.dst_reg = BPF_REG_0,
++				.src_reg = 0,
++				.off   = 0,
++				.imm   = 32,
++			},
++			BPF_EXIT_INSN(),
++		},
++		.errstr = "BPF_END uses reserved fields",
++		.result = REJECT,
++	},
+ };
+ 
+ static int probe_filter_length(const struct bpf_insn *fp)


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.13 commit in: /
@ 2017-10-05 11:36 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2017-10-05 11:36 UTC (permalink / raw
  To: gentoo-commits

commit:     21faefe25f606a7594f1cebfaddef79aeb70587a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Oct  5 11:36:30 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Oct  5 11:36:30 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=21faefe2

Linux patch 4.13.5

 0000_README             |    4 +
 1004_linux-4.13.5.patch | 4459 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4463 insertions(+)

diff --git a/0000_README b/0000_README
index aee01b0..382473c 100644
--- a/0000_README
+++ b/0000_README
@@ -59,6 +59,10 @@ Patch:  1003_linux-4.13.4.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.13.4
 
+Patch:  1004_linux-4.13.5.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.13.5
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1004_linux-4.13.5.patch b/1004_linux-4.13.5.patch
new file mode 100644
index 0000000..1a481f0
--- /dev/null
+++ b/1004_linux-4.13.5.patch
@@ -0,0 +1,4459 @@
+diff --git a/Makefile b/Makefile
+index 159901979dec..189f1a748e4c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 13
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index 6eae342ced6b..8d9832870ff4 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -412,7 +412,7 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
+ /* Find an entry in the third-level page table. */
+ #define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+ 
+-#define pte_offset_phys(dir,addr)	(pmd_page_paddr(*(dir)) + pte_index(addr) * sizeof(pte_t))
++#define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))
+ #define pte_offset_kernel(dir,addr)	((pte_t *)__va(pte_offset_phys((dir), (addr))))
+ 
+ #define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index adb0910b88f5..8c908829d3c4 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -381,6 +381,7 @@ ENTRY(kimage_vaddr)
+  * booted in EL1 or EL2 respectively.
+  */
+ ENTRY(el2_setup)
++	msr	SPsel, #1			// We want to use SP_EL{1,2}
+ 	mrs	x0, CurrentEL
+ 	cmp	x0, #CurrentEL_EL2
+ 	b.eq	1f
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index 1f22a41565a3..92f3bc3bc74e 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -614,7 +614,7 @@ static const struct fault_info fault_info[] = {
+ 	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 0 translation fault"	},
+ 	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 1 translation fault"	},
+ 	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 2 translation fault"	},
+-	{ do_page_fault,	SIGSEGV, SEGV_MAPERR,	"level 3 translation fault"	},
++	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 3 translation fault"	},
+ 	{ do_bad,		SIGBUS,  0,		"unknown 8"			},
+ 	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 1 access flag fault"	},
+ 	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 2 access flag fault"	},
+diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
+index 9e6c74bf66c4..6668f67a61c3 100644
+--- a/arch/mips/kernel/perf_event_mipsxx.c
++++ b/arch/mips/kernel/perf_event_mipsxx.c
+@@ -618,8 +618,7 @@ static int mipspmu_event_init(struct perf_event *event)
+ 		return -ENOENT;
+ 	}
+ 
+-	if ((unsigned int)event->cpu >= nr_cpumask_bits ||
+-	    (event->cpu >= 0 && !cpu_online(event->cpu)))
++	if (event->cpu >= 0 && !cpu_online(event->cpu))
+ 		return -ENODEV;
+ 
+ 	if (!atomic_inc_not_zero(&active_events)) {
+diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
+index 63992b2d8e15..f27eecd5ec7f 100644
+--- a/arch/powerpc/kernel/eeh.c
++++ b/arch/powerpc/kernel/eeh.c
+@@ -1018,6 +1018,10 @@ int eeh_init(void)
+ 	} else if ((ret = eeh_ops->init()))
+ 		return ret;
+ 
++	/* Initialize PHB PEs */
++	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
++		eeh_dev_phb_init_dynamic(hose);
++
+ 	/* Initialize EEH event */
+ 	ret = eeh_event_init();
+ 	if (ret)
+diff --git a/arch/powerpc/kernel/eeh_dev.c b/arch/powerpc/kernel/eeh_dev.c
+index d6b2ca70d14d..0820b73288c0 100644
+--- a/arch/powerpc/kernel/eeh_dev.c
++++ b/arch/powerpc/kernel/eeh_dev.c
+@@ -83,21 +83,3 @@ void eeh_dev_phb_init_dynamic(struct pci_controller *phb)
+ 	/* EEH PE for PHB */
+ 	eeh_phb_pe_create(phb);
+ }
+-
+-/**
+- * eeh_dev_phb_init - Create EEH devices for devices included in existing PHBs
+- *
+- * Scan all the existing PHBs and create EEH devices for their OF
+- * nodes and their children OF nodes
+- */
+-static int __init eeh_dev_phb_init(void)
+-{
+-	struct pci_controller *phb, *tmp;
+-
+-	list_for_each_entry_safe(phb, tmp, &hose_list, list_node)
+-		eeh_dev_phb_init_dynamic(phb);
+-
+-	return 0;
+-}
+-
+-core_initcall(eeh_dev_phb_init);
+diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
+index 660ed39e9c9a..b8d4f07f332c 100644
+--- a/arch/powerpc/kernel/ptrace.c
++++ b/arch/powerpc/kernel/ptrace.c
+@@ -131,7 +131,7 @@ static void flush_tmregs_to_thread(struct task_struct *tsk)
+ 	 * in the appropriate thread structures from live.
+ 	 */
+ 
+-	if (tsk != current)
++	if ((!cpu_has_feature(CPU_FTR_TM)) || (tsk != current))
+ 		return;
+ 
+ 	if (MSR_TM_SUSPENDED(mfmsr())) {
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 359c79cdf0cc..9ecd9aea0b54 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -4187,11 +4187,13 @@ static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
+ 	if ((cfg->process_table & PRTS_MASK) > 24)
+ 		return -EINVAL;
+ 
++	mutex_lock(&kvm->lock);
+ 	kvm->arch.process_table = cfg->process_table;
+ 	kvmppc_setup_partition_table(kvm);
+ 
+ 	lpcr = (cfg->flags & KVM_PPC_MMUV3_GTSE) ? LPCR_GTSE : 0;
+ 	kvmppc_update_lpcr(kvm, lpcr, LPCR_GTSE);
++	mutex_unlock(&kvm->lock);
+ 
+ 	return 0;
+ }
+diff --git a/arch/powerpc/kvm/book3s_hv_rm_xive.c b/arch/powerpc/kvm/book3s_hv_rm_xive.c
+index abf5f01b6eb1..5b81a807d742 100644
+--- a/arch/powerpc/kvm/book3s_hv_rm_xive.c
++++ b/arch/powerpc/kvm/book3s_hv_rm_xive.c
+@@ -38,7 +38,6 @@ static inline void __iomem *get_tima_phys(void)
+ #define __x_tima		get_tima_phys()
+ #define __x_eoi_page(xd)	((void __iomem *)((xd)->eoi_page))
+ #define __x_trig_page(xd)	((void __iomem *)((xd)->trig_page))
+-#define __x_readb	__raw_rm_readb
+ #define __x_writeb	__raw_rm_writeb
+ #define __x_readw	__raw_rm_readw
+ #define __x_readq	__raw_rm_readq
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index 9c9c983b864f..dc58c2a560f9 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -765,6 +765,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+ 
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ BEGIN_FTR_SECTION
++	/*
++	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
++	 */
+ 	bl	kvmppc_restore_tm
+ END_FTR_SECTION_IFSET(CPU_FTR_TM)
+ #endif
+@@ -1623,6 +1626,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
+ 
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ BEGIN_FTR_SECTION
++	/*
++	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
++	 */
+ 	bl	kvmppc_save_tm
+ END_FTR_SECTION_IFSET(CPU_FTR_TM)
+ #endif
+@@ -1742,7 +1748,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+ 	/*
+ 	 * Are we running hash or radix ?
+ 	 */
+-	beq	cr2,3f
++	ld	r5, VCPU_KVM(r9)
++	lbz	r0, KVM_RADIX(r5)
++	cmpwi	cr2, r0, 0
++	beq	cr2, 3f
+ 
+ 	/* Radix: Handle the case where the guest used an illegal PID */
+ 	LOAD_REG_ADDR(r4, mmu_base_pid)
+@@ -2459,6 +2468,9 @@ _GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
+ 
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ BEGIN_FTR_SECTION
++	/*
++	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
++	 */
+ 	ld	r9, HSTATE_KVM_VCPU(r13)
+ 	bl	kvmppc_save_tm
+ END_FTR_SECTION_IFSET(CPU_FTR_TM)
+@@ -2569,6 +2581,9 @@ kvm_end_cede:
+ 
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ BEGIN_FTR_SECTION
++	/*
++	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
++	 */
+ 	bl	kvmppc_restore_tm
+ END_FTR_SECTION_IFSET(CPU_FTR_TM)
+ #endif
+diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
+index 08b200a0bbce..13304622ab1c 100644
+--- a/arch/powerpc/kvm/book3s_xive.c
++++ b/arch/powerpc/kvm/book3s_xive.c
+@@ -48,7 +48,6 @@
+ #define __x_tima		xive_tima
+ #define __x_eoi_page(xd)	((void __iomem *)((xd)->eoi_mmio))
+ #define __x_trig_page(xd)	((void __iomem *)((xd)->trig_mmio))
+-#define __x_readb	__raw_readb
+ #define __x_writeb	__raw_writeb
+ #define __x_readw	__raw_readw
+ #define __x_readq	__raw_readq
+diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c
+index d1ed2c41b5d2..c7a5deadd1cc 100644
+--- a/arch/powerpc/kvm/book3s_xive_template.c
++++ b/arch/powerpc/kvm/book3s_xive_template.c
+@@ -28,7 +28,8 @@ static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
+ 	 * bit.
+ 	 */
+ 	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
+-		u8 pipr = __x_readb(__x_tima + TM_QW1_OS + TM_PIPR);
++		__be64 qw1 = __x_readq(__x_tima + TM_QW1_OS);
++		u8 pipr = be64_to_cpu(qw1) & 0xff;
+ 		if (pipr >= xc->hw_cppr)
+ 			return;
+ 	}
+@@ -336,7 +337,6 @@ X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long
+ 	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ 	u8 pending = xc->pending;
+ 	u32 hirq;
+-	u8 pipr;
+ 
+ 	pr_devel("H_IPOLL(server=%ld)\n", server);
+ 
+@@ -353,7 +353,8 @@ X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long
+ 		pending = 0xff;
+ 	} else {
+ 		/* Grab pending interrupt if any */
+-		pipr = __x_readb(__x_tima + TM_QW1_OS + TM_PIPR);
++		__be64 qw1 = __x_readq(__x_tima + TM_QW1_OS);
++		u8 pipr = be64_to_cpu(qw1) & 0xff;
+ 		if (pipr < 8)
+ 			pending |= 1 << pipr;
+ 	}
+diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
+index 2da4851eff99..37f622444a04 100644
+--- a/arch/powerpc/platforms/pseries/mobility.c
++++ b/arch/powerpc/platforms/pseries/mobility.c
+@@ -226,8 +226,10 @@ static int add_dt_node(__be32 parent_phandle, __be32 drc_index)
+ 		return -ENOENT;
+ 
+ 	dn = dlpar_configure_connector(drc_index, parent_dn);
+-	if (!dn)
++	if (!dn) {
++		of_node_put(parent_dn);
+ 		return -ENOENT;
++	}
+ 
+ 	rc = dlpar_attach_node(dn);
+ 	if (rc)
+diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
+index 65ab11d654e1..80c1583d033f 100644
+--- a/arch/s390/include/asm/pgtable.h
++++ b/arch/s390/include/asm/pgtable.h
+@@ -1462,7 +1462,9 @@ static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
+ static inline void pmdp_invalidate(struct vm_area_struct *vma,
+ 				   unsigned long addr, pmd_t *pmdp)
+ {
+-	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
++	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
++
++	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
+ }
+ 
+ #define __HAVE_ARCH_PMDP_SET_WRPROTECT
+diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
+index c1bf75ffb875..7e1e40323b78 100644
+--- a/arch/s390/kernel/perf_cpum_sf.c
++++ b/arch/s390/kernel/perf_cpum_sf.c
+@@ -823,9 +823,12 @@ static int cpumsf_pmu_event_init(struct perf_event *event)
+ 	}
+ 
+ 	/* Check online status of the CPU to which the event is pinned */
+-	if ((unsigned int)event->cpu >= nr_cpumask_bits ||
+-	    (event->cpu >= 0 && !cpu_online(event->cpu)))
+-		return -ENODEV;
++	if (event->cpu >= 0) {
++		if ((unsigned int)event->cpu >= nr_cpumask_bits)
++			return -ENODEV;
++		if (!cpu_online(event->cpu))
++			return -ENODEV;
++	}
+ 
+ 	/* Force reset of idle/hv excludes regardless of what the
+ 	 * user requested.
+diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
+index 8ecc25e760fa..98ffe3ee9411 100644
+--- a/arch/s390/mm/gup.c
++++ b/arch/s390/mm/gup.c
+@@ -56,13 +56,12 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
+ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
+ 		unsigned long end, int write, struct page **pages, int *nr)
+ {
+-	unsigned long mask, result;
+ 	struct page *head, *page;
++	unsigned long mask;
+ 	int refs;
+ 
+-	result = write ? 0 : _SEGMENT_ENTRY_PROTECT;
+-	mask = result | _SEGMENT_ENTRY_INVALID;
+-	if ((pmd_val(pmd) & mask) != result)
++	mask = (write ? _SEGMENT_ENTRY_PROTECT : 0) | _SEGMENT_ENTRY_INVALID;
++	if ((pmd_val(pmd) & mask) != 0)
+ 		return 0;
+ 	VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));
+ 
+diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c
+index b188b16841e3..8ab1a1f4d1c1 100644
+--- a/arch/x86/kernel/fpu/regset.c
++++ b/arch/x86/kernel/fpu/regset.c
+@@ -131,11 +131,16 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
+ 
+ 	fpu__activate_fpstate_write(fpu);
+ 
+-	if (boot_cpu_has(X86_FEATURE_XSAVES))
++	if (boot_cpu_has(X86_FEATURE_XSAVES)) {
+ 		ret = copyin_to_xsaves(kbuf, ubuf, xsave);
+-	else
++	} else {
+ 		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
+ 
++		/* xcomp_bv must be 0 when using uncompacted format */
++		if (!ret && xsave->header.xcomp_bv)
++			ret = -EINVAL;
++	}
++
+ 	/*
+ 	 * In case of failure, mark all states as init:
+ 	 */
+diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
+index 83c23c230b4c..3a9318610c4d 100644
+--- a/arch/x86/kernel/fpu/signal.c
++++ b/arch/x86/kernel/fpu/signal.c
+@@ -329,6 +329,10 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+ 		} else {
+ 			err = __copy_from_user(&fpu->state.xsave,
+ 					       buf_fx, state_size);
++
++			/* xcomp_bv must be 0 when using uncompacted format */
++			if (!err && state_size > offsetof(struct xregs_state, header) && fpu->state.xsave.header.xcomp_bv)
++				err = -EINVAL;
+ 		}
+ 
+ 		if (err || __copy_from_user(&env, buf, sizeof(env))) {
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index d04e30e3c0ff..58590a698a1a 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -140,7 +140,8 @@ void kvm_async_pf_task_wait(u32 token)
+ 
+ 	n.token = token;
+ 	n.cpu = smp_processor_id();
+-	n.halted = is_idle_task(current) || preempt_count() > 1;
++	n.halted = is_idle_task(current) || preempt_count() > 1 ||
++		   rcu_preempt_depth();
+ 	init_swait_queue_head(&n.wq);
+ 	hlist_add_head(&n.link, &b->list);
+ 	raw_spin_unlock(&b->lock);
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index c6ef2940119b..95796e2efc38 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -200,6 +200,8 @@ struct loaded_vmcs {
+ 	int cpu;
+ 	bool launched;
+ 	bool nmi_known_unmasked;
++	unsigned long vmcs_host_cr3;	/* May not match real cr3 */
++	unsigned long vmcs_host_cr4;	/* May not match real cr4 */
+ 	struct list_head loaded_vmcss_on_cpu_link;
+ };
+ 
+@@ -595,8 +597,6 @@ struct vcpu_vmx {
+ 		int           gs_ldt_reload_needed;
+ 		int           fs_reload_needed;
+ 		u64           msr_host_bndcfgs;
+-		unsigned long vmcs_host_cr3;	/* May not match real cr3 */
+-		unsigned long vmcs_host_cr4;	/* May not match real cr4 */
+ 	} host_state;
+ 	struct {
+ 		int vm86_active;
+@@ -2187,46 +2187,44 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
+ 	struct pi_desc old, new;
+ 	unsigned int dest;
+ 
+-	if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
+-		!irq_remapping_cap(IRQ_POSTING_CAP)  ||
+-		!kvm_vcpu_apicv_active(vcpu))
++	/*
++	 * In case of hot-plug or hot-unplug, we may have to undo
++	 * vmx_vcpu_pi_put even if there is no assigned device.  And we
++	 * always keep PI.NDST up to date for simplicity: it makes the
++	 * code easier, and CPU migration is not a fast path.
++	 */
++	if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
++		return;
++
++	/*
++	 * First handle the simple case where no cmpxchg is necessary; just
++	 * allow posting non-urgent interrupts.
++	 *
++	 * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
++	 * PI.NDST: pi_post_block will do it for us and the wakeup_handler
++	 * expects the VCPU to be on the blocked_vcpu_list that matches
++	 * PI.NDST.
++	 */
++	if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR ||
++	    vcpu->cpu == cpu) {
++		pi_clear_sn(pi_desc);
+ 		return;
++	}
+ 
++	/* The full case.  */
+ 	do {
+ 		old.control = new.control = pi_desc->control;
+ 
+-		/*
+-		 * If 'nv' field is POSTED_INTR_WAKEUP_VECTOR, there
+-		 * are two possible cases:
+-		 * 1. After running 'pre_block', context switch
+-		 *    happened. For this case, 'sn' was set in
+-		 *    vmx_vcpu_put(), so we need to clear it here.
+-		 * 2. After running 'pre_block', we were blocked,
+-		 *    and woken up by some other guy. For this case,
+-		 *    we don't need to do anything, 'pi_post_block'
+-		 *    will do everything for us. However, we cannot
+-		 *    check whether it is case #1 or case #2 here
+-		 *    (maybe, not needed), so we also clear sn here,
+-		 *    I think it is not a big deal.
+-		 */
+-		if (pi_desc->nv != POSTED_INTR_WAKEUP_VECTOR) {
+-			if (vcpu->cpu != cpu) {
+-				dest = cpu_physical_id(cpu);
+-
+-				if (x2apic_enabled())
+-					new.ndst = dest;
+-				else
+-					new.ndst = (dest << 8) & 0xFF00;
+-			}
++		dest = cpu_physical_id(cpu);
+ 
+-			/* set 'NV' to 'notification vector' */
+-			new.nv = POSTED_INTR_VECTOR;
+-		}
++		if (x2apic_enabled())
++			new.ndst = dest;
++		else
++			new.ndst = (dest << 8) & 0xFF00;
+ 
+-		/* Allow posting non-urgent interrupts */
+ 		new.sn = 0;
+-	} while (cmpxchg(&pi_desc->control, old.control,
+-			new.control) != old.control);
++	} while (cmpxchg64(&pi_desc->control, old.control,
++			   new.control) != old.control);
+ }
+ 
+ static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
+@@ -5048,21 +5046,30 @@ static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
+ 	int pi_vec = nested ? POSTED_INTR_NESTED_VECTOR : POSTED_INTR_VECTOR;
+ 
+ 	if (vcpu->mode == IN_GUEST_MODE) {
+-		struct vcpu_vmx *vmx = to_vmx(vcpu);
+-
+ 		/*
+-		 * Currently, we don't support urgent interrupt,
+-		 * all interrupts are recognized as non-urgent
+-		 * interrupt, so we cannot post interrupts when
+-		 * 'SN' is set.
++		 * The vector of interrupt to be delivered to vcpu had
++		 * been set in PIR before this function.
+ 		 *
+-		 * If the vcpu is in guest mode, it means it is
+-		 * running instead of being scheduled out and
+-		 * waiting in the run queue, and that's the only
+-		 * case when 'SN' is set currently, warning if
+-		 * 'SN' is set.
++		 * Following cases will be reached in this block, and
++		 * we always send a notification event in all cases as
++		 * explained below.
++		 *
++		 * Case 1: vcpu keeps in non-root mode. Sending a
++		 * notification event posts the interrupt to vcpu.
++		 *
++		 * Case 2: vcpu exits to root mode and is still
++		 * runnable. PIR will be synced to vIRR before the
++		 * next vcpu entry. Sending a notification event in
++		 * this case has no effect, as vcpu is not in root
++		 * mode.
++		 *
++		 * Case 3: vcpu exits to root mode and is blocked.
++		 * vcpu_block() has already synced PIR to vIRR and
++		 * never blocks vcpu if vIRR is not cleared. Therefore,
++		 * a blocked vcpu here does not wait for any requested
++		 * interrupts in PIR, and sending a notification event
++		 * which has no effect is safe here.
+ 		 */
+-		WARN_ON_ONCE(pi_test_sn(&vmx->pi_desc));
+ 
+ 		apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
+ 		return true;
+@@ -5140,12 +5147,12 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
+ 	 */
+ 	cr3 = __read_cr3();
+ 	vmcs_writel(HOST_CR3, cr3);		/* 22.2.3  FIXME: shadow tables */
+-	vmx->host_state.vmcs_host_cr3 = cr3;
++	vmx->loaded_vmcs->vmcs_host_cr3 = cr3;
+ 
+ 	/* Save the most likely value for this task's CR4 in the VMCS. */
+ 	cr4 = cr4_read_shadow();
+ 	vmcs_writel(HOST_CR4, cr4);			/* 22.2.3, 22.2.5 */
+-	vmx->host_state.vmcs_host_cr4 = cr4;
++	vmx->loaded_vmcs->vmcs_host_cr4 = cr4;
+ 
+ 	vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
+ #ifdef CONFIG_X86_64
+@@ -8994,15 +9001,15 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ 		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
+ 
+ 	cr3 = __get_current_cr3_fast();
+-	if (unlikely(cr3 != vmx->host_state.vmcs_host_cr3)) {
++	if (unlikely(cr3 != vmx->loaded_vmcs->vmcs_host_cr3)) {
+ 		vmcs_writel(HOST_CR3, cr3);
+-		vmx->host_state.vmcs_host_cr3 = cr3;
++		vmx->loaded_vmcs->vmcs_host_cr3 = cr3;
+ 	}
+ 
+ 	cr4 = cr4_read_shadow();
+-	if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
++	if (unlikely(cr4 != vmx->loaded_vmcs->vmcs_host_cr4)) {
+ 		vmcs_writel(HOST_CR4, cr4);
+-		vmx->host_state.vmcs_host_cr4 = cr4;
++		vmx->loaded_vmcs->vmcs_host_cr4 = cr4;
+ 	}
+ 
+ 	/* When single-stepping over STI and MOV SS, we must clear the
+@@ -9310,6 +9317,13 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
+ 
+ 	vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED;
+ 
++	/*
++	 * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR
++	 * or POSTED_INTR_WAKEUP_VECTOR.
++	 */
++	vmx->pi_desc.nv = POSTED_INTR_VECTOR;
++	vmx->pi_desc.sn = 1;
++
+ 	return &vmx->vcpu;
+ 
+ free_vmcs:
+@@ -10266,6 +10280,11 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
+ 	if (exec_control & CPU_BASED_TPR_SHADOW) {
+ 		vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull);
+ 		vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
++	} else {
++#ifdef CONFIG_X86_64
++		exec_control |= CPU_BASED_CR8_LOAD_EXITING |
++				CPU_BASED_CR8_STORE_EXITING;
++#endif
+ 	}
+ 
+ 	/*
+@@ -11389,6 +11408,37 @@ static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
+ 	kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask);
+ }
+ 
++static void __pi_post_block(struct kvm_vcpu *vcpu)
++{
++	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
++	struct pi_desc old, new;
++	unsigned int dest;
++
++	do {
++		old.control = new.control = pi_desc->control;
++		WARN(old.nv != POSTED_INTR_WAKEUP_VECTOR,
++		     "Wakeup handler not enabled while the VCPU is blocked\n");
++
++		dest = cpu_physical_id(vcpu->cpu);
++
++		if (x2apic_enabled())
++			new.ndst = dest;
++		else
++			new.ndst = (dest << 8) & 0xFF00;
++
++		/* set 'NV' to 'notification vector' */
++		new.nv = POSTED_INTR_VECTOR;
++	} while (cmpxchg64(&pi_desc->control, old.control,
++			   new.control) != old.control);
++
++	if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) {
++		spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
++		list_del(&vcpu->blocked_vcpu_list);
++		spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
++		vcpu->pre_pcpu = -1;
++	}
++}
++
+ /*
+  * This routine does the following things for vCPU which is going
+  * to be blocked if VT-d PI is enabled.
+@@ -11404,7 +11454,6 @@ static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
+  */
+ static int pi_pre_block(struct kvm_vcpu *vcpu)
+ {
+-	unsigned long flags;
+ 	unsigned int dest;
+ 	struct pi_desc old, new;
+ 	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+@@ -11414,34 +11463,20 @@ static int pi_pre_block(struct kvm_vcpu *vcpu)
+ 		!kvm_vcpu_apicv_active(vcpu))
+ 		return 0;
+ 
+-	vcpu->pre_pcpu = vcpu->cpu;
+-	spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock,
+-			  vcpu->pre_pcpu), flags);
+-	list_add_tail(&vcpu->blocked_vcpu_list,
+-		      &per_cpu(blocked_vcpu_on_cpu,
+-		      vcpu->pre_pcpu));
+-	spin_unlock_irqrestore(&per_cpu(blocked_vcpu_on_cpu_lock,
+-			       vcpu->pre_pcpu), flags);
++	WARN_ON(irqs_disabled());
++	local_irq_disable();
++	if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) {
++		vcpu->pre_pcpu = vcpu->cpu;
++		spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
++		list_add_tail(&vcpu->blocked_vcpu_list,
++			      &per_cpu(blocked_vcpu_on_cpu,
++				       vcpu->pre_pcpu));
++		spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
++	}
+ 
+ 	do {
+ 		old.control = new.control = pi_desc->control;
+ 
+-		/*
+-		 * We should not block the vCPU if
+-		 * an interrupt is posted for it.
+-		 */
+-		if (pi_test_on(pi_desc) == 1) {
+-			spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock,
+-					  vcpu->pre_pcpu), flags);
+-			list_del(&vcpu->blocked_vcpu_list);
+-			spin_unlock_irqrestore(
+-					&per_cpu(blocked_vcpu_on_cpu_lock,
+-					vcpu->pre_pcpu), flags);
+-			vcpu->pre_pcpu = -1;
+-
+-			return 1;
+-		}
+-
+ 		WARN((pi_desc->sn == 1),
+ 		     "Warning: SN field of posted-interrupts "
+ 		     "is set before blocking\n");
+@@ -11463,10 +11498,15 @@ static int pi_pre_block(struct kvm_vcpu *vcpu)
+ 
+ 		/* set 'NV' to 'wakeup vector' */
+ 		new.nv = POSTED_INTR_WAKEUP_VECTOR;
+-	} while (cmpxchg(&pi_desc->control, old.control,
+-			new.control) != old.control);
++	} while (cmpxchg64(&pi_desc->control, old.control,
++			   new.control) != old.control);
+ 
+-	return 0;
++	/* We should not block the vCPU if an interrupt is posted for it.  */
++	if (pi_test_on(pi_desc) == 1)
++		__pi_post_block(vcpu);
++
++	local_irq_enable();
++	return (vcpu->pre_pcpu == -1);
+ }
+ 
+ static int vmx_pre_block(struct kvm_vcpu *vcpu)
+@@ -11482,44 +11522,13 @@ static int vmx_pre_block(struct kvm_vcpu *vcpu)
+ 
+ static void pi_post_block(struct kvm_vcpu *vcpu)
+ {
+-	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+-	struct pi_desc old, new;
+-	unsigned int dest;
+-	unsigned long flags;
+-
+-	if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
+-		!irq_remapping_cap(IRQ_POSTING_CAP)  ||
+-		!kvm_vcpu_apicv_active(vcpu))
++	if (vcpu->pre_pcpu == -1)
+ 		return;
+ 
+-	do {
+-		old.control = new.control = pi_desc->control;
+-
+-		dest = cpu_physical_id(vcpu->cpu);
+-
+-		if (x2apic_enabled())
+-			new.ndst = dest;
+-		else
+-			new.ndst = (dest << 8) & 0xFF00;
+-
+-		/* Allow posting non-urgent interrupts */
+-		new.sn = 0;
+-
+-		/* set 'NV' to 'notification vector' */
+-		new.nv = POSTED_INTR_VECTOR;
+-	} while (cmpxchg(&pi_desc->control, old.control,
+-			new.control) != old.control);
+-
+-	if(vcpu->pre_pcpu != -1) {
+-		spin_lock_irqsave(
+-			&per_cpu(blocked_vcpu_on_cpu_lock,
+-			vcpu->pre_pcpu), flags);
+-		list_del(&vcpu->blocked_vcpu_list);
+-		spin_unlock_irqrestore(
+-			&per_cpu(blocked_vcpu_on_cpu_lock,
+-			vcpu->pre_pcpu), flags);
+-		vcpu->pre_pcpu = -1;
+-	}
++	WARN_ON(irqs_disabled());
++	local_irq_disable();
++	__pi_post_block(vcpu);
++	local_irq_enable();
+ }
+ 
+ static void vmx_post_block(struct kvm_vcpu *vcpu)
+@@ -11547,7 +11556,7 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
+ 	struct kvm_lapic_irq irq;
+ 	struct kvm_vcpu *vcpu;
+ 	struct vcpu_data vcpu_info;
+-	int idx, ret = -EINVAL;
++	int idx, ret = 0;
+ 
+ 	if (!kvm_arch_has_assigned_device(kvm) ||
+ 		!irq_remapping_cap(IRQ_POSTING_CAP) ||
+@@ -11556,7 +11565,12 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
+ 
+ 	idx = srcu_read_lock(&kvm->irq_srcu);
+ 	irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
+-	BUG_ON(guest_irq >= irq_rt->nr_rt_entries);
++	if (guest_irq >= irq_rt->nr_rt_entries ||
++	    hlist_empty(&irq_rt->map[guest_irq])) {
++		pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n",
++			     guest_irq, irq_rt->nr_rt_entries);
++		goto out;
++	}
+ 
+ 	hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
+ 		if (e->type != KVM_IRQ_ROUTING_MSI)
+@@ -11599,12 +11613,8 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
+ 
+ 		if (set)
+ 			ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
+-		else {
+-			/* suppress notification event before unposting */
+-			pi_set_sn(vcpu_to_pi_desc(vcpu));
++		else
+ 			ret = irq_set_vcpu_affinity(host_irq, NULL);
+-			pi_clear_sn(vcpu_to_pi_desc(vcpu));
+-		}
+ 
+ 		if (ret < 0) {
+ 			printk(KERN_INFO "%s: failed to update PI IRTE\n",
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index 2a1fa10c6a98..955be01dd9cc 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -192,8 +192,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
+  * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
+  *	     faulted on a pte with its pkey=4.
+  */
+-static void fill_sig_info_pkey(int si_code, siginfo_t *info,
+-		struct vm_area_struct *vma)
++static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey)
+ {
+ 	/* This is effectively an #ifdef */
+ 	if (!boot_cpu_has(X86_FEATURE_OSPKE))
+@@ -209,7 +208,7 @@ static void fill_sig_info_pkey(int si_code, siginfo_t *info,
+ 	 * valid VMA, so we should never reach this without a
+ 	 * valid VMA.
+ 	 */
+-	if (!vma) {
++	if (!pkey) {
+ 		WARN_ONCE(1, "PKU fault with no VMA passed in");
+ 		info->si_pkey = 0;
+ 		return;
+@@ -219,13 +218,12 @@ static void fill_sig_info_pkey(int si_code, siginfo_t *info,
+ 	 * absolutely guranteed to be 100% accurate because of
+ 	 * the race explained above.
+ 	 */
+-	info->si_pkey = vma_pkey(vma);
++	info->si_pkey = *pkey;
+ }
+ 
+ static void
+ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
+-		     struct task_struct *tsk, struct vm_area_struct *vma,
+-		     int fault)
++		     struct task_struct *tsk, u32 *pkey, int fault)
+ {
+ 	unsigned lsb = 0;
+ 	siginfo_t info;
+@@ -240,7 +238,7 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
+ 		lsb = PAGE_SHIFT;
+ 	info.si_addr_lsb = lsb;
+ 
+-	fill_sig_info_pkey(si_code, &info, vma);
++	fill_sig_info_pkey(si_code, &info, pkey);
+ 
+ 	force_sig_info(si_signo, &info, tsk);
+ }
+@@ -758,8 +756,6 @@ no_context(struct pt_regs *regs, unsigned long error_code,
+ 	struct task_struct *tsk = current;
+ 	unsigned long flags;
+ 	int sig;
+-	/* No context means no VMA to pass down */
+-	struct vm_area_struct *vma = NULL;
+ 
+ 	/* Are we prepared to handle this kernel fault? */
+ 	if (fixup_exception(regs, X86_TRAP_PF)) {
+@@ -784,7 +780,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
+ 
+ 			/* XXX: hwpoison faults will set the wrong code. */
+ 			force_sig_info_fault(signal, si_code, address,
+-					     tsk, vma, 0);
++					     tsk, NULL, 0);
+ 		}
+ 
+ 		/*
+@@ -893,8 +889,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
+ 
+ static void
+ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+-		       unsigned long address, struct vm_area_struct *vma,
+-		       int si_code)
++		       unsigned long address, u32 *pkey, int si_code)
+ {
+ 	struct task_struct *tsk = current;
+ 
+@@ -942,7 +937,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+ 		tsk->thread.error_code	= error_code;
+ 		tsk->thread.trap_nr	= X86_TRAP_PF;
+ 
+-		force_sig_info_fault(SIGSEGV, si_code, address, tsk, vma, 0);
++		force_sig_info_fault(SIGSEGV, si_code, address, tsk, pkey, 0);
+ 
+ 		return;
+ 	}
+@@ -955,9 +950,9 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+ 
+ static noinline void
+ bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+-		     unsigned long address, struct vm_area_struct *vma)
++		     unsigned long address, u32 *pkey)
+ {
+-	__bad_area_nosemaphore(regs, error_code, address, vma, SEGV_MAPERR);
++	__bad_area_nosemaphore(regs, error_code, address, pkey, SEGV_MAPERR);
+ }
+ 
+ static void
+@@ -965,6 +960,10 @@ __bad_area(struct pt_regs *regs, unsigned long error_code,
+ 	   unsigned long address,  struct vm_area_struct *vma, int si_code)
+ {
+ 	struct mm_struct *mm = current->mm;
++	u32 pkey;
++
++	if (vma)
++		pkey = vma_pkey(vma);
+ 
+ 	/*
+ 	 * Something tried to access memory that isn't in our memory map..
+@@ -972,7 +971,8 @@ __bad_area(struct pt_regs *regs, unsigned long error_code,
+ 	 */
+ 	up_read(&mm->mmap_sem);
+ 
+-	__bad_area_nosemaphore(regs, error_code, address, vma, si_code);
++	__bad_area_nosemaphore(regs, error_code, address,
++			       (vma) ? &pkey : NULL, si_code);
+ }
+ 
+ static noinline void
+@@ -1015,7 +1015,7 @@ bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
+ 
+ static void
+ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
+-	  struct vm_area_struct *vma, unsigned int fault)
++	  u32 *pkey, unsigned int fault)
+ {
+ 	struct task_struct *tsk = current;
+ 	int code = BUS_ADRERR;
+@@ -1042,13 +1042,12 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
+ 		code = BUS_MCEERR_AR;
+ 	}
+ #endif
+-	force_sig_info_fault(SIGBUS, code, address, tsk, vma, fault);
++	force_sig_info_fault(SIGBUS, code, address, tsk, pkey, fault);
+ }
+ 
+ static noinline void
+ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
+-	       unsigned long address, struct vm_area_struct *vma,
+-	       unsigned int fault)
++	       unsigned long address, u32 *pkey, unsigned int fault)
+ {
+ 	if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
+ 		no_context(regs, error_code, address, 0, 0);
+@@ -1072,9 +1071,9 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
+ 	} else {
+ 		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
+ 			     VM_FAULT_HWPOISON_LARGE))
+-			do_sigbus(regs, error_code, address, vma, fault);
++			do_sigbus(regs, error_code, address, pkey, fault);
+ 		else if (fault & VM_FAULT_SIGSEGV)
+-			bad_area_nosemaphore(regs, error_code, address, vma);
++			bad_area_nosemaphore(regs, error_code, address, pkey);
+ 		else
+ 			BUG();
+ 	}
+@@ -1268,6 +1267,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
+ 	struct mm_struct *mm;
+ 	int fault, major = 0;
+ 	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
++	u32 pkey;
+ 
+ 	tsk = current;
+ 	mm = tsk->mm;
+@@ -1468,9 +1468,10 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
+ 		return;
+ 	}
+ 
++	pkey = vma_pkey(vma);
+ 	up_read(&mm->mmap_sem);
+ 	if (unlikely(fault & VM_FAULT_ERROR)) {
+-		mm_fault_error(regs, error_code, address, vma, fault);
++		mm_fault_error(regs, error_code, address, &pkey, fault);
+ 		return;
+ 	}
+ 
+diff --git a/block/bsg-lib.c b/block/bsg-lib.c
+index dd56d7460cb9..c587c71d78af 100644
+--- a/block/bsg-lib.c
++++ b/block/bsg-lib.c
+@@ -154,7 +154,6 @@ static int bsg_prepare_job(struct device *dev, struct request *req)
+ failjob_rls_rqst_payload:
+ 	kfree(job->request_payload.sg_list);
+ failjob_rls_job:
+-	kfree(job);
+ 	return -ENOMEM;
+ }
+ 
+diff --git a/crypto/drbg.c b/crypto/drbg.c
+index 633a88e93ab0..70018397e59a 100644
+--- a/crypto/drbg.c
++++ b/crypto/drbg.c
+@@ -1133,10 +1133,10 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg)
+ {
+ 	if (!drbg)
+ 		return;
+-	kzfree(drbg->V);
+-	drbg->Vbuf = NULL;
+-	kzfree(drbg->C);
+-	drbg->Cbuf = NULL;
++	kzfree(drbg->Vbuf);
++	drbg->V = NULL;
++	kzfree(drbg->Cbuf);
++	drbg->C = NULL;
+ 	kzfree(drbg->scratchpadbuf);
+ 	drbg->scratchpadbuf = NULL;
+ 	drbg->reseed_ctr = 0;
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index c99f8730de82..6ce97fc6d22c 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -1835,10 +1835,13 @@ void device_pm_check_callbacks(struct device *dev)
+ {
+ 	spin_lock_irq(&dev->power.lock);
+ 	dev->power.no_pm_callbacks =
+-		(!dev->bus || pm_ops_is_empty(dev->bus->pm)) &&
+-		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
++		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
++		 !dev->bus->suspend && !dev->bus->resume)) &&
++		(!dev->class || (pm_ops_is_empty(dev->class->pm) &&
++		 !dev->class->suspend && !dev->class->resume)) &&
+ 		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
+ 		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
+-		(!dev->driver || pm_ops_is_empty(dev->driver->pm));
++		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
++		 !dev->driver->suspend && !dev->driver->resume));
+ 	spin_unlock_irq(&dev->power.lock);
+ }
+diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
+index a8cc14fd8ae4..a6de32530693 100644
+--- a/drivers/base/power/opp/core.c
++++ b/drivers/base/power/opp/core.c
+@@ -1581,6 +1581,9 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
+ 
+ 	opp->available = availability_req;
+ 
++	dev_pm_opp_get(opp);
++	mutex_unlock(&opp_table->lock);
++
+ 	/* Notify the change of the OPP availability */
+ 	if (availability_req)
+ 		blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE,
+@@ -1589,8 +1592,12 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
+ 		blocking_notifier_call_chain(&opp_table->head,
+ 					     OPP_EVENT_DISABLE, opp);
+ 
++	dev_pm_opp_put(opp);
++	goto put_table;
++
+ unlock:
+ 	mutex_unlock(&opp_table->lock);
++put_table:
+ 	dev_pm_opp_put_opp_table(opp_table);
+ 	return r;
+ }
+diff --git a/drivers/block/brd.c b/drivers/block/brd.c
+index 104b71c0490d..b7dce4e3f5ff 100644
+--- a/drivers/block/brd.c
++++ b/drivers/block/brd.c
+@@ -339,7 +339,7 @@ static long __brd_direct_access(struct brd_device *brd, pgoff_t pgoff,
+ 
+ 	if (!brd)
+ 		return -ENODEV;
+-	page = brd_insert_page(brd, PFN_PHYS(pgoff) / 512);
++	page = brd_insert_page(brd, (sector_t)pgoff << PAGE_SECTORS_SHIFT);
+ 	if (!page)
+ 		return -ENOSPC;
+ 	*kaddr = page_address(page);
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index 79791c690858..dff88838dce7 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -1756,9 +1756,9 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
+ 		req_ctx->swinit = 0;
+ 	} else {
+ 		desc->ptr[1] = zero_entry;
+-		/* Indicate next op is not the first. */
+-		req_ctx->first = 0;
+ 	}
++	/* Indicate next op is not the first. */
++	req_ctx->first = 0;
+ 
+ 	/* HMAC key */
+ 	if (ctx->keylen)
+@@ -1769,7 +1769,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
+ 
+ 	sg_count = edesc->src_nents ?: 1;
+ 	if (is_sec1 && sg_count > 1)
+-		sg_copy_to_buffer(areq->src, sg_count, edesc->buf, length);
++		sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
+ 	else
+ 		sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
+ 				      DMA_TO_DEVICE);
+@@ -3057,7 +3057,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
+ 		t_alg->algt.alg.hash.final = ahash_final;
+ 		t_alg->algt.alg.hash.finup = ahash_finup;
+ 		t_alg->algt.alg.hash.digest = ahash_digest;
+-		t_alg->algt.alg.hash.setkey = ahash_setkey;
++		if (!strncmp(alg->cra_name, "hmac", 4))
++			t_alg->algt.alg.hash.setkey = ahash_setkey;
+ 		t_alg->algt.alg.hash.import = ahash_import;
+ 		t_alg->algt.alg.hash.export = ahash_export;
+ 
+diff --git a/drivers/dax/super.c b/drivers/dax/super.c
+index 938eb4868f7f..8b458f1b30c7 100644
+--- a/drivers/dax/super.c
++++ b/drivers/dax/super.c
+@@ -189,8 +189,10 @@ static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
+ 	if (!dax_dev)
+ 		return 0;
+ 
+-	if (a == &dev_attr_write_cache.attr && !dax_dev->ops->flush)
++#ifndef CONFIG_ARCH_HAS_PMEM_API
++	if (a == &dev_attr_write_cache.attr)
+ 		return 0;
++#endif
+ 	return a->mode;
+ }
+ 
+@@ -255,18 +257,23 @@ size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
+ }
+ EXPORT_SYMBOL_GPL(dax_copy_from_iter);
+ 
+-void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
+-		size_t size)
++#ifdef CONFIG_ARCH_HAS_PMEM_API
++void arch_wb_cache_pmem(void *addr, size_t size);
++void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
+ {
+-	if (!dax_alive(dax_dev))
++	if (unlikely(!dax_alive(dax_dev)))
+ 		return;
+ 
+-	if (!test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags))
++	if (unlikely(!test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags)))
+ 		return;
+ 
+-	if (dax_dev->ops->flush)
+-		dax_dev->ops->flush(dax_dev, pgoff, addr, size);
++	arch_wb_cache_pmem(addr, size);
+ }
++#else
++void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
++{
++}
++#endif
+ EXPORT_SYMBOL_GPL(dax_flush);
+ 
+ void dax_write_cache(struct dax_device *dax_dev, bool wc)
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+index 5173ca1fd159..5e371abf3633 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+@@ -636,7 +636,194 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
+ 				NUM_BANKS(ADDR_SURF_2_BANK);
+ 		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+ 			WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]);
+-	} else if (adev->asic_type == CHIP_OLAND || adev->asic_type == CHIP_HAINAN) {
++	} else if (adev->asic_type == CHIP_OLAND) {
++		tilemode[0] =   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
++				NUM_BANKS(ADDR_SURF_16_BANK) |
++				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
++		tilemode[1] =   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
++				NUM_BANKS(ADDR_SURF_16_BANK) |
++				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
++		tilemode[2] =   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++				NUM_BANKS(ADDR_SURF_16_BANK) |
++				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
++		tilemode[3] =   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
++				NUM_BANKS(ADDR_SURF_16_BANK) |
++				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
++		tilemode[4] =   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++				ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
++				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
++				NUM_BANKS(ADDR_SURF_16_BANK) |
++				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
++		tilemode[5] =   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++				TILE_SPLIT(split_equal_to_row_size) |
++				NUM_BANKS(ADDR_SURF_16_BANK) |
++				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
++		tilemode[6] =   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++				TILE_SPLIT(split_equal_to_row_size) |
++				NUM_BANKS(ADDR_SURF_16_BANK) |
++				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
++				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
++		tilemode[7] =   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++				TILE_SPLIT(split_equal_to_row_size) |
++				NUM_BANKS(ADDR_SURF_16_BANK) |
++				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
++		tilemode[8] =   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
++				ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
++				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
++				NUM_BANKS(ADDR_SURF_16_BANK) |
++				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
++		tilemode[9] =   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
++				ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
++				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
++				NUM_BANKS(ADDR_SURF_16_BANK) |
++				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
++		tilemode[10] =  MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
++				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++				NUM_BANKS(ADDR_SURF_16_BANK) |
++				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
++		tilemode[11] =  MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
++				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++				NUM_BANKS(ADDR_SURF_16_BANK) |
++				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
++		tilemode[12] =  MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
++				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
++				NUM_BANKS(ADDR_SURF_16_BANK) |
++				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
++				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
++		tilemode[13] =  MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++				ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
++				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
++				NUM_BANKS(ADDR_SURF_16_BANK) |
++				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
++		tilemode[14] =  MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++				NUM_BANKS(ADDR_SURF_16_BANK) |
++				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
++		tilemode[15] =  MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++				NUM_BANKS(ADDR_SURF_16_BANK) |
++				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
++		tilemode[16] =  MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
++				NUM_BANKS(ADDR_SURF_16_BANK) |
++				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
++				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
++		tilemode[17] =  MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++				TILE_SPLIT(split_equal_to_row_size) |
++				NUM_BANKS(ADDR_SURF_16_BANK) |
++				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
++				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
++		tilemode[21] =  MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++				NUM_BANKS(ADDR_SURF_16_BANK) |
++				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
++				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
++		tilemode[22] =  MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++				NUM_BANKS(ADDR_SURF_16_BANK) |
++				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
++		tilemode[23] =  MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++				NUM_BANKS(ADDR_SURF_16_BANK) |
++				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
++		tilemode[24] =  MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
++				NUM_BANKS(ADDR_SURF_16_BANK) |
++				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
++				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
++		tilemode[25] =  MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
++				NUM_BANKS(ADDR_SURF_8_BANK) |
++				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
++				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1);
++		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
++			WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]);
++	} else if (adev->asic_type == CHIP_HAINAN) {
+ 		tilemode[0] =   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ 				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ 				PIPE_CONFIG(ADDR_SURF_P2) |
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+index 9a3bea738330..87b95eeedd9e 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+@@ -551,12 +551,15 @@ static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
+ void etnaviv_gem_free_object(struct drm_gem_object *obj)
+ {
+ 	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
++	struct etnaviv_drm_private *priv = obj->dev->dev_private;
+ 	struct etnaviv_vram_mapping *mapping, *tmp;
+ 
+ 	/* object should not be active */
+ 	WARN_ON(is_active(etnaviv_obj));
+ 
++	mutex_lock(&priv->gem_lock);
+ 	list_del(&etnaviv_obj->gem_node);
++	mutex_unlock(&priv->gem_lock);
+ 
+ 	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
+ 				 obj_node) {
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+index 6463fc2c736f..b95362186f9c 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+@@ -445,8 +445,10 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
+ 	cmdbuf->user_size = ALIGN(args->stream_size, 8);
+ 
+ 	ret = etnaviv_gpu_submit(gpu, submit, cmdbuf);
+-	if (ret == 0)
+-		cmdbuf = NULL;
++	if (ret)
++		goto out;
++
++	cmdbuf = NULL;
+ 
+ 	if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
+ 		/*
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
+index 242bd50faa26..bcc94e559cd0 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
+@@ -176,6 +176,7 @@ static int exynos_drm_suspend(struct device *dev)
+ 	if (pm_runtime_suspended(dev) || !drm_dev)
+ 		return 0;
+ 
++	drm_modeset_lock_all(drm_dev);
+ 	drm_connector_list_iter_begin(drm_dev, &conn_iter);
+ 	drm_for_each_connector_iter(connector, &conn_iter) {
+ 		int old_dpms = connector->dpms;
+@@ -187,6 +188,7 @@ static int exynos_drm_suspend(struct device *dev)
+ 		connector->dpms = old_dpms;
+ 	}
+ 	drm_connector_list_iter_end(&conn_iter);
++	drm_modeset_unlock_all(drm_dev);
+ 
+ 	return 0;
+ }
+@@ -200,6 +202,7 @@ static int exynos_drm_resume(struct device *dev)
+ 	if (pm_runtime_suspended(dev) || !drm_dev)
+ 		return 0;
+ 
++	drm_modeset_lock_all(drm_dev);
+ 	drm_connector_list_iter_begin(drm_dev, &conn_iter);
+ 	drm_for_each_connector_iter(connector, &conn_iter) {
+ 		if (connector->funcs->dpms) {
+@@ -210,6 +213,7 @@ static int exynos_drm_resume(struct device *dev)
+ 		}
+ 	}
+ 	drm_connector_list_iter_end(&conn_iter);
++	drm_modeset_unlock_all(drm_dev);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
+index 40af17ec6312..ff3154fe6588 100644
+--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
++++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
+@@ -197,78 +197,65 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
+ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
+ 	void *p_data, unsigned int bytes)
+ {
+-	unsigned int bar_index =
+-		(rounddown(offset, 8) % PCI_BASE_ADDRESS_0) / 8;
+ 	u32 new = *(u32 *)(p_data);
+ 	bool lo = IS_ALIGNED(offset, 8);
+ 	u64 size;
+ 	int ret = 0;
+ 	bool mmio_enabled =
+ 		vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY;
++	struct intel_vgpu_pci_bar *bars = vgpu->cfg_space.bar;
+ 
+-	if (WARN_ON(bar_index >= INTEL_GVT_PCI_BAR_MAX))
+-		return -EINVAL;
+-
++	/*
++	 * Power-up software can determine how much address
++	 * space the device requires by writing a value of
++	 * all 1's to the register and then reading the value
++	 * back. The device will return 0's in all don't-care
++	 * address bits.
++	 */
+ 	if (new == 0xffffffff) {
+-		/*
+-		 * Power-up software can determine how much address
+-		 * space the device requires by writing a value of
+-		 * all 1's to the register and then reading the value
+-		 * back. The device will return 0's in all don't-care
+-		 * address bits.
+-		 */
+-		size = vgpu->cfg_space.bar[bar_index].size;
+-		if (lo) {
+-			new = rounddown(new, size);
+-		} else {
+-			u32 val = vgpu_cfg_space(vgpu)[rounddown(offset, 8)];
+-			/* for 32bit mode bar it returns all-0 in upper 32
+-			 * bit, for 64bit mode bar it will calculate the
+-			 * size with lower 32bit and return the corresponding
+-			 * value
++		switch (offset) {
++		case PCI_BASE_ADDRESS_0:
++		case PCI_BASE_ADDRESS_1:
++			size = ~(bars[INTEL_GVT_PCI_BAR_GTTMMIO].size -1);
++			intel_vgpu_write_pci_bar(vgpu, offset,
++						size >> (lo ? 0 : 32), lo);
++			/*
++			 * Untrap the BAR, since guest hasn't configured a
++			 * valid GPA
+ 			 */
+-			if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
+-				new &= (~(size-1)) >> 32;
+-			else
+-				new = 0;
+-		}
+-		/*
+-		 * Unmapp & untrap the BAR, since guest hasn't configured a
+-		 * valid GPA
+-		 */
+-		switch (bar_index) {
+-		case INTEL_GVT_PCI_BAR_GTTMMIO:
+ 			ret = trap_gttmmio(vgpu, false);
+ 			break;
+-		case INTEL_GVT_PCI_BAR_APERTURE:
++		case PCI_BASE_ADDRESS_2:
++		case PCI_BASE_ADDRESS_3:
++			size = ~(bars[INTEL_GVT_PCI_BAR_APERTURE].size -1);
++			intel_vgpu_write_pci_bar(vgpu, offset,
++						size >> (lo ? 0 : 32), lo);
+ 			ret = map_aperture(vgpu, false);
+ 			break;
++		default:
++			/* Unimplemented BARs */
++			intel_vgpu_write_pci_bar(vgpu, offset, 0x0, false);
+ 		}
+-		intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
+ 	} else {
+-		/*
+-		 * Unmapp & untrap the old BAR first, since guest has
+-		 * re-configured the BAR
+-		 */
+-		switch (bar_index) {
+-		case INTEL_GVT_PCI_BAR_GTTMMIO:
+-			ret = trap_gttmmio(vgpu, false);
++		switch (offset) {
++		case PCI_BASE_ADDRESS_0:
++		case PCI_BASE_ADDRESS_1:
++			/*
++			 * Untrap the old BAR first, since guest has
++			 * re-configured the BAR
++			 */
++			trap_gttmmio(vgpu, false);
++			intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
++			ret = trap_gttmmio(vgpu, mmio_enabled);
+ 			break;
+-		case INTEL_GVT_PCI_BAR_APERTURE:
+-			ret = map_aperture(vgpu, false);
++		case PCI_BASE_ADDRESS_2:
++		case PCI_BASE_ADDRESS_3:
++			map_aperture(vgpu, false);
++			intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
++			ret = map_aperture(vgpu, mmio_enabled);
+ 			break;
+-		}
+-		intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
+-		/* Track the new BAR */
+-		if (mmio_enabled) {
+-			switch (bar_index) {
+-			case INTEL_GVT_PCI_BAR_GTTMMIO:
+-				ret = trap_gttmmio(vgpu, true);
+-				break;
+-			case INTEL_GVT_PCI_BAR_APERTURE:
+-				ret = map_aperture(vgpu, true);
+-				break;
+-			}
++		default:
++			intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
+ 		}
+ 	}
+ 	return ret;
+@@ -299,10 +286,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
+ 	}
+ 
+ 	switch (rounddown(offset, 4)) {
+-	case PCI_BASE_ADDRESS_0:
+-	case PCI_BASE_ADDRESS_1:
+-	case PCI_BASE_ADDRESS_2:
+-	case PCI_BASE_ADDRESS_3:
++	case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
+ 		if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ 			return -EINVAL;
+ 		return emulate_pci_bar_write(vgpu, offset, p_data, bytes);
+@@ -344,7 +328,6 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
+ 	struct intel_gvt *gvt = vgpu->gvt;
+ 	const struct intel_gvt_device_info *info = &gvt->device_info;
+ 	u16 *gmch_ctl;
+-	int i;
+ 
+ 	memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
+ 	       info->cfg_space_size);
+@@ -371,13 +354,13 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
+ 	 */
+ 	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
+ 	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
++	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_4, 0, 8);
+ 	memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
+ 
+-	for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
+-		vgpu->cfg_space.bar[i].size = pci_resource_len(
+-					      gvt->dev_priv->drm.pdev, i * 2);
+-		vgpu->cfg_space.bar[i].tracked = false;
+-	}
++	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size =
++				pci_resource_len(gvt->dev_priv->drm.pdev, 0);
++	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
++				pci_resource_len(gvt->dev_priv->drm.pdev, 2);
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
+index 50ec836da8b1..4b8f6e070b5f 100644
+--- a/drivers/gpu/drm/i915/intel_dsi.c
++++ b/drivers/gpu/drm/i915/intel_dsi.c
+@@ -892,8 +892,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder,
+ 			      struct intel_crtc_state *old_crtc_state,
+ 			      struct drm_connector_state *old_conn_state)
+ {
+-	struct drm_device *dev = encoder->base.dev;
+-	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ 	enum port port;
+ 
+@@ -902,15 +900,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder,
+ 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
+ 	intel_panel_disable_backlight(old_conn_state);
+ 
+-	/*
+-	 * Disable Device ready before the port shutdown in order
+-	 * to avoid split screen
+-	 */
+-	if (IS_BROXTON(dev_priv)) {
+-		for_each_dsi_port(port, intel_dsi->ports)
+-			I915_WRITE(MIPI_DEVICE_READY(port), 0);
+-	}
+-
+ 	/*
+ 	 * According to the spec we should send SHUTDOWN before
+ 	 * MIPI_SEQ_DISPLAY_OFF only for v3+ VBTs, but field testing
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index 997131d58c7f..ffc10cadcf34 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -1663,7 +1663,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
+ 	radeon_agp_suspend(rdev);
+ 
+ 	pci_save_state(dev->pdev);
+-	if (freeze && rdev->family >= CHIP_CEDAR) {
++	if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) {
+ 		rdev->asic->asic_reset(rdev, true);
+ 		pci_restore_state(dev->pdev);
+ 	} else if (suspend) {
+diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
+index e49b34c3b136..ca846fbe16c4 100644
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -2333,9 +2333,14 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
+ 	unsigned int stid = GET_TID(rpl);
+ 	struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
+ 
++	if (!ep) {
++		pr_debug("%s stid %d lookup failure!\n", __func__, stid);
++		goto out;
++	}
+ 	pr_debug("%s ep %p\n", __func__, ep);
+ 	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
+ 	c4iw_put_ep(&ep->com);
++out:
+ 	return 0;
+ }
+ 
+@@ -2594,9 +2599,9 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
+ 	c4iw_put_ep(&child_ep->com);
+ reject:
+ 	reject_cr(dev, hwtid, skb);
++out:
+ 	if (parent_ep)
+ 		c4iw_put_ep(&parent_ep->com);
+-out:
+ 	return 0;
+ }
+ 
+@@ -3458,7 +3463,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
+ 		cm_id->provider_data = ep;
+ 		goto out;
+ 	}
+-
++	remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
+ 	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
+ 			ep->com.local_addr.ss_family);
+ fail2:
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+index 2e075377242e..6cd61638b441 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+@@ -1000,19 +1000,6 @@ static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
+ 		 */
+ 		priv->dev->broadcast[8] = priv->pkey >> 8;
+ 		priv->dev->broadcast[9] = priv->pkey & 0xff;
+-
+-		/*
+-		 * Update the broadcast address in the priv->broadcast object,
+-		 * in case it already exists, otherwise no one will do that.
+-		 */
+-		if (priv->broadcast) {
+-			spin_lock_irq(&priv->lock);
+-			memcpy(priv->broadcast->mcmember.mgid.raw,
+-			       priv->dev->broadcast + 4,
+-			sizeof(union ib_gid));
+-			spin_unlock_irq(&priv->lock);
+-		}
+-
+ 		return 0;
+ 	}
+ 
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 3acce09bba35..240941eb3f68 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -1697,7 +1697,11 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
+ 
+ 	if (need_sync_io) {
+ 		wait_for_completion_io(&read_comp);
+-		integrity_metadata(&dio->work);
++		if (likely(!bio->bi_status))
++			integrity_metadata(&dio->work);
++		else
++			dec_in_flight(dio);
++
+ 	} else {
+ 		INIT_WORK(&dio->work, integrity_metadata);
+ 		queue_work(ic->metadata_wq, &dio->work);
+diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
+index 41971a090e34..208800610af8 100644
+--- a/drivers/md/dm-linear.c
++++ b/drivers/md/dm-linear.c
+@@ -184,20 +184,6 @@ static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
+ 	return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
+ }
+ 
+-static void linear_dax_flush(struct dm_target *ti, pgoff_t pgoff, void *addr,
+-		size_t size)
+-{
+-	struct linear_c *lc = ti->private;
+-	struct block_device *bdev = lc->dev->bdev;
+-	struct dax_device *dax_dev = lc->dev->dax_dev;
+-	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
+-
+-	dev_sector = linear_map_sector(ti, sector);
+-	if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(size, PAGE_SIZE), &pgoff))
+-		return;
+-	dax_flush(dax_dev, pgoff, addr, size);
+-}
+-
+ static struct target_type linear_target = {
+ 	.name   = "linear",
+ 	.version = {1, 4, 0},
+@@ -212,7 +198,6 @@ static struct target_type linear_target = {
+ 	.iterate_devices = linear_iterate_devices,
+ 	.direct_access = linear_dax_direct_access,
+ 	.dax_copy_from_iter = linear_dax_copy_from_iter,
+-	.dax_flush = linear_dax_flush,
+ };
+ 
+ int __init dm_linear_init(void)
+diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
+index a0375530b07f..1690bb299b3f 100644
+--- a/drivers/md/dm-stripe.c
++++ b/drivers/md/dm-stripe.c
+@@ -351,25 +351,6 @@ static size_t stripe_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
+ 	return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
+ }
+ 
+-static void stripe_dax_flush(struct dm_target *ti, pgoff_t pgoff, void *addr,
+-		size_t size)
+-{
+-	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
+-	struct stripe_c *sc = ti->private;
+-	struct dax_device *dax_dev;
+-	struct block_device *bdev;
+-	uint32_t stripe;
+-
+-	stripe_map_sector(sc, sector, &stripe, &dev_sector);
+-	dev_sector += sc->stripe[stripe].physical_start;
+-	dax_dev = sc->stripe[stripe].dev->dax_dev;
+-	bdev = sc->stripe[stripe].dev->bdev;
+-
+-	if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(size, PAGE_SIZE), &pgoff))
+-		return;
+-	dax_flush(dax_dev, pgoff, addr, size);
+-}
+-
+ /*
+  * Stripe status:
+  *
+@@ -491,7 +472,6 @@ static struct target_type stripe_target = {
+ 	.io_hints = stripe_io_hints,
+ 	.direct_access = stripe_dax_direct_access,
+ 	.dax_copy_from_iter = stripe_dax_copy_from_iter,
+-	.dax_flush = stripe_dax_flush,
+ };
+ 
+ int __init dm_stripe_init(void)
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index d669fddd9290..825eaffc24da 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -987,24 +987,6 @@ static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
+ 	return ret;
+ }
+ 
+-static void dm_dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
+-		size_t size)
+-{
+-	struct mapped_device *md = dax_get_private(dax_dev);
+-	sector_t sector = pgoff * PAGE_SECTORS;
+-	struct dm_target *ti;
+-	int srcu_idx;
+-
+-	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
+-
+-	if (!ti)
+-		goto out;
+-	if (ti->type->dax_flush)
+-		ti->type->dax_flush(ti, pgoff, addr, size);
+- out:
+-	dm_put_live_table(md, srcu_idx);
+-}
+-
+ /*
+  * A target may call dm_accept_partial_bio only from the map routine.  It is
+  * allowed for all bio types except REQ_PREFLUSH.
+@@ -2992,7 +2974,6 @@ static const struct block_device_operations dm_blk_dops = {
+ static const struct dax_operations dm_dax_ops = {
+ 	.direct_access = dm_dax_direct_access,
+ 	.copy_from_iter = dm_dax_copy_from_iter,
+-	.flush = dm_dax_flush,
+ };
+ 
+ /*
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index b01e458d31e9..0d993ea63043 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -266,6 +266,37 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
+  * call has finished, the bio has been linked into some internal structure
+  * and so is visible to ->quiesce(), so we don't need the refcount any more.
+  */
++void md_handle_request(struct mddev *mddev, struct bio *bio)
++{
++check_suspended:
++	rcu_read_lock();
++	if (mddev->suspended) {
++		DEFINE_WAIT(__wait);
++		for (;;) {
++			prepare_to_wait(&mddev->sb_wait, &__wait,
++					TASK_UNINTERRUPTIBLE);
++			if (!mddev->suspended)
++				break;
++			rcu_read_unlock();
++			schedule();
++			rcu_read_lock();
++		}
++		finish_wait(&mddev->sb_wait, &__wait);
++	}
++	atomic_inc(&mddev->active_io);
++	rcu_read_unlock();
++
++	if (!mddev->pers->make_request(mddev, bio)) {
++		atomic_dec(&mddev->active_io);
++		wake_up(&mddev->sb_wait);
++		goto check_suspended;
++	}
++
++	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
++		wake_up(&mddev->sb_wait);
++}
++EXPORT_SYMBOL(md_handle_request);
++
+ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
+ {
+ 	const int rw = bio_data_dir(bio);
+@@ -285,23 +316,6 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
+ 		bio_endio(bio);
+ 		return BLK_QC_T_NONE;
+ 	}
+-check_suspended:
+-	rcu_read_lock();
+-	if (mddev->suspended) {
+-		DEFINE_WAIT(__wait);
+-		for (;;) {
+-			prepare_to_wait(&mddev->sb_wait, &__wait,
+-					TASK_UNINTERRUPTIBLE);
+-			if (!mddev->suspended)
+-				break;
+-			rcu_read_unlock();
+-			schedule();
+-			rcu_read_lock();
+-		}
+-		finish_wait(&mddev->sb_wait, &__wait);
+-	}
+-	atomic_inc(&mddev->active_io);
+-	rcu_read_unlock();
+ 
+ 	/*
+ 	 * save the sectors now since our bio can
+@@ -310,20 +324,14 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
+ 	sectors = bio_sectors(bio);
+ 	/* bio could be mergeable after passing to underlayer */
+ 	bio->bi_opf &= ~REQ_NOMERGE;
+-	if (!mddev->pers->make_request(mddev, bio)) {
+-		atomic_dec(&mddev->active_io);
+-		wake_up(&mddev->sb_wait);
+-		goto check_suspended;
+-	}
++
++	md_handle_request(mddev, bio);
+ 
+ 	cpu = part_stat_lock();
+ 	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+ 	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
+ 	part_stat_unlock();
+ 
+-	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
+-		wake_up(&mddev->sb_wait);
+-
+ 	return BLK_QC_T_NONE;
+ }
+ 
+@@ -439,16 +447,22 @@ static void md_submit_flush_data(struct work_struct *ws)
+ 	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
+ 	struct bio *bio = mddev->flush_bio;
+ 
++	/*
++	 * must reset flush_bio before calling into md_handle_request to avoid a
++	 * deadlock, because other bios passed md_handle_request suspend check
++	 * could wait for this and below md_handle_request could wait for those
++	 * bios because of suspend check
++	 */
++	mddev->flush_bio = NULL;
++	wake_up(&mddev->sb_wait);
++
+ 	if (bio->bi_iter.bi_size == 0)
+ 		/* an empty barrier - all done */
+ 		bio_endio(bio);
+ 	else {
+ 		bio->bi_opf &= ~REQ_PREFLUSH;
+-		mddev->pers->make_request(mddev, bio);
++		md_handle_request(mddev, bio);
+ 	}
+-
+-	mddev->flush_bio = NULL;
+-	wake_up(&mddev->sb_wait);
+ }
+ 
+ void md_flush_request(struct mddev *mddev, struct bio *bio)
+diff --git a/drivers/md/md.h b/drivers/md/md.h
+index 09db03455801..60b09bea886b 100644
+--- a/drivers/md/md.h
++++ b/drivers/md/md.h
+@@ -686,6 +686,7 @@ extern void md_stop_writes(struct mddev *mddev);
+ extern int md_rdev_init(struct md_rdev *rdev);
+ extern void md_rdev_clear(struct md_rdev *rdev);
+ 
++extern void md_handle_request(struct mddev *mddev, struct bio *bio);
+ extern void mddev_suspend(struct mddev *mddev);
+ extern void mddev_resume(struct mddev *mddev);
+ extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index e13a8ce7f589..fc48813eaa08 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -812,6 +812,14 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
+ 			spin_unlock(&head->batch_head->batch_lock);
+ 			goto unlock_out;
+ 		}
++		/*
++		 * We must assign batch_head of this stripe within the
++		 * batch_lock, otherwise clear_batch_ready of batch head
++		 * stripe could clear BATCH_READY bit of this stripe and
++		 * this stripe->batch_head doesn't get assigned, which
++		 * could confuse clear_batch_ready for this stripe
++		 */
++		sh->batch_head = head->batch_head;
+ 
+ 		/*
+ 		 * at this point, head's BATCH_READY could be cleared, but we
+@@ -819,8 +827,6 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
+ 		 */
+ 		list_add(&sh->batch_list, &head->batch_list);
+ 		spin_unlock(&head->batch_head->batch_lock);
+-
+-		sh->batch_head = head->batch_head;
+ 	} else {
+ 		head->batch_head = head;
+ 		sh->batch_head = head->batch_head;
+@@ -4608,7 +4614,8 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
+ 
+ 		set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
+ 					    (1 << STRIPE_PREREAD_ACTIVE) |
+-					    (1 << STRIPE_DEGRADED)),
++					    (1 << STRIPE_DEGRADED) |
++					    (1 << STRIPE_ON_UNPLUG_LIST)),
+ 			      head_sh->state & (1 << STRIPE_INSYNC));
+ 
+ 		sh->check_state = head_sh->check_state;
+diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
+index affa7370ba82..74c663b1c0a7 100644
+--- a/drivers/mmc/core/queue.c
++++ b/drivers/mmc/core/queue.c
+@@ -242,6 +242,12 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
+ 	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
+ 		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
+ 
++	/*
++	 * mmc_init_request() depends on card->bouncesz so it must be calculated
++	 * before blk_init_allocated_queue() starts allocating requests.
++	 */
++	card->bouncesz = mmc_queue_calc_bouncesz(host);
++
+ 	mq->card = card;
+ 	mq->queue = blk_alloc_queue(GFP_KERNEL);
+ 	if (!mq->queue)
+@@ -265,7 +271,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
+ 	if (mmc_can_erase(card))
+ 		mmc_queue_setup_discard(mq->queue, card);
+ 
+-	card->bouncesz = mmc_queue_calc_bouncesz(host);
+ 	if (card->bouncesz) {
+ 		blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
+ 		blk_queue_max_segments(mq->queue, card->bouncesz / 512);
+diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
+index e1721ac37919..ba8a0f58fe08 100644
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -393,6 +393,7 @@ static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = {
+ 
+ enum {
+ 	INTEL_DSM_FNS		=  0,
++	INTEL_DSM_V18_SWITCH	=  3,
+ 	INTEL_DSM_DRV_STRENGTH	=  9,
+ 	INTEL_DSM_D3_RETUNE	= 10,
+ };
+@@ -558,6 +559,19 @@ static void intel_hs400_enhanced_strobe(struct mmc_host *mmc,
+ 	sdhci_writel(host, val, INTEL_HS400_ES_REG);
+ }
+ 
++static void sdhci_intel_voltage_switch(struct sdhci_host *host)
++{
++	struct sdhci_pci_slot *slot = sdhci_priv(host);
++	struct intel_host *intel_host = sdhci_pci_priv(slot);
++	struct device *dev = &slot->chip->pdev->dev;
++	u32 result = 0;
++	int err;
++
++	err = intel_dsm(intel_host, dev, INTEL_DSM_V18_SWITCH, &result);
++	pr_debug("%s: %s DSM error %d result %u\n",
++		 mmc_hostname(host->mmc), __func__, err, result);
++}
++
+ static const struct sdhci_ops sdhci_intel_byt_ops = {
+ 	.set_clock		= sdhci_set_clock,
+ 	.set_power		= sdhci_intel_set_power,
+@@ -566,6 +580,7 @@ static const struct sdhci_ops sdhci_intel_byt_ops = {
+ 	.reset			= sdhci_reset,
+ 	.set_uhs_signaling	= sdhci_set_uhs_signaling,
+ 	.hw_reset		= sdhci_pci_hw_reset,
++	.voltage_switch		= sdhci_intel_voltage_switch,
+ };
+ 
+ static void byt_read_dsm(struct sdhci_pci_slot *slot)
+diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
+index 5736b0c90b33..a308e707392d 100644
+--- a/drivers/mtd/mtdpart.c
++++ b/drivers/mtd/mtdpart.c
+@@ -581,6 +581,14 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
+ 		slave->mtd.erasesize = parent->erasesize;
+ 	}
+ 
++	/*
++	 * Slave erasesize might differ from the master one if the master
++	 * exposes several regions with different erasesize. Adjust
++	 * wr_alignment accordingly.
++	 */
++	if (!(slave->mtd.flags & MTD_NO_ERASE))
++		wr_alignment = slave->mtd.erasesize;
++
+ 	tmp = slave->offset;
+ 	remainder = do_div(tmp, wr_alignment);
+ 	if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
+diff --git a/drivers/mtd/nand/atmel/pmecc.c b/drivers/mtd/nand/atmel/pmecc.c
+index 8c210a5776bc..24be19fb9591 100644
+--- a/drivers/mtd/nand/atmel/pmecc.c
++++ b/drivers/mtd/nand/atmel/pmecc.c
+@@ -363,7 +363,7 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
+ 	size += (req->ecc.strength + 1) * sizeof(u16);
+ 	/* Reserve space for mu, dmu and delta. */
+ 	size = ALIGN(size, sizeof(s32));
+-	size += (req->ecc.strength + 1) * sizeof(s32);
++	size += (req->ecc.strength + 1) * sizeof(s32) * 3;
+ 
+ 	user = kzalloc(size, GFP_KERNEL);
+ 	if (!user)
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index c8852acc1462..6467ffac9811 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -1362,8 +1362,6 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
+ 				       txi->control.rates,
+ 				       ARRAY_SIZE(txi->control.rates));
+ 
+-	txi->rate_driver_data[0] = channel;
+-
+ 	if (skb->len >= 24 + 8 &&
+ 	    ieee80211_is_probe_resp(hdr->frame_control)) {
+ 		/* fake header transmission time */
+diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
+index 5f1c6756e57c..f49a29abb11f 100644
+--- a/drivers/nvdimm/namespace_devs.c
++++ b/drivers/nvdimm/namespace_devs.c
+@@ -1417,6 +1417,15 @@ static int btt_claim_class(struct device *dev)
+ 		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+ 		struct nd_namespace_index *nsindex;
+ 
++		/*
++		 * If any of the DIMMs do not support labels the only
++		 * possible BTT format is v1.
++		 */
++		if (!ndd) {
++			loop_bitmask = 0;
++			break;
++		}
++
+ 		nsindex = to_namespace_index(ndd, ndd->ns_current);
+ 		if (nsindex == NULL)
+ 			loop_bitmask |= 1;
+diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
+index f7099adaabc0..88c128258760 100644
+--- a/drivers/nvdimm/pmem.c
++++ b/drivers/nvdimm/pmem.c
+@@ -243,16 +243,9 @@ static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
+ 	return copy_from_iter_flushcache(addr, bytes, i);
+ }
+ 
+-static void pmem_dax_flush(struct dax_device *dax_dev, pgoff_t pgoff,
+-		void *addr, size_t size)
+-{
+-	arch_wb_cache_pmem(addr, size);
+-}
+-
+ static const struct dax_operations pmem_dax_ops = {
+ 	.direct_access = pmem_dax_direct_access,
+ 	.copy_from_iter = pmem_copy_from_iter,
+-	.flush = pmem_dax_flush,
+ };
+ 
+ static const struct attribute_group *pmem_attribute_groups[] = {
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index ea892e732268..cdf4c0e471b9 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -1609,18 +1609,16 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
+ 	dev->host_mem_descs = NULL;
+ }
+ 
+-static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
++static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
++		u32 chunk_size)
+ {
+ 	struct nvme_host_mem_buf_desc *descs;
+-	u32 chunk_size, max_entries, len;
++	u32 max_entries, len;
+ 	dma_addr_t descs_dma;
+ 	int i = 0;
+ 	void **bufs;
+ 	u64 size = 0, tmp;
+ 
+-	/* start big and work our way down */
+-	chunk_size = min(preferred, (u64)PAGE_SIZE << MAX_ORDER);
+-retry:
+ 	tmp = (preferred + chunk_size - 1);
+ 	do_div(tmp, chunk_size);
+ 	max_entries = tmp;
+@@ -1647,15 +1645,9 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
+ 		i++;
+ 	}
+ 
+-	if (!size || (min && size < min)) {
+-		dev_warn(dev->ctrl.device,
+-			"failed to allocate host memory buffer.\n");
++	if (!size)
+ 		goto out_free_bufs;
+-	}
+ 
+-	dev_info(dev->ctrl.device,
+-		"allocated %lld MiB host memory buffer.\n",
+-		size >> ilog2(SZ_1M));
+ 	dev->nr_host_mem_descs = i;
+ 	dev->host_mem_size = size;
+ 	dev->host_mem_descs = descs;
+@@ -1676,21 +1668,35 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
+ 	dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs,
+ 			descs_dma);
+ out:
+-	/* try a smaller chunk size if we failed early */
+-	if (chunk_size >= PAGE_SIZE * 2 && (i == 0 || size < min)) {
+-		chunk_size /= 2;
+-		goto retry;
+-	}
+ 	dev->host_mem_descs = NULL;
+ 	return -ENOMEM;
+ }
+ 
+-static void nvme_setup_host_mem(struct nvme_dev *dev)
++static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
++{
++	u32 chunk_size;
++
++	/* start big and work our way down */
++	for (chunk_size = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES);
++	     chunk_size >= PAGE_SIZE * 2;
++	     chunk_size /= 2) {
++		if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) {
++			if (!min || dev->host_mem_size >= min)
++				return 0;
++			nvme_free_host_mem(dev);
++		}
++	}
++
++	return -ENOMEM;
++}
++
++static int nvme_setup_host_mem(struct nvme_dev *dev)
+ {
+ 	u64 max = (u64)max_host_mem_size_mb * SZ_1M;
+ 	u64 preferred = (u64)dev->ctrl.hmpre * 4096;
+ 	u64 min = (u64)dev->ctrl.hmmin * 4096;
+ 	u32 enable_bits = NVME_HOST_MEM_ENABLE;
++	int ret = 0;
+ 
+ 	preferred = min(preferred, max);
+ 	if (min > max) {
+@@ -1698,7 +1704,7 @@ static void nvme_setup_host_mem(struct nvme_dev *dev)
+ 			"min host memory (%lld MiB) above limit (%d MiB).\n",
+ 			min >> ilog2(SZ_1M), max_host_mem_size_mb);
+ 		nvme_free_host_mem(dev);
+-		return;
++		return 0;
+ 	}
+ 
+ 	/*
+@@ -1712,12 +1718,21 @@ static void nvme_setup_host_mem(struct nvme_dev *dev)
+ 	}
+ 
+ 	if (!dev->host_mem_descs) {
+-		if (nvme_alloc_host_mem(dev, min, preferred))
+-			return;
++		if (nvme_alloc_host_mem(dev, min, preferred)) {
++			dev_warn(dev->ctrl.device,
++				"failed to allocate host memory buffer.\n");
++			return 0; /* controller must work without HMB */
++		}
++
++		dev_info(dev->ctrl.device,
++			"allocated %lld MiB host memory buffer.\n",
++			dev->host_mem_size >> ilog2(SZ_1M));
+ 	}
+ 
+-	if (nvme_set_host_mem(dev, enable_bits))
++	ret = nvme_set_host_mem(dev, enable_bits);
++	if (ret)
+ 		nvme_free_host_mem(dev);
++	return ret;
+ }
+ 
+ static int nvme_setup_io_queues(struct nvme_dev *dev)
+@@ -2161,8 +2176,11 @@ static void nvme_reset_work(struct work_struct *work)
+ 				 "unable to allocate dma for dbbuf\n");
+ 	}
+ 
+-	if (dev->ctrl.hmpre)
+-		nvme_setup_host_mem(dev);
++	if (dev->ctrl.hmpre) {
++		result = nvme_setup_host_mem(dev);
++		if (result < 0)
++			goto out;
++	}
+ 
+ 	result = nvme_setup_io_queues(dev);
+ 	if (result)
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index 2f3780b50723..6337bce27c36 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -686,7 +686,7 @@ static ssize_t driver_override_store(struct device *dev,
+ 				     const char *buf, size_t count)
+ {
+ 	struct pci_dev *pdev = to_pci_dev(dev);
+-	char *driver_override, *old = pdev->driver_override, *cp;
++	char *driver_override, *old, *cp;
+ 
+ 	/* We need to keep extra room for a newline */
+ 	if (count >= (PAGE_SIZE - 1))
+@@ -700,12 +700,15 @@ static ssize_t driver_override_store(struct device *dev,
+ 	if (cp)
+ 		*cp = '\0';
+ 
++	device_lock(dev);
++	old = pdev->driver_override;
+ 	if (strlen(driver_override)) {
+ 		pdev->driver_override = driver_override;
+ 	} else {
+ 		kfree(driver_override);
+ 		pdev->driver_override = NULL;
+ 	}
++	device_unlock(dev);
+ 
+ 	kfree(old);
+ 
+@@ -716,8 +719,12 @@ static ssize_t driver_override_show(struct device *dev,
+ 				    struct device_attribute *attr, char *buf)
+ {
+ 	struct pci_dev *pdev = to_pci_dev(dev);
++	ssize_t len;
+ 
+-	return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
++	device_lock(dev);
++	len = snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
++	device_unlock(dev);
++	return len;
+ }
+ static DEVICE_ATTR_RW(driver_override);
+ 
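+
+The driver_override fix above closes a use-after-free race: both the pointer
+swap in store() and the snprintf() in show() now run under device_lock(), so
+a reader can never dereference a string that a concurrent writer has already
+kfree()d. The same publish-under-lock, free-after-unlock shape in plain
+pthreads (a sketch with invented names, not the sysfs code):
+
+    #include <pthread.h>
+    #include <stdio.h>
+    #include <stdlib.h>
+    #include <string.h>
+
+    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
+    static char *override;      /* shared, like pdev->driver_override */
+
+    static void set_override(const char *val)
+    {
+            char *new = (val && *val) ? strdup(val) : NULL;
+            char *old;
+
+            pthread_mutex_lock(&lock);
+            old = override;     /* fetch the old pointer under the lock */
+            override = new;     /* publish the replacement */
+            pthread_mutex_unlock(&lock);
+
+            free(old);          /* safe: no reader can still see it */
+    }
+
+    static void show_override(char *buf, size_t len)
+    {
+            pthread_mutex_lock(&lock);  /* string stays alive while copied */
+            snprintf(buf, len, "%s\n", override ? override : "(null)");
+            pthread_mutex_unlock(&lock);
+    }
+
+    int main(void)
+    {
+            char buf[64];
+
+            set_override("vfio-pci");
+            show_override(buf, sizeof(buf));
+            fputs(buf, stdout);
+            set_override("");   /* clears and frees the old string */
+            return 0;
+    }
+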
+diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
+index 85de30f93a9c..56a8195096a2 100644
+--- a/drivers/platform/x86/fujitsu-laptop.c
++++ b/drivers/platform/x86/fujitsu-laptop.c
+@@ -254,10 +254,12 @@ static int bl_update_status(struct backlight_device *b)
+ {
+ 	struct acpi_device *device = bl_get_data(b);
+ 
+-	if (b->props.power == FB_BLANK_POWERDOWN)
+-		call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x3);
+-	else
+-		call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x0);
++	if (fext) {
++		if (b->props.power == FB_BLANK_POWERDOWN)
++			call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x3);
++		else
++			call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x0);
++	}
+ 
+ 	return set_lcd_level(device, b->props.brightness);
+ }
+diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
+index b051d97af468..e431ad40b533 100644
+--- a/drivers/scsi/aacraid/aachba.c
++++ b/drivers/scsi/aacraid/aachba.c
+@@ -699,13 +699,13 @@ static void _aac_probe_container1(void * context, struct fib * fibptr)
+ 	int status;
+ 
+ 	dresp = (struct aac_mount *) fib_data(fibptr);
+-	if (!(fibptr->dev->supplement_adapter_info.supported_options2 &
+-	    AAC_OPTION_VARIABLE_BLOCK_SIZE))
++	if (!aac_supports_2T(fibptr->dev)) {
+ 		dresp->mnt[0].capacityhigh = 0;
+-	if ((le32_to_cpu(dresp->status) != ST_OK) ||
+-	    (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
+-		_aac_probe_container2(context, fibptr);
+-		return;
++		if ((le32_to_cpu(dresp->status) == ST_OK) &&
++			(le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
++			_aac_probe_container2(context, fibptr);
++			return;
++		}
+ 	}
+ 	scsicmd = (struct scsi_cmnd *) context;
+ 
+diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
+index ee2667e20e42..c9e2170fa22d 100644
+--- a/drivers/scsi/aacraid/aacraid.h
++++ b/drivers/scsi/aacraid/aacraid.h
+@@ -2700,6 +2700,11 @@ static inline int aac_is_src(struct aac_dev *dev)
+ 	return 0;
+ }
+ 
++static inline int aac_supports_2T(struct aac_dev *dev)
++{
++	return (dev->adapter_info.options & AAC_OPT_NEW_COMM_64);
++}
++
+ char * get_container_type(unsigned type);
+ extern int numacb;
+ extern char aac_driver_version[];
+diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
+index 48c2b2b34b72..0c9361c87ec8 100644
+--- a/drivers/scsi/aacraid/src.c
++++ b/drivers/scsi/aacraid/src.c
+@@ -740,6 +740,8 @@ static void aac_send_iop_reset(struct aac_dev *dev)
+ 	aac_set_intx_mode(dev);
+ 
+ 	src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK);
++
++	msleep(5000);
+ }
+ 
+ static void aac_send_hardware_soft_reset(struct aac_dev *dev)
+diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
+index 892fbd9800d9..bea06de60827 100644
+--- a/drivers/scsi/scsi_transport_fc.c
++++ b/drivers/scsi/scsi_transport_fc.c
+@@ -3550,7 +3550,7 @@ fc_vport_sched_delete(struct work_struct *work)
+ static enum blk_eh_timer_return
+ fc_bsg_job_timeout(struct request *req)
+ {
+-	struct bsg_job *job = (void *) req->special;
++	struct bsg_job *job = blk_mq_rq_to_pdu(req);
+ 	struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ 	struct fc_rport *rport = fc_bsg_to_rport(job);
+ 	struct fc_internal *i = to_fc_internal(shost->transportt);
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index a424eaeafeb0..c55c6f3147ae 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -3689,7 +3689,7 @@ iscsi_if_rx(struct sk_buff *skb)
+ 		uint32_t group;
+ 
+ 		nlh = nlmsg_hdr(skb);
+-		if (nlh->nlmsg_len < sizeof(*nlh) ||
++		if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) ||
+ 		    skb->len < nlh->nlmsg_len) {
+ 			break;
+ 		}
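+
+The iscsi change above tightens the sanity check: a netlink message must be
+long enough for the header *and* the iscsi event payload before either is
+touched, and must not claim more bytes than the skb holds. The bounds check
+in isolation (hdr and ev are stand-ins for nlmsghdr and the event struct):
+
+    #include <stddef.h>
+
+    struct hdr { unsigned int len; };   /* stand-in for struct nlmsghdr */
+    struct ev  { unsigned int type; };  /* stand-in for the event payload */
+
+    /* Reject buffers too short for header + payload, and headers that
+     * claim more data than the buffer actually contains. */
+    static int msg_ok(const struct hdr *h, size_t buflen)
+    {
+            return h->len >= sizeof(*h) + sizeof(struct ev) &&
+                   buflen >= h->len;
+    }
+
+    int main(void)
+    {
+            struct hdr h = { .len = sizeof(struct hdr) }; /* header only */
+
+            return msg_ok(&h, sizeof(h));   /* 0: payload is missing */
+    }
+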
+diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
+index b55fdac9c9f5..e4c91d748732 100644
+--- a/drivers/video/fbdev/aty/atyfb_base.c
++++ b/drivers/video/fbdev/aty/atyfb_base.c
+@@ -1855,7 +1855,7 @@ static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
+ #if defined(DEBUG) && defined(CONFIG_FB_ATY_CT)
+ 	case ATYIO_CLKR:
+ 		if (M64_HAS(INTEGRATED)) {
+-			struct atyclk clk;
++			struct atyclk clk = { 0 };
+ 			union aty_pll *pll = &par->pll;
+ 			u32 dsp_config = pll->ct.dsp_config;
+ 			u32 dsp_on_off = pll->ct.dsp_on_off;
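+
+The one-liner above ("struct atyclk clk = { 0 };") is an information-leak
+fix: the struct is copied back to userspace by the ioctl, and any field the
+handler does not explicitly set would otherwise carry stale kernel stack
+bytes. In miniature (copy_out() stands in for copy_to_user(); note that the
+initializer zeroes all members, and common compilers zero the padding too):
+
+    #include <stdio.h>
+    #include <string.h>
+
+    struct reply { int a; long b; };
+
+    static void copy_out(void *dst, const void *src, size_t n)
+    {
+            memcpy(dst, src, n);    /* stand-in for copy_to_user() */
+    }
+
+    int main(void)
+    {
+            unsigned char user[sizeof(struct reply)];
+            struct reply r = { 0 }; /* never leaks uninitialized stack */
+
+            r.a = 1;                /* fill only what the handler knows */
+            copy_out(user, &r, sizeof(r));
+            printf("%d\n", user[0]);
+            return 0;
+    }
+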
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 24bcd5cd9cf2..df77ba89acbe 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -135,6 +135,18 @@ static inline void btrfs_cleanup_ordered_extents(struct inode *inode,
+ 						 const u64 offset,
+ 						 const u64 bytes)
+ {
++	unsigned long index = offset >> PAGE_SHIFT;
++	unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
++	struct page *page;
++
++	while (index <= end_index) {
++		page = find_get_page(inode->i_mapping, index);
++		index++;
++		if (!page)
++			continue;
++		ClearPagePrivate2(page);
++		put_page(page);
++	}
+ 	return __endio_write_update_ordered(inode, offset + PAGE_SIZE,
+ 					    bytes - PAGE_SIZE, false);
+ }
+@@ -8297,6 +8309,7 @@ static void __endio_write_update_ordered(struct inode *inode,
+ 	btrfs_work_func_t func;
+ 	u64 ordered_offset = offset;
+ 	u64 ordered_bytes = bytes;
++	u64 last_offset;
+ 	int ret;
+ 
+ 	if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
+@@ -8308,6 +8321,7 @@ static void __endio_write_update_ordered(struct inode *inode,
+ 	}
+ 
+ again:
++	last_offset = ordered_offset;
+ 	ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
+ 						   &ordered_offset,
+ 						   ordered_bytes,
+@@ -8318,6 +8332,12 @@ static void __endio_write_update_ordered(struct inode *inode,
+ 	btrfs_init_work(&ordered->work, func, finish_ordered_fn, NULL, NULL);
+ 	btrfs_queue_work(wq, &ordered->work);
+ out_test:
++	/*
++	 * If btrfs_dec_test_first_ordered_pending does not find any ordered
++	 * extent in the range, we can exit.
++	 */
++	if (ordered_offset == last_offset)
++		return;
+ 	/*
+ 	 * our bio might span multiple ordered extents.  If we haven't
+ 	 * completed the accounting for the whole dio, go back and try again
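+
+The last_offset bookkeeping above is a forward-progress check: the dio
+completion path loops over possibly many ordered extents in the bio's range,
+and if btrfs_dec_test_first_ordered_pending() failed to advance
+ordered_offset, jumping back to "again" would spin forever. The generic
+loop-until-no-progress shape, reduced to a sketch (step() is hypothetical):
+
+    /* Advances *off by one block while anything remains in [*off, end). */
+    static int step(unsigned long long *off, unsigned long long end)
+    {
+            if (*off + 4096 > end)
+                    return 0;       /* nothing left to complete */
+            *off += 4096;
+            return 1;
+    }
+
+    static void drain(unsigned long long off, unsigned long long end)
+    {
+            unsigned long long last;
+
+            do {
+                    last = off;     /* remember where this pass started */
+                    step(&off, end);
+            } while (off != last);  /* exit as soon as no progress is made */
+    }
+
+    int main(void)
+    {
+            drain(0, 1 << 16);
+            return 0;
+    }
+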
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index fa1b78cf25f6..9afd08539519 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -3063,7 +3063,7 @@ static int btrfs_cmp_data_prepare(struct inode *src, u64 loff,
+ out:
+ 	if (ret)
+ 		btrfs_cmp_data_free(cmp);
+-	return 0;
++	return ret;
+ }
+ 
+ static int btrfs_cmp_data(u64 len, struct cmp_pages *cmp)
+@@ -4072,6 +4072,10 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
+ 		ret = PTR_ERR(new_root);
+ 		goto out;
+ 	}
++	if (!is_fstree(new_root->objectid)) {
++		ret = -ENOENT;
++		goto out;
++	}
+ 
+ 	path = btrfs_alloc_path();
+ 	if (!path) {
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index 65661d1aae4e..6445de8e9ece 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -2393,11 +2393,11 @@ void free_reloc_roots(struct list_head *list)
+ 	while (!list_empty(list)) {
+ 		reloc_root = list_entry(list->next, struct btrfs_root,
+ 					root_list);
++		__del_reloc_root(reloc_root);
+ 		free_extent_buffer(reloc_root->node);
+ 		free_extent_buffer(reloc_root->commit_root);
+ 		reloc_root->node = NULL;
+ 		reloc_root->commit_root = NULL;
+-		__del_reloc_root(reloc_root);
+ 	}
+ }
+ 
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index 180b3356ff86..a92bdb89bde3 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -1447,7 +1447,7 @@ exit_cifs(void)
+ 	exit_cifs_idmap();
+ #endif
+ #ifdef CONFIG_CIFS_UPCALL
+-	unregister_key_type(&cifs_spnego_key_type);
++	exit_cifs_spnego();
+ #endif
+ 	cifs_destroy_request_bufs();
+ 	cifs_destroy_mids();
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 221693fe49ec..03b6eae0ae28 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -188,6 +188,8 @@ enum smb_version {
+ #ifdef CONFIG_CIFS_SMB311
+ 	Smb_311,
+ #endif /* SMB311 */
++	Smb_3any,
++	Smb_default,
+ 	Smb_version_err
+ };
+ 
+@@ -1701,6 +1703,10 @@ extern struct smb_version_values smb20_values;
+ #define SMB21_VERSION_STRING	"2.1"
+ extern struct smb_version_operations smb21_operations;
+ extern struct smb_version_values smb21_values;
++#define SMBDEFAULT_VERSION_STRING "default"
++extern struct smb_version_values smbdefault_values;
++#define SMB3ANY_VERSION_STRING "3"
++extern struct smb_version_values smb3any_values;
+ #define SMB30_VERSION_STRING	"3.0"
+ extern struct smb_version_operations smb30_operations;
+ extern struct smb_version_values smb30_values;
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 83a8f52cd879..9e12679ffef5 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -301,6 +301,8 @@ static const match_table_t cifs_smb_version_tokens = {
+ 	{ Smb_311, SMB311_VERSION_STRING },
+ 	{ Smb_311, ALT_SMB311_VERSION_STRING },
+ #endif /* SMB311 */
++	{ Smb_3any, SMB3ANY_VERSION_STRING },
++	{ Smb_default, SMBDEFAULT_VERSION_STRING },
+ 	{ Smb_version_err, NULL }
+ };
+ 
+@@ -1147,6 +1149,14 @@ cifs_parse_smb_version(char *value, struct smb_vol *vol)
+ 		vol->vals = &smb311_values;
+ 		break;
+ #endif /* SMB311 */
++	case Smb_3any:
++		vol->ops = &smb30_operations; /* currently identical with 3.0 */
++		vol->vals = &smb3any_values;
++		break;
++	case Smb_default:
++		vol->ops = &smb30_operations; /* currently identical with 3.0 */
++		vol->vals = &smbdefault_values;
++		break;
+ 	default:
+ 		cifs_dbg(VFS, "Unknown vers= option specified: %s\n", value);
+ 		return 1;
+@@ -1273,9 +1283,9 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
+ 
+ 	vol->actimeo = CIFS_DEF_ACTIMEO;
+ 
+-	/* FIXME: add autonegotiation for SMB3 or later rather than just SMB3 */
+-	vol->ops = &smb30_operations; /* both secure and accepted widely */
+-	vol->vals = &smb30_values;
++	/* offer SMB2.1 and later (SMB3 etc). Secure and widely accepted */
++	vol->ops = &smb30_operations;
++	vol->vals = &smbdefault_values;
+ 
+ 	vol->echo_interval = SMB_ECHO_INTERVAL_DEFAULT;
+ 
+@@ -1987,11 +1997,10 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
+ 
+ 	if (got_version == false)
+ 		pr_warn("No dialect specified on mount. Default has changed to "
+-			"a more secure dialect, SMB3 (vers=3.0), from CIFS "
++			"a more secure dialect, SMB2.1 or later (e.g. SMB3), from CIFS "
+ 			"(SMB1). To use the less secure SMB1 dialect to access "
+-			"old servers which do not support SMB3 specify vers=1.0"
+-			" on mount. For somewhat newer servers such as Windows "
+-			"7 try vers=2.1.\n");
++			"old servers which do not support SMB3 (or SMB2.1) specify vers=1.0"
++			" on mount.\n");
+ 
+ 	kfree(mountdata_copy);
+ 	return 0;
+@@ -2132,6 +2141,7 @@ static int match_server(struct TCP_Server_Info *server, struct smb_vol *vol)
+ 	if (vol->nosharesock)
+ 		return 0;
+ 
++	/* BB update this for smb3any and default case */
+ 	if ((server->vals != vol->vals) || (server->ops != vol->ops))
+ 		return 0;
+ 
+@@ -4143,6 +4153,14 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
+ 	cifs_dbg(FYI, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d\n",
+ 		 server->sec_mode, server->capabilities, server->timeAdj);
+ 
++	if (ses->auth_key.response) {
++		cifs_dbg(VFS, "Free previous auth_key.response = %p\n",
++			 ses->auth_key.response);
++		kfree(ses->auth_key.response);
++		ses->auth_key.response = NULL;
++		ses->auth_key.len = 0;
++	}
++
+ 	if (server->ops->sess_setup)
+ 		rc = server->ops->sess_setup(xid, ses, nls_info);
+ 
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index bc09df6b473a..c3bf300e7c47 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -224,6 +224,13 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
+ 	if (backup_cred(cifs_sb))
+ 		create_options |= CREATE_OPEN_BACKUP_INTENT;
+ 
++	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
++	if (f_flags & O_SYNC)
++		create_options |= CREATE_WRITE_THROUGH;
++
++	if (f_flags & O_DIRECT)
++		create_options |= CREATE_NO_BUFFER;
++
+ 	oparms.tcon = tcon;
+ 	oparms.cifs_sb = cifs_sb;
+ 	oparms.desired_access = desired_access;
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index a8693632235f..7c732cb44164 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -234,6 +234,8 @@ cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, FILE_UNIX_BASIC_INFO *info,
+ 	fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
+ 	fattr->cf_mtime = cifs_NTtimeToUnix(info->LastModificationTime);
+ 	fattr->cf_ctime = cifs_NTtimeToUnix(info->LastStatusChange);
++	/* old POSIX extensions don't get create time */
++
+ 	fattr->cf_mode = le64_to_cpu(info->Permissions);
+ 
+ 	/*
+@@ -2024,6 +2026,19 @@ int cifs_getattr(const struct path *path, struct kstat *stat,
+ 	stat->blksize = CIFS_MAX_MSGSIZE;
+ 	stat->ino = CIFS_I(inode)->uniqueid;
+ 
++	/* old CIFS Unix Extensions doesn't return create time */
++	if (CIFS_I(inode)->createtime) {
++		stat->result_mask |= STATX_BTIME;
++		stat->btime =
++		      cifs_NTtimeToUnix(cpu_to_le64(CIFS_I(inode)->createtime));
++	}
++
++	stat->attributes_mask |= (STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED);
++	if (CIFS_I(inode)->cifsAttrs & FILE_ATTRIBUTE_COMPRESSED)
++		stat->attributes |= STATX_ATTR_COMPRESSED;
++	if (CIFS_I(inode)->cifsAttrs & FILE_ATTRIBUTE_ENCRYPTED)
++		stat->attributes |= STATX_ATTR_ENCRYPTED;
++
+ 	/*
+ 	 * If on a multiuser mount without unix extensions or cifsacl being
+ 	 * enabled, and the admin hasn't overridden them, set the ownership
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index cfacf2c97e94..a6c94812cfa3 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -2906,6 +2906,46 @@ struct smb_version_values smb21_values = {
+ 	.create_lease_size = sizeof(struct create_lease),
+ };
+ 
++struct smb_version_values smb3any_values = {
++	.version_string = SMB3ANY_VERSION_STRING,
++	.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
++	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
++	.large_lock_type = 0,
++	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
++	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
++	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
++	.header_size = sizeof(struct smb2_hdr),
++	.max_header_size = MAX_SMB2_HDR_SIZE,
++	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
++	.lock_cmd = SMB2_LOCK,
++	.cap_unix = 0,
++	.cap_nt_find = SMB2_NT_FIND,
++	.cap_large_files = SMB2_LARGE_FILES,
++	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
++	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
++	.create_lease_size = sizeof(struct create_lease_v2),
++};
++
++struct smb_version_values smbdefault_values = {
++	.version_string = SMBDEFAULT_VERSION_STRING,
++	.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
++	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
++	.large_lock_type = 0,
++	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
++	.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
++	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
++	.header_size = sizeof(struct smb2_hdr),
++	.max_header_size = MAX_SMB2_HDR_SIZE,
++	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
++	.lock_cmd = SMB2_LOCK,
++	.cap_unix = 0,
++	.cap_nt_find = SMB2_NT_FIND,
++	.cap_large_files = SMB2_LARGE_FILES,
++	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
++	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
++	.create_lease_size = sizeof(struct create_lease_v2),
++};
++
+ struct smb_version_values smb30_values = {
+ 	.version_string = SMB30_VERSION_STRING,
+ 	.protocol_id = SMB30_PROT_ID,
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 7aa67206f6da..ddc633ef6064 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -427,7 +427,7 @@ assemble_neg_contexts(struct smb2_negotiate_req *req)
+ 	build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt);
+ 	req->NegotiateContextOffset = cpu_to_le32(OFFSET_OF_NEG_CONTEXT);
+ 	req->NegotiateContextCount = cpu_to_le16(2);
+-	inc_rfc1001_len(req, 4 + sizeof(struct smb2_preauth_neg_context) + 2
++	inc_rfc1001_len(req, 4 + sizeof(struct smb2_preauth_neg_context)
+ 			+ sizeof(struct smb2_encryption_neg_context)); /* calculate hash */
+ }
+ #else
+@@ -479,10 +479,25 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
+ 
+ 	req->hdr.sync_hdr.SessionId = 0;
+ 
+-	req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id);
+-
+-	req->DialectCount = cpu_to_le16(1); /* One vers= at a time for now */
+-	inc_rfc1001_len(req, 2);
++	if (strcmp(ses->server->vals->version_string,
++		   SMB3ANY_VERSION_STRING) == 0) {
++		req->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
++		req->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
++		req->DialectCount = cpu_to_le16(2);
++		inc_rfc1001_len(req, 4);
++	} else if (strcmp(ses->server->vals->version_string,
++		   SMBDEFAULT_VERSION_STRING) == 0) {
++		req->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
++		req->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
++		req->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
++		req->DialectCount = cpu_to_le16(3);
++		inc_rfc1001_len(req, 6);
++	} else {
++		/* otherwise send specific dialect */
++		req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id);
++		req->DialectCount = cpu_to_le16(1);
++		inc_rfc1001_len(req, 2);
++	}
+ 
+ 	/* only one of SMB2 signing flags may be set in SMB2 request */
+ 	if (ses->sign)
+@@ -516,16 +531,43 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
+ 	 */
+ 	if (rc == -EOPNOTSUPP) {
+ 		cifs_dbg(VFS, "Dialect not supported by server. Consider "
+-			"specifying vers=1.0 or vers=2.1 on mount for accessing"
++			"specifying vers=1.0 or vers=2.0 on mount for accessing"
+ 			" older servers\n");
+ 		goto neg_exit;
+ 	} else if (rc != 0)
+ 		goto neg_exit;
+ 
++	if (strcmp(ses->server->vals->version_string,
++		   SMB3ANY_VERSION_STRING) == 0) {
++		if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
++			cifs_dbg(VFS,
++				"SMB2 dialect returned but not requested\n");
++			return -EIO;
++		} else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
++			cifs_dbg(VFS,
++				"SMB2.1 dialect returned but not requested\n");
++			return -EIO;
++		}
++	} else if (strcmp(ses->server->vals->version_string,
++		   SMBDEFAULT_VERSION_STRING) == 0) {
++		if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
++			cifs_dbg(VFS,
++				"SMB2 dialect returned but not requested\n");
++			return -EIO;
++		} else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
++			/* ops default to 3.0, so update them for the 2.1 dialect */
++			ses->server->ops = &smb21_operations;
++		}
++	} else if (le16_to_cpu(rsp->DialectRevision) !=
++				ses->server->vals->protocol_id) {
++		/* if a single dialect was requested, ensure the returned one matches */
++		cifs_dbg(VFS, "Illegal 0x%x dialect returned: not requested\n",
++			le16_to_cpu(rsp->DialectRevision));
++		return -EIO;
++	}
++
+ 	cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode);
+ 
+-	/* BB we may eventually want to match the negotiated vs. requested
+-	   dialect, even though we are only requesting one at a time */
+ 	if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID))
+ 		cifs_dbg(FYI, "negotiated smb2.0 dialect\n");
+ 	else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID))
+@@ -546,6 +588,8 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
+ 	}
+ 	server->dialect = le16_to_cpu(rsp->DialectRevision);
+ 
++	/* BB: add check that dialect was valid given dialect(s) we asked for */
++
+ 	/* SMB2 only has an extended negflavor */
+ 	server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
+ 	/* set it to the maximum buffer size value we can send with 1 credit */
+@@ -594,20 +638,28 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
+ 	struct validate_negotiate_info_req vneg_inbuf;
+ 	struct validate_negotiate_info_rsp *pneg_rsp;
+ 	u32 rsplen;
++	u32 inbuflen; /* max of 4 dialects */
+ 
+ 	cifs_dbg(FYI, "validate negotiate\n");
+ 
+ 	/*
+ 	 * validation ioctl must be signed, so no point sending this if we
+-	 * can not sign it.  We could eventually change this to selectively
++	 * cannot sign it (i.e., we are not a known user).  Even if signing is not
++	 * required (enabled but not negotiated), in those cases we selectively
+ 	 * sign just this, the first and only signed request on a connection.
+-	 * This is good enough for now since a user who wants better security
+-	 * would also enable signing on the mount. Having validation of
+-	 * negotiate info for signed connections helps reduce attack vectors
++	 * Having validation of negotiate info helps reduce attack vectors.
+ 	 */
+-	if (tcon->ses->server->sign == false)
++	if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST)
+ 		return 0; /* validation requires signing */
+ 
++	if (tcon->ses->user_name == NULL) {
++		cifs_dbg(FYI, "Can't validate negotiate: null user mount\n");
++		return 0; /* validation requires signing */
++	}
++
++	if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
++		cifs_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n");
++
+ 	vneg_inbuf.Capabilities =
+ 			cpu_to_le32(tcon->ses->server->vals->req_capabilities);
+ 	memcpy(vneg_inbuf.Guid, tcon->ses->server->client_guid,
+@@ -622,9 +674,30 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
+ 	else
+ 		vneg_inbuf.SecurityMode = 0;
+ 
+-	vneg_inbuf.DialectCount = cpu_to_le16(1);
+-	vneg_inbuf.Dialects[0] =
+-		cpu_to_le16(tcon->ses->server->vals->protocol_id);
++
++	if (strcmp(tcon->ses->server->vals->version_string,
++		SMB3ANY_VERSION_STRING) == 0) {
++		vneg_inbuf.Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
++		vneg_inbuf.Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
++		vneg_inbuf.DialectCount = cpu_to_le16(2);
++		/* structure is big enough for 3 dialects, sending only 2 */
++		inbuflen = sizeof(struct validate_negotiate_info_req) - 2;
++	} else if (strcmp(tcon->ses->server->vals->version_string,
++		SMBDEFAULT_VERSION_STRING) == 0) {
++		vneg_inbuf.Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
++		vneg_inbuf.Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
++		vneg_inbuf.Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
++		vneg_inbuf.DialectCount = cpu_to_le16(3);
++		/* structure is big enough for 3 dialects */
++		inbuflen = sizeof(struct validate_negotiate_info_req);
++	} else {
++		/* otherwise specific dialect was requested */
++		vneg_inbuf.Dialects[0] =
++			cpu_to_le16(tcon->ses->server->vals->protocol_id);
++		vneg_inbuf.DialectCount = cpu_to_le16(1);
++		/* structure is big enough for 3 dialects, sending only 1 */
++		inbuflen = sizeof(struct validate_negotiate_info_req) - 4;
++	}
+ 
+ 	rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
+ 		FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
+@@ -1098,6 +1171,8 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
+ 	while (sess_data->func)
+ 		sess_data->func(sess_data);
+ 
++	if ((ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) && (ses->sign))
++		cifs_dbg(VFS, "signing requested but authenticated as guest\n");
+ 	rc = sess_data->result;
+ out:
+ 	kfree(sess_data);
+@@ -1622,7 +1697,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
+ 	struct cifs_tcon *tcon = oparms->tcon;
+ 	struct cifs_ses *ses = tcon->ses;
+ 	struct kvec iov[4];
+-	struct kvec rsp_iov;
++	struct kvec rsp_iov = {NULL, 0};
+ 	int resp_buftype;
+ 	int uni_path_len;
+ 	__le16 *copy_path = NULL;
+@@ -1751,7 +1826,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
+ 
+ 	if (rc != 0) {
+ 		cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
+-		if (err_buf)
++		if (err_buf && rsp)
+ 			*err_buf = kmemdup(rsp, get_rfc1002_length(rsp) + 4,
+ 					   GFP_KERNEL);
+ 		goto creat_exit;
+diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
+index 2826882c81d1..46b6cbce9675 100644
+--- a/fs/cifs/smb2pdu.h
++++ b/fs/cifs/smb2pdu.h
+@@ -716,7 +716,7 @@ struct validate_negotiate_info_req {
+ 	__u8   Guid[SMB2_CLIENT_GUID_SIZE];
+ 	__le16 SecurityMode;
+ 	__le16 DialectCount;
+-	__le16 Dialects[1]; /* dialect (someday maybe list) client asked for */
++	__le16 Dialects[3]; /* BB expand this if autonegotiate > 3 dialects */
+ } __packed;
+ 
+ struct validate_negotiate_info_rsp {
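+
+Both the negotiate request and the validate-negotiate ioctl above now build
+an array of dialects instead of a single entry: vers=3 offers 3.0 and 3.0.2,
+the new default offers 2.1, 3.0 and 3.0.2, and a pinned vers= still sends
+exactly one. A compact sketch of building the list and vetting the server's
+answer (the mode enum and ids below are illustrative, not cifs.ko symbols):
+
+    #include <stdio.h>
+
+    enum mode { VERS_DEFAULT, VERS_3ANY, VERS_PINNED };
+
+    #define SMB21  0x0210
+    #define SMB30  0x0300
+    #define SMB302 0x0302
+
+    static int build_dialects(enum mode m, unsigned short pinned,
+                              unsigned short out[3])
+    {
+            switch (m) {
+            case VERS_3ANY:
+                    out[0] = SMB30; out[1] = SMB302;
+                    return 2;
+            case VERS_DEFAULT:
+                    out[0] = SMB21; out[1] = SMB30; out[2] = SMB302;
+                    return 3;
+            default:
+                    out[0] = pinned;    /* the single requested dialect */
+                    return 1;
+            }
+    }
+
+    /* The server's reply must be one of the dialects we offered. */
+    static int reply_ok(const unsigned short *d, int n, unsigned short got)
+    {
+            while (n--)
+                    if (d[n] == got)
+                            return 1;
+            return 0;
+    }
+
+    int main(void)
+    {
+            unsigned short d[3];
+            int n = build_dialects(VERS_DEFAULT, 0, d);
+
+            printf("%s\n", reply_ok(d, n, SMB302) ? "ok" : "reject");
+            return 0;
+    }
+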
+diff --git a/fs/dax.c b/fs/dax.c
+index ab925dc6647a..ede5bc978db3 100644
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -786,7 +786,7 @@ static int dax_writeback_one(struct block_device *bdev,
+ 	}
+ 
+ 	dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn));
+-	dax_flush(dax_dev, pgoff, kaddr, size);
++	dax_flush(dax_dev, kaddr, size);
+ 	/*
+ 	 * After we have flushed the cache, we can clear the dirty tag. There
+ 	 * cannot be new dirty data in the pfn after the flush has completed as
+@@ -981,7 +981,7 @@ int __dax_zero_page_range(struct block_device *bdev,
+ 			return rc;
+ 		}
+ 		memset(kaddr + offset, 0, size);
+-		dax_flush(dax_dev, pgoff, kaddr + offset, size);
++		dax_flush(dax_dev, kaddr + offset, size);
+ 		dax_read_unlock(id);
+ 	}
+ 	return 0;
+diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
+index c38ab6c81898..410714c9eff7 100644
+--- a/fs/gfs2/glock.c
++++ b/fs/gfs2/glock.c
+@@ -1863,13 +1863,9 @@ static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
+ {
+ 	struct gfs2_glock_iter *gi = seq->private;
+ 	loff_t n = *pos;
+-	int ret;
+-
+-	if (gi->last_pos <= *pos)
+-		n = (*pos - gi->last_pos);
+ 
+-	ret = rhashtable_walk_start(&gi->hti);
+-	if (ret)
++	rhashtable_walk_enter(&gl_hash_table, &gi->hti);
++	if (rhashtable_walk_start(&gi->hti) != 0)
+ 		return NULL;
+ 
+ 	do {
+@@ -1877,6 +1873,7 @@ static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
+ 	} while (gi->gl && n--);
+ 
+ 	gi->last_pos = *pos;
++
+ 	return gi->gl;
+ }
+ 
+@@ -1888,6 +1885,7 @@ static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
+ 	(*pos)++;
+ 	gi->last_pos = *pos;
+ 	gfs2_glock_iter_next(gi);
++
+ 	return gi->gl;
+ }
+ 
+@@ -1897,6 +1895,7 @@ static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
+ 
+ 	gi->gl = NULL;
+ 	rhashtable_walk_stop(&gi->hti);
++	rhashtable_walk_exit(&gi->hti);
+ }
+ 
+ static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
+@@ -1959,12 +1958,10 @@ static int __gfs2_glocks_open(struct inode *inode, struct file *file,
+ 		struct gfs2_glock_iter *gi = seq->private;
+ 
+ 		gi->sdp = inode->i_private;
+-		gi->last_pos = 0;
+ 		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
+ 		if (seq->buf)
+ 			seq->size = GFS2_SEQ_GOODSIZE;
+ 		gi->gl = NULL;
+-		rhashtable_walk_enter(&gl_hash_table, &gi->hti);
+ 	}
+ 	return ret;
+ }
+@@ -1980,7 +1977,6 @@ static int gfs2_glocks_release(struct inode *inode, struct file *file)
+ 	struct gfs2_glock_iter *gi = seq->private;
+ 
+ 	gi->gl = NULL;
+-	rhashtable_walk_exit(&gi->hti);
+ 	return seq_release_private(inode, file);
+ }
+ 
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index 88c355574aa0..525157ca25cb 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -62,6 +62,7 @@
+ #include <linux/mman.h>
+ #include <linux/sched/mm.h>
+ #include <linux/sched/numa_balancing.h>
++#include <linux/sched/task_stack.h>
+ #include <linux/sched/task.h>
+ #include <linux/sched/cputime.h>
+ #include <linux/proc_fs.h>
+@@ -421,7 +422,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+ 		 * esp and eip are intentionally zeroed out.  There is no
+ 		 * non-racy way to read them without freezing the task.
+ 		 * Programs that need reliable values can use ptrace(2).
++		 *
++		 * The only exception is when the task is core dumping, since
++		 * a program cannot use ptrace(2) on it in that case. It is
++		 * safe because the task has permanently stopped executing.
+ 		 */
++		if (permitted && (task->flags & PF_DUMPCORE)) {
++			eip = KSTK_EIP(task);
++			esp = KSTK_ESP(task);
++		}
+ 	}
+ 
+ 	get_task_comm(tcomm, task);
+diff --git a/fs/read_write.c b/fs/read_write.c
+index 0cc7033aa413..52872c1e57dd 100644
+--- a/fs/read_write.c
++++ b/fs/read_write.c
+@@ -112,7 +112,7 @@ generic_file_llseek_size(struct file *file, loff_t offset, int whence,
+ 		 * In the generic case the entire file is data, so as long as
+ 		 * offset isn't at the end of the file then the offset is data.
+ 		 */
+-		if (offset >= eof)
++		if ((unsigned long long)offset >= eof)
+ 			return -ENXIO;
+ 		break;
+ 	case SEEK_HOLE:
+@@ -120,7 +120,7 @@ generic_file_llseek_size(struct file *file, loff_t offset, int whence,
+ 		 * There is a virtual hole at the end of the file, so as long as
+ 		 * offset isn't i_size or larger, return i_size.
+ 		 */
+-		if (offset >= eof)
++		if ((unsigned long long)offset >= eof)
+ 			return -ENXIO;
+ 		offset = eof;
+ 		break;
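+
+The casts above make a negative offset fail the SEEK_DATA/SEEK_HOLE bounds
+check: once offset is converted to unsigned long long, any negative value
+compares as enormous and is rejected with -ENXIO, instead of slipping past
+the signed comparison. The conversion, demonstrated in two lines:
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+            long long offset = -1;          /* loff_t is signed */
+            unsigned long long eof = 4096;
+
+            /* Signed compare lets -1 through; the cast rejects it. */
+            printf("%d %d\n",
+                   offset >= (long long)eof,            /* 0 */
+                   (unsigned long long)offset >= eof);  /* 1 */
+            return 0;
+    }
+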
+diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
+index 5049e8ab6e30..aa75389be8cf 100644
+--- a/fs/xfs/xfs_ioctl.c
++++ b/fs/xfs/xfs_ioctl.c
+@@ -1088,6 +1088,7 @@ xfs_ioctl_setattr_dax_invalidate(
+ 	int			*join_flags)
+ {
+ 	struct inode		*inode = VFS_I(ip);
++	struct super_block	*sb = inode->i_sb;
+ 	int			error;
+ 
+ 	*join_flags = 0;
+@@ -1100,7 +1101,7 @@ xfs_ioctl_setattr_dax_invalidate(
+ 	if (fa->fsx_xflags & FS_XFLAG_DAX) {
+ 		if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)))
+ 			return -EINVAL;
+-		if (ip->i_mount->m_sb.sb_blocksize != PAGE_SIZE)
++		if (bdev_dax_supported(sb, sb->s_blocksize) < 0)
+ 			return -EINVAL;
+ 	}
+ 
+diff --git a/include/linux/dax.h b/include/linux/dax.h
+index df97b7af7e2c..0d8f35f6c53d 100644
+--- a/include/linux/dax.h
++++ b/include/linux/dax.h
+@@ -19,8 +19,6 @@ struct dax_operations {
+ 	/* copy_from_iter: required operation for fs-dax direct-i/o */
+ 	size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
+ 			struct iov_iter *);
+-	/* flush: optional driver-specific cache management after writes */
+-	void (*flush)(struct dax_device *, pgoff_t, void *, size_t);
+ };
+ 
+ extern struct attribute_group dax_attribute_group;
+@@ -84,8 +82,7 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
+ 		void **kaddr, pfn_t *pfn);
+ size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
+ 		size_t bytes, struct iov_iter *i);
+-void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
+-		size_t size);
++void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
+ void dax_write_cache(struct dax_device *dax_dev, bool wc);
+ bool dax_write_cache_enabled(struct dax_device *dax_dev);
+ 
+diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
+index 4f2b3b2076c4..17c378ecbbdd 100644
+--- a/include/linux/device-mapper.h
++++ b/include/linux/device-mapper.h
+@@ -134,8 +134,6 @@ typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
+ 		long nr_pages, void **kaddr, pfn_t *pfn);
+ typedef size_t (*dm_dax_copy_from_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
+ 		void *addr, size_t bytes, struct iov_iter *i);
+-typedef void (*dm_dax_flush_fn)(struct dm_target *ti, pgoff_t pgoff, void *addr,
+-		size_t size);
+ #define PAGE_SECTORS (PAGE_SIZE / 512)
+ 
+ void dm_error(const char *message);
+@@ -186,7 +184,6 @@ struct target_type {
+ 	dm_io_hints_fn io_hints;
+ 	dm_dax_direct_access_fn direct_access;
+ 	dm_dax_copy_from_iter_fn dax_copy_from_iter;
+-	dm_dax_flush_fn dax_flush;
+ 
+ 	/* For internal device-mapper use. */
+ 	struct list_head list;
+diff --git a/include/linux/key.h b/include/linux/key.h
+index 044114185120..e315e16b6ff8 100644
+--- a/include/linux/key.h
++++ b/include/linux/key.h
+@@ -187,6 +187,7 @@ struct key {
+ #define KEY_FLAG_BUILTIN	8	/* set if key is built in to the kernel */
+ #define KEY_FLAG_ROOT_CAN_INVAL	9	/* set if key can be invalidated by root without permission */
+ #define KEY_FLAG_KEEP		10	/* set if key should not be removed */
++#define KEY_FLAG_UID_KEYRING	11	/* set if key is a user or user session keyring */
+ 
+ 	/* the key type and key description string
+ 	 * - the desc is used to match a key against search criteria
+@@ -243,6 +244,7 @@ extern struct key *key_alloc(struct key_type *type,
+ #define KEY_ALLOC_NOT_IN_QUOTA		0x0002	/* not in quota */
+ #define KEY_ALLOC_BUILT_IN		0x0004	/* Key is built into kernel */
+ #define KEY_ALLOC_BYPASS_RESTRICTION	0x0008	/* Override the check on restricted keyrings */
++#define KEY_ALLOC_UID_KEYRING		0x0010	/* allocating a user or user session keyring */
+ 
+ extern void key_revoke(struct key *key);
+ extern void key_invalidate(struct key *key);
+diff --git a/include/net/mac80211.h b/include/net/mac80211.h
+index f8149ca192b4..885690fa39c8 100644
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -919,21 +919,10 @@ struct ieee80211_tx_info {
+ 				unsigned long jiffies;
+ 			};
+ 			/* NB: vif can be NULL for injected frames */
+-			union {
+-				/* NB: vif can be NULL for injected frames */
+-				struct ieee80211_vif *vif;
+-
+-				/* When packets are enqueued on txq it's easy
+-				 * to re-construct the vif pointer. There's no
+-				 * more space in tx_info so it can be used to
+-				 * store the necessary enqueue time for packet
+-				 * sojourn time computation.
+-				 */
+-				codel_time_t enqueue_time;
+-			};
++			struct ieee80211_vif *vif;
+ 			struct ieee80211_key_conf *hw_key;
+ 			u32 flags;
+-			/* 4 bytes free */
++			codel_time_t enqueue_time;
+ 		} control;
+ 		struct {
+ 			u64 cookie;
+diff --git a/kernel/exit.c b/kernel/exit.c
+index c5548faa9f37..6d31fc5ba50d 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -1601,12 +1601,10 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
+ 	struct waitid_info info = {.status = 0};
+ 	long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL);
+ 	int signo = 0;
++
+ 	if (err > 0) {
+ 		signo = SIGCHLD;
+ 		err = 0;
+-	}
+-
+-	if (!err) {
+ 		if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
+ 			return -EFAULT;
+ 	}
+@@ -1724,16 +1722,15 @@ COMPAT_SYSCALL_DEFINE5(waitid,
+ 	if (err > 0) {
+ 		signo = SIGCHLD;
+ 		err = 0;
+-	}
+-
+-	if (!err && uru) {
+-		/* kernel_waitid() overwrites everything in ru */
+-		if (COMPAT_USE_64BIT_TIME)
+-			err = copy_to_user(uru, &ru, sizeof(ru));
+-		else
+-			err = put_compat_rusage(&ru, uru);
+-		if (err)
+-			return -EFAULT;
++		if (uru) {
++			/* kernel_waitid() overwrites everything in ru */
++			if (COMPAT_USE_64BIT_TIME)
++				err = copy_to_user(uru, &ru, sizeof(ru));
++			else
++				err = put_compat_rusage(&ru, uru);
++			if (err)
++				return -EFAULT;
++		}
+ 	}
+ 
+ 	if (!infop)
+diff --git a/kernel/extable.c b/kernel/extable.c
+index 38c2412401a1..9aa1cc41ecf7 100644
+--- a/kernel/extable.c
++++ b/kernel/extable.c
+@@ -102,15 +102,7 @@ int core_kernel_data(unsigned long addr)
+ 
+ int __kernel_text_address(unsigned long addr)
+ {
+-	if (core_kernel_text(addr))
+-		return 1;
+-	if (is_module_text_address(addr))
+-		return 1;
+-	if (is_ftrace_trampoline(addr))
+-		return 1;
+-	if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr))
+-		return 1;
+-	if (is_bpf_text_address(addr))
++	if (kernel_text_address(addr))
+ 		return 1;
+ 	/*
+ 	 * There might be init symbols in saved stacktraces.
+@@ -127,17 +119,42 @@ int __kernel_text_address(unsigned long addr)
+ 
+ int kernel_text_address(unsigned long addr)
+ {
++	bool no_rcu;
++	int ret = 1;
++
+ 	if (core_kernel_text(addr))
+ 		return 1;
++
++	/*
++	 * If a stack dump happens while RCU is not watching, then
++	 * RCU needs to be notified that it requires to start
++	 * watching again. This can happen either via tracing that
++	 * triggers a stack trace, or via a WARN() issued while coming
++	 * back from idle or while a CPU is going on- or offline.
++	 *
++	 * is_module_text_address() as well as the kprobe slots
++	 * and is_bpf_text_address() require RCU to be watching.
++	 */
++	no_rcu = !rcu_is_watching();
++
++	/* Treat this like an NMI as it can happen anywhere */
++	if (no_rcu)
++		rcu_nmi_enter();
++
+ 	if (is_module_text_address(addr))
+-		return 1;
++		goto out;
+ 	if (is_ftrace_trampoline(addr))
+-		return 1;
++		goto out;
+ 	if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr))
+-		return 1;
++		goto out;
+ 	if (is_bpf_text_address(addr))
+-		return 1;
+-	return 0;
++		goto out;
++	ret = 0;
++out:
++	if (no_rcu)
++		rcu_nmi_exit();
++
++	return ret;
+ }
+ 
+ /*
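+
+kernel_text_address() above can now be reached while RCU is not watching
+(e.g. a stack dump taken on the way back from idle), so it brackets the
+RCU-dependent lookups with rcu_nmi_enter()/rcu_nmi_exit(), but only when
+that is actually needed. The conditional-guard shape, abstracted (the guard
+functions below are placeholders, not the RCU API):
+
+    #include <stdbool.h>
+    #include <stdio.h>
+
+    static bool watching;                       /* rcu_is_watching() */
+    static void guard_enter(void) { puts("enter"); }  /* rcu_nmi_enter() */
+    static void guard_exit(void)  { puts("exit"); }   /* rcu_nmi_exit() */
+
+    static int lookup(unsigned long addr)
+    {
+            bool no_guard = !watching;
+            int ret = 1;
+
+            if (no_guard)
+                    guard_enter();      /* make the lookups legal */
+
+            if (addr == 0)              /* the RCU-dependent checks */
+                    goto out;
+            ret = 0;
+    out:
+            if (no_guard)
+                    guard_exit();       /* restore the previous state */
+            return ret;
+    }
+
+    int main(void)
+    {
+            return lookup(0);
+    }
+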
+diff --git a/kernel/futex.c b/kernel/futex.c
+index f50b434756c1..bf57ab12ffe8 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -821,8 +821,6 @@ static void get_pi_state(struct futex_pi_state *pi_state)
+ /*
+  * Drops a reference to the pi_state object and frees or caches it
+  * when the last reference is gone.
+- *
+- * Must be called with the hb lock held.
+  */
+ static void put_pi_state(struct futex_pi_state *pi_state)
+ {
+@@ -837,16 +835,22 @@ static void put_pi_state(struct futex_pi_state *pi_state)
+ 	 * and has cleaned up the pi_state already
+ 	 */
+ 	if (pi_state->owner) {
+-		raw_spin_lock_irq(&pi_state->owner->pi_lock);
+-		list_del_init(&pi_state->list);
+-		raw_spin_unlock_irq(&pi_state->owner->pi_lock);
++		struct task_struct *owner;
+ 
+-		rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
++		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
++		owner = pi_state->owner;
++		if (owner) {
++			raw_spin_lock(&owner->pi_lock);
++			list_del_init(&pi_state->list);
++			raw_spin_unlock(&owner->pi_lock);
++		}
++		rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner);
++		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+ 	}
+ 
+-	if (current->pi_state_cache)
++	if (current->pi_state_cache) {
+ 		kfree(pi_state);
+-	else {
++	} else {
+ 		/*
+ 		 * pi_state->list is already empty.
+ 		 * clear pi_state->owner.
+@@ -905,13 +909,14 @@ void exit_pi_state_list(struct task_struct *curr)
+ 		raw_spin_unlock_irq(&curr->pi_lock);
+ 
+ 		spin_lock(&hb->lock);
+-
+-		raw_spin_lock_irq(&curr->pi_lock);
++		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
++		raw_spin_lock(&curr->pi_lock);
+ 		/*
+ 		 * We dropped the pi-lock, so re-check whether this
+ 		 * task still owns the PI-state:
+ 		 */
+ 		if (head->next != next) {
++			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
+ 			spin_unlock(&hb->lock);
+ 			continue;
+ 		}
+@@ -920,9 +925,10 @@ void exit_pi_state_list(struct task_struct *curr)
+ 		WARN_ON(list_empty(&pi_state->list));
+ 		list_del_init(&pi_state->list);
+ 		pi_state->owner = NULL;
+-		raw_spin_unlock_irq(&curr->pi_lock);
++		raw_spin_unlock(&curr->pi_lock);
+ 
+ 		get_pi_state(pi_state);
++		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+ 		spin_unlock(&hb->lock);
+ 
+ 		rt_mutex_futex_unlock(&pi_state->pi_mutex);
+@@ -1204,6 +1210,10 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
+ 
+ 	WARN_ON(!list_empty(&pi_state->list));
+ 	list_add(&pi_state->list, &p->pi_state_list);
++	/*
++	 * Assignment without holding pi_state->pi_mutex.wait_lock is safe
++	 * because there is no concurrency as the object is not published yet.
++	 */
+ 	pi_state->owner = p;
+ 	raw_spin_unlock_irq(&p->pi_lock);
+ 
+@@ -2820,6 +2830,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
+ 		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
+ 		spin_unlock(&hb->lock);
+ 
++		/* drops pi_state->pi_mutex.wait_lock */
+ 		ret = wake_futex_pi(uaddr, uval, pi_state);
+ 
+ 		put_pi_state(pi_state);
+diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
+index 3675c6004f2a..75a70a267029 100644
+--- a/kernel/irq/chip.c
++++ b/kernel/irq/chip.c
+@@ -202,7 +202,7 @@ __irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
+ 
+ 	irqd_clr_managed_shutdown(d);
+ 
+-	if (cpumask_any_and(aff, cpu_online_mask) > nr_cpu_ids) {
++	if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
+ 		/*
+ 		 * Catch code which fiddles with enable_irq() on a managed
+ 		 * and potentially shutdown IRQ. Chained interrupt
+diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
+index f7086b78ad6e..5270a54b9fa4 100644
+--- a/kernel/irq/generic-chip.c
++++ b/kernel/irq/generic-chip.c
+@@ -322,7 +322,6 @@ int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
+ 		/* Calc pointer to the next generic chip */
+ 		tmp += sizeof(*gc) + num_ct * sizeof(struct irq_chip_type);
+ 	}
+-	d->name = name;
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(__irq_alloc_domain_generic_chips);
+diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
+index 73be2b3909bd..82afb7ed369f 100644
+--- a/kernel/irq/irqdesc.c
++++ b/kernel/irq/irqdesc.c
+@@ -421,10 +421,8 @@ static void free_desc(unsigned int irq)
+ 	 * The sysfs entry must be serialized against a concurrent
+ 	 * irq_sysfs_init() as well.
+ 	 */
+-	mutex_lock(&sparse_irq_lock);
+ 	kobject_del(&desc->kobj);
+ 	delete_irq_desc(irq);
+-	mutex_unlock(&sparse_irq_lock);
+ 
+ 	/*
+ 	 * We free the descriptor, masks and stat fields via RCU. That
+@@ -462,20 +460,15 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node,
+ 		desc = alloc_desc(start + i, node, flags, mask, owner);
+ 		if (!desc)
+ 			goto err;
+-		mutex_lock(&sparse_irq_lock);
+ 		irq_insert_desc(start + i, desc);
+ 		irq_sysfs_add(start + i, desc);
+-		mutex_unlock(&sparse_irq_lock);
+ 	}
++	bitmap_set(allocated_irqs, start, cnt);
+ 	return start;
+ 
+ err:
+ 	for (i--; i >= 0; i--)
+ 		free_desc(start + i);
+-
+-	mutex_lock(&sparse_irq_lock);
+-	bitmap_clear(allocated_irqs, start, cnt);
+-	mutex_unlock(&sparse_irq_lock);
+ 	return -ENOMEM;
+ }
+ 
+@@ -575,6 +568,7 @@ static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
+ 
+ 		desc->owner = owner;
+ 	}
++	bitmap_set(allocated_irqs, start, cnt);
+ 	return start;
+ }
+ 
+@@ -670,10 +664,10 @@ void irq_free_descs(unsigned int from, unsigned int cnt)
+ 	if (from >= nr_irqs || (from + cnt) > nr_irqs)
+ 		return;
+ 
++	mutex_lock(&sparse_irq_lock);
+ 	for (i = 0; i < cnt; i++)
+ 		free_desc(from + i);
+ 
+-	mutex_lock(&sparse_irq_lock);
+ 	bitmap_clear(allocated_irqs, from, cnt);
+ 	mutex_unlock(&sparse_irq_lock);
+ }
+@@ -720,19 +714,15 @@ __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
+ 					   from, cnt, 0);
+ 	ret = -EEXIST;
+ 	if (irq >=0 && start != irq)
+-		goto err;
++		goto unlock;
+ 
+ 	if (start + cnt > nr_irqs) {
+ 		ret = irq_expand_nr_irqs(start + cnt);
+ 		if (ret)
+-			goto err;
++			goto unlock;
+ 	}
+-
+-	bitmap_set(allocated_irqs, start, cnt);
+-	mutex_unlock(&sparse_irq_lock);
+-	return alloc_descs(start, cnt, node, affinity, owner);
+-
+-err:
++	ret = alloc_descs(start, cnt, node, affinity, owner);
++unlock:
+ 	mutex_unlock(&sparse_irq_lock);
+ 	return ret;
+ }
+diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
+index 48eadf416c24..3fa4bd59f569 100644
+--- a/kernel/irq/msi.c
++++ b/kernel/irq/msi.c
+@@ -315,11 +315,12 @@ int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
+ 
+ 		ops->set_desc(arg, desc);
+ 		/* Assumes the domain mutex is held! */
+-		ret = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
++		ret = irq_domain_alloc_irqs_hierarchy(domain, desc->irq, 1,
++						      arg);
+ 		if (ret)
+ 			break;
+ 
+-		irq_set_msi_desc_off(virq, 0, desc);
++		irq_set_msi_desc_off(desc->irq, 0, desc);
+ 	}
+ 
+ 	if (ret) {
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 51d4c3acf32d..63bee8e1b193 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -888,6 +888,11 @@ void rcu_irq_exit(void)
+ 
+ 	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!");
+ 	rdtp = this_cpu_ptr(&rcu_dynticks);
++
++	/* Page faults can happen in NMI handlers, so check... */
++	if (READ_ONCE(rdtp->dynticks_nmi_nesting))
++		return;
++
+ 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
+ 		     rdtp->dynticks_nesting < 1);
+ 	if (rdtp->dynticks_nesting <= 1) {
+@@ -1020,6 +1025,11 @@ void rcu_irq_enter(void)
+ 
+ 	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_enter() invoked with irqs enabled!!!");
+ 	rdtp = this_cpu_ptr(&rcu_dynticks);
++
++	/* Page faults can happen in NMI handlers, so check... */
++	if (READ_ONCE(rdtp->dynticks_nmi_nesting))
++		return;
++
+ 	oldval = rdtp->dynticks_nesting;
+ 	rdtp->dynticks_nesting++;
+ 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
+diff --git a/kernel/seccomp.c b/kernel/seccomp.c
+index 98b59b5db90b..f3218fec7f2d 100644
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -458,14 +458,19 @@ static long seccomp_attach_filter(unsigned int flags,
+ 	return 0;
+ }
+ 
++void __get_seccomp_filter(struct seccomp_filter *filter)
++{
++	/* Reference count is bounded by the number of total processes. */
++	refcount_inc(&filter->usage);
++}
++
+ /* get_seccomp_filter - increments the reference count of the filter on @tsk */
+ void get_seccomp_filter(struct task_struct *tsk)
+ {
+ 	struct seccomp_filter *orig = tsk->seccomp.filter;
+ 	if (!orig)
+ 		return;
+-	/* Reference count is bounded by the number of total processes. */
+-	refcount_inc(&orig->usage);
++	__get_seccomp_filter(orig);
+ }
+ 
+ static inline void seccomp_filter_free(struct seccomp_filter *filter)
+@@ -476,10 +481,8 @@ static inline void seccomp_filter_free(struct seccomp_filter *filter)
+ 	}
+ }
+ 
+-/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
+-void put_seccomp_filter(struct task_struct *tsk)
++static void __put_seccomp_filter(struct seccomp_filter *orig)
+ {
+-	struct seccomp_filter *orig = tsk->seccomp.filter;
+ 	/* Clean up single-reference branches iteratively. */
+ 	while (orig && refcount_dec_and_test(&orig->usage)) {
+ 		struct seccomp_filter *freeme = orig;
+@@ -488,6 +491,12 @@ void put_seccomp_filter(struct task_struct *tsk)
+ 	}
+ }
+ 
++/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
++void put_seccomp_filter(struct task_struct *tsk)
++{
++	__put_seccomp_filter(tsk->seccomp.filter);
++}
++
+ static void seccomp_init_siginfo(siginfo_t *info, int syscall, int reason)
+ {
+ 	memset(info, 0, sizeof(*info));
+@@ -908,13 +917,13 @@ long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
+ 	if (!data)
+ 		goto out;
+ 
+-	get_seccomp_filter(task);
++	__get_seccomp_filter(filter);
+ 	spin_unlock_irq(&task->sighand->siglock);
+ 
+ 	if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
+ 		ret = -EFAULT;
+ 
+-	put_seccomp_filter(task);
++	__put_seccomp_filter(filter);
+ 	return ret;
+ 
+ out:
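+
+The seccomp rework above splits the reference helpers so a filter can be
+pinned directly while task->sighand->siglock is dropped: the new
+__get_seccomp_filter()/__put_seccomp_filter() take the filter pointer the
+caller already holds instead of re-deriving it from the task. The underlying
+get/put-on-object pattern, with C11 atomics standing in for refcount_t:
+
+    #include <stdatomic.h>
+    #include <stdlib.h>
+
+    struct filter {
+            atomic_int usage;
+            struct filter *prev;    /* single-reference chain, as in seccomp */
+    };
+
+    static void __get_filter(struct filter *f)
+    {
+            atomic_fetch_add(&f->usage, 1);
+    }
+
+    static void __put_filter(struct filter *f)
+    {
+            /* Free single-reference links iteratively, like seccomp. */
+            while (f && atomic_fetch_sub(&f->usage, 1) == 1) {
+                    struct filter *freeme = f;
+
+                    f = f->prev;
+                    free(freeme);
+            }
+    }
+
+    int main(void)
+    {
+            struct filter *f = calloc(1, sizeof(*f));
+
+            atomic_store(&f->usage, 1);
+            __get_filter(f);        /* pin while a lock is dropped */
+            __put_filter(f);        /* 2 -> 1, object survives */
+            __put_filter(f);        /* 1 -> 0, freed */
+            return 0;
+    }
+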
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 6648fbbb8157..423554ad3610 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -367,7 +367,8 @@ static struct ctl_table kern_table[] = {
+ 		.data		= &sysctl_sched_time_avg,
+ 		.maxlen		= sizeof(unsigned int),
+ 		.mode		= 0644,
+-		.proc_handler	= proc_dointvec,
++		.proc_handler	= proc_dointvec_minmax,
++		.extra1		= &one,
+ 	},
+ #ifdef CONFIG_SCHEDSTATS
+ 	{
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 5efb4b63174e..27a723480b13 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4017,11 +4017,17 @@ static int tracing_open(struct inode *inode, struct file *file)
+ 	/* If this file was open for write, then erase contents */
+ 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
+ 		int cpu = tracing_get_cpu(inode);
++		struct trace_buffer *trace_buf = &tr->trace_buffer;
++
++#ifdef CONFIG_TRACER_MAX_TRACE
++		if (tr->current_trace->print_max)
++			trace_buf = &tr->max_buffer;
++#endif
+ 
+ 		if (cpu == RING_BUFFER_ALL_CPUS)
+-			tracing_reset_online_cpus(&tr->trace_buffer);
++			tracing_reset_online_cpus(trace_buf);
+ 		else
+-			tracing_reset(&tr->trace_buffer, cpu);
++			tracing_reset(trace_buf, cpu);
+ 	}
+ 
+ 	if (file->f_mode & FMODE_READ) {
+@@ -5664,7 +5670,7 @@ static int tracing_wait_pipe(struct file *filp)
+ 		 *
+ 		 * iter->pos will be 0 if we haven't read anything.
+ 		 */
+-		if (!tracing_is_on() && iter->pos)
++		if (!tracer_tracing_is_on(iter->tr) && iter->pos)
+ 			break;
+ 
+ 		mutex_unlock(&iter->mutex);
+diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
+index a4df67cbc711..49cb41412eec 100644
+--- a/kernel/trace/trace_stack.c
++++ b/kernel/trace/trace_stack.c
+@@ -96,23 +96,9 @@ check_stack(unsigned long ip, unsigned long *stack)
+ 	if (in_nmi())
+ 		return;
+ 
+-	/*
+-	 * There's a slight chance that we are tracing inside the
+-	 * RCU infrastructure, and rcu_irq_enter() will not work
+-	 * as expected.
+-	 */
+-	if (unlikely(rcu_irq_enter_disabled()))
+-		return;
+-
+ 	local_irq_save(flags);
+ 	arch_spin_lock(&stack_trace_max_lock);
+ 
+-	/*
+-	 * RCU may not be watching, make it see us.
+-	 * The stack trace code uses rcu_sched.
+-	 */
+-	rcu_irq_enter();
+-
+ 	/* In case another CPU set the tracer_frame on us */
+ 	if (unlikely(!frame_size))
+ 		this_size -= tracer_frame;
+@@ -205,7 +191,6 @@ check_stack(unsigned long ip, unsigned long *stack)
+ 	}
+ 
+  out:
+-	rcu_irq_exit();
+ 	arch_spin_unlock(&stack_trace_max_lock);
+ 	local_irq_restore(flags);
+ }
+diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
+index f358d0bfa76b..79d14d70b7ea 100644
+--- a/net/ceph/osdmap.c
++++ b/net/ceph/osdmap.c
+@@ -2445,19 +2445,34 @@ static void apply_upmap(struct ceph_osdmap *osdmap,
+ 
+ 	pg = lookup_pg_mapping(&osdmap->pg_upmap_items, pgid);
+ 	if (pg) {
+-		for (i = 0; i < raw->size; i++) {
+-			for (j = 0; j < pg->pg_upmap_items.len; j++) {
+-				int from = pg->pg_upmap_items.from_to[j][0];
+-				int to = pg->pg_upmap_items.from_to[j][1];
+-
+-				if (from == raw->osds[i]) {
+-					if (!(to != CRUSH_ITEM_NONE &&
+-					      to < osdmap->max_osd &&
+-					      osdmap->osd_weight[to] == 0))
+-						raw->osds[i] = to;
++		/*
++		 * Note: this approach does not allow a bidirectional swap,
++		 * e.g., [[1,2],[2,1]] applied to [0,1,2] -> [0,2,1].
++		 */
++		for (i = 0; i < pg->pg_upmap_items.len; i++) {
++			int from = pg->pg_upmap_items.from_to[i][0];
++			int to = pg->pg_upmap_items.from_to[i][1];
++			int pos = -1;
++			bool exists = false;
++
++			/* make sure replacement doesn't already appear */
++			for (j = 0; j < raw->size; j++) {
++				int osd = raw->osds[j];
++
++				if (osd == to) {
++					exists = true;
+ 					break;
+ 				}
++				/* ignore mapping if target is marked out */
++				if (osd == from && pos < 0 &&
++				    !(to != CRUSH_ITEM_NONE &&
++				      to < osdmap->max_osd &&
++				      osdmap->osd_weight[to] == 0)) {
++					pos = j;
++				}
+ 			}
++			if (!exists && pos >= 0)
++				raw->osds[pos] = to;
+ 		}
+ 	}
+ }
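+
+The apply_upmap() rewrite above inverts the loop nesting: for each
+(from, to) pair it scans the raw OSD set once, remembering where "from"
+sits, and only substitutes if "to" is not already in the set (the kernel
+version additionally skips targets that are marked out). A reduced sketch of
+that substitute-unless-duplicate scan:
+
+    #include <stdio.h>
+
+    static void remap(int *osds, int n, int from, int to)
+    {
+            int pos = -1;
+
+            for (int j = 0; j < n; j++) {
+                    if (osds[j] == to)
+                            return;     /* target already present */
+                    if (osds[j] == from && pos < 0)
+                            pos = j;    /* remember the first match */
+            }
+            if (pos >= 0)
+                    osds[pos] = to;
+    }
+
+    int main(void)
+    {
+            int osds[] = { 0, 1, 2 };
+
+            remap(osds, 3, 1, 5);       /* substitutes: 0 5 2 */
+            remap(osds, 3, 0, 2);       /* 2 already present: no-op */
+            printf("%d %d %d\n", osds[0], osds[1], osds[2]);
+            return 0;
+    }
+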
+diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
+index 2b36eff5d97e..2849a1fc41c5 100644
+--- a/net/mac80211/agg-rx.c
++++ b/net/mac80211/agg-rx.c
+@@ -245,10 +245,10 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d
+ 	ieee80211_tx_skb(sdata, skb);
+ }
+ 
+-void __ieee80211_start_rx_ba_session(struct sta_info *sta,
+-				     u8 dialog_token, u16 timeout,
+-				     u16 start_seq_num, u16 ba_policy, u16 tid,
+-				     u16 buf_size, bool tx, bool auto_seq)
++void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
++				      u8 dialog_token, u16 timeout,
++				      u16 start_seq_num, u16 ba_policy, u16 tid,
++				      u16 buf_size, bool tx, bool auto_seq)
+ {
+ 	struct ieee80211_local *local = sta->sdata->local;
+ 	struct tid_ampdu_rx *tid_agg_rx;
+@@ -267,7 +267,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
+ 		ht_dbg(sta->sdata,
+ 		       "STA %pM requests BA session on unsupported tid %d\n",
+ 		       sta->sta.addr, tid);
+-		goto end_no_lock;
++		goto end;
+ 	}
+ 
+ 	if (!sta->sta.ht_cap.ht_supported) {
+@@ -275,14 +275,14 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
+ 		       "STA %pM erroneously requests BA session on tid %d w/o QoS\n",
+ 		       sta->sta.addr, tid);
+ 		/* send a response anyway, it's an error case if we get here */
+-		goto end_no_lock;
++		goto end;
+ 	}
+ 
+ 	if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
+ 		ht_dbg(sta->sdata,
+ 		       "Suspend in progress - Denying ADDBA request (%pM tid %d)\n",
+ 		       sta->sta.addr, tid);
+-		goto end_no_lock;
++		goto end;
+ 	}
+ 
+ 	/* sanity check for incoming parameters:
+@@ -296,7 +296,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
+ 		ht_dbg_ratelimited(sta->sdata,
+ 				   "AddBA Req with bad params from %pM on tid %u. policy %d, buffer size %d\n",
+ 				   sta->sta.addr, tid, ba_policy, buf_size);
+-		goto end_no_lock;
++		goto end;
+ 	}
+ 	/* determine default buffer size */
+ 	if (buf_size == 0)
+@@ -311,7 +311,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
+ 	       buf_size, sta->sta.addr);
+ 
+ 	/* examine state machine */
+-	mutex_lock(&sta->ampdu_mlme.mtx);
++	lockdep_assert_held(&sta->ampdu_mlme.mtx);
+ 
+ 	if (test_bit(tid, sta->ampdu_mlme.agg_session_valid)) {
+ 		if (sta->ampdu_mlme.tid_rx_token[tid] == dialog_token) {
+@@ -415,15 +415,25 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
+ 		__clear_bit(tid, sta->ampdu_mlme.unexpected_agg);
+ 		sta->ampdu_mlme.tid_rx_token[tid] = dialog_token;
+ 	}
+-	mutex_unlock(&sta->ampdu_mlme.mtx);
+ 
+-end_no_lock:
+ 	if (tx)
+ 		ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, tid,
+ 					  dialog_token, status, 1, buf_size,
+ 					  timeout);
+ }
+ 
++void __ieee80211_start_rx_ba_session(struct sta_info *sta,
++				     u8 dialog_token, u16 timeout,
++				     u16 start_seq_num, u16 ba_policy, u16 tid,
++				     u16 buf_size, bool tx, bool auto_seq)
++{
++	mutex_lock(&sta->ampdu_mlme.mtx);
++	___ieee80211_start_rx_ba_session(sta, dialog_token, timeout,
++					 start_seq_num, ba_policy, tid,
++					 buf_size, tx, auto_seq);
++	mutex_unlock(&sta->ampdu_mlme.mtx);
++}
++
+ void ieee80211_process_addba_request(struct ieee80211_local *local,
+ 				     struct sta_info *sta,
+ 				     struct ieee80211_mgmt *mgmt,
+diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
+index c92df492e898..198b2d3e56fd 100644
+--- a/net/mac80211/ht.c
++++ b/net/mac80211/ht.c
+@@ -333,9 +333,9 @@ void ieee80211_ba_session_work(struct work_struct *work)
+ 
+ 		if (test_and_clear_bit(tid,
+ 				       sta->ampdu_mlme.tid_rx_manage_offl))
+-			__ieee80211_start_rx_ba_session(sta, 0, 0, 0, 1, tid,
+-							IEEE80211_MAX_AMPDU_BUF,
+-							false, true);
++			___ieee80211_start_rx_ba_session(sta, 0, 0, 0, 1, tid,
++							 IEEE80211_MAX_AMPDU_BUF,
++							 false, true);
+ 
+ 		if (test_and_clear_bit(tid + IEEE80211_NUM_TIDS,
+ 				       sta->ampdu_mlme.tid_rx_manage_offl))
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index 2197c62a0a6e..9675814f64db 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -1760,6 +1760,10 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
+ 				     u8 dialog_token, u16 timeout,
+ 				     u16 start_seq_num, u16 ba_policy, u16 tid,
+ 				     u16 buf_size, bool tx, bool auto_seq);
++void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
++				      u8 dialog_token, u16 timeout,
++				      u16 start_seq_num, u16 ba_policy, u16 tid,
++				      u16 buf_size, bool tx, bool auto_seq);
+ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
+ 					 enum ieee80211_agg_stop_reason reason);
+ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index 9228ac73c429..44399322f356 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -792,6 +792,7 @@ static int ieee80211_open(struct net_device *dev)
+ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
+ 			      bool going_down)
+ {
++	struct ieee80211_sub_if_data *txq_sdata = sdata;
+ 	struct ieee80211_local *local = sdata->local;
+ 	struct fq *fq = &local->fq;
+ 	unsigned long flags;
+@@ -937,6 +938,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
+ 
+ 	switch (sdata->vif.type) {
+ 	case NL80211_IFTYPE_AP_VLAN:
++		txq_sdata = container_of(sdata->bss,
++					 struct ieee80211_sub_if_data, u.ap);
++
+ 		mutex_lock(&local->mtx);
+ 		list_del(&sdata->u.vlan.list);
+ 		mutex_unlock(&local->mtx);
+@@ -1007,8 +1011,17 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
+ 	}
+ 	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+ 
+-	if (sdata->vif.txq) {
+-		struct txq_info *txqi = to_txq_info(sdata->vif.txq);
++	if (txq_sdata->vif.txq) {
++		struct txq_info *txqi = to_txq_info(txq_sdata->vif.txq);
++
++		/*
++		 * FIXME FIXME
++		 *
++		 * We really shouldn't purge the *entire* txqi since that
++		 * contains frames for the other AP_VLANs (and possibly
++		 * the AP itself) as well, but there's no API in FQ now
++		 * to be able to filter.
++		 */
+ 
+ 		spin_lock_bh(&fq->lock);
+ 		ieee80211_txq_purge(local, txqi);
+diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
+index f8e7a8bbc618..faf4f6055000 100644
+--- a/net/mac80211/offchannel.c
++++ b/net/mac80211/offchannel.c
+@@ -707,6 +707,8 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
+ 	if (!cookie)
+ 		return -ENOENT;
+ 
++	flush_work(&local->hw_roc_start);
++
+ 	mutex_lock(&local->mtx);
+ 	list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
+ 		if (!mgmt_tx && roc->cookie != cookie)
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 8858f4f185e9..94826680cf2b 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -1276,11 +1276,6 @@ static void ieee80211_set_skb_enqueue_time(struct sk_buff *skb)
+ 	IEEE80211_SKB_CB(skb)->control.enqueue_time = codel_get_time();
+ }
+ 
+-static void ieee80211_set_skb_vif(struct sk_buff *skb, struct txq_info *txqi)
+-{
+-	IEEE80211_SKB_CB(skb)->control.vif = txqi->txq.vif;
+-}
+-
+ static u32 codel_skb_len_func(const struct sk_buff *skb)
+ {
+ 	return skb->len;
+@@ -3414,6 +3409,7 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
+ 	struct ieee80211_tx_info *info;
+ 	struct ieee80211_tx_data tx;
+ 	ieee80211_tx_result r;
++	struct ieee80211_vif *vif;
+ 
+ 	spin_lock_bh(&fq->lock);
+ 
+@@ -3430,8 +3426,6 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
+ 	if (!skb)
+ 		goto out;
+ 
+-	ieee80211_set_skb_vif(skb, txqi);
+-
+ 	hdr = (struct ieee80211_hdr *)skb->data;
+ 	info = IEEE80211_SKB_CB(skb);
+ 
+@@ -3488,6 +3482,34 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
+ 		}
+ 	}
+ 
++	switch (tx.sdata->vif.type) {
++	case NL80211_IFTYPE_MONITOR:
++		if (tx.sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) {
++			vif = &tx.sdata->vif;
++			break;
++		}
++		tx.sdata = rcu_dereference(local->monitor_sdata);
++		if (tx.sdata) {
++			vif = &tx.sdata->vif;
++			info->hw_queue =
++				vif->hw_queue[skb_get_queue_mapping(skb)];
++		} else if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) {
++			ieee80211_free_txskb(&local->hw, skb);
++			goto begin;
++		} else {
++			vif = NULL;
++		}
++		break;
++	case NL80211_IFTYPE_AP_VLAN:
++		tx.sdata = container_of(tx.sdata->bss,
++					struct ieee80211_sub_if_data, u.ap);
++		/* fall through */
++	default:
++		vif = &tx.sdata->vif;
++		break;
++	}
++
++	IEEE80211_SKB_CB(skb)->control.vif = vif;
+ out:
+ 	spin_unlock_bh(&fq->lock);
+ 
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 8ce85420ecb0..750ba5d24a49 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -10903,6 +10903,9 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
+ 	if (err)
+ 		return err;
+ 
++	if (!tb[NL80211_REKEY_DATA_REPLAY_CTR] || !tb[NL80211_REKEY_DATA_KEK] ||
++	    !tb[NL80211_REKEY_DATA_KCK])
++		return -EINVAL;
+ 	if (nla_len(tb[NL80211_REKEY_DATA_REPLAY_CTR]) != NL80211_REPLAY_CTR_LEN)
+ 		return -ERANGE;
+ 	if (nla_len(tb[NL80211_REKEY_DATA_KEK]) != NL80211_KEK_LEN)
+diff --git a/security/keys/Kconfig b/security/keys/Kconfig
+index a7a23b5541f8..91eafada3164 100644
+--- a/security/keys/Kconfig
++++ b/security/keys/Kconfig
+@@ -45,10 +45,8 @@ config BIG_KEYS
+ 	bool "Large payload keys"
+ 	depends on KEYS
+ 	depends on TMPFS
+-	depends on (CRYPTO_ANSI_CPRNG = y || CRYPTO_DRBG = y)
+ 	select CRYPTO_AES
+-	select CRYPTO_ECB
+-	select CRYPTO_RNG
++	select CRYPTO_GCM
+ 	help
+ 	  This option provides support for holding large keys within the kernel
+ 	  (for example Kerberos ticket caches).  The data may be stored out to
+diff --git a/security/keys/big_key.c b/security/keys/big_key.c
+index 835c1ab30d01..9c3b16ee1768 100644
+--- a/security/keys/big_key.c
++++ b/security/keys/big_key.c
+@@ -1,5 +1,6 @@
+ /* Large capacity key type
+  *
++ * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+  * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
+  * Written by David Howells (dhowells@redhat.com)
+  *
+@@ -16,10 +17,10 @@
+ #include <linux/shmem_fs.h>
+ #include <linux/err.h>
+ #include <linux/scatterlist.h>
++#include <linux/random.h>
+ #include <keys/user-type.h>
+ #include <keys/big_key-type.h>
+-#include <crypto/rng.h>
+-#include <crypto/skcipher.h>
++#include <crypto/aead.h>
+ 
+ /*
+  * Layout of key payload words.
+@@ -49,7 +50,12 @@ enum big_key_op {
+ /*
+  * Key size for big_key data encryption
+  */
+-#define ENC_KEY_SIZE	16
++#define ENC_KEY_SIZE 32
++
++/*
++ * Authentication tag length
++ */
++#define ENC_AUTHTAG_SIZE 16
+ 
+ /*
+  * big_key defined keys take an arbitrary string as the description and an
+@@ -64,57 +70,62 @@ struct key_type key_type_big_key = {
+ 	.destroy		= big_key_destroy,
+ 	.describe		= big_key_describe,
+ 	.read			= big_key_read,
++	/* no ->update(); don't add it without changing big_key_crypt() nonce */
+ };
+ 
+ /*
+- * Crypto names for big_key data encryption
++ * Crypto names for big_key data authenticated encryption
+  */
+-static const char big_key_rng_name[] = "stdrng";
+-static const char big_key_alg_name[] = "ecb(aes)";
++static const char big_key_alg_name[] = "gcm(aes)";
+ 
+ /*
+- * Crypto algorithms for big_key data encryption
++ * Crypto algorithms for big_key data authenticated encryption
+  */
+-static struct crypto_rng *big_key_rng;
+-static struct crypto_skcipher *big_key_skcipher;
++static struct crypto_aead *big_key_aead;
+ 
+ /*
+- * Generate random key to encrypt big_key data
++ * Since changing the key affects the entire object, we need a mutex.
+  */
+-static inline int big_key_gen_enckey(u8 *key)
+-{
+-	return crypto_rng_get_bytes(big_key_rng, key, ENC_KEY_SIZE);
+-}
++static DEFINE_MUTEX(big_key_aead_lock);
+ 
+ /*
+  * Encrypt/decrypt big_key data
+  */
+ static int big_key_crypt(enum big_key_op op, u8 *data, size_t datalen, u8 *key)
+ {
+-	int ret = -EINVAL;
++	int ret;
+ 	struct scatterlist sgio;
+-	SKCIPHER_REQUEST_ON_STACK(req, big_key_skcipher);
+-
+-	if (crypto_skcipher_setkey(big_key_skcipher, key, ENC_KEY_SIZE)) {
++	struct aead_request *aead_req;
++	/* We always use a zero nonce. The reason we can get away with this is
++	 * because we're using a different randomly generated key for every
++	 * different encryption. Notably, too, key_type_big_key doesn't define
++	 * an .update function, so there's no chance we'll wind up reusing the
++	 * key to encrypt updated data. Simply put: one key, one encryption.
++	 */
++	u8 zero_nonce[crypto_aead_ivsize(big_key_aead)];
++
++	aead_req = aead_request_alloc(big_key_aead, GFP_KERNEL);
++	if (!aead_req)
++		return -ENOMEM;
++
++	memset(zero_nonce, 0, sizeof(zero_nonce));
++	sg_init_one(&sgio, data, datalen + (op == BIG_KEY_ENC ? ENC_AUTHTAG_SIZE : 0));
++	aead_request_set_crypt(aead_req, &sgio, &sgio, datalen, zero_nonce);
++	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
++	aead_request_set_ad(aead_req, 0);
++
++	mutex_lock(&big_key_aead_lock);
++	if (crypto_aead_setkey(big_key_aead, key, ENC_KEY_SIZE)) {
+ 		ret = -EAGAIN;
+ 		goto error;
+ 	}
+-
+-	skcipher_request_set_tfm(req, big_key_skcipher);
+-	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
+-				      NULL, NULL);
+-
+-	sg_init_one(&sgio, data, datalen);
+-	skcipher_request_set_crypt(req, &sgio, &sgio, datalen, NULL);
+-
+ 	if (op == BIG_KEY_ENC)
+-		ret = crypto_skcipher_encrypt(req);
++		ret = crypto_aead_encrypt(aead_req);
+ 	else
+-		ret = crypto_skcipher_decrypt(req);
+-
+-	skcipher_request_zero(req);
+-
++		ret = crypto_aead_decrypt(aead_req);
+ error:
++	mutex_unlock(&big_key_aead_lock);
++	aead_request_free(aead_req);
+ 	return ret;
+ }
+ 
+@@ -146,15 +157,13 @@ int big_key_preparse(struct key_preparsed_payload *prep)
+ 		 *
+ 		 * File content is stored encrypted with randomly generated key.
+ 		 */
+-		size_t enclen = ALIGN(datalen, crypto_skcipher_blocksize(big_key_skcipher));
++		size_t enclen = datalen + ENC_AUTHTAG_SIZE;
+ 
+-		/* prepare aligned data to encrypt */
+ 		data = kmalloc(enclen, GFP_KERNEL);
+ 		if (!data)
+ 			return -ENOMEM;
+ 
+ 		memcpy(data, prep->data, datalen);
+-		memset(data + datalen, 0x00, enclen - datalen);
+ 
+ 		/* generate random key */
+ 		enckey = kmalloc(ENC_KEY_SIZE, GFP_KERNEL);
+@@ -162,13 +171,12 @@ int big_key_preparse(struct key_preparsed_payload *prep)
+ 			ret = -ENOMEM;
+ 			goto error;
+ 		}
+-
+-		ret = big_key_gen_enckey(enckey);
+-		if (ret)
++		ret = get_random_bytes_wait(enckey, ENC_KEY_SIZE);
++		if (unlikely(ret))
+ 			goto err_enckey;
+ 
+ 		/* encrypt aligned data */
+-		ret = big_key_crypt(BIG_KEY_ENC, data, enclen, enckey);
++		ret = big_key_crypt(BIG_KEY_ENC, data, datalen, enckey);
+ 		if (ret)
+ 			goto err_enckey;
+ 
+@@ -194,7 +202,7 @@ int big_key_preparse(struct key_preparsed_payload *prep)
+ 		*path = file->f_path;
+ 		path_get(path);
+ 		fput(file);
+-		kfree(data);
++		kzfree(data);
+ 	} else {
+ 		/* Just store the data in a buffer */
+ 		void *data = kmalloc(datalen, GFP_KERNEL);
+@@ -210,9 +218,9 @@ int big_key_preparse(struct key_preparsed_payload *prep)
+ err_fput:
+ 	fput(file);
+ err_enckey:
+-	kfree(enckey);
++	kzfree(enckey);
+ error:
+-	kfree(data);
++	kzfree(data);
+ 	return ret;
+ }
+ 
+@@ -226,7 +234,7 @@ void big_key_free_preparse(struct key_preparsed_payload *prep)
+ 
+ 		path_put(path);
+ 	}
+-	kfree(prep->payload.data[big_key_data]);
++	kzfree(prep->payload.data[big_key_data]);
+ }
+ 
+ /*
+@@ -258,7 +266,7 @@ void big_key_destroy(struct key *key)
+ 		path->mnt = NULL;
+ 		path->dentry = NULL;
+ 	}
+-	kfree(key->payload.data[big_key_data]);
++	kzfree(key->payload.data[big_key_data]);
+ 	key->payload.data[big_key_data] = NULL;
+ }
+ 
+@@ -294,7 +302,7 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
+ 		struct file *file;
+ 		u8 *data;
+ 		u8 *enckey = (u8 *)key->payload.data[big_key_data];
+-		size_t enclen = ALIGN(datalen, crypto_skcipher_blocksize(big_key_skcipher));
++		size_t enclen = datalen + ENC_AUTHTAG_SIZE;
+ 
+ 		data = kmalloc(enclen, GFP_KERNEL);
+ 		if (!data)
+@@ -326,7 +334,7 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
+ err_fput:
+ 		fput(file);
+ error:
+-		kfree(data);
++		kzfree(data);
+ 	} else {
+ 		ret = datalen;
+ 		if (copy_to_user(buffer, key->payload.data[big_key_data],
+@@ -342,47 +350,31 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
+  */
+ static int __init big_key_init(void)
+ {
+-	struct crypto_skcipher *cipher;
+-	struct crypto_rng *rng;
+ 	int ret;
+ 
+-	rng = crypto_alloc_rng(big_key_rng_name, 0, 0);
+-	if (IS_ERR(rng)) {
+-		pr_err("Can't alloc rng: %ld\n", PTR_ERR(rng));
+-		return PTR_ERR(rng);
+-	}
+-
+-	big_key_rng = rng;
+-
+-	/* seed RNG */
+-	ret = crypto_rng_reset(rng, NULL, crypto_rng_seedsize(rng));
+-	if (ret) {
+-		pr_err("Can't reset rng: %d\n", ret);
+-		goto error_rng;
+-	}
+-
+ 	/* init block cipher */
+-	cipher = crypto_alloc_skcipher(big_key_alg_name, 0, CRYPTO_ALG_ASYNC);
+-	if (IS_ERR(cipher)) {
+-		ret = PTR_ERR(cipher);
++	big_key_aead = crypto_alloc_aead(big_key_alg_name, 0, CRYPTO_ALG_ASYNC);
++	if (IS_ERR(big_key_aead)) {
++		ret = PTR_ERR(big_key_aead);
+ 		pr_err("Can't alloc crypto: %d\n", ret);
+-		goto error_rng;
++		return ret;
++	}
++	ret = crypto_aead_setauthsize(big_key_aead, ENC_AUTHTAG_SIZE);
++	if (ret < 0) {
++		pr_err("Can't set crypto auth tag len: %d\n", ret);
++		goto free_aead;
+ 	}
+-
+-	big_key_skcipher = cipher;
+ 
+ 	ret = register_key_type(&key_type_big_key);
+ 	if (ret < 0) {
+ 		pr_err("Can't register type: %d\n", ret);
+-		goto error_cipher;
++		goto free_aead;
+ 	}
+ 
+ 	return 0;
+ 
+-error_cipher:
+-	crypto_free_skcipher(big_key_skcipher);
+-error_rng:
+-	crypto_free_rng(big_key_rng);
++free_aead:
++	crypto_free_aead(big_key_aead);
+ 	return ret;
+ }
+ 
+diff --git a/security/keys/internal.h b/security/keys/internal.h
+index 1c02c6547038..503adbae7b0d 100644
+--- a/security/keys/internal.h
++++ b/security/keys/internal.h
+@@ -141,7 +141,7 @@ extern key_ref_t keyring_search_aux(key_ref_t keyring_ref,
+ extern key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx);
+ extern key_ref_t search_process_keyrings(struct keyring_search_context *ctx);
+ 
+-extern struct key *find_keyring_by_name(const char *name, bool skip_perm_check);
++extern struct key *find_keyring_by_name(const char *name, bool uid_keyring);
+ 
+ extern int install_user_keyrings(void);
+ extern int install_thread_keyring_to_cred(struct cred *);
+diff --git a/security/keys/key.c b/security/keys/key.c
+index 83da68d98b40..e5c0896c3a8f 100644
+--- a/security/keys/key.c
++++ b/security/keys/key.c
+@@ -302,6 +302,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
+ 		key->flags |= 1 << KEY_FLAG_IN_QUOTA;
+ 	if (flags & KEY_ALLOC_BUILT_IN)
+ 		key->flags |= 1 << KEY_FLAG_BUILTIN;
++	if (flags & KEY_ALLOC_UID_KEYRING)
++		key->flags |= 1 << KEY_FLAG_UID_KEYRING;
+ 
+ #ifdef KEY_DEBUGGING
+ 	key->magic = KEY_DEBUG_MAGIC;
+diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
+index ab0b337c84b4..6a82090c7fc1 100644
+--- a/security/keys/keyctl.c
++++ b/security/keys/keyctl.c
+@@ -766,6 +766,11 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
+ 
+ 	key = key_ref_to_ptr(key_ref);
+ 
++	if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
++		ret = -ENOKEY;
++		goto error2;
++	}
++
+ 	/* see if we can read it directly */
+ 	ret = key_permission(key_ref, KEY_NEED_READ);
+ 	if (ret == 0)
+diff --git a/security/keys/keyring.c b/security/keys/keyring.c
+index de81793f9920..4fa82a8a9c0e 100644
+--- a/security/keys/keyring.c
++++ b/security/keys/keyring.c
+@@ -423,7 +423,7 @@ static void keyring_describe(const struct key *keyring, struct seq_file *m)
+ }
+ 
+ struct keyring_read_iterator_context {
+-	size_t			qty;
++	size_t			buflen;
+ 	size_t			count;
+ 	key_serial_t __user	*buffer;
+ };
+@@ -435,9 +435,9 @@ static int keyring_read_iterator(const void *object, void *data)
+ 	int ret;
+ 
+ 	kenter("{%s,%d},,{%zu/%zu}",
+-	       key->type->name, key->serial, ctx->count, ctx->qty);
++	       key->type->name, key->serial, ctx->count, ctx->buflen);
+ 
+-	if (ctx->count >= ctx->qty)
++	if (ctx->count >= ctx->buflen)
+ 		return 1;
+ 
+ 	ret = put_user(key->serial, ctx->buffer);
+@@ -472,16 +472,12 @@ static long keyring_read(const struct key *keyring,
+ 		return 0;
+ 
+ 	/* Calculate how much data we could return */
+-	ctx.qty = nr_keys * sizeof(key_serial_t);
+-
+ 	if (!buffer || !buflen)
+-		return ctx.qty;
+-
+-	if (buflen > ctx.qty)
+-		ctx.qty = buflen;
++		return nr_keys * sizeof(key_serial_t);
+ 
+ 	/* Copy the IDs of the subscribed keys into the buffer */
+ 	ctx.buffer = (key_serial_t __user *)buffer;
++	ctx.buflen = buflen;
+ 	ctx.count = 0;
+ 	ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx);
+ 	if (ret < 0) {
+@@ -1101,15 +1097,15 @@ key_ref_t find_key_to_update(key_ref_t keyring_ref,
+ /*
+  * Find a keyring with the specified name.
+  *
+- * All named keyrings in the current user namespace are searched, provided they
+- * grant Search permission directly to the caller (unless this check is
+- * skipped).  Keyrings whose usage points have reached zero or who have been
+- * revoked are skipped.
++ * Only keyrings that have nonzero refcount, are not revoked, and are owned by a
++ * user in the current user namespace are considered.  If @uid_keyring is %true,
++ * the keyring additionally must have been allocated as a user or user session
++ * keyring; otherwise, it must grant Search permission directly to the caller.
+  *
+  * Returns a pointer to the keyring with the keyring's refcount having being
+  * incremented on success.  -ENOKEY is returned if a key could not be found.
+  */
+-struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
++struct key *find_keyring_by_name(const char *name, bool uid_keyring)
+ {
+ 	struct key *keyring;
+ 	int bucket;
+@@ -1137,10 +1133,15 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
+ 			if (strcmp(keyring->description, name) != 0)
+ 				continue;
+ 
+-			if (!skip_perm_check &&
+-			    key_permission(make_key_ref(keyring, 0),
+-					   KEY_NEED_SEARCH) < 0)
+-				continue;
++			if (uid_keyring) {
++				if (!test_bit(KEY_FLAG_UID_KEYRING,
++					      &keyring->flags))
++					continue;
++			} else {
++				if (key_permission(make_key_ref(keyring, 0),
++						   KEY_NEED_SEARCH) < 0)
++					continue;
++			}
+ 
+ 			/* we've got a match but we might end up racing with
+ 			 * key_cleanup() if the keyring is currently 'dead'
+diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
+index 86bced9fdbdf..293d3598153b 100644
+--- a/security/keys/process_keys.c
++++ b/security/keys/process_keys.c
+@@ -77,7 +77,8 @@ int install_user_keyrings(void)
+ 		if (IS_ERR(uid_keyring)) {
+ 			uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID,
+ 						    cred, user_keyring_perm,
+-						    KEY_ALLOC_IN_QUOTA,
++						    KEY_ALLOC_UID_KEYRING |
++							KEY_ALLOC_IN_QUOTA,
+ 						    NULL, NULL);
+ 			if (IS_ERR(uid_keyring)) {
+ 				ret = PTR_ERR(uid_keyring);
+@@ -94,7 +95,8 @@ int install_user_keyrings(void)
+ 			session_keyring =
+ 				keyring_alloc(buf, user->uid, INVALID_GID,
+ 					      cred, user_keyring_perm,
+-					      KEY_ALLOC_IN_QUOTA,
++					      KEY_ALLOC_UID_KEYRING |
++						  KEY_ALLOC_IN_QUOTA,
+ 					      NULL, NULL);
+ 			if (IS_ERR(session_keyring)) {
+ 				ret = PTR_ERR(session_keyring);
+diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
+index 73f5ea6778ce..9380c3fc7cfe 100644
+--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
++++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
+@@ -6,10 +6,18 @@
+  */
+ 
+ #include <sys/types.h>
+-#include <asm/siginfo.h>
+-#define __have_siginfo_t 1
+-#define __have_sigval_t 1
+-#define __have_sigevent_t 1
++
++/*
++ * glibc 2.26 and later have SIGSYS in siginfo_t. Before that,
++ * we need to use the kernel's siginfo.h file and trick glibc
++ * into accepting it.
++ */
++#if !__GLIBC_PREREQ(2, 26)
++# include <asm/siginfo.h>
++# define __have_siginfo_t 1
++# define __have_sigval_t 1
++# define __have_sigevent_t 1
++#endif
+ 
+ #include <errno.h>
+ #include <linux/filter.h>
+@@ -676,7 +684,7 @@ TEST_F_SIGNAL(TRAP, ign, SIGSYS)
+ 	syscall(__NR_getpid);
+ }
+ 
+-static struct siginfo TRAP_info;
++static siginfo_t TRAP_info;
+ static volatile int TRAP_nr;
+ static void TRAP_action(int nr, siginfo_t *info, void *void_context)
+ {


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.13 commit in: /
@ 2017-09-27 16:44 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2017-09-27 16:44 UTC (permalink / raw
  To: gentoo-commits

commit:     6480b3fa1da27836df3f2d330135e093e74e79a0
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 27 16:44:36 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Sep 27 16:44:36 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=6480b3fa

Linux patch 4.13.4

 0000_README             |    4 +
 1003_linux-4.13.4.patch | 5143 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5147 insertions(+)

diff --git a/0000_README b/0000_README
index 70f03ff..aee01b0 100644
--- a/0000_README
+++ b/0000_README
@@ -55,6 +55,10 @@ Patch:  1002_linux-4.13.3.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.13.3
 
+Patch:  1003_linux-4.13.4.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.13.4
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1003_linux-4.13.4.patch b/1003_linux-4.13.4.patch
new file mode 100644
index 0000000..b118fe3
--- /dev/null
+++ b/1003_linux-4.13.4.patch
@@ -0,0 +1,5143 @@
+diff --git a/Documentation/dev-tools/gdb-kernel-debugging.rst b/Documentation/dev-tools/gdb-kernel-debugging.rst
+index 5e93c9bc6619..19df79286f00 100644
+--- a/Documentation/dev-tools/gdb-kernel-debugging.rst
++++ b/Documentation/dev-tools/gdb-kernel-debugging.rst
+@@ -31,11 +31,13 @@ Setup
+   CONFIG_DEBUG_INFO_REDUCED off. If your architecture supports
+   CONFIG_FRAME_POINTER, keep it enabled.
+ 
+-- Install that kernel on the guest.
++- Install that kernel on the guest, turn off KASLR if necessary by adding
++  "nokaslr" to the kernel command line.
+   Alternatively, QEMU allows to boot the kernel directly using -kernel,
+   -append, -initrd command line switches. This is generally only useful if
+   you do not depend on modules. See QEMU documentation for more details on
+-  this mode.
++  this mode. In this case, you should build the kernel with
++  CONFIG_RANDOMIZE_BASE disabled if the architecture supports KASLR.
+ 
+ - Enable the gdb stub of QEMU/KVM, either
+ 
+diff --git a/Makefile b/Makefile
+index 0f31ef4aea7b..159901979dec 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 13
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
+index 1eea99beecc3..85d9ea4a0acc 100644
+--- a/arch/arc/kernel/entry.S
++++ b/arch/arc/kernel/entry.S
+@@ -92,6 +92,12 @@ ENTRY(EV_MachineCheck)
+ 	lr  r0, [efa]
+ 	mov r1, sp
+ 
++	; hardware auto-disables MMU, re-enable it to allow kernel vaddr
++	; access for say stack unwinding of modules for crash dumps
++	lr	r3, [ARC_REG_PID]
++	or	r3, r3, MMU_ENABLE
++	sr	r3, [ARC_REG_PID]
++
+ 	lsr  	r3, r2, 8
+ 	bmsk 	r3, r3, 7
+ 	brne    r3, ECR_C_MCHK_DUP_TLB, 1f
+diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
+index b181f3ee38aa..ac81502055f8 100644
+--- a/arch/arc/mm/tlb.c
++++ b/arch/arc/mm/tlb.c
+@@ -908,9 +908,6 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
+ 
+ 	local_irq_save(flags);
+ 
+-	/* re-enable the MMU */
+-	write_aux_reg(ARC_REG_PID, MMU_ENABLE | read_aux_reg(ARC_REG_PID));
+-
+ 	/* loop thru all sets of TLB */
+ 	for (set = 0; set < mmu->sets; set++) {
+ 
+diff --git a/arch/mips/math-emu/dp_fmax.c b/arch/mips/math-emu/dp_fmax.c
+index fd71b8daaaf2..5bec64f2884e 100644
+--- a/arch/mips/math-emu/dp_fmax.c
++++ b/arch/mips/math-emu/dp_fmax.c
+@@ -47,14 +47,26 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y)
+ 	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ 		return ieee754dp_nanxcpt(x);
+ 
+-	/* numbers are preferred to NaNs */
++	/*
++	 * Quiet NaN handling
++	 */
++
++	/*
++	 *    The case of both inputs quiet NaNs
++	 */
++	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
++		return x;
++
++	/*
++	 *    The cases of exactly one input quiet NaN (numbers
++	 *    are here preferred as returned values to NaNs)
++	 */
+ 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ 	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ 	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ 		return x;
+ 
+-	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ 	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ 	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ 	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+@@ -80,9 +92,7 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y)
+ 		return ys ? x : y;
+ 
+ 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+-		if (xs == ys)
+-			return x;
+-		return ieee754dp_zero(1);
++		return ieee754dp_zero(xs & ys);
+ 
+ 	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ 		DPDNORMX;
+@@ -106,16 +116,32 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y)
+ 	else if (xs < ys)
+ 		return x;
+ 
+-	/* Compare exponent */
+-	if (xe > ye)
+-		return x;
+-	else if (xe < ye)
+-		return y;
++	/* Signs of inputs are equal, let's compare exponents */
++	if (xs == 0) {
++		/* Inputs are both positive */
++		if (xe > ye)
++			return x;
++		else if (xe < ye)
++			return y;
++	} else {
++		/* Inputs are both negative */
++		if (xe > ye)
++			return y;
++		else if (xe < ye)
++			return x;
++	}
+ 
+-	/* Compare mantissa */
++	/* Signs and exponents of inputs are equal, let's compare mantissas */
++	if (xs == 0) {
++		/* Inputs are both positive, with equal signs and exponents */
++		if (xm <= ym)
++			return y;
++		return x;
++	}
++	/* Inputs are both negative, with equal signs and exponents */
+ 	if (xm <= ym)
+-		return y;
+-	return x;
++		return x;
++	return y;
+ }
+ 
+ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
+@@ -147,14 +173,26 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
+ 	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ 		return ieee754dp_nanxcpt(x);
+ 
+-	/* numbers are preferred to NaNs */
++	/*
++	 * Quiet NaN handling
++	 */
++
++	/*
++	 *    The case of both inputs quiet NaNs
++	 */
++	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
++		return x;
++
++	/*
++	 *    The cases of exactly one input quiet NaN (numbers
++	 *    are here preferred as returned values to NaNs)
++	 */
+ 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ 	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ 	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ 		return x;
+ 
+-	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ 	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ 	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ 	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+@@ -164,6 +202,9 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
+ 	/*
+ 	 * Infinity and zero handling
+ 	 */
++	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
++		return ieee754dp_inf(xs & ys);
++
+ 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+@@ -171,7 +212,6 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
+ 	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ 		return x;
+ 
+-	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ 	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ 	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+@@ -180,9 +220,7 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
+ 		return y;
+ 
+ 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+-		if (xs == ys)
+-			return x;
+-		return ieee754dp_zero(1);
++		return ieee754dp_zero(xs & ys);
+ 
+ 	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ 		DPDNORMX;
+@@ -207,7 +245,11 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
+ 		return y;
+ 
+ 	/* Compare mantissa */
+-	if (xm <= ym)
++	if (xm < ym)
+ 		return y;
+-	return x;
++	else if (xm > ym)
++		return x;
++	else if (xs == 0)
++		return x;
++	return y;
+ }
+diff --git a/arch/mips/math-emu/dp_fmin.c b/arch/mips/math-emu/dp_fmin.c
+index c1072b0dfb95..a287b23818d8 100644
+--- a/arch/mips/math-emu/dp_fmin.c
++++ b/arch/mips/math-emu/dp_fmin.c
+@@ -47,14 +47,26 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y)
+ 	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ 		return ieee754dp_nanxcpt(x);
+ 
+-	/* numbers are preferred to NaNs */
++	/*
++	 * Quiet NaN handling
++	 */
++
++	/*
++	 *    The case of both inputs quiet NaNs
++	 */
++	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
++		return x;
++
++	/*
++	 *    The cases of exactly one input quiet NaN (numbers
++	 *    are here preferred as returned values to NaNs)
++	 */
+ 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ 	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ 	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ 		return x;
+ 
+-	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ 	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ 	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ 	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+@@ -80,9 +92,7 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y)
+ 		return ys ? y : x;
+ 
+ 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+-		if (xs == ys)
+-			return x;
+-		return ieee754dp_zero(1);
++		return ieee754dp_zero(xs | ys);
+ 
+ 	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ 		DPDNORMX;
+@@ -106,16 +116,32 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y)
+ 	else if (xs < ys)
+ 		return y;
+ 
+-	/* Compare exponent */
+-	if (xe > ye)
+-		return y;
+-	else if (xe < ye)
+-		return x;
++	/* Signs of inputs are the same, let's compare exponents */
++	if (xs == 0) {
++		/* Inputs are both positive */
++		if (xe > ye)
++			return y;
++		else if (xe < ye)
++			return x;
++	} else {
++		/* Inputs are both negative */
++		if (xe > ye)
++			return x;
++		else if (xe < ye)
++			return y;
++	}
+ 
+-	/* Compare mantissa */
++	/* Signs and exponents of inputs are equal, let's compare mantissas */
++	if (xs == 0) {
++		/* Inputs are both positive, with equal signs and exponents */
++		if (xm <= ym)
++			return x;
++		return y;
++	}
++	/* Inputs are both negative, with equal signs and exponents */
+ 	if (xm <= ym)
+-		return x;
+-	return y;
++		return y;
++	return x;
+ }
+ 
+ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
+@@ -147,14 +173,26 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
+ 	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ 		return ieee754dp_nanxcpt(x);
+ 
+-	/* numbers are preferred to NaNs */
++	/*
++	 * Quiet NaN handling
++	 */
++
++	/*
++	 *    The case of both inputs quiet NaNs
++	 */
++	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
++		return x;
++
++	/*
++	 *    The cases of exactly one input quiet NaN (numbers
++	 *    are here preferred as returned values to NaNs)
++	 */
+ 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ 	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ 	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ 		return x;
+ 
+-	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ 	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ 	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ 	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+@@ -164,25 +202,25 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
+ 	/*
+ 	 * Infinity and zero handling
+ 	 */
++	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
++		return ieee754dp_inf(xs | ys);
++
+ 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ 	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ 	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+-		return x;
++		return y;
+ 
+-	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ 	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ 	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+-		return y;
++		return x;
+ 
+ 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+-		if (xs == ys)
+-			return x;
+-		return ieee754dp_zero(1);
++		return ieee754dp_zero(xs | ys);
+ 
+ 	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ 		DPDNORMX;
+@@ -207,7 +245,11 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
+ 		return x;
+ 
+ 	/* Compare mantissa */
+-	if (xm <= ym)
++	if (xm < ym)
++		return x;
++	else if (xm > ym)
++		return y;
++	else if (xs == 1)
+ 		return x;
+ 	return y;
+ }
+diff --git a/arch/mips/math-emu/dp_maddf.c b/arch/mips/math-emu/dp_maddf.c
+index caa62f20a888..e0d9be5fbf4c 100644
+--- a/arch/mips/math-emu/dp_maddf.c
++++ b/arch/mips/math-emu/dp_maddf.c
+@@ -14,22 +14,45 @@
+ 
+ #include "ieee754dp.h"
+ 
+-enum maddf_flags {
+-	maddf_negate_product	= 1 << 0,
+-};
++
++/* 128 bits shift right logical with rounding. */
++void srl128(u64 *hptr, u64 *lptr, int count)
++{
++	u64 low;
++
++	if (count >= 128) {
++		*lptr = *hptr != 0 || *lptr != 0;
++		*hptr = 0;
++	} else if (count >= 64) {
++		if (count == 64) {
++			*lptr = *hptr | (*lptr != 0);
++		} else {
++			low = *lptr;
++			*lptr = *hptr >> (count - 64);
++			*lptr |= (*hptr << (128 - count)) != 0 || low != 0;
++		}
++		*hptr = 0;
++	} else {
++		low = *lptr;
++		*lptr = low >> count | *hptr << (64 - count);
++		*lptr |= (low << (64 - count)) != 0;
++		*hptr = *hptr >> count;
++	}
++}
+ 
+ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
+ 				 union ieee754dp y, enum maddf_flags flags)
+ {
+ 	int re;
+ 	int rs;
+-	u64 rm;
+ 	unsigned lxm;
+ 	unsigned hxm;
+ 	unsigned lym;
+ 	unsigned hym;
+ 	u64 lrm;
+ 	u64 hrm;
++	u64 lzm;
++	u64 hzm;
+ 	u64 t;
+ 	u64 at;
+ 	int s;
+@@ -48,52 +71,34 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
+ 
+ 	ieee754_clearcx();
+ 
+-	switch (zc) {
+-	case IEEE754_CLASS_SNAN:
+-		ieee754_setcx(IEEE754_INVALID_OPERATION);
++	/*
++	 * Handle the cases when at least one of x, y or z is a NaN.
++	 * Order of precedence is sNaN, qNaN and z, x, y.
++	 */
++	if (zc == IEEE754_CLASS_SNAN)
+ 		return ieee754dp_nanxcpt(z);
+-	case IEEE754_CLASS_DNORM:
+-		DPDNORMZ;
+-	/* QNAN and ZERO cases are handled separately below */
+-	}
+-
+-	switch (CLPAIR(xc, yc)) {
+-	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+-	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+-	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+-	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+-	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+-		return ieee754dp_nanxcpt(y);
+-
+-	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+-	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+-	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+-	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+-	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+-	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
++	if (xc == IEEE754_CLASS_SNAN)
+ 		return ieee754dp_nanxcpt(x);
+-
+-	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+-	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+-	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+-	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
++	if (yc == IEEE754_CLASS_SNAN)
++		return ieee754dp_nanxcpt(y);
++	if (zc == IEEE754_CLASS_QNAN)
++		return z;
++	if (xc == IEEE754_CLASS_QNAN)
++		return x;
++	if (yc == IEEE754_CLASS_QNAN)
+ 		return y;
+ 
+-	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+-	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+-	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+-	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+-	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+-		return x;
++	if (zc == IEEE754_CLASS_DNORM)
++		DPDNORMZ;
++	/* ZERO z cases are handled separately below */
+ 
++	switch (CLPAIR(xc, yc)) {
+ 
+ 	/*
+ 	 * Infinity handling
+ 	 */
+ 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+-		if (zc == IEEE754_CLASS_QNAN)
+-			return z;
+ 		ieee754_setcx(IEEE754_INVALID_OPERATION);
+ 		return ieee754dp_indef();
+ 
+@@ -102,9 +107,27 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
+ 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+-		if (zc == IEEE754_CLASS_QNAN)
+-			return z;
+-		return ieee754dp_inf(xs ^ ys);
++		if ((zc == IEEE754_CLASS_INF) &&
++		    ((!(flags & MADDF_NEGATE_PRODUCT) && (zs != (xs ^ ys))) ||
++		     ((flags & MADDF_NEGATE_PRODUCT) && (zs == (xs ^ ys))))) {
++			/*
++			 * Cases of addition of infinities with opposite signs
++			 * or subtraction of infinities with same signs.
++			 */
++			ieee754_setcx(IEEE754_INVALID_OPERATION);
++			return ieee754dp_indef();
++		}
++		/*
++		 * z is here either not an infinity, or an infinity having the
++		 * same sign as product (x*y) (in case of MADDF.D instruction)
++		 * or product -(x*y) (in MSUBF.D case). The result must be an
++		 * infinity, and its sign is determined only by the value of
++		 * (flags & MADDF_NEGATE_PRODUCT) and the signs of x and y.
++		 */
++		if (flags & MADDF_NEGATE_PRODUCT)
++			return ieee754dp_inf(1 ^ (xs ^ ys));
++		else
++			return ieee754dp_inf(xs ^ ys);
+ 
+ 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+@@ -113,32 +136,42 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
+ 	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ 		if (zc == IEEE754_CLASS_INF)
+ 			return ieee754dp_inf(zs);
+-		/* Multiplication is 0 so just return z */
++		if (zc == IEEE754_CLASS_ZERO) {
++			/* Handle cases +0 + (-0) and similar ones. */
++			if ((!(flags & MADDF_NEGATE_PRODUCT)
++					&& (zs == (xs ^ ys))) ||
++			    ((flags & MADDF_NEGATE_PRODUCT)
++					&& (zs != (xs ^ ys))))
++				/*
++				 * Cases of addition of zeros of equal signs
++				 * or subtraction of zeroes of opposite signs.
++				 * The sign of the resulting zero is in any
++				 * such case determined only by the sign of z.
++				 */
++				return z;
++
++			return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD);
++		}
++		/* x*y is here 0, and z is not 0, so just return z */
+ 		return z;
+ 
+ 	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ 		DPDNORMX;
+ 
+ 	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+-		if (zc == IEEE754_CLASS_QNAN)
+-			return z;
+-		else if (zc == IEEE754_CLASS_INF)
++		if (zc == IEEE754_CLASS_INF)
+ 			return ieee754dp_inf(zs);
+ 		DPDNORMY;
+ 		break;
+ 
+ 	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+-		if (zc == IEEE754_CLASS_QNAN)
+-			return z;
+-		else if (zc == IEEE754_CLASS_INF)
++		if (zc == IEEE754_CLASS_INF)
+ 			return ieee754dp_inf(zs);
+ 		DPDNORMX;
+ 		break;
+ 
+ 	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+-		if (zc == IEEE754_CLASS_QNAN)
+-			return z;
+-		else if (zc == IEEE754_CLASS_INF)
++		if (zc == IEEE754_CLASS_INF)
+ 			return ieee754dp_inf(zs);
+ 		/* fall through to real computations */
+ 	}
+@@ -157,7 +190,7 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
+ 
+ 	re = xe + ye;
+ 	rs = xs ^ ys;
+-	if (flags & maddf_negate_product)
++	if (flags & MADDF_NEGATE_PRODUCT)
+ 		rs ^= 1;
+ 
+ 	/* shunt to top of word */
+@@ -165,7 +198,7 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
+ 	ym <<= 64 - (DP_FBITS + 1);
+ 
+ 	/*
+-	 * Multiply 64 bits xm, ym to give high 64 bits rm with stickness.
++	 * Multiply 64 bits xm and ym to give 128 bits result in hrm:lrm.
+ 	 */
+ 
+ 	/* 32 * 32 => 64 */
+@@ -195,81 +228,110 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
+ 
+ 	hrm = hrm + (t >> 32);
+ 
+-	rm = hrm | (lrm != 0);
+-
+-	/*
+-	 * Sticky shift down to normal rounding precision.
+-	 */
+-	if ((s64) rm < 0) {
+-		rm = (rm >> (64 - (DP_FBITS + 1 + 3))) |
+-		     ((rm << (DP_FBITS + 1 + 3)) != 0);
++	/* Put explicit bit at bit 126 if necessary */
++	if ((int64_t)hrm < 0) {
++		lrm = (hrm << 63) | (lrm >> 1);
++		hrm = hrm >> 1;
+ 		re++;
+-	} else {
+-		rm = (rm >> (64 - (DP_FBITS + 1 + 3 + 1))) |
+-		     ((rm << (DP_FBITS + 1 + 3 + 1)) != 0);
+ 	}
+-	assert(rm & (DP_HIDDEN_BIT << 3));
+ 
+-	if (zc == IEEE754_CLASS_ZERO)
+-		return ieee754dp_format(rs, re, rm);
++	assert(hrm & (1 << 62));
+ 
+-	/* And now the addition */
+-	assert(zm & DP_HIDDEN_BIT);
++	if (zc == IEEE754_CLASS_ZERO) {
++		/*
++		 * Move explicit bit from bit 126 to bit 55 since the
++		 * ieee754dp_format code expects the mantissa to be
++		 * 56 bits wide (53 + 3 rounding bits).
++		 */
++		srl128(&hrm, &lrm, (126 - 55));
++		return ieee754dp_format(rs, re, lrm);
++	}
+ 
+-	/*
+-	 * Provide guard,round and stick bit space.
+-	 */
+-	zm <<= 3;
++	/* Move explicit bit from bit 52 to bit 126 */
++	lzm = 0;
++	hzm = zm << 10;
++	assert(hzm & (1 << 62));
+ 
++	/* Make the exponents the same */
+ 	if (ze > re) {
+ 		/*
+ 		 * Have to shift y fraction right to align.
+ 		 */
+ 		s = ze - re;
+-		rm = XDPSRS(rm, s);
++		srl128(&hrm, &lrm, s);
+ 		re += s;
+ 	} else if (re > ze) {
+ 		/*
+ 		 * Have to shift x fraction right to align.
+ 		 */
+ 		s = re - ze;
+-		zm = XDPSRS(zm, s);
++		srl128(&hzm, &lzm, s);
+ 		ze += s;
+ 	}
+ 	assert(ze == re);
+ 	assert(ze <= DP_EMAX);
+ 
++	/* Do the addition */
+ 	if (zs == rs) {
+ 		/*
+-		 * Generate 28 bit result of adding two 27 bit numbers
+-		 * leaving result in xm, xs and xe.
++		 * Generate 128 bit result by adding two 127 bit numbers
++		 * leaving result in hzm:lzm, zs and ze.
+ 		 */
+-		zm = zm + rm;
+-
+-		if (zm >> (DP_FBITS + 1 + 3)) { /* carry out */
+-			zm = XDPSRS1(zm);
++		hzm = hzm + hrm + (lzm > (lzm + lrm));
++		lzm = lzm + lrm;
++		if ((int64_t)hzm < 0) {        /* carry out */
++			srl128(&hzm, &lzm, 1);
+ 			ze++;
+ 		}
+ 	} else {
+-		if (zm >= rm) {
+-			zm = zm - rm;
++		if (hzm > hrm || (hzm == hrm && lzm >= lrm)) {
++			hzm = hzm - hrm - (lzm < lrm);
++			lzm = lzm - lrm;
+ 		} else {
+-			zm = rm - zm;
++			hzm = hrm - hzm - (lrm < lzm);
++			lzm = lrm - lzm;
+ 			zs = rs;
+ 		}
+-		if (zm == 0)
++		if (lzm == 0 && hzm == 0)
+ 			return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD);
+ 
+ 		/*
+-		 * Normalize to rounding precision.
++		 * Put explicit bit at bit 126 if necessary.
+ 		 */
+-		while ((zm >> (DP_FBITS + 3)) == 0) {
+-			zm <<= 1;
+-			ze--;
++		if (hzm == 0) {
++			/* left shift by 63 or 64 bits */
++			if ((int64_t)lzm < 0) {
++				/* MSB of lzm is the explicit bit */
++				hzm = lzm >> 1;
++				lzm = lzm << 63;
++				ze -= 63;
++			} else {
++				hzm = lzm;
++				lzm = 0;
++				ze -= 64;
++			}
++		}
++
++		t = 0;
++		while ((hzm >> (62 - t)) == 0)
++			t++;
++
++		assert(t <= 62);
++		if (t) {
++			hzm = hzm << t | lzm >> (64 - t);
++			lzm = lzm << t;
++			ze -= t;
+ 		}
+ 	}
+ 
+-	return ieee754dp_format(zs, ze, zm);
++	/*
++	 * Move explicit bit from bit 126 to bit 55 since the
++	 * ieee754dp_format code expects the mantissa to be
++	 * 56 bits wide (53 + 3 rounding bits).
++	 */
++	srl128(&hzm, &lzm, (126 - 55));
++
++	return ieee754dp_format(zs, ze, lzm);
+ }
+ 
+ union ieee754dp ieee754dp_maddf(union ieee754dp z, union ieee754dp x,
+@@ -281,5 +343,5 @@ union ieee754dp ieee754dp_maddf(union ieee754dp z, union ieee754dp x,
+ union ieee754dp ieee754dp_msubf(union ieee754dp z, union ieee754dp x,
+ 				union ieee754dp y)
+ {
+-	return _dp_maddf(z, x, y, maddf_negate_product);
++	return _dp_maddf(z, x, y, MADDF_NEGATE_PRODUCT);
+ }
+diff --git a/arch/mips/math-emu/ieee754int.h b/arch/mips/math-emu/ieee754int.h
+index 8bc2f6963324..dd2071f430e0 100644
+--- a/arch/mips/math-emu/ieee754int.h
++++ b/arch/mips/math-emu/ieee754int.h
+@@ -26,6 +26,10 @@
+ 
+ #define CLPAIR(x, y)	((x)*6+(y))
+ 
++enum maddf_flags {
++	MADDF_NEGATE_PRODUCT	= 1 << 0,
++};
++
+ static inline void ieee754_clearcx(void)
+ {
+ 	ieee754_csr.cx = 0;
+diff --git a/arch/mips/math-emu/ieee754sp.h b/arch/mips/math-emu/ieee754sp.h
+index 8476067075fe..0f63e4202cff 100644
+--- a/arch/mips/math-emu/ieee754sp.h
++++ b/arch/mips/math-emu/ieee754sp.h
+@@ -45,6 +45,10 @@ static inline int ieee754sp_finite(union ieee754sp x)
+ 	return SPBEXP(x) != SP_EMAX + 1 + SP_EBIAS;
+ }
+ 
++/* 64 bit right shift with rounding */
++#define XSPSRS64(v, rs)						\
++	(((rs) >= 64) ? ((v) != 0) : ((v) >> (rs)) | ((v) << (64-(rs)) != 0))
++
+ /* 3bit extended single precision sticky right shift */
+ #define XSPSRS(v, rs)						\
+ 	((rs > (SP_FBITS+3))?1:((v) >> (rs)) | ((v) << (32-(rs)) != 0))
+diff --git a/arch/mips/math-emu/sp_fmax.c b/arch/mips/math-emu/sp_fmax.c
+index 4d000844e48e..74a5a00d2f22 100644
+--- a/arch/mips/math-emu/sp_fmax.c
++++ b/arch/mips/math-emu/sp_fmax.c
+@@ -47,14 +47,26 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y)
+ 	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ 		return ieee754sp_nanxcpt(x);
+ 
+-	/* numbers are preferred to NaNs */
++	/*
++	 * Quiet NaN handling
++	 */
++
++	/*
++	 *    The case of both inputs quiet NaNs
++	 */
++	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
++		return x;
++
++	/*
++	 *    The cases of exactly one input quiet NaN (numbers
++	 *    are here preferred as returned values to NaNs)
++	 */
+ 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ 	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ 	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ 		return x;
+ 
+-	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ 	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ 	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ 	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+@@ -80,9 +92,7 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y)
+ 		return ys ? x : y;
+ 
+ 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+-		if (xs == ys)
+-			return x;
+-		return ieee754sp_zero(1);
++		return ieee754sp_zero(xs & ys);
+ 
+ 	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ 		SPDNORMX;
+@@ -106,16 +116,32 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y)
+ 	else if (xs < ys)
+ 		return x;
+ 
+-	/* Compare exponent */
+-	if (xe > ye)
+-		return x;
+-	else if (xe < ye)
+-		return y;
++	/* Signs of inputs are equal, let's compare exponents */
++	if (xs == 0) {
++		/* Inputs are both positive */
++		if (xe > ye)
++			return x;
++		else if (xe < ye)
++			return y;
++	} else {
++		/* Inputs are both negative */
++		if (xe > ye)
++			return y;
++		else if (xe < ye)
++			return x;
++	}
+ 
+-	/* Compare mantissa */
++	/* Signs and exponents of inputs are equal, let's compare mantissas */
++	if (xs == 0) {
++		/* Inputs are both positive, with equal signs and exponents */
++		if (xm <= ym)
++			return y;
++		return x;
++	}
++	/* Inputs are both negative, with equal signs and exponents */
+ 	if (xm <= ym)
+-		return y;
+-	return x;
++		return x;
++	return y;
+ }
+ 
+ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
+@@ -147,14 +173,26 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
+ 	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ 		return ieee754sp_nanxcpt(x);
+ 
+-	/* numbers are preferred to NaNs */
++	/*
++	 * Quiet NaN handling
++	 */
++
++	/*
++	 *    The case of both inputs quiet NaNs
++	 */
++	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
++		return x;
++
++	/*
++	 *    The cases of exactly one input quiet NaN (numbers
++	 *    are here preferred as returned values to NaNs)
++	 */
+ 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ 	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ 	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ 		return x;
+ 
+-	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ 	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ 	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ 	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+@@ -164,6 +202,9 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
+ 	/*
+ 	 * Infinity and zero handling
+ 	 */
++	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
++		return ieee754sp_inf(xs & ys);
++
+ 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+@@ -171,7 +212,6 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
+ 	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ 		return x;
+ 
+-	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ 	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ 	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+@@ -180,9 +220,7 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
+ 		return y;
+ 
+ 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+-		if (xs == ys)
+-			return x;
+-		return ieee754sp_zero(1);
++		return ieee754sp_zero(xs & ys);
+ 
+ 	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ 		SPDNORMX;
+@@ -207,7 +245,11 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
+ 		return y;
+ 
+ 	/* Compare mantissa */
+-	if (xm <= ym)
++	if (xm < ym)
+ 		return y;
+-	return x;
++	else if (xm > ym)
++		return x;
++	else if (xs == 0)
++		return x;
++	return y;
+ }
+diff --git a/arch/mips/math-emu/sp_fmin.c b/arch/mips/math-emu/sp_fmin.c
+index 4eb1bb9e9dec..c51385f46b09 100644
+--- a/arch/mips/math-emu/sp_fmin.c
++++ b/arch/mips/math-emu/sp_fmin.c
+@@ -47,14 +47,26 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y)
+ 	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ 		return ieee754sp_nanxcpt(x);
+ 
+-	/* numbers are preferred to NaNs */
++	/*
++	 * Quiet NaN handling
++	 */
++
++	/*
++	 *    The case of both inputs quiet NaNs
++	 */
++	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
++		return x;
++
++	/*
++	 *    The cases of exactly one input quiet NaN (numbers
++	 *    are here preferred as returned values to NaNs)
++	 */
+ 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ 	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ 	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ 		return x;
+ 
+-	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ 	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ 	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ 	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+@@ -80,9 +92,7 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y)
+ 		return ys ? y : x;
+ 
+ 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+-		if (xs == ys)
+-			return x;
+-		return ieee754sp_zero(1);
++		return ieee754sp_zero(xs | ys);
+ 
+ 	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ 		SPDNORMX;
+@@ -106,16 +116,32 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y)
+ 	else if (xs < ys)
+ 		return y;
+ 
+-	/* Compare exponent */
+-	if (xe > ye)
+-		return y;
+-	else if (xe < ye)
+-		return x;
++	/* Signs of inputs are the same, let's compare exponents */
++	if (xs == 0) {
++		/* Inputs are both positive */
++		if (xe > ye)
++			return y;
++		else if (xe < ye)
++			return x;
++	} else {
++		/* Inputs are both negative */
++		if (xe > ye)
++			return x;
++		else if (xe < ye)
++			return y;
++	}
+ 
+-	/* Compare mantissa */
++	/* Signs and exponents of inputs are equal, let's compare mantissas */
++	if (xs == 0) {
++		/* Inputs are both positive, with equal signs and exponents */
++		if (xm <= ym)
++			return x;
++		return y;
++	}
++	/* Inputs are both negative, with equal signs and exponents */
+ 	if (xm <= ym)
+-		return x;
+-	return y;
++		return y;
++	return x;
+ }
+ 
+ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
+@@ -147,14 +173,26 @@ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
+ 	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ 		return ieee754sp_nanxcpt(x);
+ 
+-	/* numbers are preferred to NaNs */
++	/*
++	 * Quiet NaN handling
++	 */
++
++	/*
++	 *    The case of both inputs quiet NaNs
++	 */
++	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
++		return x;
++
++	/*
++	 *    The cases of exactly one input quiet NaN (numbers
++	 *    are here preferred as returned values to NaNs)
++	 */
+ 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ 	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ 	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ 		return x;
+ 
+-	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ 	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ 	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ 	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+@@ -164,25 +202,25 @@ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
+ 	/*
+ 	 * Infinity and zero handling
+ 	 */
++	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
++		return ieee754sp_inf(xs | ys);
++
+ 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ 	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ 	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+-		return x;
++		return y;
+ 
+-	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ 	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ 	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+-		return y;
++		return x;
+ 
+ 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+-		if (xs == ys)
+-			return x;
+-		return ieee754sp_zero(1);
++		return ieee754sp_zero(xs | ys);
+ 
+ 	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ 		SPDNORMX;
+@@ -207,7 +245,11 @@ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
+ 		return x;
+ 
+ 	/* Compare mantissa */
+-	if (xm <= ym)
++	if (xm < ym)
++		return x;
++	else if (xm > ym)
++		return y;
++	else if (xs == 1)
+ 		return x;
+ 	return y;
+ }
+diff --git a/arch/mips/math-emu/sp_maddf.c b/arch/mips/math-emu/sp_maddf.c
+index c91d5e5d9b5f..7195fe785d81 100644
+--- a/arch/mips/math-emu/sp_maddf.c
++++ b/arch/mips/math-emu/sp_maddf.c
+@@ -14,9 +14,6 @@
+ 
+ #include "ieee754sp.h"
+ 
+-enum maddf_flags {
+-	maddf_negate_product	= 1 << 0,
+-};
+ 
+ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
+ 				 union ieee754sp y, enum maddf_flags flags)
+@@ -24,14 +21,8 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
+ 	int re;
+ 	int rs;
+ 	unsigned rm;
+-	unsigned short lxm;
+-	unsigned short hxm;
+-	unsigned short lym;
+-	unsigned short hym;
+-	unsigned lrm;
+-	unsigned hrm;
+-	unsigned t;
+-	unsigned at;
++	uint64_t rm64;
++	uint64_t zm64;
+ 	int s;
+ 
+ 	COMPXSP;
+@@ -48,51 +39,35 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
+ 
+ 	ieee754_clearcx();
+ 
+-	switch (zc) {
+-	case IEEE754_CLASS_SNAN:
+-		ieee754_setcx(IEEE754_INVALID_OPERATION);
++	/*
++	 * Handle the cases when at least one of x, y or z is a NaN.
++	 * Order of precedence is sNaN, qNaN and z, x, y.
++	 */
++	if (zc == IEEE754_CLASS_SNAN)
+ 		return ieee754sp_nanxcpt(z);
+-	case IEEE754_CLASS_DNORM:
+-		SPDNORMZ;
+-	/* QNAN and ZERO cases are handled separately below */
+-	}
+-
+-	switch (CLPAIR(xc, yc)) {
+-	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+-	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+-	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+-	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+-	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
++	if (xc == IEEE754_CLASS_SNAN)
++		return ieee754sp_nanxcpt(x);
++	if (yc == IEEE754_CLASS_SNAN)
+ 		return ieee754sp_nanxcpt(y);
++	if (zc == IEEE754_CLASS_QNAN)
++		return z;
++	if (xc == IEEE754_CLASS_QNAN)
++		return x;
++	if (yc == IEEE754_CLASS_QNAN)
++		return y;
+ 
+-	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+-	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+-	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+-	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+-	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+-	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+-		return ieee754sp_nanxcpt(x);
++	if (zc == IEEE754_CLASS_DNORM)
++		SPDNORMZ;
++	/* ZERO z cases are handled separately below */
+ 
+-	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+-	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+-	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+-	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+-		return y;
++	switch (CLPAIR(xc, yc)) {
+ 
+-	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+-	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+-	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+-	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+-	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+-		return x;
+ 
+ 	/*
+ 	 * Infinity handling
+ 	 */
+ 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+-		if (zc == IEEE754_CLASS_QNAN)
+-			return z;
+ 		ieee754_setcx(IEEE754_INVALID_OPERATION);
+ 		return ieee754sp_indef();
+ 
+@@ -101,9 +76,27 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
+ 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ 	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+-		if (zc == IEEE754_CLASS_QNAN)
+-			return z;
+-		return ieee754sp_inf(xs ^ ys);
++		if ((zc == IEEE754_CLASS_INF) &&
++		    ((!(flags & MADDF_NEGATE_PRODUCT) && (zs != (xs ^ ys))) ||
++		     ((flags & MADDF_NEGATE_PRODUCT) && (zs == (xs ^ ys))))) {
++			/*
++			 * Cases of addition of infinities with opposite signs
++			 * or subtraction of infinities with same signs.
++			 */
++			ieee754_setcx(IEEE754_INVALID_OPERATION);
++			return ieee754sp_indef();
++		}
++		/*
++		 * z is here either not an infinity, or an infinity having the
++		 * same sign as product (x*y) (in case of MADDF.D instruction)
++		 * or product -(x*y) (in MSUBF.D case). The result must be an
++		 * infinity, and its sign is determined only by the value of
++		 * (flags & MADDF_NEGATE_PRODUCT) and the signs of x and y.
++		 */
++		if (flags & MADDF_NEGATE_PRODUCT)
++			return ieee754sp_inf(1 ^ (xs ^ ys));
++		else
++			return ieee754sp_inf(xs ^ ys);
+ 
+ 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ 	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+@@ -112,32 +105,42 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
+ 	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ 		if (zc == IEEE754_CLASS_INF)
+ 			return ieee754sp_inf(zs);
+-		/* Multiplication is 0 so just return z */
++		if (zc == IEEE754_CLASS_ZERO) {
++			/* Handle cases +0 + (-0) and similar ones. */
++			if ((!(flags & MADDF_NEGATE_PRODUCT)
++					&& (zs == (xs ^ ys))) ||
++			    ((flags & MADDF_NEGATE_PRODUCT)
++					&& (zs != (xs ^ ys))))
++				/*
++				 * Cases of addition of zeros of equal signs
++				 * or subtraction of zeroes of opposite signs.
++				 * The sign of the resulting zero is in any
++				 * such case determined only by the sign of z.
++				 */
++				return z;
++
++			return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD);
++		}
++		/* x*y is here 0, and z is not 0, so just return z */
+ 		return z;
+ 
+ 	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ 		SPDNORMX;
+ 
+ 	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+-		if (zc == IEEE754_CLASS_QNAN)
+-			return z;
+-		else if (zc == IEEE754_CLASS_INF)
++		if (zc == IEEE754_CLASS_INF)
+ 			return ieee754sp_inf(zs);
+ 		SPDNORMY;
+ 		break;
+ 
+ 	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+-		if (zc == IEEE754_CLASS_QNAN)
+-			return z;
+-		else if (zc == IEEE754_CLASS_INF)
++		if (zc == IEEE754_CLASS_INF)
+ 			return ieee754sp_inf(zs);
+ 		SPDNORMX;
+ 		break;
+ 
+ 	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+-		if (zc == IEEE754_CLASS_QNAN)
+-			return z;
+-		else if (zc == IEEE754_CLASS_INF)
++		if (zc == IEEE754_CLASS_INF)
+ 			return ieee754sp_inf(zs);
+ 		/* fall through to real computations */
+ 	}
+@@ -158,111 +161,93 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
+ 
+ 	re = xe + ye;
+ 	rs = xs ^ ys;
+-	if (flags & maddf_negate_product)
++	if (flags & MADDF_NEGATE_PRODUCT)
+ 		rs ^= 1;
+ 
+-	/* shunt to top of word */
+-	xm <<= 32 - (SP_FBITS + 1);
+-	ym <<= 32 - (SP_FBITS + 1);
+-
+-	/*
+-	 * Multiply 32 bits xm, ym to give high 32 bits rm with stickness.
+-	 */
+-	lxm = xm & 0xffff;
+-	hxm = xm >> 16;
+-	lym = ym & 0xffff;
+-	hym = ym >> 16;
+-
+-	lrm = lxm * lym;	/* 16 * 16 => 32 */
+-	hrm = hxm * hym;	/* 16 * 16 => 32 */
+-
+-	t = lxm * hym; /* 16 * 16 => 32 */
+-	at = lrm + (t << 16);
+-	hrm += at < lrm;
+-	lrm = at;
+-	hrm = hrm + (t >> 16);
++	/* Multiple 24 bit xm and ym to give 48 bit results */
++	rm64 = (uint64_t)xm * ym;
+ 
+-	t = hxm * lym; /* 16 * 16 => 32 */
+-	at = lrm + (t << 16);
+-	hrm += at < lrm;
+-	lrm = at;
+-	hrm = hrm + (t >> 16);
++	/* Shunt to top of word */
++	rm64 = rm64 << 16;
+ 
+-	rm = hrm | (lrm != 0);
+-
+-	/*
+-	 * Sticky shift down to normal rounding precision.
+-	 */
+-	if ((int) rm < 0) {
+-		rm = (rm >> (32 - (SP_FBITS + 1 + 3))) |
+-		    ((rm << (SP_FBITS + 1 + 3)) != 0);
++	/* Put explicit bit at bit 62 if necessary */
++	if ((int64_t) rm64 < 0) {
++		rm64 = rm64 >> 1;
+ 		re++;
+-	} else {
+-		rm = (rm >> (32 - (SP_FBITS + 1 + 3 + 1))) |
+-		     ((rm << (SP_FBITS + 1 + 3 + 1)) != 0);
+ 	}
+-	assert(rm & (SP_HIDDEN_BIT << 3));
+-
+-	if (zc == IEEE754_CLASS_ZERO)
+-		return ieee754sp_format(rs, re, rm);
+ 
+-	/* And now the addition */
++	assert(rm64 & (1 << 62));
+ 
+-	assert(zm & SP_HIDDEN_BIT);
++	if (zc == IEEE754_CLASS_ZERO) {
++		/*
++		 * Move explicit bit from bit 62 to bit 26 since the
++		 * ieee754sp_format code expects the mantissa to be
++		 * 27 bits wide (24 + 3 rounding bits).
++		 */
++		rm = XSPSRS64(rm64, (62 - 26));
++		return ieee754sp_format(rs, re, rm);
++	}
+ 
+-	/*
+-	 * Provide guard,round and stick bit space.
+-	 */
+-	zm <<= 3;
++	/* Move explicit bit from bit 23 to bit 62 */
++	zm64 = (uint64_t)zm << (62 - 23);
++	assert(zm64 & (1 << 62));
+ 
++	/* Make the exponents the same */
+ 	if (ze > re) {
+ 		/*
+ 		 * Have to shift r fraction right to align.
+ 		 */
+ 		s = ze - re;
+-		rm = XSPSRS(rm, s);
++		rm64 = XSPSRS64(rm64, s);
+ 		re += s;
+ 	} else if (re > ze) {
+ 		/*
+ 		 * Have to shift z fraction right to align.
+ 		 */
+ 		s = re - ze;
+-		zm = XSPSRS(zm, s);
++		zm64 = XSPSRS64(zm64, s);
+ 		ze += s;
+ 	}
+ 	assert(ze == re);
+ 	assert(ze <= SP_EMAX);
+ 
++	/* Do the addition */
+ 	if (zs == rs) {
+ 		/*
+-		 * Generate 28 bit result of adding two 27 bit numbers
+-		 * leaving result in zm, zs and ze.
++		 * Generate 64 bit result by adding two 63 bit numbers
++		 * leaving result in zm64, zs and ze.
+ 		 */
+-		zm = zm + rm;
+-
+-		if (zm >> (SP_FBITS + 1 + 3)) { /* carry out */
+-			zm = XSPSRS1(zm);
++		zm64 = zm64 + rm64;
++		if ((int64_t)zm64 < 0) {	/* carry out */
++			zm64 = XSPSRS1(zm64);
+ 			ze++;
+ 		}
+ 	} else {
+-		if (zm >= rm) {
+-			zm = zm - rm;
++		if (zm64 >= rm64) {
++			zm64 = zm64 - rm64;
+ 		} else {
+-			zm = rm - zm;
++			zm64 = rm64 - zm64;
+ 			zs = rs;
+ 		}
+-		if (zm == 0)
++		if (zm64 == 0)
+ 			return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD);
+ 
+ 		/*
+-		 * Normalize in extended single precision
++		 * Put explicit bit at bit 62 if necessary.
+ 		 */
+-		while ((zm >> (SP_MBITS + 3)) == 0) {
+-			zm <<= 1;
++		while ((zm64 >> 62) == 0) {
++			zm64 <<= 1;
+ 			ze--;
+ 		}
+-
+ 	}
++
++	/*
++	 * Move explicit bit from bit 62 to bit 26 since the
++	 * ieee754sp_format code expects the mantissa to be
++	 * 27 bits wide (24 + 3 rounding bits).
++	 */
++	zm = XSPSRS64(zm64, (62 - 26));
++
+ 	return ieee754sp_format(zs, ze, zm);
+ }
+ 
+@@ -275,5 +260,5 @@ union ieee754sp ieee754sp_maddf(union ieee754sp z, union ieee754sp x,
+ union ieee754sp ieee754sp_msubf(union ieee754sp z, union ieee754sp x,
+ 				union ieee754sp y)
+ {
+-	return _sp_maddf(z, x, y, maddf_negate_product);
++	return _sp_maddf(z, x, y, MADDF_NEGATE_PRODUCT);
+ }
+diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
+index ec7a8b099dd9..fd3c1fcc73eb 100644
+--- a/arch/powerpc/kernel/align.c
++++ b/arch/powerpc/kernel/align.c
+@@ -235,6 +235,28 @@ static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr)
+ 
+ #define SWIZ_PTR(p)		((unsigned char __user *)((p) ^ swiz))
+ 
++#define __get_user_or_set_dar(_regs, _dest, _addr)		\
++	({							\
++		int rc = 0;					\
++		typeof(_addr) __addr = (_addr);			\
++		if (__get_user_inatomic(_dest, __addr)) {	\
++			_regs->dar = (unsigned long)__addr;	\
++			rc = -EFAULT;				\
++		}						\
++		rc;						\
++	})
++
++#define __put_user_or_set_dar(_regs, _src, _addr)		\
++	({							\
++		int rc = 0;					\
++		typeof(_addr) __addr = (_addr);			\
++		if (__put_user_inatomic(_src, __addr)) {	\
++			_regs->dar = (unsigned long)__addr;	\
++			rc = -EFAULT;				\
++		}						\
++		rc;						\
++	})
++
+ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
+ 			    unsigned int reg, unsigned int nb,
+ 			    unsigned int flags, unsigned int instr,
+@@ -263,9 +285,10 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
+ 		} else {
+ 			unsigned long pc = regs->nip ^ (swiz & 4);
+ 
+-			if (__get_user_inatomic(instr,
+-						(unsigned int __user *)pc))
++			if (__get_user_or_set_dar(regs, instr,
++						  (unsigned int __user *)pc))
+ 				return -EFAULT;
++
+ 			if (swiz == 0 && (flags & SW))
+ 				instr = cpu_to_le32(instr);
+ 			nb = (instr >> 11) & 0x1f;
+@@ -309,31 +332,31 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
+ 			       ((nb0 + 3) / 4) * sizeof(unsigned long));
+ 
+ 		for (i = 0; i < nb; ++i, ++p)
+-			if (__get_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
+-						SWIZ_PTR(p)))
++			if (__get_user_or_set_dar(regs, REG_BYTE(rptr, i ^ bswiz),
++						  SWIZ_PTR(p)))
+ 				return -EFAULT;
+ 		if (nb0 > 0) {
+ 			rptr = &regs->gpr[0];
+ 			addr += nb;
+ 			for (i = 0; i < nb0; ++i, ++p)
+-				if (__get_user_inatomic(REG_BYTE(rptr,
+-								 i ^ bswiz),
+-							SWIZ_PTR(p)))
++				if (__get_user_or_set_dar(regs,
++							  REG_BYTE(rptr, i ^ bswiz),
++							  SWIZ_PTR(p)))
+ 					return -EFAULT;
+ 		}
+ 
+ 	} else {
+ 		for (i = 0; i < nb; ++i, ++p)
+-			if (__put_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
+-						SWIZ_PTR(p)))
++			if (__put_user_or_set_dar(regs, REG_BYTE(rptr, i ^ bswiz),
++						  SWIZ_PTR(p)))
+ 				return -EFAULT;
+ 		if (nb0 > 0) {
+ 			rptr = &regs->gpr[0];
+ 			addr += nb;
+ 			for (i = 0; i < nb0; ++i, ++p)
+-				if (__put_user_inatomic(REG_BYTE(rptr,
+-								 i ^ bswiz),
+-							SWIZ_PTR(p)))
++				if (__put_user_or_set_dar(regs,
++							  REG_BYTE(rptr, i ^ bswiz),
++							  SWIZ_PTR(p)))
+ 					return -EFAULT;
+ 		}
+ 	}
+@@ -345,29 +368,32 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
+  * Only POWER6 has these instructions, and it does true little-endian,
+  * so we don't need the address swizzling.
+  */
+-static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
+-			   unsigned int flags)
++static int emulate_fp_pair(struct pt_regs *regs, unsigned char __user *addr,
++			   unsigned int reg, unsigned int flags)
+ {
+ 	char *ptr0 = (char *) &current->thread.TS_FPR(reg);
+ 	char *ptr1 = (char *) &current->thread.TS_FPR(reg+1);
+-	int i, ret, sw = 0;
++	int i, sw = 0;
+ 
+ 	if (reg & 1)
+ 		return 0;	/* invalid form: FRS/FRT must be even */
+ 	if (flags & SW)
+ 		sw = 7;
+-	ret = 0;
++
+ 	for (i = 0; i < 8; ++i) {
+ 		if (!(flags & ST)) {
+-			ret |= __get_user(ptr0[i^sw], addr + i);
+-			ret |= __get_user(ptr1[i^sw], addr + i + 8);
++			if (__get_user_or_set_dar(regs, ptr0[i^sw], addr + i))
++				return -EFAULT;
++			if (__get_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
++				return -EFAULT;
+ 		} else {
+-			ret |= __put_user(ptr0[i^sw], addr + i);
+-			ret |= __put_user(ptr1[i^sw], addr + i + 8);
++			if (__put_user_or_set_dar(regs, ptr0[i^sw], addr + i))
++				return -EFAULT;
++			if (__put_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
++				return -EFAULT;
+ 		}
+ 	}
+-	if (ret)
+-		return -EFAULT;
++
+ 	return 1;	/* exception handled and fixed up */
+ }
+ 
+@@ -377,24 +403,27 @@ static int emulate_lq_stq(struct pt_regs *regs, unsigned char __user *addr,
+ {
+ 	char *ptr0 = (char *)&regs->gpr[reg];
+ 	char *ptr1 = (char *)&regs->gpr[reg+1];
+-	int i, ret, sw = 0;
++	int i, sw = 0;
+ 
+ 	if (reg & 1)
+ 		return 0;	/* invalid form: GPR must be even */
+ 	if (flags & SW)
+ 		sw = 7;
+-	ret = 0;
++
+ 	for (i = 0; i < 8; ++i) {
+ 		if (!(flags & ST)) {
+-			ret |= __get_user(ptr0[i^sw], addr + i);
+-			ret |= __get_user(ptr1[i^sw], addr + i + 8);
++			if (__get_user_or_set_dar(regs, ptr0[i^sw], addr + i))
++				return -EFAULT;
++			if (__get_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
++				return -EFAULT;
+ 		} else {
+-			ret |= __put_user(ptr0[i^sw], addr + i);
+-			ret |= __put_user(ptr1[i^sw], addr + i + 8);
++			if (__put_user_or_set_dar(regs, ptr0[i^sw], addr + i))
++				return -EFAULT;
++			if (__put_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
++				return -EFAULT;
+ 		}
+ 	}
+-	if (ret)
+-		return -EFAULT;
++
+ 	return 1;	/* exception handled and fixed up */
+ }
+ #endif /* CONFIG_PPC64 */
+@@ -687,9 +716,14 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
+ 	for (j = 0; j < length; j += elsize) {
+ 		for (i = 0; i < elsize; ++i) {
+ 			if (flags & ST)
+-				ret |= __put_user(ptr[i^sw], addr + i);
++				ret = __put_user_or_set_dar(regs, ptr[i^sw],
++							    addr + i);
+ 			else
+-				ret |= __get_user(ptr[i^sw], addr + i);
++				ret = __get_user_or_set_dar(regs, ptr[i^sw],
++							    addr + i);
++
++			if (ret)
++				return ret;
+ 		}
+ 		ptr  += elsize;
+ #ifdef __LITTLE_ENDIAN__
+@@ -739,7 +773,7 @@ int fix_alignment(struct pt_regs *regs)
+ 	unsigned int dsisr;
+ 	unsigned char __user *addr;
+ 	unsigned long p, swiz;
+-	int ret, i;
++	int i;
+ 	union data {
+ 		u64 ll;
+ 		double dd;
+@@ -936,7 +970,7 @@ int fix_alignment(struct pt_regs *regs)
+ 		if (flags & F) {
+ 			/* Special case for 16-byte FP loads and stores */
+ 			PPC_WARN_ALIGNMENT(fp_pair, regs);
+-			return emulate_fp_pair(addr, reg, flags);
++			return emulate_fp_pair(regs, addr, reg, flags);
+ 		} else {
+ #ifdef CONFIG_PPC64
+ 			/* Special case for 16-byte loads and stores */
+@@ -966,15 +1000,12 @@ int fix_alignment(struct pt_regs *regs)
+ 		}
+ 
+ 		data.ll = 0;
+-		ret = 0;
+ 		p = (unsigned long)addr;
+ 
+ 		for (i = 0; i < nb; i++)
+-			ret |= __get_user_inatomic(data.v[start + i],
+-						   SWIZ_PTR(p++));
+-
+-		if (unlikely(ret))
+-			return -EFAULT;
++			if (__get_user_or_set_dar(regs, data.v[start + i],
++						  SWIZ_PTR(p++)))
++				return -EFAULT;
+ 
+ 	} else if (flags & F) {
+ 		data.ll = current->thread.TS_FPR(reg);
+@@ -1046,15 +1077,13 @@ int fix_alignment(struct pt_regs *regs)
+ 			break;
+ 		}
+ 
+-		ret = 0;
+ 		p = (unsigned long)addr;
+ 
+ 		for (i = 0; i < nb; i++)
+-			ret |= __put_user_inatomic(data.v[start + i],
+-						   SWIZ_PTR(p++));
++			if (__put_user_or_set_dar(regs, data.v[start + i],
++						  SWIZ_PTR(p++)))
++				return -EFAULT;
+ 
+-		if (unlikely(ret))
+-			return -EFAULT;
+ 	} else if (flags & F)
+ 		current->thread.TS_FPR(reg) = data.ll;
+ 	else
+diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
+index 4c7b8591f737..2cb6cbea4b3b 100644
+--- a/arch/powerpc/platforms/powernv/npu-dma.c
++++ b/arch/powerpc/platforms/powernv/npu-dma.c
+@@ -545,6 +545,12 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
+ 	struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS];
+ 	unsigned long pid = npu_context->mm->context.id;
+ 
++	/*
++	 * Unfortunately the nest mmu does not support flushing specific
++	 * addresses so we have to flush the whole mm.
++	 */
++	flush_tlb_mm(npu_context->mm);
++
+ 	/*
+ 	 * Loop over all the NPUs this process is active on and launch
+ 	 * an invalidate.
+@@ -576,12 +582,6 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
+ 		}
+ 	}
+ 
+-	/*
+-	 * Unfortunately the nest mmu does not support flushing specific
+-	 * addresses so we have to flush the whole mm.
+-	 */
+-	flush_tlb_mm(npu_context->mm);
+-
+ 	mmio_invalidate_wait(mmio_atsd_reg, flush);
+ 	if (flush)
+ 		/* Wait for the flush to complete */
+diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
+index ca9b2f4aaa22..bf2f43f7ac6a 100644
+--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
+@@ -817,6 +817,9 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add, struct property *prop)
+ 		return -EINVAL;
+ 
+ 	for (i = 0; i < num_lmbs && lmbs_to_add != lmbs_added; i++) {
++		if (lmbs[i].flags & DRCONF_MEM_ASSIGNED)
++			continue;
++
+ 		rc = dlpar_acquire_drc(lmbs[i].drc_index);
+ 		if (rc)
+ 			continue;
+@@ -859,6 +862,7 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add, struct property *prop)
+ 				lmbs[i].base_addr, lmbs[i].drc_index);
+ 			lmbs[i].reserved = 0;
+ 		}
++		rc = 0;
+ 	}
+ 
+ 	return rc;
+diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
+index bd6f30304518..3525fe6e7e4c 100644
+--- a/arch/s390/include/asm/mmu.h
++++ b/arch/s390/include/asm/mmu.h
+@@ -5,6 +5,7 @@
+ #include <linux/errno.h>
+ 
+ typedef struct {
++	spinlock_t lock;
+ 	cpumask_t cpu_attach_mask;
+ 	atomic_t flush_count;
+ 	unsigned int flush_mm;
+@@ -27,6 +28,7 @@ typedef struct {
+ } mm_context_t;
+ 
+ #define INIT_MM_CONTEXT(name)						   \
++	.context.lock =	__SPIN_LOCK_UNLOCKED(name.context.lock),	   \
+ 	.context.pgtable_lock =						   \
+ 			__SPIN_LOCK_UNLOCKED(name.context.pgtable_lock),   \
+ 	.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
+diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
+index 24bc41622a98..ebfb2f248ae9 100644
+--- a/arch/s390/include/asm/mmu_context.h
++++ b/arch/s390/include/asm/mmu_context.h
+@@ -16,6 +16,7 @@
+ static inline int init_new_context(struct task_struct *tsk,
+ 				   struct mm_struct *mm)
+ {
++	spin_lock_init(&mm->context.lock);
+ 	spin_lock_init(&mm->context.pgtable_lock);
+ 	INIT_LIST_HEAD(&mm->context.pgtable_list);
+ 	spin_lock_init(&mm->context.gmap_lock);
+@@ -102,7 +103,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ 	if (prev == next)
+ 		return;
+ 	cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
+-	cpumask_set_cpu(cpu, mm_cpumask(next));
+ 	/* Clear old ASCE by loading the kernel ASCE. */
+ 	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
+ 	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
+@@ -120,9 +120,8 @@ static inline void finish_arch_post_lock_switch(void)
+ 		preempt_disable();
+ 		while (atomic_read(&mm->context.flush_count))
+ 			cpu_relax();
+-
+-		if (mm->context.flush_mm)
+-			__tlb_flush_mm(mm);
++		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
++		__tlb_flush_mm_lazy(mm);
+ 		preempt_enable();
+ 	}
+ 	set_fs(current->thread.mm_segment);
+@@ -135,6 +134,7 @@ static inline void activate_mm(struct mm_struct *prev,
+                                struct mm_struct *next)
+ {
+ 	switch_mm(prev, next, current);
++	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
+ 	set_user_asce(next);
+ }
+ 
+diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
+index 39846100682a..eed927aeb08f 100644
+--- a/arch/s390/include/asm/tlbflush.h
++++ b/arch/s390/include/asm/tlbflush.h
+@@ -43,23 +43,6 @@ static inline void __tlb_flush_global(void)
+  * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
+  * this implicates multiple ASCEs!).
+  */
+-static inline void __tlb_flush_full(struct mm_struct *mm)
+-{
+-	preempt_disable();
+-	atomic_inc(&mm->context.flush_count);
+-	if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
+-		/* Local TLB flush */
+-		__tlb_flush_local();
+-	} else {
+-		/* Global TLB flush */
+-		__tlb_flush_global();
+-		/* Reset TLB flush mask */
+-		cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
+-	}
+-	atomic_dec(&mm->context.flush_count);
+-	preempt_enable();
+-}
+-
+ static inline void __tlb_flush_mm(struct mm_struct *mm)
+ {
+ 	unsigned long gmap_asce;
+@@ -71,16 +54,18 @@ static inline void __tlb_flush_mm(struct mm_struct *mm)
+ 	 */
+ 	preempt_disable();
+ 	atomic_inc(&mm->context.flush_count);
++	/* Reset TLB flush mask */
++	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
++	barrier();
+ 	gmap_asce = READ_ONCE(mm->context.gmap_asce);
+ 	if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
+ 		if (gmap_asce)
+ 			__tlb_flush_idte(gmap_asce);
+ 		__tlb_flush_idte(mm->context.asce);
+ 	} else {
+-		__tlb_flush_full(mm);
++		/* Global TLB flush */
++		__tlb_flush_global();
+ 	}
+-	/* Reset TLB flush mask */
+-	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
+ 	atomic_dec(&mm->context.flush_count);
+ 	preempt_enable();
+ }
+@@ -94,7 +79,6 @@ static inline void __tlb_flush_kernel(void)
+ }
+ #else
+ #define __tlb_flush_global()	__tlb_flush_local()
+-#define __tlb_flush_full(mm)	__tlb_flush_local()
+ 
+ /*
+  * Flush TLB entries for a specific ASCE on all CPUs.
+@@ -112,10 +96,12 @@ static inline void __tlb_flush_kernel(void)
+ 
+ static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
+ {
++	spin_lock(&mm->context.lock);
+ 	if (mm->context.flush_mm) {
+-		__tlb_flush_mm(mm);
+ 		mm->context.flush_mm = 0;
++		__tlb_flush_mm(mm);
+ 	}
++	spin_unlock(&mm->context.lock);
+ }
+ 
+ /*
+diff --git a/block/blk-core.c b/block/blk-core.c
+index dbecbf4a64e0..658f67309602 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -280,7 +280,7 @@ EXPORT_SYMBOL(blk_start_queue_async);
+ void blk_start_queue(struct request_queue *q)
+ {
+ 	lockdep_assert_held(q->queue_lock);
+-	WARN_ON(!irqs_disabled());
++	WARN_ON(!in_interrupt() && !irqs_disabled());
+ 	WARN_ON_ONCE(q->mq_ops);
+ 
+ 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
+@@ -2330,7 +2330,12 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
+ 	if (q->mq_ops) {
+ 		if (blk_queue_io_stat(q))
+ 			blk_account_io_start(rq, true);
+-		blk_mq_sched_insert_request(rq, false, true, false, false);
++		/*
++		 * Since we have a scheduler attached on the top device,
++		 * bypass a potential scheduler on the bottom device for
++		 * insert.
++		 */
++		blk_mq_request_bypass_insert(rq);
+ 		return BLK_STS_OK;
+ 	}
+ 
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 4603b115e234..e0523eb8eee1 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1357,6 +1357,22 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ 	blk_mq_hctx_mark_pending(hctx, ctx);
+ }
+ 
++/*
++ * Should only be used carefully, when the caller knows we want to
++ * bypass a potential IO scheduler on the target device.
++ */
++void blk_mq_request_bypass_insert(struct request *rq)
++{
++	struct blk_mq_ctx *ctx = rq->mq_ctx;
++	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
++
++	spin_lock(&hctx->lock);
++	list_add_tail(&rq->queuelist, &hctx->dispatch);
++	spin_unlock(&hctx->lock);
++
++	blk_mq_run_hw_queue(hctx, false);
++}
++
+ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
+ 			    struct list_head *list)
+ 
+diff --git a/block/blk-mq.h b/block/blk-mq.h
+index 60b01c0309bc..f64747914560 100644
+--- a/block/blk-mq.h
++++ b/block/blk-mq.h
+@@ -54,6 +54,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
+  */
+ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ 				bool at_head);
++void blk_mq_request_bypass_insert(struct request *rq);
+ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
+ 				struct list_head *list);
+ 
+diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
+index 903605dbc1a5..76b875c69a95 100644
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -144,8 +144,10 @@ static int skcipher_alloc_sgl(struct sock *sk)
+ 		sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
+ 		sgl->cur = 0;
+ 
+-		if (sg)
++		if (sg) {
+ 			sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
++			sg_unmark_end(sg + (MAX_SGL_ENTS - 1));
++		}
+ 
+ 		list_add_tail(&sgl->list, &ctx->tsgl);
+ 	}
+diff --git a/crypto/scompress.c b/crypto/scompress.c
+index ae1d3cf209e4..0b40d991d65f 100644
+--- a/crypto/scompress.c
++++ b/crypto/scompress.c
+@@ -211,9 +211,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
+ 					      scratch_dst, &req->dlen, *ctx);
+ 	if (!ret) {
+ 		if (!req->dst) {
+-			req->dst = crypto_scomp_sg_alloc(req->dlen,
+-				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+-				   GFP_KERNEL : GFP_ATOMIC);
++			req->dst = crypto_scomp_sg_alloc(req->dlen, GFP_ATOMIC);
+ 			if (!req->dst)
+ 				goto out;
+ 		}
+diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
+index d0368682bd43..153f20ce318b 100644
+--- a/drivers/block/skd_main.c
++++ b/drivers/block/skd_main.c
+@@ -2160,6 +2160,9 @@ static void skd_send_fitmsg(struct skd_device *skdev,
+ 		 */
+ 		qcmd |= FIT_QCMD_MSGSIZE_64;
+ 
++	/* Make sure skd_msg_buf is written before the doorbell is triggered. */
++	smp_wmb();
++
+ 	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
+ }
+ 
+@@ -2202,6 +2205,9 @@ static void skd_send_special_fitmsg(struct skd_device *skdev,
+ 	qcmd = skspcl->mb_dma_address;
+ 	qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
+ 
++	/* Make sure skd_msg_buf is written before the doorbell is triggered. */
++	smp_wmb();
++
+ 	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
+ }
+ 
+@@ -4539,15 +4545,16 @@ static void skd_free_disk(struct skd_device *skdev)
+ {
+ 	struct gendisk *disk = skdev->disk;
+ 
+-	if (disk != NULL) {
+-		struct request_queue *q = disk->queue;
++	if (disk && (disk->flags & GENHD_FL_UP))
++		del_gendisk(disk);
+ 
+-		if (disk->flags & GENHD_FL_UP)
+-			del_gendisk(disk);
+-		if (q)
+-			blk_cleanup_queue(q);
+-		put_disk(disk);
++	if (skdev->queue) {
++		blk_cleanup_queue(skdev->queue);
++		skdev->queue = NULL;
++		disk->queue = NULL;
+ 	}
++
++	put_disk(disk);
+ 	skdev->disk = NULL;
+ }
+ 
+diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
+index 3425f2d9a2a1..fe0185ceac16 100644
+--- a/drivers/crypto/caam/caamalg_qi.c
++++ b/drivers/crypto/caam/caamalg_qi.c
+@@ -776,9 +776,9 @@ static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
+ 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ 	struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
+ 	struct device *qidev = caam_ctx->qidev;
+-#ifdef DEBUG
+ 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ 
++#ifdef DEBUG
+ 	dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
+ #endif
+ 
+@@ -799,6 +799,13 @@ static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
+ 	ablkcipher_unmap(qidev, edesc, req);
+ 	qi_cache_free(edesc);
+ 
++	/*
++	 * The crypto API expects us to set the IV (req->info) to the last
++	 * ciphertext block. This is used e.g. by the CTS mode.
++	 */
++	scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
++				 ivsize, 0);
++
+ 	ablkcipher_request_complete(req, status);
+ }
+ 
+@@ -1968,7 +1975,7 @@ static struct caam_aead_alg driver_aeads[] = {
+ 				.cra_name = "echainiv(authenc(hmac(sha256),"
+ 					    "cbc(des)))",
+ 				.cra_driver_name = "echainiv-authenc-"
+-						   "hmac-sha256-cbc-desi-"
++						   "hmac-sha256-cbc-des-"
+ 						   "caam-qi",
+ 				.cra_blocksize = DES_BLOCK_SIZE,
+ 			},
+diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+index 58a4244b4752..3f26a415ef44 100644
+--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
++++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+@@ -1,8 +1,9 @@
+ /*
+  * AMD Cryptographic Coprocessor (CCP) AES XTS crypto API support
+  *
+- * Copyright (C) 2013 Advanced Micro Devices, Inc.
++ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
+  *
++ * Author: Gary R Hook <gary.hook@amd.com>
+  * Author: Tom Lendacky <thomas.lendacky@amd.com>
+  *
+  * This program is free software; you can redistribute it and/or modify
+@@ -164,6 +165,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
+ 	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+ 	INIT_LIST_HEAD(&rctx->cmd.entry);
+ 	rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
++	rctx->cmd.u.xts.type = CCP_AES_TYPE_128;
+ 	rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
+ 					   : CCP_AES_ACTION_DECRYPT;
+ 	rctx->cmd.u.xts.unit_size = unit_size;
+diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
+index b10d2d2075cb..9bc134a4ebf0 100644
+--- a/drivers/crypto/ccp/ccp-dev-v5.c
++++ b/drivers/crypto/ccp/ccp-dev-v5.c
+@@ -145,6 +145,7 @@ union ccp_function {
+ #define	CCP_AES_MODE(p)		((p)->aes.mode)
+ #define	CCP_AES_TYPE(p)		((p)->aes.type)
+ #define	CCP_XTS_SIZE(p)		((p)->aes_xts.size)
++#define	CCP_XTS_TYPE(p)		((p)->aes_xts.type)
+ #define	CCP_XTS_ENCRYPT(p)	((p)->aes_xts.encrypt)
+ #define	CCP_DES3_SIZE(p)	((p)->des3.size)
+ #define	CCP_DES3_ENCRYPT(p)	((p)->des3.encrypt)
+@@ -344,6 +345,7 @@ static int ccp5_perform_xts_aes(struct ccp_op *op)
+ 	CCP5_CMD_PROT(&desc) = 0;
+ 
+ 	function.raw = 0;
++	CCP_XTS_TYPE(&function) = op->u.xts.type;
+ 	CCP_XTS_ENCRYPT(&function) = op->u.xts.action;
+ 	CCP_XTS_SIZE(&function) = op->u.xts.unit_size;
+ 	CCP5_CMD_FUNCTION(&desc) = function.raw;
+diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
+index a70154ac7405..7b8370e9c42e 100644
+--- a/drivers/crypto/ccp/ccp-dev.h
++++ b/drivers/crypto/ccp/ccp-dev.h
+@@ -192,6 +192,7 @@
+ #define CCP_AES_CTX_SB_COUNT		1
+ 
+ #define CCP_XTS_AES_KEY_SB_COUNT	1
++#define CCP5_XTS_AES_KEY_SB_COUNT	2
+ #define CCP_XTS_AES_CTX_SB_COUNT	1
+ 
+ #define CCP_DES3_KEY_SB_COUNT		1
+@@ -497,6 +498,7 @@ struct ccp_aes_op {
+ };
+ 
+ struct ccp_xts_aes_op {
++	enum ccp_aes_type type;
+ 	enum ccp_aes_action action;
+ 	enum ccp_xts_aes_unit_size unit_size;
+ };
+diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
+index c0dfdacbdff5..f3542aede519 100644
+--- a/drivers/crypto/ccp/ccp-ops.c
++++ b/drivers/crypto/ccp/ccp-ops.c
+@@ -1038,6 +1038,8 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
+ 	struct ccp_op op;
+ 	unsigned int unit_size, dm_offset;
+ 	bool in_place = false;
++	unsigned int sb_count;
++	enum ccp_aes_type aestype;
+ 	int ret;
+ 
+ 	switch (xts->unit_size) {
+@@ -1061,7 +1063,9 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
+ 		return -EINVAL;
+ 	}
+ 
+-	if (xts->key_len != AES_KEYSIZE_128)
++	if (xts->key_len == AES_KEYSIZE_128)
++		aestype = CCP_AES_TYPE_128;
++	else
+ 		return -EINVAL;
+ 
+ 	if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
+@@ -1083,23 +1087,44 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
+ 	op.sb_key = cmd_q->sb_key;
+ 	op.sb_ctx = cmd_q->sb_ctx;
+ 	op.init = 1;
++	op.u.xts.type = aestype;
+ 	op.u.xts.action = xts->action;
+ 	op.u.xts.unit_size = xts->unit_size;
+ 
+-	/* All supported key sizes fit in a single (32-byte) SB entry
+-	 * and must be in little endian format. Use the 256-bit byte
+-	 * swap passthru option to convert from big endian to little
+-	 * endian.
++	/* A version 3 device only supports 128-bit keys, which fits into a
++	 * single SB entry. A version 5 device uses a 512-bit vector, so two
++	 * SB entries.
+ 	 */
++	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
++		sb_count = CCP_XTS_AES_KEY_SB_COUNT;
++	else
++		sb_count = CCP5_XTS_AES_KEY_SB_COUNT;
+ 	ret = ccp_init_dm_workarea(&key, cmd_q,
+-				   CCP_XTS_AES_KEY_SB_COUNT * CCP_SB_BYTES,
++				   sb_count * CCP_SB_BYTES,
+ 				   DMA_TO_DEVICE);
+ 	if (ret)
+ 		return ret;
+ 
+-	dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
+-	ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
+-	ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len);
++	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {
++		/* All supported key sizes must be in little endian format.
++		 * Use the 256-bit byte swap passthru option to convert from
++		 * big endian to little endian.
++		 */
++		dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
++		ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
++		ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
++	} else {
++		/* Version 5 CCPs use a 512-bit space for the key: each portion
++		 * occupies 256 bits, or one entire slot, and is zero-padded.
++		 */
++		unsigned int pad;
++
++		dm_offset = CCP_SB_BYTES;
++		pad = dm_offset - xts->key_len;
++		ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
++		ccp_set_dm_area(&key, dm_offset + pad, xts->key, xts->key_len,
++				xts->key_len);
++	}
+ 	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+ 			     CCP_PASSTHRU_BYTESWAP_256BIT);
+ 	if (ret) {
+diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
+index dea04871b50d..a1c4ee818614 100644
+--- a/drivers/devfreq/devfreq.c
++++ b/drivers/devfreq/devfreq.c
+@@ -564,7 +564,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
+ 	err = device_register(&devfreq->dev);
+ 	if (err) {
+ 		mutex_unlock(&devfreq->lock);
+-		goto err_out;
++		goto err_dev;
+ 	}
+ 
+ 	devfreq->trans_table =	devm_kzalloc(&devfreq->dev,
+@@ -610,6 +610,9 @@ struct devfreq *devfreq_add_device(struct device *dev,
+ 	mutex_unlock(&devfreq_list_lock);
+ 
+ 	device_unregister(&devfreq->dev);
++err_dev:
++	if (devfreq)
++		kfree(devfreq);
+ err_out:
+ 	return ERR_PTR(err);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index 4083be61b328..6417febe18b9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -95,9 +95,8 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
+ 	int i;
+ 	struct amdgpu_device *adev = psp->adev;
+ 
+-	val = RREG32(reg_index);
+-
+ 	for (i = 0; i < adev->usec_timeout; i++) {
++		val = RREG32(reg_index);
+ 		if (check_changed) {
+ 			if (val != reg_val)
+ 				return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+index c98d77d0c8f8..6f80ad8f588b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
++++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+@@ -237,11 +237,9 @@ int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
+ 
+ 	/* there might be handshake issue with hardware which needs delay */
+ 	mdelay(20);
+-#if 0
+ 	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
+ 			   RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
+ 			   0, true);
+-#endif
+ 
+ 	return ret;
+ }
+diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
+index 4a11d4da4c92..ff30b34c8984 100644
+--- a/drivers/infiniband/hw/hfi1/init.c
++++ b/drivers/infiniband/hw/hfi1/init.c
+@@ -483,7 +483,6 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
+ 
+ 	ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
+ 	ppd->part_enforce |= HFI1_PART_ENFORCE_IN;
+-	ppd->part_enforce |= HFI1_PART_ENFORCE_OUT;
+ 
+ 	if (loopback) {
+ 		hfi1_early_err(&pdev->dev,
+diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
+index 1080778a1f7c..0c73fb0c2c1b 100644
+--- a/drivers/infiniband/hw/hfi1/rc.c
++++ b/drivers/infiniband/hw/hfi1/rc.c
+@@ -425,7 +425,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
+ 		case IB_WR_RDMA_WRITE:
+ 			if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
+ 				qp->s_lsn++;
+-			/* FALLTHROUGH */
++			goto no_flow_control;
+ 		case IB_WR_RDMA_WRITE_WITH_IMM:
+ 			/* If no credit, return. */
+ 			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
+@@ -433,6 +433,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
+ 				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
+ 				goto bail;
+ 			}
++no_flow_control:
+ 			put_ib_reth_vaddr(
+ 				wqe->rdma_wr.remote_addr,
+ 				&ohdr->u.rc.reth);
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index 2c40a2e989d2..a0eb2f96179a 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -48,6 +48,7 @@ enum {
+ #define MLX5_UMR_ALIGN 2048
+ 
+ static int clean_mr(struct mlx5_ib_mr *mr);
++static int max_umr_order(struct mlx5_ib_dev *dev);
+ static int use_umr(struct mlx5_ib_dev *dev, int order);
+ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
+ 
+@@ -491,16 +492,18 @@ static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
+ 	struct mlx5_mr_cache *cache = &dev->cache;
+ 	struct mlx5_ib_mr *mr = NULL;
+ 	struct mlx5_cache_ent *ent;
++	int last_umr_cache_entry;
+ 	int c;
+ 	int i;
+ 
+ 	c = order2idx(dev, order);
+-	if (c < 0 || c > MAX_UMR_CACHE_ENTRY) {
++	last_umr_cache_entry = order2idx(dev, max_umr_order(dev));
++	if (c < 0 || c > last_umr_cache_entry) {
+ 		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
+ 		return NULL;
+ 	}
+ 
+-	for (i = c; i < MAX_UMR_CACHE_ENTRY; i++) {
++	for (i = c; i <= last_umr_cache_entry; i++) {
+ 		ent = &cache->ent[i];
+ 
+ 		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);
+@@ -816,11 +819,16 @@ static int get_octo_len(u64 addr, u64 len, int page_size)
+ 	return (npages + 1) / 2;
+ }
+ 
+-static int use_umr(struct mlx5_ib_dev *dev, int order)
++static int max_umr_order(struct mlx5_ib_dev *dev)
+ {
+ 	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
+-		return order <= MAX_UMR_CACHE_ENTRY + 2;
+-	return order <= MLX5_MAX_UMR_SHIFT;
++		return MAX_UMR_CACHE_ENTRY + 2;
++	return MLX5_MAX_UMR_SHIFT;
++}
++
++static int use_umr(struct mlx5_ib_dev *dev, int order)
++{
++	return order <= max_umr_order(dev);
+ }
+ 
+ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
+diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
+index 4ddbcac5eabe..e9a91736b12d 100644
+--- a/drivers/infiniband/hw/qib/qib_rc.c
++++ b/drivers/infiniband/hw/qib/qib_rc.c
+@@ -348,7 +348,7 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
+ 		case IB_WR_RDMA_WRITE:
+ 			if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
+ 				qp->s_lsn++;
+-			/* FALLTHROUGH */
++			goto no_flow_control;
+ 		case IB_WR_RDMA_WRITE_WITH_IMM:
+ 			/* If no credit, return. */
+ 			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
+@@ -356,7 +356,7 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
+ 				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
+ 				goto bail;
+ 			}
+-
++no_flow_control:
+ 			ohdr->u.rc.reth.vaddr =
+ 				cpu_to_be64(wqe->rdma_wr.remote_addr);
+ 			ohdr->u.rc.reth.rkey =
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index ca0e19ae7a90..f6d0c8f51613 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -1764,10 +1764,12 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
+ 		struct usb_endpoint_descriptor *ep =
+ 				&intf->cur_altsetting->endpoint[i].desc;
+ 
+-		if (usb_endpoint_dir_in(ep))
+-			ep_irq_in = ep;
+-		else
+-			ep_irq_out = ep;
++		if (usb_endpoint_xfer_int(ep)) {
++			if (usb_endpoint_dir_in(ep))
++				ep_irq_in = ep;
++			else
++				ep_irq_out = ep;
++		}
+ 	}
+ 
+ 	if (!ep_irq_in || !ep_irq_out) {
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index f932a83b4990..9125ad017eda 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -839,6 +839,13 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
+ 		},
+ 	},
++	{
++		/* Gigabyte P57 - Elantech touchpad */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "P57"),
++		},
++	},
+ 	{
+ 		/* Schenker XMG C504 - Elantech touchpad */
+ 		.matches = {
+diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
+index da67882caa7b..0e298ed42ae0 100644
+--- a/drivers/mailbox/bcm-flexrm-mailbox.c
++++ b/drivers/mailbox/bcm-flexrm-mailbox.c
+@@ -95,7 +95,7 @@
+ 
+ /* Register RING_CMPL_START_ADDR fields */
+ #define CMPL_START_ADDR_VALUE(pa)			\
+-	((u32)((((u64)(pa)) >> RING_CMPL_ALIGN_ORDER) & 0x03ffffff))
++	((u32)((((u64)(pa)) >> RING_CMPL_ALIGN_ORDER) & 0x07ffffff))
+ 
+ /* Register RING_CONTROL fields */
+ #define CONTROL_MASK_DISABLE_CONTROL			12
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index dee542fff68e..2ed9bd231d84 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -333,6 +333,7 @@ struct cached_dev {
+ 	/* Limit number of writeback bios in flight */
+ 	struct semaphore	in_flight;
+ 	struct task_struct	*writeback_thread;
++	struct workqueue_struct	*writeback_write_wq;
+ 
+ 	struct keybuf		writeback_keys;
+ 
+diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
+index 019b3df9f1c6..4b413db99276 100644
+--- a/drivers/md/bcache/request.c
++++ b/drivers/md/bcache/request.c
+@@ -196,12 +196,12 @@ static void bch_data_insert_start(struct closure *cl)
+ 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+ 	struct bio *bio = op->bio, *n;
+ 
+-	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
+-		wake_up_gc(op->c);
+-
+ 	if (op->bypass)
+ 		return bch_data_invalidate(cl);
+ 
++	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
++		wake_up_gc(op->c);
++
+ 	/*
+ 	 * Journal writes are marked REQ_PREFLUSH; if the original write was a
+ 	 * flush, it'll wait on the journal write.
+@@ -400,12 +400,6 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
+ 	if (!congested && !dc->sequential_cutoff)
+ 		goto rescale;
+ 
+-	if (!congested &&
+-	    mode == CACHE_MODE_WRITEBACK &&
+-	    op_is_write(bio->bi_opf) &&
+-	    op_is_sync(bio->bi_opf))
+-		goto rescale;
+-
+ 	spin_lock(&dc->io_lock);
+ 
+ 	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 8352fad765f6..046fc5bddf54 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1026,7 +1026,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
+ 	}
+ 
+ 	if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
+-		bch_sectors_dirty_init(dc);
++		bch_sectors_dirty_init(&dc->disk);
+ 		atomic_set(&dc->has_dirty, 1);
+ 		atomic_inc(&dc->count);
+ 		bch_writeback_queue(dc);
+@@ -1059,6 +1059,8 @@ static void cached_dev_free(struct closure *cl)
+ 	cancel_delayed_work_sync(&dc->writeback_rate_update);
+ 	if (!IS_ERR_OR_NULL(dc->writeback_thread))
+ 		kthread_stop(dc->writeback_thread);
++	if (dc->writeback_write_wq)
++		destroy_workqueue(dc->writeback_write_wq);
+ 
+ 	mutex_lock(&bch_register_lock);
+ 
+@@ -1228,6 +1230,7 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
+ 		goto err;
+ 
+ 	bcache_device_attach(d, c, u - c->uuids);
++	bch_sectors_dirty_init(d);
+ 	bch_flash_dev_request_init(d);
+ 	add_disk(d->disk);
+ 
+@@ -1964,6 +1967,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
+ 			else
+ 				err = "device busy";
+ 			mutex_unlock(&bch_register_lock);
++			if (!IS_ERR(bdev))
++				bdput(bdev);
+ 			if (attr == &ksysfs_register_quiet)
+ 				goto out;
+ 		}
+diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
+index f90f13616980..ab2f8ce1e3bc 100644
+--- a/drivers/md/bcache/sysfs.c
++++ b/drivers/md/bcache/sysfs.c
+@@ -192,7 +192,7 @@ STORE(__cached_dev)
+ {
+ 	struct cached_dev *dc = container_of(kobj, struct cached_dev,
+ 					     disk.kobj);
+-	unsigned v = size;
++	ssize_t v = size;
+ 	struct cache_set *c;
+ 	struct kobj_uevent_env *env;
+ 
+@@ -227,7 +227,7 @@ STORE(__cached_dev)
+ 		bch_cached_dev_run(dc);
+ 
+ 	if (attr == &sysfs_cache_mode) {
+-		ssize_t v = bch_read_string_list(buf, bch_cache_modes + 1);
++		v = bch_read_string_list(buf, bch_cache_modes + 1);
+ 
+ 		if (v < 0)
+ 			return v;
+diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
+index 8c3a938f4bf0..176d3c2ef5f5 100644
+--- a/drivers/md/bcache/util.c
++++ b/drivers/md/bcache/util.c
+@@ -74,24 +74,44 @@ STRTO_H(strtouint, unsigned int)
+ STRTO_H(strtoll, long long)
+ STRTO_H(strtoull, unsigned long long)
+ 
++/**
++ * bch_hprint() - formats @v to human readable string for sysfs.
++ *
++ * @v - signed 64 bit integer
++ * @buf - the (at least 8 byte) buffer to format the result into.
++ *
++ * Returns the number of bytes used by format.
++ */
+ ssize_t bch_hprint(char *buf, int64_t v)
+ {
+ 	static const char units[] = "?kMGTPEZY";
+-	char dec[4] = "";
+-	int u, t = 0;
+-
+-	for (u = 0; v >= 1024 || v <= -1024; u++) {
+-		t = v & ~(~0 << 10);
+-		v >>= 10;
+-	}
+-
+-	if (!u)
+-		return sprintf(buf, "%llu", v);
+-
+-	if (v < 100 && v > -100)
+-		snprintf(dec, sizeof(dec), ".%i", t / 100);
+-
+-	return sprintf(buf, "%lli%s%c", v, dec, units[u]);
++	int u = 0, t;
++
++	uint64_t q;
++
++	if (v < 0)
++		q = -v;
++	else
++		q = v;
++
++	/* For as long as the number is more than 3 digits, but at least
++	 * once, shift right / divide by 1024.  Keep the remainder for
++	 * a digit after the decimal point.
++	 */
++	do {
++		u++;
++
++		t = q & ~(~0 << 10);
++		q >>= 10;
++	} while (q >= 1000);
++
++	if (v < 0)
++		/* '-', up to 3 digits, '.', 1 digit, 1 character, null;
++		 * yields 8 bytes.
++		 */
++		return sprintf(buf, "-%llu.%i%c", q, t * 10 / 1024, units[u]);
++	else
++		return sprintf(buf, "%llu.%i%c", q, t * 10 / 1024, units[u]);
+ }
+ 
+ ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[],
+diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
+index 42c66e76f05e..a635d6ac7fde 100644
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -21,7 +21,8 @@
+ static void __update_writeback_rate(struct cached_dev *dc)
+ {
+ 	struct cache_set *c = dc->disk.c;
+-	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size;
++	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
++				bcache_flash_devs_sectors_dirty(c);
+ 	uint64_t cache_dirty_target =
+ 		div_u64(cache_sectors * dc->writeback_percent, 100);
+ 
+@@ -186,7 +187,7 @@ static void write_dirty(struct closure *cl)
+ 
+ 	closure_bio_submit(&io->bio, cl);
+ 
+-	continue_at(cl, write_dirty_finish, system_wq);
++	continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
+ }
+ 
+ static void read_dirty_endio(struct bio *bio)
+@@ -206,7 +207,7 @@ static void read_dirty_submit(struct closure *cl)
+ 
+ 	closure_bio_submit(&io->bio, cl);
+ 
+-	continue_at(cl, write_dirty, system_wq);
++	continue_at(cl, write_dirty, io->dc->writeback_write_wq);
+ }
+ 
+ static void read_dirty(struct cached_dev *dc)
+@@ -482,17 +483,17 @@ static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
+ 	return MAP_CONTINUE;
+ }
+ 
+-void bch_sectors_dirty_init(struct cached_dev *dc)
++void bch_sectors_dirty_init(struct bcache_device *d)
+ {
+ 	struct sectors_dirty_init op;
+ 
+ 	bch_btree_op_init(&op.op, -1);
+-	op.inode = dc->disk.id;
++	op.inode = d->id;
+ 
+-	bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
++	bch_btree_map_keys(&op.op, d->c, &KEY(op.inode, 0, 0),
+ 			   sectors_dirty_init_fn, 0);
+ 
+-	dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk);
++	d->sectors_dirty_last = bcache_dev_sectors_dirty(d);
+ }
+ 
+ void bch_cached_dev_writeback_init(struct cached_dev *dc)
+@@ -516,6 +517,11 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
+ 
+ int bch_cached_dev_writeback_start(struct cached_dev *dc)
+ {
++	dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
++						WQ_MEM_RECLAIM, 0);
++	if (!dc->writeback_write_wq)
++		return -ENOMEM;
++
+ 	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
+ 					      "bcache_writeback");
+ 	if (IS_ERR(dc->writeback_thread))
+diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
+index 629bd1a502fd..e35421d20d2e 100644
+--- a/drivers/md/bcache/writeback.h
++++ b/drivers/md/bcache/writeback.h
+@@ -14,6 +14,25 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
+ 	return ret;
+ }
+ 
++static inline uint64_t  bcache_flash_devs_sectors_dirty(struct cache_set *c)
++{
++	uint64_t i, ret = 0;
++
++	mutex_lock(&bch_register_lock);
++
++	for (i = 0; i < c->nr_uuids; i++) {
++		struct bcache_device *d = c->devices[i];
++
++		if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
++			continue;
++	   ret += bcache_dev_sectors_dirty(d);
++	}
++
++	mutex_unlock(&bch_register_lock);
++
++	return ret;
++}
++
+ static inline unsigned offset_to_stripe(struct bcache_device *d,
+ 					uint64_t offset)
+ {
+@@ -84,7 +103,7 @@ static inline void bch_writeback_add(struct cached_dev *dc)
+ 
+ void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);
+ 
+-void bch_sectors_dirty_init(struct cached_dev *dc);
++void bch_sectors_dirty_init(struct bcache_device *);
+ void bch_cached_dev_writeback_init(struct cached_dev *);
+ int bch_cached_dev_writeback_start(struct cached_dev *);
+ 
+diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
+index 40f3cd7eab0f..d2121637b4ab 100644
+--- a/drivers/md/bitmap.c
++++ b/drivers/md/bitmap.c
+@@ -625,7 +625,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
+ 		err = read_sb_page(bitmap->mddev,
+ 				   offset,
+ 				   sb_page,
+-				   0, sizeof(bitmap_super_t));
++				   0, PAGE_SIZE);
+ 	}
+ 	if (err)
+ 		return err;
+@@ -2058,6 +2058,11 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
+ 	long pages;
+ 	struct bitmap_page *new_bp;
+ 
++	if (bitmap->storage.file && !init) {
++		pr_info("md: cannot resize file-based bitmap\n");
++		return -EINVAL;
++	}
++
+ 	if (chunksize == 0) {
+ 		/* If there is enough space, leave the chunk size unchanged,
+ 		 * else increase by factor of two until there is enough space.
+@@ -2118,7 +2123,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
+ 	if (store.sb_page && bitmap->storage.sb_page)
+ 		memcpy(page_address(store.sb_page),
+ 		       page_address(bitmap->storage.sb_page),
+-		       sizeof(bitmap_super_t));
++		       PAGE_SIZE);
+ 	bitmap_file_unmap(&bitmap->storage);
+ 	bitmap->storage = store;
+ 
+diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
+index 78de7ddf5081..3df28f2f9b38 100644
+--- a/drivers/media/i2c/adv7180.c
++++ b/drivers/media/i2c/adv7180.c
+@@ -1402,6 +1402,8 @@ static int adv7180_remove(struct i2c_client *client)
+ 
+ static const struct i2c_device_id adv7180_id[] = {
+ 	{ "adv7180", (kernel_ulong_t)&adv7180_info },
++	{ "adv7180cp", (kernel_ulong_t)&adv7180_info },
++	{ "adv7180st", (kernel_ulong_t)&adv7180_info },
+ 	{ "adv7182", (kernel_ulong_t)&adv7182_info },
+ 	{ "adv7280", (kernel_ulong_t)&adv7280_info },
+ 	{ "adv7280-m", (kernel_ulong_t)&adv7280_m_info },
+diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
+index 5f4434c0a8f1..2d6187904552 100644
+--- a/drivers/media/platform/qcom/venus/helpers.c
++++ b/drivers/media/platform/qcom/venus/helpers.c
+@@ -243,7 +243,7 @@ static void return_buf_error(struct venus_inst *inst,
+ 	if (vbuf->vb2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ 		v4l2_m2m_src_buf_remove_by_buf(m2m_ctx, vbuf);
+ 	else
+-		v4l2_m2m_src_buf_remove_by_buf(m2m_ctx, vbuf);
++		v4l2_m2m_dst_buf_remove_by_buf(m2m_ctx, vbuf);
+ 
+ 	v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ }
+diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
+index db1e7b70c998..9080e39ea391 100644
+--- a/drivers/media/rc/lirc_dev.c
++++ b/drivers/media/rc/lirc_dev.c
+@@ -59,6 +59,8 @@ static void lirc_release(struct device *ld)
+ {
+ 	struct irctl *ir = container_of(ld, struct irctl, dev);
+ 
++	put_device(ir->dev.parent);
++
+ 	if (ir->buf_internal) {
+ 		lirc_buffer_free(ir->buf);
+ 		kfree(ir->buf);
+@@ -218,6 +220,8 @@ int lirc_register_driver(struct lirc_driver *d)
+ 
+ 	mutex_unlock(&lirc_dev_lock);
+ 
++	get_device(ir->dev.parent);
++
+ 	dev_info(ir->d.dev, "lirc_dev: driver %s registered at minor = %d\n",
+ 		 ir->d.name, ir->d.minor);
+ 
+diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
+index c2ee6e39fd0c..20397aba6849 100644
+--- a/drivers/media/usb/uvc/uvc_ctrl.c
++++ b/drivers/media/usb/uvc/uvc_ctrl.c
+@@ -2002,6 +2002,13 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
+ 		goto done;
+ 	}
+ 
++	/* Validate the user-provided bit-size and offset */
++	if (mapping->size > 32 ||
++	    mapping->offset + mapping->size > ctrl->info.size * 8) {
++		ret = -EINVAL;
++		goto done;
++	}
++
+ 	list_for_each_entry(map, &ctrl->info.mappings, list) {
+ 		if (mapping->id == map->id) {
+ 			uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s', "
+diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+index 6f52970f8b54..0c14e995667c 100644
+--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
++++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+@@ -796,7 +796,8 @@ static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *u
+ 		copy_to_user(&up->u, &kp->u, sizeof(kp->u)) ||
+ 		put_user(kp->pending, &up->pending) ||
+ 		put_user(kp->sequence, &up->sequence) ||
+-		compat_put_timespec(&kp->timestamp, &up->timestamp) ||
++		put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
++		put_user(kp->timestamp.tv_nsec, &up->timestamp.tv_nsec) ||
+ 		put_user(kp->id, &up->id) ||
+ 		copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32)))
+ 			return -EFAULT;
+diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
+index 1a138c83f877..a0c44d16bf30 100644
+--- a/drivers/misc/cxl/api.c
++++ b/drivers/misc/cxl/api.c
+@@ -336,6 +336,10 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
+ 			mmput(ctx->mm);
+ 	}
+ 
++	/*
++	 * Increment driver use count. Enables global TLBIs for hash
++	 * and callbacks to handle the segment table
++	 */
+ 	cxl_ctx_get();
+ 
+ 	if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
+diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
+index 0761271d68c5..4bfad9f6dc9f 100644
+--- a/drivers/misc/cxl/file.c
++++ b/drivers/misc/cxl/file.c
+@@ -95,7 +95,6 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
+ 
+ 	pr_devel("afu_open pe: %i\n", ctx->pe);
+ 	file->private_data = ctx;
+-	cxl_ctx_get();
+ 
+ 	/* indicate success */
+ 	rc = 0;
+@@ -225,6 +224,12 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
+ 	if (ctx->mm)
+ 		mmput(ctx->mm);
+ 
++	/*
++	 * Increment driver use count. Enables global TLBIs for hash
++	 * and callbacks to handle the segment table
++	 */
++	cxl_ctx_get();
++
+ 	trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
+ 
+ 	if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor,
+@@ -233,6 +238,7 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
+ 		cxl_adapter_context_put(ctx->afu->adapter);
+ 		put_pid(ctx->pid);
+ 		ctx->pid = NULL;
++		cxl_ctx_put();
+ 		cxl_context_mm_count_put(ctx);
+ 		goto out;
+ 	}
+diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
+index 517a315e259b..35bd50bcbbd5 100644
+--- a/drivers/net/wireless/ath/wcn36xx/main.c
++++ b/drivers/net/wireless/ath/wcn36xx/main.c
+@@ -372,6 +372,8 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
+ 
+ 	wcn36xx_dbg(WCN36XX_DBG_MAC, "mac config changed 0x%08x\n", changed);
+ 
++	mutex_lock(&wcn->conf_mutex);
++
+ 	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ 		int ch = WCN36XX_HW_CHANNEL(wcn);
+ 		wcn36xx_dbg(WCN36XX_DBG_MAC, "wcn36xx_config channel switch=%d\n",
+@@ -382,6 +384,8 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
+ 		}
+ 	}
+ 
++	mutex_unlock(&wcn->conf_mutex);
++
+ 	return 0;
+ }
+ 
+@@ -396,6 +400,8 @@ static void wcn36xx_configure_filter(struct ieee80211_hw *hw,
+ 
+ 	wcn36xx_dbg(WCN36XX_DBG_MAC, "mac configure filter\n");
+ 
++	mutex_lock(&wcn->conf_mutex);
++
+ 	*total &= FIF_ALLMULTI;
+ 
+ 	fp = (void *)(unsigned long)multicast;
+@@ -408,6 +414,8 @@ static void wcn36xx_configure_filter(struct ieee80211_hw *hw,
+ 		else if (NL80211_IFTYPE_STATION == vif->type && tmp->sta_assoc)
+ 			wcn36xx_smd_set_mc_list(wcn, vif, fp);
+ 	}
++
++	mutex_unlock(&wcn->conf_mutex);
+ 	kfree(fp);
+ }
+ 
+@@ -471,6 +479,8 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 			 key_conf->key,
+ 			 key_conf->keylen);
+ 
++	mutex_lock(&wcn->conf_mutex);
++
+ 	switch (key_conf->cipher) {
+ 	case WLAN_CIPHER_SUITE_WEP40:
+ 		vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40;
+@@ -565,6 +575,8 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 	}
+ 
+ out:
++	mutex_unlock(&wcn->conf_mutex);
++
+ 	return ret;
+ }
+ 
+@@ -725,6 +737,8 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
+ 	wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss info changed vif %p changed 0x%08x\n",
+ 		    vif, changed);
+ 
++	mutex_lock(&wcn->conf_mutex);
++
+ 	if (changed & BSS_CHANGED_BEACON_INFO) {
+ 		wcn36xx_dbg(WCN36XX_DBG_MAC,
+ 			    "mac bss changed dtim period %d\n",
+@@ -787,7 +801,13 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
+ 				     bss_conf->aid);
+ 
+ 			vif_priv->sta_assoc = true;
+-			rcu_read_lock();
++
++			/*
++			 * Holding conf_mutex ensures mutal exclusion with
++			 * wcn36xx_sta_remove() and as such ensures that sta
++			 * won't be freed while we're operating on it. As such
++			 * we do not need to hold the rcu_read_lock().
++			 */
+ 			sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ 			if (!sta) {
+ 				wcn36xx_err("sta %pM is not found\n",
+@@ -811,7 +831,6 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
+ 			 * place where AID is available.
+ 			 */
+ 			wcn36xx_smd_config_sta(wcn, vif, sta);
+-			rcu_read_unlock();
+ 		} else {
+ 			wcn36xx_dbg(WCN36XX_DBG_MAC,
+ 				    "disassociated bss %pM vif %pM AID=%d\n",
+@@ -873,6 +892,9 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
+ 		}
+ 	}
+ out:
++
++	mutex_unlock(&wcn->conf_mutex);
++
+ 	return;
+ }
+ 
+@@ -882,7 +904,10 @@ static int wcn36xx_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+ 	struct wcn36xx *wcn = hw->priv;
+ 	wcn36xx_dbg(WCN36XX_DBG_MAC, "mac set RTS threshold %d\n", value);
+ 
++	mutex_lock(&wcn->conf_mutex);
+ 	wcn36xx_smd_update_cfg(wcn, WCN36XX_HAL_CFG_RTS_THRESHOLD, value);
++	mutex_unlock(&wcn->conf_mutex);
++
+ 	return 0;
+ }
+ 
+@@ -893,8 +918,12 @@ static void wcn36xx_remove_interface(struct ieee80211_hw *hw,
+ 	struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+ 	wcn36xx_dbg(WCN36XX_DBG_MAC, "mac remove interface vif %p\n", vif);
+ 
++	mutex_lock(&wcn->conf_mutex);
++
+ 	list_del(&vif_priv->list);
+ 	wcn36xx_smd_delete_sta_self(wcn, vif->addr);
++
++	mutex_unlock(&wcn->conf_mutex);
+ }
+ 
+ static int wcn36xx_add_interface(struct ieee80211_hw *hw,
+@@ -915,9 +944,13 @@ static int wcn36xx_add_interface(struct ieee80211_hw *hw,
+ 		return -EOPNOTSUPP;
+ 	}
+ 
++	mutex_lock(&wcn->conf_mutex);
++
+ 	list_add(&vif_priv->list, &wcn->vif_list);
+ 	wcn36xx_smd_add_sta_self(wcn, vif);
+ 
++	mutex_unlock(&wcn->conf_mutex);
++
+ 	return 0;
+ }
+ 
+@@ -930,6 +963,8 @@ static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ 	wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta add vif %p sta %pM\n",
+ 		    vif, sta->addr);
+ 
++	mutex_lock(&wcn->conf_mutex);
++
+ 	spin_lock_init(&sta_priv->ampdu_lock);
+ 	sta_priv->vif = vif_priv;
+ 	/*
+@@ -941,6 +976,9 @@ static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ 		sta_priv->aid = sta->aid;
+ 		wcn36xx_smd_config_sta(wcn, vif, sta);
+ 	}
++
++	mutex_unlock(&wcn->conf_mutex);
++
+ 	return 0;
+ }
+ 
+@@ -954,8 +992,13 @@ static int wcn36xx_sta_remove(struct ieee80211_hw *hw,
+ 	wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta remove vif %p sta %pM index %d\n",
+ 		    vif, sta->addr, sta_priv->sta_index);
+ 
++	mutex_lock(&wcn->conf_mutex);
++
+ 	wcn36xx_smd_delete_sta(wcn, sta_priv->sta_index);
+ 	sta_priv->vif = NULL;
++
++	mutex_unlock(&wcn->conf_mutex);
++
+ 	return 0;
+ }
+ 
+@@ -999,6 +1042,8 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
+ 	wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n",
+ 		    action, tid);
+ 
++	mutex_lock(&wcn->conf_mutex);
++
+ 	switch (action) {
+ 	case IEEE80211_AMPDU_RX_START:
+ 		sta_priv->tid = tid;
+@@ -1038,6 +1083,8 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
+ 		wcn36xx_err("Unknown AMPDU action\n");
+ 	}
+ 
++	mutex_unlock(&wcn->conf_mutex);
++
+ 	return 0;
+ }
+ 
+@@ -1216,6 +1263,7 @@ static int wcn36xx_probe(struct platform_device *pdev)
+ 	wcn = hw->priv;
+ 	wcn->hw = hw;
+ 	wcn->dev = &pdev->dev;
++	mutex_init(&wcn->conf_mutex);
+ 	mutex_init(&wcn->hal_mutex);
+ 	mutex_init(&wcn->scan_lock);
+ 
+diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+index b52b4da9a967..6aefba4c0cda 100644
+--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
++++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+@@ -202,6 +202,9 @@ struct wcn36xx {
+ 	struct qcom_smem_state	*tx_rings_empty_state;
+ 	unsigned		tx_rings_empty_state_bit;
+ 
++	/* prevents concurrent FW reconfiguration */
++	struct mutex		conf_mutex;
++
+ 	/*
+ 	 * smd_buf must be protected with smd_mutex to garantee
+ 	 * that all messages are sent one after another
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+index 3ee6767392b6..d25bad052d78 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+@@ -79,6 +79,7 @@
+ /* NVM offsets (in words) definitions */
+ enum wkp_nvm_offsets {
+ 	/* NVM HW-Section offset (in words) definitions */
++	SUBSYSTEM_ID = 0x0A,
+ 	HW_ADDR = 0x15,
+ 
+ 	/* NVM SW-Section offset (in words) definitions */
+@@ -254,13 +255,12 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
+ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
+ 				struct iwl_nvm_data *data,
+ 				const __le16 * const nvm_ch_flags,
+-				bool lar_supported)
++				bool lar_supported, bool no_wide_in_5ghz)
+ {
+ 	int ch_idx;
+ 	int n_channels = 0;
+ 	struct ieee80211_channel *channel;
+ 	u16 ch_flags;
+-	bool is_5ghz;
+ 	int num_of_ch, num_2ghz_channels;
+ 	const u8 *nvm_chan;
+ 
+@@ -275,12 +275,20 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
+ 	}
+ 
+ 	for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
++		bool is_5ghz = (ch_idx >= num_2ghz_channels);
++
+ 		ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
+ 
+-		if (ch_idx >= num_2ghz_channels &&
+-		    !data->sku_cap_band_52GHz_enable)
++		if (is_5ghz && !data->sku_cap_band_52GHz_enable)
+ 			continue;
+ 
++		/* workaround to disable wide channels in 5GHz */
++		if (no_wide_in_5ghz && is_5ghz) {
++			ch_flags &= ~(NVM_CHANNEL_40MHZ |
++				     NVM_CHANNEL_80MHZ |
++				     NVM_CHANNEL_160MHZ);
++		}
++
+ 		if (ch_flags & NVM_CHANNEL_160MHZ)
+ 			data->vht160_supported = true;
+ 
+@@ -303,8 +311,8 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
+ 		n_channels++;
+ 
+ 		channel->hw_value = nvm_chan[ch_idx];
+-		channel->band = (ch_idx < num_2ghz_channels) ?
+-				NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
++		channel->band = is_5ghz ?
++				NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
+ 		channel->center_freq =
+ 			ieee80211_channel_to_frequency(
+ 				channel->hw_value, channel->band);
+@@ -316,7 +324,6 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
+ 		 * is not used in mvm, and is used for backwards compatibility
+ 		 */
+ 		channel->max_power = IWL_DEFAULT_MAX_TX_POWER;
+-		is_5ghz = channel->band == NL80211_BAND_5GHZ;
+ 
+ 		/* don't put limitations in case we're using LAR */
+ 		if (!lar_supported)
+@@ -432,14 +439,15 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
+ 
+ void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
+ 		     struct iwl_nvm_data *data, const __le16 *nvm_ch_flags,
+-		     u8 tx_chains, u8 rx_chains, bool lar_supported)
++		     u8 tx_chains, u8 rx_chains, bool lar_supported,
++		     bool no_wide_in_5ghz)
+ {
+ 	int n_channels;
+ 	int n_used = 0;
+ 	struct ieee80211_supported_band *sband;
+ 
+ 	n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags,
+-					  lar_supported);
++					  lar_supported, no_wide_in_5ghz);
+ 	sband = &data->bands[NL80211_BAND_2GHZ];
+ 	sband->band = NL80211_BAND_2GHZ;
+ 	sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
+@@ -645,6 +653,39 @@ static int iwl_set_hw_address(struct iwl_trans *trans,
+ 	return 0;
+ }
+ 
++static bool
++iwl_nvm_no_wide_in_5ghz(struct device *dev, const struct iwl_cfg *cfg,
++			const __le16 *nvm_hw)
++{
++	/*
++	 * Workaround a bug in Indonesia SKUs where the regulatory in
++	 * some 7000-family OTPs erroneously allow wide channels in
++	 * 5GHz.  To check for Indonesia, we take the SKU value from
++	 * bits 1-4 in the subsystem ID and check if it is either 5 or
++	 * 9.  In those cases, we need to force-disable wide channels
++	 * in 5GHz otherwise the FW will throw a sysassert when we try
++	 * to use them.
++	 */
++	if (cfg->device_family == IWL_DEVICE_FAMILY_7000) {
++		/*
++		 * Unlike the other sections in the NVM, the hw
++		 * section uses big-endian.
++		 */
++		u16 subsystem_id = be16_to_cpup((const __be16 *)nvm_hw
++						+ SUBSYSTEM_ID);
++		u8 sku = (subsystem_id & 0x1e) >> 1;
++
++		if (sku == 5 || sku == 9) {
++			IWL_DEBUG_EEPROM(dev,
++					 "disabling wide channels in 5GHz (0x%0x %d)\n",
++					 subsystem_id, sku);
++			return true;
++		}
++	}
++
++	return false;
++}
++
+ struct iwl_nvm_data *
+ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+ 		   const __le16 *nvm_hw, const __le16 *nvm_sw,
+@@ -655,6 +696,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+ 	struct device *dev = trans->dev;
+ 	struct iwl_nvm_data *data;
+ 	bool lar_enabled;
++	bool no_wide_in_5ghz = iwl_nvm_no_wide_in_5ghz(dev, cfg, nvm_hw);
+ 	u32 sku, radio_cfg;
+ 	u16 lar_config;
+ 	const __le16 *ch_section;
+@@ -725,7 +767,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+ 	}
+ 
+ 	iwl_init_sbands(dev, cfg, data, ch_section, tx_chains, rx_chains,
+-			lar_fw_supported && lar_enabled);
++			lar_fw_supported && lar_enabled, no_wide_in_5ghz);
+ 	data->calib_version = 255;
+ 
+ 	return data;
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
+index 3fd6506a02ab..50d9b3eaa4f8 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
+@@ -93,7 +93,8 @@ void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
+  */
+ void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
+ 		     struct iwl_nvm_data *data, const __le16 *nvm_ch_flags,
+-		     u8 tx_chains, u8 rx_chains, bool lar_supported);
++		     u8 tx_chains, u8 rx_chains, bool lar_supported,
++		     bool no_wide_in_5ghz);
+ 
+ /**
+  * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+index dac7e542a190..4de565cec747 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+@@ -628,7 +628,8 @@ int iwl_mvm_nvm_get_from_fw(struct iwl_mvm *mvm)
+ 			rsp->regulatory.channel_profile,
+ 			mvm->nvm_data->valid_tx_ant & mvm->fw->valid_tx_ant,
+ 			mvm->nvm_data->valid_rx_ant & mvm->fw->valid_rx_ant,
+-			rsp->regulatory.lar_enabled && lar_fw_supported);
++			rsp->regulatory.lar_enabled && lar_fw_supported,
++			false);
+ 
+ 	iwl_free_resp(&hcmd);
+ 	return 0;
+diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
+index 026830a138ae..e5d5ce9e3010 100644
+--- a/drivers/pci/hotplug/pciehp_hpc.c
++++ b/drivers/pci/hotplug/pciehp_hpc.c
+@@ -586,6 +586,14 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
+ 	events = status & (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
+ 			   PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
+ 			   PCI_EXP_SLTSTA_DLLSC);
++
++	/*
++	 * If we've already reported a power fault, don't report it again
++	 * until we've done something to handle it.
++	 */
++	if (ctrl->power_fault_detected)
++		events &= ~PCI_EXP_SLTSTA_PFD;
++
+ 	if (!events)
+ 		return IRQ_NONE;
+ 
+diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
+index de0ea474fb73..e5824c7b7b6b 100644
+--- a/drivers/pci/hotplug/shpchp_hpc.c
++++ b/drivers/pci/hotplug/shpchp_hpc.c
+@@ -1062,6 +1062,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
+ 		if (rc) {
+ 			ctrl_info(ctrl, "Can't get msi for the hotplug controller\n");
+ 			ctrl_info(ctrl, "Use INTx for the hotplug controller\n");
++		} else {
++			pci_set_master(pdev);
+ 		}
+ 
+ 		rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED,
+diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
+index e6779d4352a2..7c30fd986560 100644
+--- a/drivers/pinctrl/pinctrl-amd.c
++++ b/drivers/pinctrl/pinctrl-amd.c
+@@ -36,6 +36,7 @@
+ #include <linux/pinctrl/pinconf.h>
+ #include <linux/pinctrl/pinconf-generic.h>
+ 
++#include "core.h"
+ #include "pinctrl-utils.h"
+ #include "pinctrl-amd.h"
+ 
+@@ -725,6 +726,69 @@ static const struct pinconf_ops amd_pinconf_ops = {
+ 	.pin_config_group_set = amd_pinconf_group_set,
+ };
+ 
++#ifdef CONFIG_PM_SLEEP
++static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin)
++{
++	const struct pin_desc *pd = pin_desc_get(gpio_dev->pctrl, pin);
++
++	if (!pd)
++		return false;
++
++	/*
++	 * Only restore the pin if it is actually in use by the kernel (or
++	 * by userspace).
++	 */
++	if (pd->mux_owner || pd->gpio_owner ||
++	    gpiochip_line_is_irq(&gpio_dev->gc, pin))
++		return true;
++
++	return false;
++}
++
++int amd_gpio_suspend(struct device *dev)
++{
++	struct platform_device *pdev = to_platform_device(dev);
++	struct amd_gpio *gpio_dev = platform_get_drvdata(pdev);
++	struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
++	int i;
++
++	for (i = 0; i < desc->npins; i++) {
++		int pin = desc->pins[i].number;
++
++		if (!amd_gpio_should_save(gpio_dev, pin))
++			continue;
++
++		gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin*4);
++	}
++
++	return 0;
++}
++
++int amd_gpio_resume(struct device *dev)
++{
++	struct platform_device *pdev = to_platform_device(dev);
++	struct amd_gpio *gpio_dev = platform_get_drvdata(pdev);
++	struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
++	int i;
++
++	for (i = 0; i < desc->npins; i++) {
++		int pin = desc->pins[i].number;
++
++		if (!amd_gpio_should_save(gpio_dev, pin))
++			continue;
++
++		writel(gpio_dev->saved_regs[i], gpio_dev->base + pin*4);
++	}
++
++	return 0;
++}
++
++static const struct dev_pm_ops amd_gpio_pm_ops = {
++	SET_LATE_SYSTEM_SLEEP_PM_OPS(amd_gpio_suspend,
++				     amd_gpio_resume)
++};
++#endif
++
+ static struct pinctrl_desc amd_pinctrl_desc = {
+ 	.pins	= kerncz_pins,
+ 	.npins = ARRAY_SIZE(kerncz_pins),
+@@ -764,6 +828,14 @@ static int amd_gpio_probe(struct platform_device *pdev)
+ 		return -EINVAL;
+ 	}
+ 
++#ifdef CONFIG_PM_SLEEP
++	gpio_dev->saved_regs = devm_kcalloc(&pdev->dev, amd_pinctrl_desc.npins,
++					    sizeof(*gpio_dev->saved_regs),
++					    GFP_KERNEL);
++	if (!gpio_dev->saved_regs)
++		return -ENOMEM;
++#endif
++
+ 	gpio_dev->pdev = pdev;
+ 	gpio_dev->gc.direction_input	= amd_gpio_direction_input;
+ 	gpio_dev->gc.direction_output	= amd_gpio_direction_output;
+@@ -853,6 +925,9 @@ static struct platform_driver amd_gpio_driver = {
+ 	.driver		= {
+ 		.name	= "amd_gpio",
+ 		.acpi_match_table = ACPI_PTR(amd_gpio_acpi_match),
++#ifdef CONFIG_PM_SLEEP
++		.pm	= &amd_gpio_pm_ops,
++#endif
+ 	},
+ 	.probe		= amd_gpio_probe,
+ 	.remove		= amd_gpio_remove,
+diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h
+index 5b1cb965c767..8fa453a59da5 100644
+--- a/drivers/pinctrl/pinctrl-amd.h
++++ b/drivers/pinctrl/pinctrl-amd.h
+@@ -97,6 +97,7 @@ struct amd_gpio {
+ 	unsigned int            hwbank_num;
+ 	struct resource         *res;
+ 	struct platform_device  *pdev;
++	u32			*saved_regs;
+ };
+ 
+ /*  KERNCZ configuration*/
+diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
+index 731530a9ce38..9ab8faf528a6 100644
+--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
++++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
+@@ -174,10 +174,10 @@ static int exynos_irq_request_resources(struct irq_data *irqd)
+ 
+ 	spin_lock_irqsave(&bank->slock, flags);
+ 
+-	con = readl(bank->eint_base + reg_con);
++	con = readl(bank->pctl_base + reg_con);
+ 	con &= ~(mask << shift);
+ 	con |= EXYNOS_EINT_FUNC << shift;
+-	writel(con, bank->eint_base + reg_con);
++	writel(con, bank->pctl_base + reg_con);
+ 
+ 	spin_unlock_irqrestore(&bank->slock, flags);
+ 
+@@ -202,10 +202,10 @@ static void exynos_irq_release_resources(struct irq_data *irqd)
+ 
+ 	spin_lock_irqsave(&bank->slock, flags);
+ 
+-	con = readl(bank->eint_base + reg_con);
++	con = readl(bank->pctl_base + reg_con);
+ 	con &= ~(mask << shift);
+ 	con |= FUNC_INPUT << shift;
+-	writel(con, bank->eint_base + reg_con);
++	writel(con, bank->pctl_base + reg_con);
+ 
+ 	spin_unlock_irqrestore(&bank->slock, flags);
+ 
+diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
+index 49774851e84a..edf27264b603 100644
+--- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
++++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
+@@ -151,7 +151,7 @@ static void s3c24xx_eint_set_function(struct samsung_pinctrl_drv_data *d,
+ 	u32 val;
+ 
+ 	/* Make sure that pin is configured as interrupt */
+-	reg = bank->pctl_base + bank->pctl_offset;
++	reg = d->virt_base + bank->pctl_offset;
+ 	shift = pin * bank_type->fld_width[PINCFG_TYPE_FUNC];
+ 	mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1;
+ 
+@@ -184,7 +184,7 @@ static int s3c24xx_eint_type(struct irq_data *data, unsigned int type)
+ 	s3c24xx_eint_set_handler(data, type);
+ 
+ 	/* Set up interrupt trigger */
+-	reg = bank->eint_base + EINT_REG(index);
++	reg = d->virt_base + EINT_REG(index);
+ 	shift = EINT_OFFS(index);
+ 
+ 	val = readl(reg);
+@@ -259,29 +259,32 @@ static void s3c2410_demux_eint0_3(struct irq_desc *desc)
+ static void s3c2412_eint0_3_ack(struct irq_data *data)
+ {
+ 	struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data);
++	struct samsung_pinctrl_drv_data *d = bank->drvdata;
+ 
+ 	unsigned long bitval = 1UL << data->hwirq;
+-	writel(bitval, bank->eint_base + EINTPEND_REG);
++	writel(bitval, d->virt_base + EINTPEND_REG);
+ }
+ 
+ static void s3c2412_eint0_3_mask(struct irq_data *data)
+ {
+ 	struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data);
++	struct samsung_pinctrl_drv_data *d = bank->drvdata;
+ 	unsigned long mask;
+ 
+-	mask = readl(bank->eint_base + EINTMASK_REG);
++	mask = readl(d->virt_base + EINTMASK_REG);
+ 	mask |= (1UL << data->hwirq);
+-	writel(mask, bank->eint_base + EINTMASK_REG);
++	writel(mask, d->virt_base + EINTMASK_REG);
+ }
+ 
+ static void s3c2412_eint0_3_unmask(struct irq_data *data)
+ {
+ 	struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data);
++	struct samsung_pinctrl_drv_data *d = bank->drvdata;
+ 	unsigned long mask;
+ 
+-	mask = readl(bank->eint_base + EINTMASK_REG);
++	mask = readl(d->virt_base + EINTMASK_REG);
+ 	mask &= ~(1UL << data->hwirq);
+-	writel(mask, bank->eint_base + EINTMASK_REG);
++	writel(mask, d->virt_base + EINTMASK_REG);
+ }
+ 
+ static struct irq_chip s3c2412_eint0_3_chip = {
+@@ -316,31 +319,34 @@ static void s3c2412_demux_eint0_3(struct irq_desc *desc)
+ static void s3c24xx_eint_ack(struct irq_data *data)
+ {
+ 	struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data);
++	struct samsung_pinctrl_drv_data *d = bank->drvdata;
+ 	unsigned char index = bank->eint_offset + data->hwirq;
+ 
+-	writel(1UL << index, bank->eint_base + EINTPEND_REG);
++	writel(1UL << index, d->virt_base + EINTPEND_REG);
+ }
+ 
+ static void s3c24xx_eint_mask(struct irq_data *data)
+ {
+ 	struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data);
++	struct samsung_pinctrl_drv_data *d = bank->drvdata;
+ 	unsigned char index = bank->eint_offset + data->hwirq;
+ 	unsigned long mask;
+ 
+-	mask = readl(bank->eint_base + EINTMASK_REG);
++	mask = readl(d->virt_base + EINTMASK_REG);
+ 	mask |= (1UL << index);
+-	writel(mask, bank->eint_base + EINTMASK_REG);
++	writel(mask, d->virt_base + EINTMASK_REG);
+ }
+ 
+ static void s3c24xx_eint_unmask(struct irq_data *data)
+ {
+ 	struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data);
++	struct samsung_pinctrl_drv_data *d = bank->drvdata;
+ 	unsigned char index = bank->eint_offset + data->hwirq;
+ 	unsigned long mask;
+ 
+-	mask = readl(bank->eint_base + EINTMASK_REG);
++	mask = readl(d->virt_base + EINTMASK_REG);
+ 	mask &= ~(1UL << index);
+-	writel(mask, bank->eint_base + EINTMASK_REG);
++	writel(mask, d->virt_base + EINTMASK_REG);
+ }
+ 
+ static struct irq_chip s3c24xx_eint_chip = {
+@@ -356,14 +362,13 @@ static inline void s3c24xx_demux_eint(struct irq_desc *desc,
+ {
+ 	struct s3c24xx_eint_data *data = irq_desc_get_handler_data(desc);
+ 	struct irq_chip *chip = irq_desc_get_chip(desc);
+-	struct irq_data *irqd = irq_desc_get_irq_data(desc);
+-	struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
++	struct samsung_pinctrl_drv_data *d = data->drvdata;
+ 	unsigned int pend, mask;
+ 
+ 	chained_irq_enter(chip, desc);
+ 
+-	pend = readl(bank->eint_base + EINTPEND_REG);
+-	mask = readl(bank->eint_base + EINTMASK_REG);
++	pend = readl(d->virt_base + EINTPEND_REG);
++	mask = readl(d->virt_base + EINTMASK_REG);
+ 
+ 	pend &= ~mask;
+ 	pend &= range;
+diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
+index 4a88d7446e87..e63663b32907 100644
+--- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
++++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
+@@ -280,7 +280,7 @@ static void s3c64xx_irq_set_function(struct samsung_pinctrl_drv_data *d,
+ 	u32 val;
+ 
+ 	/* Make sure that pin is configured as interrupt */
+-	reg = bank->pctl_base + bank->pctl_offset;
++	reg = d->virt_base + bank->pctl_offset;
+ 	shift = pin;
+ 	if (bank_type->fld_width[PINCFG_TYPE_FUNC] * shift >= 32) {
+ 		/* 4-bit bank type with 2 con regs */
+@@ -308,8 +308,9 @@ static void s3c64xx_irq_set_function(struct samsung_pinctrl_drv_data *d,
+ static inline void s3c64xx_gpio_irq_set_mask(struct irq_data *irqd, bool mask)
+ {
+ 	struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
++	struct samsung_pinctrl_drv_data *d = bank->drvdata;
+ 	unsigned char index = EINT_OFFS(bank->eint_offset) + irqd->hwirq;
+-	void __iomem *reg = bank->eint_base + EINTMASK_REG(bank->eint_offset);
++	void __iomem *reg = d->virt_base + EINTMASK_REG(bank->eint_offset);
+ 	u32 val;
+ 
+ 	val = readl(reg);
+@@ -333,8 +334,9 @@ static void s3c64xx_gpio_irq_mask(struct irq_data *irqd)
+ static void s3c64xx_gpio_irq_ack(struct irq_data *irqd)
+ {
+ 	struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
++	struct samsung_pinctrl_drv_data *d = bank->drvdata;
+ 	unsigned char index = EINT_OFFS(bank->eint_offset) + irqd->hwirq;
+-	void __iomem *reg = bank->eint_base + EINTPEND_REG(bank->eint_offset);
++	void __iomem *reg = d->virt_base + EINTPEND_REG(bank->eint_offset);
+ 
+ 	writel(1 << index, reg);
+ }
+@@ -357,7 +359,7 @@ static int s3c64xx_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
+ 	s3c64xx_irq_set_handler(irqd, type);
+ 
+ 	/* Set up interrupt trigger */
+-	reg = bank->eint_base + EINTCON_REG(bank->eint_offset);
++	reg = d->virt_base + EINTCON_REG(bank->eint_offset);
+ 	shift = EINT_OFFS(bank->eint_offset) + irqd->hwirq;
+ 	shift = 4 * (shift / 4); /* 4 EINTs per trigger selector */
+ 
+@@ -409,8 +411,7 @@ static void s3c64xx_eint_gpio_irq(struct irq_desc *desc)
+ {
+ 	struct irq_chip *chip = irq_desc_get_chip(desc);
+ 	struct s3c64xx_eint_gpio_data *data = irq_desc_get_handler_data(desc);
+-	struct irq_data *irqd = irq_desc_get_irq_data(desc);
+-	struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
++	struct samsung_pinctrl_drv_data *drvdata = data->drvdata;
+ 
+ 	chained_irq_enter(chip, desc);
+ 
+@@ -420,7 +421,7 @@ static void s3c64xx_eint_gpio_irq(struct irq_desc *desc)
+ 		unsigned int pin;
+ 		unsigned int virq;
+ 
+-		svc = readl(bank->eint_base + SERVICE_REG);
++		svc = readl(drvdata->virt_base + SERVICE_REG);
+ 		group = SVC_GROUP(svc);
+ 		pin = svc & SVC_NUM_MASK;
+ 
+@@ -515,15 +516,15 @@ static inline void s3c64xx_eint0_irq_set_mask(struct irq_data *irqd, bool mask)
+ {
+ 	struct s3c64xx_eint0_domain_data *ddata =
+ 					irq_data_get_irq_chip_data(irqd);
+-	struct samsung_pin_bank *bank = ddata->bank;
++	struct samsung_pinctrl_drv_data *d = ddata->bank->drvdata;
+ 	u32 val;
+ 
+-	val = readl(bank->eint_base + EINT0MASK_REG);
++	val = readl(d->virt_base + EINT0MASK_REG);
+ 	if (mask)
+ 		val |= 1 << ddata->eints[irqd->hwirq];
+ 	else
+ 		val &= ~(1 << ddata->eints[irqd->hwirq]);
+-	writel(val, bank->eint_base + EINT0MASK_REG);
++	writel(val, d->virt_base + EINT0MASK_REG);
+ }
+ 
+ static void s3c64xx_eint0_irq_unmask(struct irq_data *irqd)
+@@ -540,10 +541,10 @@ static void s3c64xx_eint0_irq_ack(struct irq_data *irqd)
+ {
+ 	struct s3c64xx_eint0_domain_data *ddata =
+ 					irq_data_get_irq_chip_data(irqd);
+-	struct samsung_pin_bank *bank = ddata->bank;
++	struct samsung_pinctrl_drv_data *d = ddata->bank->drvdata;
+ 
+ 	writel(1 << ddata->eints[irqd->hwirq],
+-					bank->eint_base + EINT0PEND_REG);
++					d->virt_base + EINT0PEND_REG);
+ }
+ 
+ static int s3c64xx_eint0_irq_set_type(struct irq_data *irqd, unsigned int type)
+@@ -551,7 +552,7 @@ static int s3c64xx_eint0_irq_set_type(struct irq_data *irqd, unsigned int type)
+ 	struct s3c64xx_eint0_domain_data *ddata =
+ 					irq_data_get_irq_chip_data(irqd);
+ 	struct samsung_pin_bank *bank = ddata->bank;
+-	struct samsung_pinctrl_drv_data *d = ddata->bank->drvdata;
++	struct samsung_pinctrl_drv_data *d = bank->drvdata;
+ 	void __iomem *reg;
+ 	int trigger;
+ 	u8 shift;
+@@ -566,7 +567,7 @@ static int s3c64xx_eint0_irq_set_type(struct irq_data *irqd, unsigned int type)
+ 	s3c64xx_irq_set_handler(irqd, type);
+ 
+ 	/* Set up interrupt trigger */
+-	reg = bank->eint_base + EINT0CON0_REG;
++	reg = d->virt_base + EINT0CON0_REG;
+ 	shift = ddata->eints[irqd->hwirq];
+ 	if (shift >= EINT_MAX_PER_REG) {
+ 		reg += 4;
+@@ -598,19 +599,14 @@ static struct irq_chip s3c64xx_eint0_irq_chip = {
+ static inline void s3c64xx_irq_demux_eint(struct irq_desc *desc, u32 range)
+ {
+ 	struct irq_chip *chip = irq_desc_get_chip(desc);
+-	struct irq_data *irqd = irq_desc_get_irq_data(desc);
+-	struct s3c64xx_eint0_domain_data *ddata =
+-					irq_data_get_irq_chip_data(irqd);
+-	struct samsung_pin_bank *bank = ddata->bank;
+-
+ 	struct s3c64xx_eint0_data *data = irq_desc_get_handler_data(desc);
+-
++	struct samsung_pinctrl_drv_data *drvdata = data->drvdata;
+ 	unsigned int pend, mask;
+ 
+ 	chained_irq_enter(chip, desc);
+ 
+-	pend = readl(bank->eint_base + EINT0PEND_REG);
+-	mask = readl(bank->eint_base + EINT0MASK_REG);
++	pend = readl(drvdata->virt_base + EINT0PEND_REG);
++	mask = readl(drvdata->virt_base + EINT0MASK_REG);
+ 
+ 	pend = pend & range & ~mask;
+ 	pend &= range;
+diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
+index f542642eed8d..61bbd54e35ba 100644
+--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
++++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
+@@ -1013,6 +1013,12 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
+ 		bank->eint_base = virt_base[0];
+ 		bank->pctl_base = virt_base[bdata->pctl_res_idx];
+ 	}
++	/*
++	 * Legacy platforms should provide only one resource with IO memory.
++	 * Store it as virt_base because legacy driver needs to access it
++	 * through samsung_pinctrl_drv_data.
++	 */
++	d->virt_base = virt_base[0];
+ 
+ 	for_each_child_of_node(node, np) {
+ 		if (!of_find_property(np, "gpio-controller", NULL))
+diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
+index 515a61035e54..61c4cab0ad24 100644
+--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
++++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
+@@ -247,6 +247,10 @@ struct samsung_pin_ctrl {
+ /**
+  * struct samsung_pinctrl_drv_data: wrapper for holding driver data together.
+  * @node: global list node
++ * @virt_base: register base address of the controller; this will be equal
++ *             to each bank samsung_pin_bank->pctl_base and used on legacy
++ *             platforms (like S3C24XX or S3C64XX) which has to access the base
++ *             through samsung_pinctrl_drv_data, not samsung_pin_bank).
+  * @dev: device instance representing the controller.
+  * @irq: interrpt number used by the controller to notify gpio interrupts.
+  * @ctrl: pin controller instance managed by the driver.
+@@ -262,6 +266,7 @@ struct samsung_pin_ctrl {
+  */
+ struct samsung_pinctrl_drv_data {
+ 	struct list_head		node;
++	void __iomem			*virt_base;
+ 	struct device			*dev;
+ 	int				irq;
+ 
+diff --git a/drivers/regulator/cpcap-regulator.c b/drivers/regulator/cpcap-regulator.c
+index cc98aceed1c1..ce1cab320f6f 100644
+--- a/drivers/regulator/cpcap-regulator.c
++++ b/drivers/regulator/cpcap-regulator.c
+@@ -77,6 +77,8 @@
+ #define CPCAP_BIT_VAUDIO_MODE0		BIT(1)
+ #define CPCAP_BIT_V_AUDIO_EN		BIT(0)
+ 
++#define CPCAP_BIT_AUDIO_NORMAL_MODE	0x00
++
+ /*
+  * Off mode configuration bit. Used currently only by SW5 on omap4. There's
+  * the following comment in Motorola Linux kernel tree for it:
+@@ -217,7 +219,7 @@ static unsigned int cpcap_regulator_get_mode(struct regulator_dev *rdev)
+ 
+ 	regmap_read(rdev->regmap, rdev->desc->enable_reg, &value);
+ 
+-	if (!(value & CPCAP_BIT_AUDIO_LOW_PWR))
++	if (value & CPCAP_BIT_AUDIO_LOW_PWR)
+ 		return REGULATOR_MODE_STANDBY;
+ 
+ 	return REGULATOR_MODE_NORMAL;
+@@ -230,10 +232,10 @@ static int cpcap_regulator_set_mode(struct regulator_dev *rdev,
+ 
+ 	switch (mode) {
+ 	case REGULATOR_MODE_NORMAL:
+-		value = CPCAP_BIT_AUDIO_LOW_PWR;
++		value = CPCAP_BIT_AUDIO_NORMAL_MODE;
+ 		break;
+ 	case REGULATOR_MODE_STANDBY:
+-		value = 0;
++		value = CPCAP_BIT_AUDIO_LOW_PWR;
+ 		break;
+ 	default:
+ 		return -EINVAL;
+diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
+index d5bf36ec8a75..34367d172961 100644
+--- a/drivers/s390/scsi/zfcp_dbf.c
++++ b/drivers/s390/scsi/zfcp_dbf.c
+@@ -3,7 +3,7 @@
+  *
+  * Debug traces for zfcp.
+  *
+- * Copyright IBM Corp. 2002, 2016
++ * Copyright IBM Corp. 2002, 2017
+  */
+ 
+ #define KMSG_COMPONENT "zfcp"
+@@ -447,6 +447,7 @@ static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
+ 	struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
+ 	struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
+ 	struct scatterlist *resp_entry = ct_els->resp;
++	struct fc_ct_hdr *resph;
+ 	struct fc_gpn_ft_resp *acc;
+ 	int max_entries, x, last = 0;
+ 
+@@ -473,6 +474,13 @@ static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
+ 		return len; /* not GPN_FT response so do not cap */
+ 
+ 	acc = sg_virt(resp_entry);
++
++	/* cap all but accept CT responses to at least the CT header */
++	resph = (struct fc_ct_hdr *)acc;
++	if ((ct_els->status) ||
++	    (resph->ct_cmd != cpu_to_be16(FC_FS_ACC)))
++		return max(FC_CT_HDR_LEN, ZFCP_DBF_SAN_MAX_PAYLOAD);
++
+ 	max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp))
+ 		+ 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
+ 		     * to account for header as 1st pseudo "entry" */;
+@@ -555,8 +563,8 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
+ 	rec->scsi_retries = sc->retries;
+ 	rec->scsi_allowed = sc->allowed;
+ 	rec->scsi_id = sc->device->id;
+-	/* struct zfcp_dbf_scsi needs to be updated to handle 64bit LUNs */
+ 	rec->scsi_lun = (u32)sc->device->lun;
++	rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
+ 	rec->host_scribble = (unsigned long)sc->host_scribble;
+ 
+ 	memcpy(rec->scsi_opcode, sc->cmnd,
+@@ -564,19 +572,32 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
+ 
+ 	if (fsf) {
+ 		rec->fsf_req_id = fsf->req_id;
++		rec->pl_len = FCP_RESP_WITH_EXT;
+ 		fcp_rsp = (struct fcp_resp_with_ext *)
+ 				&(fsf->qtcb->bottom.io.fcp_rsp);
++		/* mandatory parts of FCP_RSP IU in this SCSI record */
+ 		memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
+ 		if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
+ 			fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
+ 			rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
++			rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_rsp_len);
+ 		}
+ 		if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
+-			rec->pl_len = min((u16)SCSI_SENSE_BUFFERSIZE,
+-					  (u16)ZFCP_DBF_PAY_MAX_REC);
+-			zfcp_dbf_pl_write(dbf, sc->sense_buffer, rec->pl_len,
+-					  "fcp_sns", fsf->req_id);
++			rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_sns_len);
+ 		}
++		/* complete FCP_RSP IU in associated PAYload record
++		 * but only if there are optional parts
++		 */
++		if (fcp_rsp->resp.fr_flags != 0)
++			zfcp_dbf_pl_write(
++				dbf, fcp_rsp,
++				/* at least one full PAY record
++				 * but not beyond hardware response field
++				 */
++				min_t(u16, max_t(u16, rec->pl_len,
++						 ZFCP_DBF_PAY_MAX_REC),
++				      FSF_FCP_RSP_SIZE),
++				"fcp_riu", fsf->req_id);
+ 	}
+ 
+ 	debug_event(dbf->scsi, level, rec, sizeof(*rec));
+diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
+index db186d44cfaf..b60667c145fd 100644
+--- a/drivers/s390/scsi/zfcp_dbf.h
++++ b/drivers/s390/scsi/zfcp_dbf.h
+@@ -2,7 +2,7 @@
+  * zfcp device driver
+  * debug feature declarations
+  *
+- * Copyright IBM Corp. 2008, 2016
++ * Copyright IBM Corp. 2008, 2017
+  */
+ 
+ #ifndef ZFCP_DBF_H
+@@ -204,7 +204,7 @@ enum zfcp_dbf_scsi_id {
+  * @id: unique number of recovery record type
+  * @tag: identifier string specifying the location of initiation
+  * @scsi_id: scsi device id
+- * @scsi_lun: scsi device logical unit number
++ * @scsi_lun: scsi device logical unit number, low part of 64 bit, old 32 bit
+  * @scsi_result: scsi result
+  * @scsi_retries: current retry number of scsi request
+  * @scsi_allowed: allowed retries
+@@ -214,6 +214,7 @@ enum zfcp_dbf_scsi_id {
+  * @host_scribble: LLD specific data attached to SCSI request
+  * @pl_len: length of paload stored as zfcp_dbf_pay
+  * @fsf_rsp: response for fsf request
++ * @scsi_lun_64_hi: scsi device logical unit number, high part of 64 bit
+  */
+ struct zfcp_dbf_scsi {
+ 	u8 id;
+@@ -230,6 +231,7 @@ struct zfcp_dbf_scsi {
+ 	u64 host_scribble;
+ 	u16 pl_len;
+ 	struct fcp_resp_with_ext fcp_rsp;
++	u32 scsi_lun_64_hi;
+ } __packed;
+ 
+ /**
+@@ -323,7 +325,11 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
+ {
+ 	struct fsf_qtcb *qtcb = req->qtcb;
+ 
+-	if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
++	if (unlikely(req->status & (ZFCP_STATUS_FSFREQ_DISMISSED |
++				    ZFCP_STATUS_FSFREQ_ERROR))) {
++		zfcp_dbf_hba_fsf_resp("fs_rerr", 3, req);
++
++	} else if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
+ 	    (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
+ 		zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
+ 
+@@ -401,7 +407,8 @@ void zfcp_dbf_scsi_abort(char *tag, struct scsi_cmnd *scmd,
+  * @flag: indicates type of reset (Target Reset, Logical Unit Reset)
+  */
+ static inline
+-void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
++void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag,
++			    struct zfcp_fsf_req *fsf_req)
+ {
+ 	char tmp_tag[ZFCP_DBF_TAG_LEN];
+ 
+@@ -411,7 +418,7 @@ void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
+ 		memcpy(tmp_tag, "lr_", 3);
+ 
+ 	memcpy(&tmp_tag[3], tag, 4);
+-	_zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL);
++	_zfcp_dbf_scsi(tmp_tag, 1, scmnd, fsf_req);
+ }
+ 
+ /**
+diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
+index df2b541c8287..a2275825186f 100644
+--- a/drivers/s390/scsi/zfcp_fc.h
++++ b/drivers/s390/scsi/zfcp_fc.h
+@@ -4,7 +4,7 @@
+  * Fibre Channel related definitions and inline functions for the zfcp
+  * device driver
+  *
+- * Copyright IBM Corp. 2009
++ * Copyright IBM Corp. 2009, 2017
+  */
+ 
+ #ifndef ZFCP_FC_H
+@@ -279,6 +279,10 @@ void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp,
+ 		     !(rsp_flags & FCP_SNS_LEN_VAL) &&
+ 		     fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
+ 			set_host_byte(scsi, DID_ERROR);
++	} else if (unlikely(rsp_flags & FCP_RESID_OVER)) {
++		/* FCP_DL was not sufficient for SCSI data length */
++		if (fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
++			set_host_byte(scsi, DID_ERROR);
+ 	}
+ }
+ 
+diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
+index 27ff38f839fc..1964391db904 100644
+--- a/drivers/s390/scsi/zfcp_fsf.c
++++ b/drivers/s390/scsi/zfcp_fsf.c
+@@ -928,8 +928,8 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
+ 
+ 	switch (header->fsf_status) {
+         case FSF_GOOD:
+-		zfcp_dbf_san_res("fsscth2", req);
+ 		ct->status = 0;
++		zfcp_dbf_san_res("fsscth2", req);
+ 		break;
+         case FSF_SERVICE_CLASS_NOT_SUPPORTED:
+ 		zfcp_fsf_class_not_supp(req);
+@@ -1109,8 +1109,8 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
+ 
+ 	switch (header->fsf_status) {
+ 	case FSF_GOOD:
+-		zfcp_dbf_san_res("fsselh1", req);
+ 		send_els->status = 0;
++		zfcp_dbf_san_res("fsselh1", req);
+ 		break;
+ 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
+ 		zfcp_fsf_class_not_supp(req);
+@@ -2258,7 +2258,8 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
+ 	fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
+ 	zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
+ 
+-	if (scsi_prot_sg_count(scsi_cmnd)) {
++	if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) &&
++	    scsi_prot_sg_count(scsi_cmnd)) {
+ 		zfcp_qdio_set_data_div(qdio, &req->qdio_req,
+ 				       scsi_prot_sg_count(scsi_cmnd));
+ 		retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
+diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
+index 0678cf714c0e..a1eeeaaa0fca 100644
+--- a/drivers/s390/scsi/zfcp_scsi.c
++++ b/drivers/s390/scsi/zfcp_scsi.c
+@@ -3,7 +3,7 @@
+  *
+  * Interface to Linux SCSI midlayer.
+  *
+- * Copyright IBM Corp. 2002, 2016
++ * Copyright IBM Corp. 2002, 2017
+  */
+ 
+ #define KMSG_COMPONENT "zfcp"
+@@ -273,25 +273,29 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
+ 
+ 		zfcp_erp_wait(adapter);
+ 		ret = fc_block_scsi_eh(scpnt);
+-		if (ret)
++		if (ret) {
++			zfcp_dbf_scsi_devreset("fiof", scpnt, tm_flags, NULL);
+ 			return ret;
++		}
+ 
+ 		if (!(atomic_read(&adapter->status) &
+ 		      ZFCP_STATUS_COMMON_RUNNING)) {
+-			zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags);
++			zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags, NULL);
+ 			return SUCCESS;
+ 		}
+ 	}
+-	if (!fsf_req)
++	if (!fsf_req) {
++		zfcp_dbf_scsi_devreset("reqf", scpnt, tm_flags, NULL);
+ 		return FAILED;
++	}
+ 
+ 	wait_for_completion(&fsf_req->completion);
+ 
+ 	if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
+-		zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
++		zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags, fsf_req);
+ 		retval = FAILED;
+ 	} else {
+-		zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
++		zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags, fsf_req);
+ 		zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
+ 	}
+ 
+diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
+index a1a2c71e1626..b051d97af468 100644
+--- a/drivers/scsi/aacraid/aachba.c
++++ b/drivers/scsi/aacraid/aachba.c
+@@ -594,6 +594,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd)
+ 
+ 	aac_fib_init(cmd_fibcontext);
+ 	dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext);
++	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ 
+ 	dinfo->command = cpu_to_le32(VM_ContainerConfig);
+ 	dinfo->type = cpu_to_le32(CT_READ_NAME);
+@@ -611,10 +612,8 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd)
+ 	/*
+ 	 *	Check that the command queued to the controller
+ 	 */
+-	if (status == -EINPROGRESS) {
+-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
++	if (status == -EINPROGRESS)
+ 		return 0;
+-	}
+ 
+ 	printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status);
+ 	aac_fib_complete(cmd_fibcontext);
+@@ -725,6 +724,7 @@ static void _aac_probe_container1(void * context, struct fib * fibptr)
+ 
+ 	dinfo->count = cpu_to_le32(scmd_id(scsicmd));
+ 	dinfo->type = cpu_to_le32(FT_FILESYS);
++	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ 
+ 	status = aac_fib_send(ContainerCommand,
+ 			  fibptr,
+@@ -736,9 +736,7 @@ static void _aac_probe_container1(void * context, struct fib * fibptr)
+ 	/*
+ 	 *	Check that the command queued to the controller
+ 	 */
+-	if (status == -EINPROGRESS)
+-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+-	else if (status < 0) {
++	if (status < 0 && status != -EINPROGRESS) {
+ 		/* Inherit results from VM_NameServe, if any */
+ 		dresp->status = cpu_to_le32(ST_OK);
+ 		_aac_probe_container2(context, fibptr);
+@@ -766,6 +764,7 @@ static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(stru
+ 		dinfo->count = cpu_to_le32(scmd_id(scsicmd));
+ 		dinfo->type = cpu_to_le32(FT_FILESYS);
+ 		scsicmd->SCp.ptr = (char *)callback;
++		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ 
+ 		status = aac_fib_send(ContainerCommand,
+ 			  fibptr,
+@@ -777,10 +776,9 @@ static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(stru
+ 		/*
+ 		 *	Check that the command queued to the controller
+ 		 */
+-		if (status == -EINPROGRESS) {
+-			scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
++		if (status == -EINPROGRESS)
+ 			return 0;
+-		}
++
+ 		if (status < 0) {
+ 			scsicmd->SCp.ptr = NULL;
+ 			aac_fib_complete(fibptr);
+@@ -1126,6 +1124,7 @@ static int aac_get_container_serial(struct scsi_cmnd * scsicmd)
+ 	dinfo->command = cpu_to_le32(VM_ContainerConfig);
+ 	dinfo->type = cpu_to_le32(CT_CID_TO_32BITS_UID);
+ 	dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
++	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ 
+ 	status = aac_fib_send(ContainerCommand,
+ 		  cmd_fibcontext,
+@@ -1138,10 +1137,8 @@ static int aac_get_container_serial(struct scsi_cmnd * scsicmd)
+ 	/*
+ 	 *	Check that the command queued to the controller
+ 	 */
+-	if (status == -EINPROGRESS) {
+-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
++	if (status == -EINPROGRESS)
+ 		return 0;
+-	}
+ 
+ 	printk(KERN_WARNING "aac_get_container_serial: aac_fib_send failed with status: %d.\n", status);
+ 	aac_fib_complete(cmd_fibcontext);
+@@ -2335,16 +2332,14 @@ static int aac_read(struct scsi_cmnd * scsicmd)
+ 	 *	Alocate and initialize a Fib
+ 	 */
+ 	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
+-
++	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ 	status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count);
+ 
+ 	/*
+ 	 *	Check that the command queued to the controller
+ 	 */
+-	if (status == -EINPROGRESS) {
+-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
++	if (status == -EINPROGRESS)
+ 		return 0;
+-	}
+ 
+ 	printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status);
+ 	/*
+@@ -2429,16 +2424,14 @@ static int aac_write(struct scsi_cmnd * scsicmd)
+ 	 *	Allocate and initialize a Fib then setup a BlockWrite command
+ 	 */
+ 	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
+-
++	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ 	status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua);
+ 
+ 	/*
+ 	 *	Check that the command queued to the controller
+ 	 */
+-	if (status == -EINPROGRESS) {
+-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
++	if (status == -EINPROGRESS)
+ 		return 0;
+-	}
+ 
+ 	printk(KERN_WARNING "aac_write: aac_fib_send failed with status: %d\n", status);
+ 	/*
+@@ -2588,6 +2581,7 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd)
+ 	synchronizecmd->cid = cpu_to_le32(scmd_id(scsicmd));
+ 	synchronizecmd->count =
+ 	     cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data));
++	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ 
+ 	/*
+ 	 *	Now send the Fib to the adapter
+@@ -2603,10 +2597,8 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd)
+ 	/*
+ 	 *	Check that the command queued to the controller
+ 	 */
+-	if (status == -EINPROGRESS) {
+-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
++	if (status == -EINPROGRESS)
+ 		return 0;
+-	}
+ 
+ 	printk(KERN_WARNING
+ 		"aac_synchronize: aac_fib_send failed with status: %d.\n", status);
+@@ -2666,6 +2658,7 @@ static int aac_start_stop(struct scsi_cmnd *scsicmd)
+ 	pmcmd->cid = cpu_to_le32(sdev_id(sdev));
+ 	pmcmd->parm = (scsicmd->cmnd[1] & 1) ?
+ 		cpu_to_le32(CT_PM_UNIT_IMMEDIATE) : 0;
++	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ 
+ 	/*
+ 	 *	Now send the Fib to the adapter
+@@ -2681,10 +2674,8 @@ static int aac_start_stop(struct scsi_cmnd *scsicmd)
+ 	/*
+ 	 *	Check that the command queued to the controller
+ 	 */
+-	if (status == -EINPROGRESS) {
+-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
++	if (status == -EINPROGRESS)
+ 		return 0;
+-	}
+ 
+ 	aac_fib_complete(cmd_fibcontext);
+ 	aac_fib_free(cmd_fibcontext);
+@@ -3692,16 +3683,14 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
+ 	 *	Allocate and initialize a Fib then setup a BlockWrite command
+ 	 */
+ 	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
+-
++	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ 	status = aac_adapter_scsi(cmd_fibcontext, scsicmd);
+ 
+ 	/*
+ 	 *	Check that the command queued to the controller
+ 	 */
+-	if (status == -EINPROGRESS) {
+-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
++	if (status == -EINPROGRESS)
+ 		return 0;
+-	}
+ 
+ 	printk(KERN_WARNING "aac_srb: aac_fib_send failed with status: %d\n", status);
+ 	aac_fib_complete(cmd_fibcontext);
+@@ -3739,15 +3728,14 @@ static int aac_send_hba_fib(struct scsi_cmnd *scsicmd)
+ 	if (!cmd_fibcontext)
+ 		return -1;
+ 
++	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ 	status = aac_adapter_hba(cmd_fibcontext, scsicmd);
+ 
+ 	/*
+ 	 *	Check that the command queued to the controller
+ 	 */
+-	if (status == -EINPROGRESS) {
+-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
++	if (status == -EINPROGRESS)
+ 		return 0;
+-	}
+ 
+ 	pr_warn("aac_hba_cmd_req: aac_fib_send failed with status: %d\n",
+ 		status);
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 71c4746341ea..3ee4ea79f81a 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -1995,9 +1995,12 @@ static void megasas_complete_outstanding_ioctls(struct megasas_instance *instanc
+ 			if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
+ 				cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
+ 				if (cmd_mfi->sync_cmd &&
+-					cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)
++				    (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
++					cmd_mfi->frame->hdr.cmd_status =
++							MFI_STAT_WRONG_STATE;
+ 					megasas_complete_cmd(instance,
+ 							     cmd_mfi, DID_OK);
++				}
+ 			}
+ 		}
+ 	} else {
+@@ -5478,7 +5481,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
+ 		instance->throttlequeuedepth =
+ 				MEGASAS_THROTTLE_QUEUE_DEPTH;
+ 
+-	if (resetwaittime > MEGASAS_RESET_WAIT_TIME)
++	if ((resetwaittime < 1) ||
++	    (resetwaittime > MEGASAS_RESET_WAIT_TIME))
+ 		resetwaittime = MEGASAS_RESET_WAIT_TIME;
+ 
+ 	if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
+@@ -5649,6 +5653,14 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
+ 		prev_aen.word =
+ 			le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
+ 
++		if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
++		    (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
++			dev_info(&instance->pdev->dev,
++				 "%s %d out of range class %d send by application\n",
++				 __func__, __LINE__, curr_aen.members.class);
++			return 0;
++		}
++
+ 		/*
+ 		 * A class whose enum value is smaller is inclusive of all
+ 		 * higher values. If a PROGRESS (= -1) was previously
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+index 985510628f56..8152962f152d 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+@@ -3287,7 +3287,7 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
+ 	mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
+ 		MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
+ 
+-	mpi25_ieee_chain->Length = cpu_to_le32(instance->max_chain_frame_sz);
++	mpi25_ieee_chain->Length = cpu_to_le32(instance->mfi_frame_size);
+ }
+ 
+ /**
+diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
+index 2c3783684815..85e7bae4a7ef 100644
+--- a/drivers/scsi/qedi/qedi_main.c
++++ b/drivers/scsi/qedi/qedi_main.c
+@@ -1575,7 +1575,7 @@ struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid)
+ {
+ 	struct qedi_cmd *cmd = NULL;
+ 
+-	if (tid > MAX_ISCSI_TASK_ENTRIES)
++	if (tid >= MAX_ISCSI_TASK_ENTRIES)
+ 		return NULL;
+ 
+ 	cmd = qedi->itt_map[tid].p_cmd;
+diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
+index 08a1feb3a195..8c6ff1682fb1 100644
+--- a/drivers/scsi/qla2xxx/qla_attr.c
++++ b/drivers/scsi/qla2xxx/qla_attr.c
+@@ -318,6 +318,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
+ 		return -EINVAL;
+ 	if (start > ha->optrom_size)
+ 		return -EINVAL;
++	if (size > ha->optrom_size - start)
++		size = ha->optrom_size - start;
+ 
+ 	mutex_lock(&ha->optrom_mutex);
+ 	switch (val) {
+@@ -343,8 +345,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
+ 		}
+ 
+ 		ha->optrom_region_start = start;
+-		ha->optrom_region_size = start + size > ha->optrom_size ?
+-		    ha->optrom_size - start : size;
++		ha->optrom_region_size = start + size;
+ 
+ 		ha->optrom_state = QLA_SREADING;
+ 		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
+@@ -417,8 +418,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
+ 		}
+ 
+ 		ha->optrom_region_start = start;
+-		ha->optrom_region_size = start + size > ha->optrom_size ?
+-		    ha->optrom_size - start : size;
++		ha->optrom_region_size = start + size;
+ 
+ 		ha->optrom_state = QLA_SWRITING;
+ 		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
+diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
+index b323a7c71eda..0ec250993e93 100644
+--- a/drivers/scsi/qla2xxx/qla_gs.c
++++ b/drivers/scsi/qla2xxx/qla_gs.c
+@@ -3080,7 +3080,7 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
+ 		GPSC_RSP_SIZE);
+ 
+ 	/* GPSC req */
+-	memcpy(ct_req->req.gpsc.port_name, fcport->port_name,
++	memcpy(ct_req->req.gpsc.port_name, fcport->fabric_port_name,
+ 		WWN_SIZE);
+ 
+ 	sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 072ad1aa5505..8f83571afc7b 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -7804,6 +7804,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
+ 		ha->queue_pair_map[qpair_id] = qpair;
+ 		qpair->id = qpair_id;
+ 		qpair->vp_idx = vp_idx;
++		qpair->fw_started = ha->flags.fw_started;
+ 		INIT_LIST_HEAD(&qpair->hints_list);
+ 		qpair->chip_reset = ha->base_qpair->chip_reset;
+ 		qpair->enable_class_2 = ha->base_qpair->enable_class_2;
+diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
+index 7b3b702ef622..ec2c398f5663 100644
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -3429,7 +3429,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
+ 	}
+ 
+ 	/* Enable MSI-X vector for response queue update for queue 0 */
+-	if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
++	if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ 		if (ha->msixbase && ha->mqiobase &&
+ 		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
+ 		     ql2xmqsupport))
+diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
+index 7c6d1a404011..1f1a81c6eaa9 100644
+--- a/drivers/scsi/qla2xxx/qla_mbx.c
++++ b/drivers/scsi/qla2xxx/qla_mbx.c
+@@ -54,6 +54,7 @@ static struct rom_cmd {
+ 	{ MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
+ 	{ MBC_GET_RETRY_COUNT },
+ 	{ MBC_TRACE_CONTROL },
++	{ MBC_INITIALIZE_MULTIQ },
+ };
+ 
+ static int is_rom_cmd(uint16_t cmd)
+@@ -3689,7 +3690,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
+ 				if (qla_ini_mode_enabled(vha) &&
+ 				    ha->flags.fawwpn_enabled &&
+ 				    (rptid_entry->u.f1.flags &
+-				     VP_FLAGS_NAME_VALID)) {
++				     BIT_6)) {
+ 					memcpy(vha->port_name,
+ 					    rptid_entry->u.f1.port_name,
+ 					    WWN_SIZE);
+diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
+index f0605cd196fb..3089094b48fa 100644
+--- a/drivers/scsi/qla2xxx/qla_mid.c
++++ b/drivers/scsi/qla2xxx/qla_mid.c
+@@ -74,7 +74,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
+ 	 * ensures no active vp_list traversal while the vport is removed
+ 	 * from the queue)
+ 	 */
+-	wait_event_timeout(vha->vref_waitq, atomic_read(&vha->vref_count),
++	wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count),
+ 	    10*HZ);
+ 
+ 	spin_lock_irqsave(&ha->vport_slock, flags);
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index e101cd3043b9..4e2a64773060 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -6796,7 +6796,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
+ 	if (!QLA_TGT_MODE_ENABLED())
+ 		return;
+ 
+-	if  (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
++	if  (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ 		ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
+ 		ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
+ 	} else {
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index aad6ebb51735..1a9de8419997 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -828,6 +828,39 @@ static int max_sectors_bytes(struct request_queue *q)
+ 	return max_sectors << 9;
+ }
+ 
++static void
++sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
++{
++	Sg_request *srp;
++	int val;
++	unsigned int ms;
++
++	val = 0;
++	list_for_each_entry(srp, &sfp->rq_list, entry) {
++		if (val > SG_MAX_QUEUE)
++			break;
++		rinfo[val].req_state = srp->done + 1;
++		rinfo[val].problem =
++			srp->header.masked_status &
++			srp->header.host_status &
++			srp->header.driver_status;
++		if (srp->done)
++			rinfo[val].duration =
++				srp->header.duration;
++		else {
++			ms = jiffies_to_msecs(jiffies);
++			rinfo[val].duration =
++				(ms > srp->header.duration) ?
++				(ms - srp->header.duration) : 0;
++		}
++		rinfo[val].orphan = srp->orphan;
++		rinfo[val].sg_io_owned = srp->sg_io_owned;
++		rinfo[val].pack_id = srp->header.pack_id;
++		rinfo[val].usr_ptr = srp->header.usr_ptr;
++		val++;
++	}
++}
++
+ static long
+ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
+ {
+@@ -1012,38 +1045,13 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
+ 			return -EFAULT;
+ 		else {
+ 			sg_req_info_t *rinfo;
+-			unsigned int ms;
+ 
+-			rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
+-								GFP_KERNEL);
++			rinfo = kzalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
++					GFP_KERNEL);
+ 			if (!rinfo)
+ 				return -ENOMEM;
+ 			read_lock_irqsave(&sfp->rq_list_lock, iflags);
+-			val = 0;
+-			list_for_each_entry(srp, &sfp->rq_list, entry) {
+-				if (val >= SG_MAX_QUEUE)
+-					break;
+-				memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
+-				rinfo[val].req_state = srp->done + 1;
+-				rinfo[val].problem =
+-					srp->header.masked_status &
+-					srp->header.host_status &
+-					srp->header.driver_status;
+-				if (srp->done)
+-					rinfo[val].duration =
+-						srp->header.duration;
+-				else {
+-					ms = jiffies_to_msecs(jiffies);
+-					rinfo[val].duration =
+-						(ms > srp->header.duration) ?
+-						(ms - srp->header.duration) : 0;
+-				}
+-				rinfo[val].orphan = srp->orphan;
+-				rinfo[val].sg_io_owned = srp->sg_io_owned;
+-				rinfo[val].pack_id = srp->header.pack_id;
+-				rinfo[val].usr_ptr = srp->header.usr_ptr;
+-				val++;
+-			}
++			sg_fill_request_table(sfp, rinfo);
+ 			read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ 			result = __copy_to_user(p, rinfo,
+ 						SZ_SG_REQ_INFO * SG_MAX_QUEUE);
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 3cc8d67783a1..5e7200f05873 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -1640,6 +1640,8 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
+ 	put_cpu();
+ 
+ 	if (ret == -EAGAIN) {
++		if (payload_sz > sizeof(cmd_request->mpb))
++			kfree(payload);
+ 		/* no more space */
+ 		return SCSI_MLQUEUE_DEVICE_BUSY;
+ 	}
+diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
+index 4e7a4e9dcf4d..f8eba1c5412f 100644
+--- a/drivers/tty/tty_buffer.c
++++ b/drivers/tty/tty_buffer.c
+@@ -361,6 +361,32 @@ int tty_insert_flip_string_flags(struct tty_port *port,
+ }
+ EXPORT_SYMBOL(tty_insert_flip_string_flags);
+ 
++/**
++ *	__tty_insert_flip_char   -	Add one character to the tty buffer
++ *	@port: tty port
++ *	@ch: character
++ *	@flag: flag byte
++ *
++ *	Queue a single byte to the tty buffering, with an optional flag.
++ *	This is the slow path of tty_insert_flip_char.
++ */
++int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag)
++{
++	struct tty_buffer *tb;
++	int flags = (flag == TTY_NORMAL) ? TTYB_NORMAL : 0;
++
++	if (!__tty_buffer_request_room(port, 1, flags))
++		return 0;
++
++	tb = port->buf.tail;
++	if (~tb->flags & TTYB_NORMAL)
++		*flag_buf_ptr(tb, tb->used) = flag;
++	*char_buf_ptr(tb, tb->used++) = ch;
++
++	return 1;
++}
++EXPORT_SYMBOL(__tty_insert_flip_char);
++
+ /**
+  *	tty_schedule_flip	-	push characters to ldisc
+  *	@port: tty port to push from
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index 0d7cf0cc9b87..86ea1d92839a 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -595,7 +595,7 @@ static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
+ 	inode_lock(inode);
+ 
+ 	isize = i_size_read(inode);
+-	if (offset >= isize) {
++	if (offset < 0 || offset >= isize) {
+ 		inode_unlock(inode);
+ 		return -ENXIO;
+ 	}
+@@ -658,7 +658,7 @@ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
+ 	inode_lock(inode);
+ 
+ 	isize = i_size_read(inode);
+-	if (offset >= isize) {
++	if (offset < 0 || offset >= isize) {
+ 		inode_unlock(inode);
+ 		return -ENXIO;
+ 	}
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index d61a70e2193a..c9e7be58756b 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2404,6 +2404,7 @@ static void ext4_orphan_cleanup(struct super_block *sb,
+ 	unsigned int s_flags = sb->s_flags;
+ 	int ret, nr_orphans = 0, nr_truncates = 0;
+ #ifdef CONFIG_QUOTA
++	int quota_update = 0;
+ 	int i;
+ #endif
+ 	if (!es->s_last_orphan) {
+@@ -2442,14 +2443,32 @@ static void ext4_orphan_cleanup(struct super_block *sb,
+ #ifdef CONFIG_QUOTA
+ 	/* Needed for iput() to work correctly and not trash data */
+ 	sb->s_flags |= MS_ACTIVE;
+-	/* Turn on quotas so that they are updated correctly */
++
++	/*
++	 * Turn on quotas which were not enabled for read-only mounts if
++	 * filesystem has quota feature, so that they are updated correctly.
++	 */
++	if (ext4_has_feature_quota(sb) && (s_flags & MS_RDONLY)) {
++		int ret = ext4_enable_quotas(sb);
++
++		if (!ret)
++			quota_update = 1;
++		else
++			ext4_msg(sb, KERN_ERR,
++				"Cannot turn on quotas: error %d", ret);
++	}
++
++	/* Turn on journaled quotas used for old style */
+ 	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
+ 		if (EXT4_SB(sb)->s_qf_names[i]) {
+ 			int ret = ext4_quota_on_mount(sb, i);
+-			if (ret < 0)
++
++			if (!ret)
++				quota_update = 1;
++			else
+ 				ext4_msg(sb, KERN_ERR,
+ 					"Cannot turn on journaled "
+-					"quota: error %d", ret);
++					"quota: type %d: error %d", i, ret);
+ 		}
+ 	}
+ #endif
+@@ -2510,10 +2529,12 @@ static void ext4_orphan_cleanup(struct super_block *sb,
+ 		ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
+ 		       PLURAL(nr_truncates));
+ #ifdef CONFIG_QUOTA
+-	/* Turn quotas off */
+-	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
+-		if (sb_dqopt(sb)->files[i])
+-			dquot_quota_off(sb, i);
++	/* Turn off quotas if they were enabled for orphan cleanup */
++	if (quota_update) {
++		for (i = 0; i < EXT4_MAXQUOTAS; i++) {
++			if (sb_dqopt(sb)->files[i])
++				dquot_quota_off(sb, i);
++		}
+ 	}
+ #endif
+ 	sb->s_flags = s_flags; /* Restore MS_RDONLY status */
+@@ -5512,6 +5533,9 @@ static int ext4_enable_quotas(struct super_block *sb)
+ 				DQUOT_USAGE_ENABLED |
+ 				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
+ 			if (err) {
++				for (type--; type >= 0; type--)
++					dquot_quota_off(sb, type);
++
+ 				ext4_warning(sb,
+ 					"Failed to enable quota tracking "
+ 					"(type=%d, err=%d). Please run "
+diff --git a/fs/orangefs/acl.c b/fs/orangefs/acl.c
+index 7a3754488312..9409aac232f7 100644
+--- a/fs/orangefs/acl.c
++++ b/fs/orangefs/acl.c
+@@ -61,9 +61,9 @@ struct posix_acl *orangefs_get_acl(struct inode *inode, int type)
+ 	return acl;
+ }
+ 
+-int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
++static int __orangefs_set_acl(struct inode *inode, struct posix_acl *acl,
++			      int type)
+ {
+-	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+ 	int error = 0;
+ 	void *value = NULL;
+ 	size_t size = 0;
+@@ -72,22 +72,6 @@ int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+ 	switch (type) {
+ 	case ACL_TYPE_ACCESS:
+ 		name = XATTR_NAME_POSIX_ACL_ACCESS;
+-		if (acl) {
+-			umode_t mode;
+-
+-			error = posix_acl_update_mode(inode, &mode, &acl);
+-			if (error) {
+-				gossip_err("%s: posix_acl_update_mode err: %d\n",
+-					   __func__,
+-					   error);
+-				return error;
+-			}
+-
+-			if (inode->i_mode != mode)
+-				SetModeFlag(orangefs_inode);
+-			inode->i_mode = mode;
+-			mark_inode_dirty_sync(inode);
+-		}
+ 		break;
+ 	case ACL_TYPE_DEFAULT:
+ 		name = XATTR_NAME_POSIX_ACL_DEFAULT;
+@@ -132,6 +116,29 @@ int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+ 	return error;
+ }
+ 
++int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
++{
++	int error;
++
++	if (type == ACL_TYPE_ACCESS && acl) {
++		umode_t mode;
++
++		error = posix_acl_update_mode(inode, &mode, &acl);
++		if (error) {
++			gossip_err("%s: posix_acl_update_mode err: %d\n",
++				   __func__,
++				   error);
++			return error;
++		}
++
++		if (inode->i_mode != mode)
++			SetModeFlag(ORANGEFS_I(inode));
++		inode->i_mode = mode;
++		mark_inode_dirty_sync(inode);
++	}
++	return __orangefs_set_acl(inode, acl, type);
++}
++
+ int orangefs_init_acl(struct inode *inode, struct inode *dir)
+ {
+ 	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+@@ -146,13 +153,14 @@ int orangefs_init_acl(struct inode *inode, struct inode *dir)
+ 		return error;
+ 
+ 	if (default_acl) {
+-		error = orangefs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
++		error = __orangefs_set_acl(inode, default_acl,
++					   ACL_TYPE_DEFAULT);
+ 		posix_acl_release(default_acl);
+ 	}
+ 
+ 	if (acl) {
+ 		if (!error)
+-			error = orangefs_set_acl(inode, acl, ACL_TYPE_ACCESS);
++			error = __orangefs_set_acl(inode, acl, ACL_TYPE_ACCESS);
+ 		posix_acl_release(acl);
+ 	}
+ 
+diff --git a/include/linux/ccp.h b/include/linux/ccp.h
+index 3285c944194a..ab693c3afd0d 100644
+--- a/include/linux/ccp.h
++++ b/include/linux/ccp.h
+@@ -1,7 +1,7 @@
+ /*
+  * AMD Cryptographic Coprocessor (CCP) driver
+  *
+- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
++ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
+  *
+  * Author: Tom Lendacky <thomas.lendacky@amd.com>
+  * Author: Gary R Hook <gary.hook@amd.com>
+@@ -231,6 +231,7 @@ enum ccp_xts_aes_unit_size {
+  * AES operation the new IV overwrites the old IV.
+  */
+ struct ccp_xts_aes_engine {
++	enum ccp_aes_type type;
+ 	enum ccp_aes_action action;
+ 	enum ccp_xts_aes_unit_size unit_size;
+ 
+diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
+index 898cfe2eeb42..bc46e729fdde 100644
+--- a/include/linux/cpuset.h
++++ b/include/linux/cpuset.h
+@@ -57,7 +57,9 @@ static inline void cpuset_dec(void)
+ 
+ extern int cpuset_init(void);
+ extern void cpuset_init_smp(void);
++extern void cpuset_force_rebuild(void);
+ extern void cpuset_update_active_cpus(void);
++extern void cpuset_wait_for_hotplug(void);
+ extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
+ extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
+ extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
+@@ -170,11 +172,15 @@ static inline bool cpusets_enabled(void) { return false; }
+ static inline int cpuset_init(void) { return 0; }
+ static inline void cpuset_init_smp(void) {}
+ 
++static inline void cpuset_force_rebuild(void) { }
++
+ static inline void cpuset_update_active_cpus(void)
+ {
+ 	partition_sched_domains(1, NULL, NULL);
+ }
+ 
++static inline void cpuset_wait_for_hotplug(void) { }
++
+ static inline void cpuset_cpus_allowed(struct task_struct *p,
+ 				       struct cpumask *mask)
+ {
+diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
+index 6383115e9d2c..2e028854bac7 100644
+--- a/include/linux/ftrace.h
++++ b/include/linux/ftrace.h
+@@ -307,7 +307,7 @@ DECLARE_PER_CPU(int, disable_stack_tracer);
+ static inline void stack_tracer_disable(void)
+ {
+ 	/* Preemption or interrupts must be disabled */
+-	if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
++	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
+ 		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
+ 	this_cpu_inc(disable_stack_tracer);
+ }
+@@ -320,7 +320,7 @@ static inline void stack_tracer_disable(void)
+  */
+ static inline void stack_tracer_enable(void)
+ {
+-	if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
++	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
+ 		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
+ 	this_cpu_dec(disable_stack_tracer);
+ }
+diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h
+index c28dd523f96e..d43837f2ce3a 100644
+--- a/include/linux/tty_flip.h
++++ b/include/linux/tty_flip.h
+@@ -12,6 +12,7 @@ extern int tty_prepare_flip_string(struct tty_port *port,
+ 		unsigned char **chars, size_t size);
+ extern void tty_flip_buffer_push(struct tty_port *port);
+ void tty_schedule_flip(struct tty_port *port);
++int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag);
+ 
+ static inline int tty_insert_flip_char(struct tty_port *port,
+ 					unsigned char ch, char flag)
+@@ -26,7 +27,7 @@ static inline int tty_insert_flip_char(struct tty_port *port,
+ 		*char_buf_ptr(tb, tb->used++) = ch;
+ 		return 1;
+ 	}
+-	return tty_insert_flip_string_flags(port, &ch, &flag, 1);
++	return __tty_insert_flip_char(port, ch, flag);
+ }
+ 
+ static inline int tty_insert_flip_string(struct tty_port *port,
+diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
+index acdd6f915a8d..20ef8e6ec2db 100644
+--- a/include/linux/uaccess.h
++++ b/include/linux/uaccess.h
+@@ -156,7 +156,7 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
+ }
+ #ifdef CONFIG_COMPAT
+ static __always_inline unsigned long __must_check
+-copy_in_user(void __user *to, const void *from, unsigned long n)
++copy_in_user(void __user *to, const void __user *from, unsigned long n)
+ {
+ 	might_fault();
+ 	if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index 87a1213dd326..e8cb34193433 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -2260,6 +2260,13 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs)
+ 	mutex_unlock(&cpuset_mutex);
+ }
+ 
++static bool force_rebuild;
++
++void cpuset_force_rebuild(void)
++{
++	force_rebuild = true;
++}
++
+ /**
+  * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
+  *
+@@ -2334,8 +2341,10 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
+ 	}
+ 
+ 	/* rebuild sched domains if cpus_allowed has changed */
+-	if (cpus_updated)
++	if (cpus_updated || force_rebuild) {
++		force_rebuild = false;
+ 		rebuild_sched_domains();
++	}
+ }
+ 
+ void cpuset_update_active_cpus(void)
+@@ -2354,6 +2363,11 @@ void cpuset_update_active_cpus(void)
+ 	schedule_work(&cpuset_hotplug_work);
+ }
+ 
++void cpuset_wait_for_hotplug(void)
++{
++	flush_work(&cpuset_hotplug_work);
++}
++
+ /*
+  * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
+  * Call this routine anytime after node_states[N_MEMORY] changes.
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index eee033134262..a88c29ab09be 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -1252,7 +1252,17 @@ static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
+ 	struct cpuhp_step *sp;
+ 	int ret = 0;
+ 
+-	if (state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN) {
++	/*
++	 * If name is NULL, then the state gets removed.
++	 *
++	 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
++	 * the first allocation from these dynamic ranges, so the removal
++	 * would trigger a new allocation and clear the wrong (already
++	 * empty) state, leaving the callbacks of the to be cleared state
++	 * dangling, which causes wreckage on the next hotplug operation.
++	 */
++	if (name && (state == CPUHP_AP_ONLINE_DYN ||
++		     state == CPUHP_BP_PREPARE_DYN)) {
+ 		ret = cpuhp_reserve_state(state);
+ 		if (ret < 0)
+ 			return ret;
+diff --git a/kernel/power/process.c b/kernel/power/process.c
+index 78672d324a6e..50f25cb370c6 100644
+--- a/kernel/power/process.c
++++ b/kernel/power/process.c
+@@ -20,8 +20,9 @@
+ #include <linux/workqueue.h>
+ #include <linux/kmod.h>
+ #include <trace/events/power.h>
++#include <linux/cpuset.h>
+ 
+-/* 
++/*
+  * Timeout for stopping processes
+  */
+ unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;
+@@ -202,6 +203,8 @@ void thaw_processes(void)
+ 	__usermodehelper_set_disable_depth(UMH_FREEZING);
+ 	thaw_workqueues();
+ 
++	cpuset_wait_for_hotplug();
++
+ 	read_lock(&tasklist_lock);
+ 	for_each_process_thread(g, p) {
+ 		/* No other threads should have PF_SUSPEND_TASK set */
+diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
+index d0ca524bf042..258a9abee0b0 100644
+--- a/kernel/rcu/srcutree.c
++++ b/kernel/rcu/srcutree.c
+@@ -896,6 +896,15 @@ static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
+ 	__call_srcu(sp, &rcu.head, wakeme_after_rcu, do_norm);
+ 	wait_for_completion(&rcu.completion);
+ 	destroy_rcu_head_on_stack(&rcu.head);
++
++	/*
++	 * Make sure that later code is ordered after the SRCU grace
++	 * period.  This pairs with the raw_spin_lock_irq_rcu_node()
++	 * in srcu_invoke_callbacks().  Unlike Tree RCU, this is needed
++	 * because the current CPU might have been totally uninvolved with
++	 * (and thus unordered against) that grace period.
++	 */
++	smp_mb();
+ }
+ 
+ /**
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 0869b20fba81..99326c370c9c 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -5538,16 +5538,15 @@ static void cpuset_cpu_active(void)
+ 		 * operation in the resume sequence, just build a single sched
+ 		 * domain, ignoring cpusets.
+ 		 */
+-		num_cpus_frozen--;
+-		if (likely(num_cpus_frozen)) {
+-			partition_sched_domains(1, NULL, NULL);
++		partition_sched_domains(1, NULL, NULL);
++		if (--num_cpus_frozen)
+ 			return;
+-		}
+ 		/*
+ 		 * This is the last CPU online operation. So fall through and
+ 		 * restore the original sched domains by considering the
+ 		 * cpuset configurations.
+ 		 */
++		cpuset_force_rebuild();
+ 	}
+ 	cpuset_update_active_cpus();
+ }
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 96cea88fa00f..725819569fa7 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -2828,13 +2828,14 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
+ 
+ 	if (!command || !ftrace_enabled) {
+ 		/*
+-		 * If these are per_cpu ops, they still need their
+-		 * per_cpu field freed. Since, function tracing is
++		 * If these are dynamic or per_cpu ops, they still
++		 * need their data freed. Since function tracing is
+ 		 * not currently active, we can just free them
+ 		 * without synchronizing all CPUs.
+ 		 */
+-		if (ops->flags & FTRACE_OPS_FL_PER_CPU)
+-			per_cpu_ops_free(ops);
++		if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU))
++			goto free_ops;
++
+ 		return 0;
+ 	}
+ 
+@@ -2900,6 +2901,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
+ 		if (IS_ENABLED(CONFIG_PREEMPT))
+ 			synchronize_rcu_tasks();
+ 
++ free_ops:
+ 		arch_ftrace_trampoline_free(ops);
+ 
+ 		if (ops->flags & FTRACE_OPS_FL_PER_CPU)
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 44004d8aa3b3..5efb4b63174e 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2799,11 +2799,17 @@ static char *get_trace_buf(void)
+ 	if (!buffer || buffer->nesting >= 4)
+ 		return NULL;
+ 
+-	return &buffer->buffer[buffer->nesting++][0];
++	buffer->nesting++;
++
++	/* Interrupts must see nesting incremented before we use the buffer */
++	barrier();
++	return &buffer->buffer[buffer->nesting][0];
+ }
+ 
+ static void put_trace_buf(void)
+ {
++	/* Don't let the decrement of nesting leak before this */
++	barrier();
+ 	this_cpu_dec(trace_percpu_buffer->nesting);
+ }
+ 
+@@ -6220,7 +6226,7 @@ static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
+ 	tracing_reset_online_cpus(&tr->trace_buffer);
+ 
+ #ifdef CONFIG_TRACER_MAX_TRACE
+-	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
++	if (tr->max_buffer.buffer)
+ 		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
+ 	tracing_reset_online_cpus(&tr->max_buffer);
+ #endif
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 36132f9280e6..51a6e09a7410 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -406,7 +406,7 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file,
+ 
+ 			if (file->flags & EVENT_FILE_FL_RECORDED_TGID) {
+ 				tracing_stop_tgid_record();
+-				clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
++				clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
+ 			}
+ 
+ 			call->class->reg(call, TRACE_REG_UNREGISTER, file);
+diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
+index cb917cebae29..b17ec642793b 100644
+--- a/kernel/trace/trace_selftest.c
++++ b/kernel/trace/trace_selftest.c
+@@ -273,7 +273,7 @@ static int trace_selftest_ops(struct trace_array *tr, int cnt)
+ 		goto out_free;
+ 	if (cnt > 1) {
+ 		if (trace_selftest_test_global_cnt == 0)
+-			goto out;
++			goto out_free;
+ 	}
+ 	if (trace_selftest_test_dyn_cnt == 0)
+ 		goto out_free;
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index 9979f46c81dc..51390febd5e3 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -96,19 +96,26 @@ static struct conntrack_gc_work conntrack_gc_work;
+ 
+ void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
+ {
++	/* 1) Acquire the lock */
+ 	spin_lock(lock);
+-	while (unlikely(nf_conntrack_locks_all)) {
+-		spin_unlock(lock);
+ 
+-		/*
+-		 * Order the 'nf_conntrack_locks_all' load vs. the
+-		 * spin_unlock_wait() loads below, to ensure
+-		 * that 'nf_conntrack_locks_all_lock' is indeed held:
+-		 */
+-		smp_rmb(); /* spin_lock(&nf_conntrack_locks_all_lock) */
+-		spin_unlock_wait(&nf_conntrack_locks_all_lock);
+-		spin_lock(lock);
+-	}
++	/* 2) read nf_conntrack_locks_all, with ACQUIRE semantics
++	 * It pairs with the smp_store_release() in nf_conntrack_all_unlock()
++	 */
++	if (likely(smp_load_acquire(&nf_conntrack_locks_all) == false))
++		return;
++
++	/* fast path failed, unlock */
++	spin_unlock(lock);
++
++	/* Slow path 1) get global lock */
++	spin_lock(&nf_conntrack_locks_all_lock);
++
++	/* Slow path 2) get the lock we want */
++	spin_lock(lock);
++
++	/* Slow path 3) release the global lock */
++	spin_unlock(&nf_conntrack_locks_all_lock);
+ }
+ EXPORT_SYMBOL_GPL(nf_conntrack_lock);
+ 
+@@ -149,28 +156,27 @@ static void nf_conntrack_all_lock(void)
+ 	int i;
+ 
+ 	spin_lock(&nf_conntrack_locks_all_lock);
+-	nf_conntrack_locks_all = true;
+ 
+-	/*
+-	 * Order the above store of 'nf_conntrack_locks_all' against
+-	 * the spin_unlock_wait() loads below, such that if
+-	 * nf_conntrack_lock() observes 'nf_conntrack_locks_all'
+-	 * we must observe nf_conntrack_locks[] held:
+-	 */
+-	smp_mb(); /* spin_lock(&nf_conntrack_locks_all_lock) */
++	nf_conntrack_locks_all = true;
+ 
+ 	for (i = 0; i < CONNTRACK_LOCKS; i++) {
+-		spin_unlock_wait(&nf_conntrack_locks[i]);
++		spin_lock(&nf_conntrack_locks[i]);
++
++		/* This spin_unlock provides the "release" to ensure that
++		 * nf_conntrack_locks_all==true is visible to everyone that
++		 * acquired spin_lock(&nf_conntrack_locks[]).
++		 */
++		spin_unlock(&nf_conntrack_locks[i]);
+ 	}
+ }
+ 
+ static void nf_conntrack_all_unlock(void)
+ {
+-	/*
+-	 * All prior stores must be complete before we clear
++	/* All prior stores must be complete before we clear
+ 	 * 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()
+ 	 * might observe the false value but not the entire
+-	 * critical section:
++	 * critical section.
++	 * It pairs with the smp_load_acquire() in nf_conntrack_lock()
+ 	 */
+ 	smp_store_release(&nf_conntrack_locks_all, false);
+ 	spin_unlock(&nf_conntrack_locks_all_lock);
+diff --git a/sound/core/seq_device.c b/sound/core/seq_device.c
+index c4acf17e9f5e..e40a2cba5002 100644
+--- a/sound/core/seq_device.c
++++ b/sound/core/seq_device.c
+@@ -148,8 +148,10 @@ void snd_seq_device_load_drivers(void)
+ 	flush_work(&autoload_work);
+ }
+ EXPORT_SYMBOL(snd_seq_device_load_drivers);
++#define cancel_autoload_drivers()	cancel_work_sync(&autoload_work)
+ #else
+ #define queue_autoload_drivers() /* NOP */
++#define cancel_autoload_drivers() /* NOP */
+ #endif
+ 
+ /*
+@@ -159,6 +161,7 @@ static int snd_seq_device_dev_free(struct snd_device *device)
+ {
+ 	struct snd_seq_device *dev = device->device_data;
+ 
++	cancel_autoload_drivers();
+ 	put_device(&dev->dev);
+ 	return 0;
+ }


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.13 commit in: /
@ 2017-09-20 10:09 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2017-09-20 10:09 UTC (permalink / raw
  To: gentoo-commits

commit:     4a98cf83c330d33c7109041b84409437c9142fb6
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 20 10:09:02 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Sep 20 10:09:02 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4a98cf83

Linux patch 4.13.3

 0000_README             |    4 +
 1002_linux-4.13.3.patch | 2829 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2833 insertions(+)

diff --git a/0000_README b/0000_README
index cd98c21..70f03ff 100644
--- a/0000_README
+++ b/0000_README
@@ -51,6 +51,10 @@ Patch:  1001_linux-4.13.2.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.13.2
 
+Patch:  1002_linux-4.13.3.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.13.3
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1002_linux-4.13.3.patch b/1002_linux-4.13.3.patch
new file mode 100644
index 0000000..fe7ad47
--- /dev/null
+++ b/1002_linux-4.13.3.patch
@@ -0,0 +1,2829 @@
+diff --git a/Documentation/ABI/testing/sysfs-bus-thunderbolt b/Documentation/ABI/testing/sysfs-bus-thunderbolt
+index 2a98149943ea..392bef5bd399 100644
+--- a/Documentation/ABI/testing/sysfs-bus-thunderbolt
++++ b/Documentation/ABI/testing/sysfs-bus-thunderbolt
+@@ -45,6 +45,8 @@ Contact:	thunderbolt-software@lists.01.org
+ Description:	When a devices supports Thunderbolt secure connect it will
+ 		have this attribute. Writing 32 byte hex string changes
+ 		authorization to use the secure connection method instead.
++		Writing an empty string clears the key and regular connection
++		method can be used again.
+ 
+ What:		/sys/bus/thunderbolt/devices/.../device
+ Date:		Sep 2017
+diff --git a/Makefile b/Makefile
+index 8aad6bc50d52..0f31ef4aea7b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 13
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
+index 9aeb91935ce0..e2c4dd051ef8 100644
+--- a/arch/x86/include/asm/elf.h
++++ b/arch/x86/include/asm/elf.h
+@@ -204,6 +204,7 @@ void set_personality_ia32(bool);
+ 
+ #define ELF_CORE_COPY_REGS(pr_reg, regs)			\
+ do {								\
++	unsigned long base;					\
+ 	unsigned v;						\
+ 	(pr_reg)[0] = (regs)->r15;				\
+ 	(pr_reg)[1] = (regs)->r14;				\
+@@ -226,8 +227,8 @@ do {								\
+ 	(pr_reg)[18] = (regs)->flags;				\
+ 	(pr_reg)[19] = (regs)->sp;				\
+ 	(pr_reg)[20] = (regs)->ss;				\
+-	(pr_reg)[21] = current->thread.fsbase;			\
+-	(pr_reg)[22] = current->thread.gsbase;			\
++	rdmsrl(MSR_FS_BASE, base); (pr_reg)[21] = base;		\
++	rdmsrl(MSR_KERNEL_GS_BASE, base); (pr_reg)[22] = base;	\
+ 	asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v;	\
+ 	asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v;	\
+ 	asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v;	\
+diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
+index b4a0d43248cf..b50df06ad251 100644
+--- a/arch/x86/include/asm/page_64.h
++++ b/arch/x86/include/asm/page_64.h
+@@ -51,6 +51,10 @@ static inline void clear_page(void *page)
+ 
+ void copy_page(void *to, void *from);
+ 
++#ifdef CONFIG_X86_MCE
++#define arch_unmap_kpfn arch_unmap_kpfn
++#endif
++
+ #endif	/* !__ASSEMBLY__ */
+ 
+ #ifdef CONFIG_X86_VSYSCALL_EMULATION
+diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
+index 6dde0497efc7..3b413065c613 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce.c
++++ b/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -51,6 +51,7 @@
+ #include <asm/mce.h>
+ #include <asm/msr.h>
+ #include <asm/reboot.h>
++#include <asm/set_memory.h>
+ 
+ #include "mce-internal.h"
+ 
+@@ -1051,6 +1052,48 @@ static int do_memory_failure(struct mce *m)
+ 	return ret;
+ }
+ 
++#if defined(arch_unmap_kpfn) && defined(CONFIG_MEMORY_FAILURE)
++
++void arch_unmap_kpfn(unsigned long pfn)
++{
++	unsigned long decoy_addr;
++
++	/*
++	 * Unmap this page from the kernel 1:1 mappings to make sure
++	 * we don't log more errors because of speculative access to
++	 * the page.
++	 * We would like to just call:
++	 *	set_memory_np((unsigned long)pfn_to_kaddr(pfn), 1);
++	 * but doing that would radically increase the odds of a
++	 * speculative access to the poison page because we'd have
++	 * the virtual address of the kernel 1:1 mapping sitting
++	 * around in registers.
++	 * Instead we get tricky.  We create a non-canonical address
++	 * that looks just like the one we want, but has bit 63 flipped.
++	 * This relies on set_memory_np() not checking whether we passed
++	 * a legal address.
++	 */
++
++/*
++ * Build time check to see if we have a spare virtual bit. Don't want
++ * to leave this until run time because most developers don't have a
++ * system that can exercise this code path. This will only become a
++ * problem if/when we move beyond 5-level page tables.
++ *
++ * Hard code "9" here because cpp doesn't grok ilog2(PTRS_PER_PGD)
++ */
++#if PGDIR_SHIFT + 9 < 63
++	decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));
++#else
++#error "no unused virtual bit available"
++#endif
++
++	if (set_memory_np(decoy_addr, 1))
++		pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
++
++}
++#endif
++
+ /*
+  * The actual machine check handler. This only handles real
+  * exceptions when something got corrupted coming in through int 18.
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index c3169be4c596..8c44e0cb2912 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -149,6 +149,123 @@ void release_thread(struct task_struct *dead_task)
+ 	}
+ }
+ 
++enum which_selector {
++	FS,
++	GS
++};
++
++/*
++ * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are
++ * not available.  The goal is to be reasonably fast on non-FSGSBASE systems.
++ * It's forcibly inlined because it'll generate better code and this function
++ * is hot.
++ */
++static __always_inline void save_base_legacy(struct task_struct *prev_p,
++					     unsigned short selector,
++					     enum which_selector which)
++{
++	if (likely(selector == 0)) {
++		/*
++		 * On Intel (without X86_BUG_NULL_SEG), the segment base could
++		 * be the pre-existing saved base or it could be zero.  On AMD
++		 * (with X86_BUG_NULL_SEG), the segment base could be almost
++		 * anything.
++		 *
++		 * This branch is very hot (it's hit twice on almost every
++		 * context switch between 64-bit programs), and avoiding
++		 * the RDMSR helps a lot, so we just assume that whatever
++		 * value is already saved is correct.  This matches historical
++		 * Linux behavior, so it won't break existing applications.
++		 *
++		 * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we
++		 * report that the base is zero, it needs to actually be zero:
++		 * see the corresponding logic in load_seg_legacy.
++		 */
++	} else {
++		/*
++		 * If the selector is 1, 2, or 3, then the base is zero on
++		 * !X86_BUG_NULL_SEG CPUs and could be anything on
++		 * X86_BUG_NULL_SEG CPUs.  In the latter case, Linux
++		 * has never attempted to preserve the base across context
++		 * switches.
++		 *
++		 * If selector > 3, then it refers to a real segment, and
++		 * saving the base isn't necessary.
++		 */
++		if (which == FS)
++			prev_p->thread.fsbase = 0;
++		else
++			prev_p->thread.gsbase = 0;
++	}
++}
++
++static __always_inline void save_fsgs(struct task_struct *task)
++{
++	savesegment(fs, task->thread.fsindex);
++	savesegment(gs, task->thread.gsindex);
++	save_base_legacy(task, task->thread.fsindex, FS);
++	save_base_legacy(task, task->thread.gsindex, GS);
++}
++
++static __always_inline void loadseg(enum which_selector which,
++				    unsigned short sel)
++{
++	if (which == FS)
++		loadsegment(fs, sel);
++	else
++		load_gs_index(sel);
++}
++
++static __always_inline void load_seg_legacy(unsigned short prev_index,
++					    unsigned long prev_base,
++					    unsigned short next_index,
++					    unsigned long next_base,
++					    enum which_selector which)
++{
++	if (likely(next_index <= 3)) {
++		/*
++		 * The next task is using 64-bit TLS, is not using this
++		 * segment at all, or is having fun with arcane CPU features.
++		 */
++		if (next_base == 0) {
++			/*
++			 * Nasty case: on AMD CPUs, we need to forcibly zero
++			 * the base.
++			 */
++			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
++				loadseg(which, __USER_DS);
++				loadseg(which, next_index);
++			} else {
++				/*
++				 * We could try to exhaustively detect cases
++				 * under which we can skip the segment load,
++				 * but there's really only one case that matters
++				 * for performance: if both the previous and
++				 * next states are fully zeroed, we can skip
++				 * the load.
++				 *
++				 * (This assumes that prev_base == 0 has no
++				 * false positives.  This is the case on
++				 * Intel-style CPUs.)
++				 */
++				if (likely(prev_index | next_index | prev_base))
++					loadseg(which, next_index);
++			}
++		} else {
++			if (prev_index != next_index)
++				loadseg(which, next_index);
++			wrmsrl(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE,
++			       next_base);
++		}
++	} else {
++		/*
++		 * The next task is using a real segment.  Loading the selector
++		 * is sufficient.
++		 */
++		loadseg(which, next_index);
++	}
++}
++
+ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
+ 		unsigned long arg, struct task_struct *p, unsigned long tls)
+ {
+@@ -229,10 +346,19 @@ start_thread_common(struct pt_regs *regs, unsigned long new_ip,
+ 		    unsigned long new_sp,
+ 		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
+ {
++	WARN_ON_ONCE(regs != current_pt_regs());
++
++	if (static_cpu_has(X86_BUG_NULL_SEG)) {
++		/* Loading zero below won't clear the base. */
++		loadsegment(fs, __USER_DS);
++		load_gs_index(__USER_DS);
++	}
++
+ 	loadsegment(fs, 0);
+ 	loadsegment(es, _ds);
+ 	loadsegment(ds, _ds);
+ 	load_gs_index(0);
++
+ 	regs->ip		= new_ip;
+ 	regs->sp		= new_sp;
+ 	regs->cs		= _cs;
+@@ -277,7 +403,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ 	struct fpu *next_fpu = &next->fpu;
+ 	int cpu = smp_processor_id();
+ 	struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
+-	unsigned prev_fsindex, prev_gsindex;
+ 
+ 	switch_fpu_prepare(prev_fpu, cpu);
+ 
+@@ -286,8 +411,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ 	 *
+ 	 * (e.g. xen_load_tls())
+ 	 */
+-	savesegment(fs, prev_fsindex);
+-	savesegment(gs, prev_gsindex);
++	save_fsgs(prev_p);
+ 
+ 	/*
+ 	 * Load TLS before restoring any segments so that segment loads
+@@ -326,108 +450,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ 	if (unlikely(next->ds | prev->ds))
+ 		loadsegment(ds, next->ds);
+ 
+-	/*
+-	 * Switch FS and GS.
+-	 *
+-	 * These are even more complicated than DS and ES: they have
+-	 * 64-bit bases are that controlled by arch_prctl.  The bases
+-	 * don't necessarily match the selectors, as user code can do
+-	 * any number of things to cause them to be inconsistent.
+-	 *
+-	 * We don't promise to preserve the bases if the selectors are
+-	 * nonzero.  We also don't promise to preserve the base if the
+-	 * selector is zero and the base doesn't match whatever was
+-	 * most recently passed to ARCH_SET_FS/GS.  (If/when the
+-	 * FSGSBASE instructions are enabled, we'll need to offer
+-	 * stronger guarantees.)
+-	 *
+-	 * As an invariant,
+-	 * (fsbase != 0 && fsindex != 0) || (gsbase != 0 && gsindex != 0) is
+-	 * impossible.
+-	 */
+-	if (next->fsindex) {
+-		/* Loading a nonzero value into FS sets the index and base. */
+-		loadsegment(fs, next->fsindex);
+-	} else {
+-		if (next->fsbase) {
+-			/* Next index is zero but next base is nonzero. */
+-			if (prev_fsindex)
+-				loadsegment(fs, 0);
+-			wrmsrl(MSR_FS_BASE, next->fsbase);
+-		} else {
+-			/* Next base and index are both zero. */
+-			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
+-				/*
+-				 * We don't know the previous base and can't
+-				 * find out without RDMSR.  Forcibly clear it.
+-				 */
+-				loadsegment(fs, __USER_DS);
+-				loadsegment(fs, 0);
+-			} else {
+-				/*
+-				 * If the previous index is zero and ARCH_SET_FS
+-				 * didn't change the base, then the base is
+-				 * also zero and we don't need to do anything.
+-				 */
+-				if (prev->fsbase || prev_fsindex)
+-					loadsegment(fs, 0);
+-			}
+-		}
+-	}
+-	/*
+-	 * Save the old state and preserve the invariant.
+-	 * NB: if prev_fsindex == 0, then we can't reliably learn the base
+-	 * without RDMSR because Intel user code can zero it without telling
+-	 * us and AMD user code can program any 32-bit value without telling
+-	 * us.
+-	 */
+-	if (prev_fsindex)
+-		prev->fsbase = 0;
+-	prev->fsindex = prev_fsindex;
+-
+-	if (next->gsindex) {
+-		/* Loading a nonzero value into GS sets the index and base. */
+-		load_gs_index(next->gsindex);
+-	} else {
+-		if (next->gsbase) {
+-			/* Next index is zero but next base is nonzero. */
+-			if (prev_gsindex)
+-				load_gs_index(0);
+-			wrmsrl(MSR_KERNEL_GS_BASE, next->gsbase);
+-		} else {
+-			/* Next base and index are both zero. */
+-			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
+-				/*
+-				 * We don't know the previous base and can't
+-				 * find out without RDMSR.  Forcibly clear it.
+-				 *
+-				 * This contains a pointless SWAPGS pair.
+-				 * Fixing it would involve an explicit check
+-				 * for Xen or a new pvop.
+-				 */
+-				load_gs_index(__USER_DS);
+-				load_gs_index(0);
+-			} else {
+-				/*
+-				 * If the previous index is zero and ARCH_SET_GS
+-				 * didn't change the base, then the base is
+-				 * also zero and we don't need to do anything.
+-				 */
+-				if (prev->gsbase || prev_gsindex)
+-					load_gs_index(0);
+-			}
+-		}
+-	}
+-	/*
+-	 * Save the old state and preserve the invariant.
+-	 * NB: if prev_gsindex == 0, then we can't reliably learn the base
+-	 * without RDMSR because Intel user code can zero it without telling
+-	 * us and AMD user code can program any 32-bit value without telling
+-	 * us.
+-	 */
+-	if (prev_gsindex)
+-		prev->gsbase = 0;
+-	prev->gsindex = prev_gsindex;
++	load_seg_legacy(prev->fsindex, prev->fsbase,
++			next->fsindex, next->fsbase, FS);
++	load_seg_legacy(prev->gsindex, prev->gsbase,
++			next->gsindex, next->gsbase, GS);
+ 
+ 	switch_fpu_finish(next_fpu, cpu);
+ 
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index f50958ded9f0..79474f47eeef 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -2564,6 +2564,23 @@ static int init_resync(struct r1conf *conf)
+ 	return 0;
+ }
+ 
++static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
++{
++	struct r1bio *r1bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
++	struct resync_pages *rps;
++	struct bio *bio;
++	int i;
++
++	for (i = conf->poolinfo->raid_disks; i--; ) {
++		bio = r1bio->bios[i];
++		rps = bio->bi_private;
++		bio_reset(bio);
++		bio->bi_private = rps;
++	}
++	r1bio->master_bio = NULL;
++	return r1bio;
++}
++
+ /*
+  * perform a "sync" on one "block"
+  *
+@@ -2649,7 +2666,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
+ 
+ 	bitmap_cond_end_sync(mddev->bitmap, sector_nr,
+ 		mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
+-	r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
++	r1_bio = raid1_alloc_init_r1buf(conf);
+ 
+ 	raise_barrier(conf, sector_nr);
+ 
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index f55d4cc085f6..d51ac02e98ef 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -2798,6 +2798,35 @@ static int init_resync(struct r10conf *conf)
+ 	return 0;
+ }
+ 
++static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf)
++{
++	struct r10bio *r10bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
++	struct rsync_pages *rp;
++	struct bio *bio;
++	int nalloc;
++	int i;
++
++	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
++	    test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
++		nalloc = conf->copies; /* resync */
++	else
++		nalloc = 2; /* recovery */
++
++	for (i = 0; i < nalloc; i++) {
++		bio = r10bio->devs[i].bio;
++		rp = bio->bi_private;
++		bio_reset(bio);
++		bio->bi_private = rp;
++		bio = r10bio->devs[i].repl_bio;
++		if (bio) {
++			rp = bio->bi_private;
++			bio_reset(bio);
++			bio->bi_private = rp;
++		}
++	}
++	return r10bio;
++}
++
+ /*
+  * perform a "sync" on one "block"
+  *
+@@ -3027,7 +3056,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+ 				atomic_inc(&mreplace->nr_pending);
+ 			rcu_read_unlock();
+ 
+-			r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
++			r10_bio = raid10_alloc_init_r10buf(conf);
+ 			r10_bio->state = 0;
+ 			raise_barrier(conf, rb2 != NULL);
+ 			atomic_set(&r10_bio->remaining, 0);
+@@ -3236,7 +3265,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+ 		}
+ 		if (sync_blocks < max_sync)
+ 			max_sync = sync_blocks;
+-		r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
++		r10_bio = raid10_alloc_init_r10buf(conf);
+ 		r10_bio->state = 0;
+ 
+ 		r10_bio->mddev = mddev;
+@@ -4360,7 +4389,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
+ 
+ read_more:
+ 	/* Now schedule reads for blocks from sector_nr to last */
+-	r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
++	r10_bio = raid10_alloc_init_r10buf(conf);
+ 	r10_bio->state = 0;
+ 	raise_barrier(conf, sectors_done != 0);
+ 	atomic_set(&r10_bio->remaining, 0);
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 0fc2748aaf95..e13a8ce7f589 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -6235,6 +6235,10 @@ static void raid5_do_work(struct work_struct *work)
+ 
+ 	spin_unlock_irq(&conf->device_lock);
+ 
++	flush_deferred_bios(conf);
++
++	r5l_flush_stripe_to_raid(conf->log);
++
+ 	async_tx_issue_pending_all();
+ 	blk_finish_plug(&plug);
+ 
+diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
+index c4b4b0a1bbf0..5be52d89b182 100644
+--- a/drivers/net/ethernet/freescale/gianfar.c
++++ b/drivers/net/ethernet/freescale/gianfar.c
+@@ -3687,7 +3687,7 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
+ 		u32 tempval1 = gfar_read(&regs->maccfg1);
+ 		u32 tempval = gfar_read(&regs->maccfg2);
+ 		u32 ecntrl = gfar_read(&regs->ecntrl);
+-		u32 tx_flow_oldval = (tempval & MACCFG1_TX_FLOW);
++		u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
+ 
+ 		if (phydev->duplex != priv->oldduplex) {
+ 			if (!(phydev->duplex))
+diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
+index 14323faf8bd9..7ec6393b6ba1 100644
+--- a/drivers/nvdimm/btt.c
++++ b/drivers/nvdimm/btt.c
+@@ -1429,6 +1429,8 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
+ 	}
+ 
+ 	btt_sb = devm_kzalloc(&nd_btt->dev, sizeof(*btt_sb), GFP_KERNEL);
++	if (!btt_sb)
++		return -ENOMEM;
+ 
+ 	/*
+ 	 * If this returns < 0, that is ok as it just means there wasn't
+diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
+index 937fafa1886a..54eb14c7ef90 100644
+--- a/drivers/nvdimm/bus.c
++++ b/drivers/nvdimm/bus.c
+@@ -905,19 +905,20 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
+ 		int read_only, unsigned int ioctl_cmd, unsigned long arg)
+ {
+ 	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
+-	size_t buf_len = 0, in_len = 0, out_len = 0;
+ 	static char out_env[ND_CMD_MAX_ENVELOPE];
+ 	static char in_env[ND_CMD_MAX_ENVELOPE];
+ 	const struct nd_cmd_desc *desc = NULL;
+ 	unsigned int cmd = _IOC_NR(ioctl_cmd);
+-	unsigned int func = cmd;
+-	void __user *p = (void __user *) arg;
+ 	struct device *dev = &nvdimm_bus->dev;
+-	struct nd_cmd_pkg pkg;
++	void __user *p = (void __user *) arg;
+ 	const char *cmd_name, *dimm_name;
++	u32 in_len = 0, out_len = 0;
++	unsigned int func = cmd;
+ 	unsigned long cmd_mask;
+-	void *buf;
++	struct nd_cmd_pkg pkg;
+ 	int rc, i, cmd_rc;
++	u64 buf_len = 0;
++	void *buf;
+ 
+ 	if (nvdimm) {
+ 		desc = nd_cmd_dimm_desc(cmd);
+@@ -977,7 +978,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
+ 
+ 	if (cmd == ND_CMD_CALL) {
+ 		func = pkg.nd_command;
+-		dev_dbg(dev, "%s:%s, idx: %llu, in: %zu, out: %zu, len %zu\n",
++		dev_dbg(dev, "%s:%s, idx: %llu, in: %u, out: %u, len %llu\n",
+ 				__func__, dimm_name, pkg.nd_command,
+ 				in_len, out_len, buf_len);
+ 
+@@ -1007,9 +1008,9 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
+ 		out_len += out_size;
+ 	}
+ 
+-	buf_len = out_len + in_len;
++	buf_len = (u64) out_len + (u64) in_len;
+ 	if (buf_len > ND_IOCTL_MAX_BUFLEN) {
+-		dev_dbg(dev, "%s:%s cmd: %s buf_len: %zu > %d\n", __func__,
++		dev_dbg(dev, "%s:%s cmd: %s buf_len: %llu > %d\n", __func__,
+ 				dimm_name, cmd_name, buf_len,
+ 				ND_IOCTL_MAX_BUFLEN);
+ 		return -EINVAL;
+diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
+index e9391bbd4036..53f40c57df59 100644
+--- a/drivers/thunderbolt/switch.c
++++ b/drivers/thunderbolt/switch.c
+@@ -807,11 +807,11 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr,
+ 	struct tb_switch *sw = tb_to_switch(dev);
+ 	u8 key[TB_SWITCH_KEY_SIZE];
+ 	ssize_t ret = count;
++	bool clear = false;
+ 
+-	if (count < 64)
+-		return -EINVAL;
+-
+-	if (hex2bin(key, buf, sizeof(key)))
++	if (!strcmp(buf, "\n"))
++		clear = true;
++	else if (hex2bin(key, buf, sizeof(key)))
+ 		return -EINVAL;
+ 
+ 	if (mutex_lock_interruptible(&switch_lock))
+@@ -821,15 +821,19 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr,
+ 		ret = -EBUSY;
+ 	} else {
+ 		kfree(sw->key);
+-		sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
+-		if (!sw->key)
+-			ret = -ENOMEM;
++		if (clear) {
++			sw->key = NULL;
++		} else {
++			sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
++			if (!sw->key)
++				ret = -ENOMEM;
++		}
+ 	}
+ 
+ 	mutex_unlock(&switch_lock);
+ 	return ret;
+ }
+-static DEVICE_ATTR_RW(key);
++static DEVICE_ATTR(key, 0600, key_show, key_store);
+ 
+ static ssize_t nvm_authenticate_show(struct device *dev,
+ 	struct device_attribute *attr, char *buf)
+diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
+index 06d044862e58..1c75572f5a3f 100644
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -634,8 +634,13 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
+ 
+ 		preempt_enable();
+ 
+-		if (vhost_enable_notify(&net->dev, vq))
++		if (!vhost_vq_avail_empty(&net->dev, vq))
+ 			vhost_poll_queue(&vq->poll);
++		else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
++			vhost_disable_notify(&net->dev, vq);
++			vhost_poll_queue(&vq->poll);
++		}
++
+ 		mutex_unlock(&vq->mutex);
+ 
+ 		len = peek_head_len(rvq, sk);
+diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
+index 907d6b7dde6a..86d813a3f5d1 100644
+--- a/fs/f2fs/recovery.c
++++ b/fs/f2fs/recovery.c
+@@ -291,7 +291,7 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
+ 		return 0;
+ 
+ 	/* Get the previous summary */
+-	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
++	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
+ 		struct curseg_info *curseg = CURSEG_I(sbi, i);
+ 		if (curseg->segno == segno) {
+ 			sum = curseg->sum_blk->entries[blkoff];
+@@ -599,8 +599,6 @@ int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
+ 	}
+ 
+ 	clear_sbi_flag(sbi, SBI_POR_DOING);
+-	if (err)
+-		set_ckpt_flags(sbi, CP_ERROR_FLAG);
+ 	mutex_unlock(&sbi->cp_mutex);
+ 
+ 	/* let's drop all the directory inodes for clean checkpoint */
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index c16d00e53264..13c65dd2d37d 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -1222,9 +1222,6 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
+ 	struct fuse_in *in;
+ 	unsigned reqsize;
+ 
+-	if (task_active_pid_ns(current) != fc->pid_ns)
+-		return -EIO;
+-
+  restart:
+ 	spin_lock(&fiq->waitq.lock);
+ 	err = -EAGAIN;
+@@ -1262,6 +1259,13 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
+ 
+ 	in = &req->in;
+ 	reqsize = in->h.len;
++
++	if (task_active_pid_ns(current) != fc->pid_ns) {
++		rcu_read_lock();
++		in->h.pid = pid_vnr(find_pid_ns(in->h.pid, fc->pid_ns));
++		rcu_read_unlock();
++	}
++
+ 	/* If request is too large, reply with an error and restart the read */
+ 	if (nbytes < reqsize) {
+ 		req->out.h.error = -EIO;
+@@ -1823,9 +1827,6 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
+ 	struct fuse_req *req;
+ 	struct fuse_out_header oh;
+ 
+-	if (task_active_pid_ns(current) != fc->pid_ns)
+-		return -EIO;
+-
+ 	if (nbytes < sizeof(struct fuse_out_header))
+ 		return -EINVAL;
+ 
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index ab60051be6e5..6d8e65cec01a 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -2181,9 +2181,6 @@ static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
+ 	if ((fl->fl_flags & FL_CLOSE_POSIX) == FL_CLOSE_POSIX)
+ 		return 0;
+ 
+-	if (pid && pid_nr == 0)
+-		return -EOVERFLOW;
+-
+ 	fuse_lk_fill(&args, file, fl, opcode, pid_nr, flock, &inarg);
+ 	err = fuse_simple_request(fc, &args);
+ 
+diff --git a/fs/inode.c b/fs/inode.c
+index 50370599e371..6a1626e0edaf 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -637,6 +637,7 @@ void evict_inodes(struct super_block *sb)
+ 
+ 	dispose_list(&dispose);
+ }
++EXPORT_SYMBOL_GPL(evict_inodes);
+ 
+ /**
+  * invalidate_inodes	- attempt to free all inodes on a superblock
+diff --git a/fs/internal.h b/fs/internal.h
+index 9676fe11c093..fedfe94d84ba 100644
+--- a/fs/internal.h
++++ b/fs/internal.h
+@@ -132,7 +132,6 @@ static inline bool atime_needs_update_rcu(const struct path *path,
+ extern void inode_io_list_del(struct inode *inode);
+ 
+ extern long get_nr_dirty_inodes(void);
+-extern void evict_inodes(struct super_block *);
+ extern int invalidate_inodes(struct super_block *, bool);
+ 
+ /*
+diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
+index 5bc71642b226..ef55c926463c 100644
+--- a/fs/overlayfs/inode.c
++++ b/fs/overlayfs/inode.c
+@@ -576,10 +576,13 @@ static int ovl_inode_set(struct inode *inode, void *data)
+ static bool ovl_verify_inode(struct inode *inode, struct dentry *lowerdentry,
+ 			     struct dentry *upperdentry)
+ {
+-	struct inode *lowerinode = lowerdentry ? d_inode(lowerdentry) : NULL;
+-
+-	/* Lower (origin) inode must match, even if NULL */
+-	if (ovl_inode_lower(inode) != lowerinode)
++	/*
++	 * Allow non-NULL lower inode in ovl_inode even if lowerdentry is NULL.
++	 * This happens when finding a copied up overlay inode for a renamed
++	 * or hardlinked overlay dentry and lower dentry cannot be followed
++	 * by origin because lower fs does not support file handles.
++	 */
++	if (lowerdentry && ovl_inode_lower(inode) != d_inode(lowerdentry))
+ 		return false;
+ 
+ 	/*
+diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
+index c09c16b1ad3b..6f2a5baded76 100644
+--- a/fs/xfs/libxfs/xfs_bmap.c
++++ b/fs/xfs/libxfs/xfs_bmap.c
+@@ -579,7 +579,7 @@ xfs_bmap_validate_ret(
+ 
+ #else
+ #define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
+-#define	xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
++#define	xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)	do { } while (0)
+ #endif /* DEBUG */
+ 
+ /*
+diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
+index 85de22513014..a6331ffa51e3 100644
+--- a/fs/xfs/libxfs/xfs_bmap_btree.c
++++ b/fs/xfs/libxfs/xfs_bmap_btree.c
+@@ -858,6 +858,7 @@ xfs_bmbt_change_owner(
+ 	cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, whichfork);
+ 	if (!cur)
+ 		return -ENOMEM;
++	cur->bc_private.b.flags |= XFS_BTCUR_BPRV_INVALID_OWNER;
+ 
+ 	error = xfs_btree_change_owner(cur, new_owner, buffer_list);
+ 	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
+index e0bcc4a59efd..5bfb88261c7e 100644
+--- a/fs/xfs/libxfs/xfs_btree.c
++++ b/fs/xfs/libxfs/xfs_btree.c
+@@ -1791,6 +1791,7 @@ xfs_btree_lookup_get_block(
+ 
+ 	/* Check the inode owner since the verifiers don't. */
+ 	if (xfs_sb_version_hascrc(&cur->bc_mp->m_sb) &&
++	    !(cur->bc_private.b.flags & XFS_BTCUR_BPRV_INVALID_OWNER) &&
+ 	    (cur->bc_flags & XFS_BTREE_LONG_PTRS) &&
+ 	    be64_to_cpu((*blkp)->bb_u.l.bb_owner) !=
+ 			cur->bc_private.b.ip->i_ino)
+@@ -4451,10 +4452,15 @@ xfs_btree_block_change_owner(
+ 
+ 	/* modify the owner */
+ 	block = xfs_btree_get_block(cur, level, &bp);
+-	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
++	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
++		if (block->bb_u.l.bb_owner == cpu_to_be64(bbcoi->new_owner))
++			return 0;
+ 		block->bb_u.l.bb_owner = cpu_to_be64(bbcoi->new_owner);
+-	else
++	} else {
++		if (block->bb_u.s.bb_owner == cpu_to_be32(bbcoi->new_owner))
++			return 0;
+ 		block->bb_u.s.bb_owner = cpu_to_be32(bbcoi->new_owner);
++	}
+ 
+ 	/*
+ 	 * If the block is a root block hosted in an inode, we might not have a
+@@ -4463,16 +4469,19 @@ xfs_btree_block_change_owner(
+ 	 * block is formatted into the on-disk inode fork. We still change it,
+ 	 * though, so everything is consistent in memory.
+ 	 */
+-	if (bp) {
+-		if (cur->bc_tp) {
+-			xfs_trans_ordered_buf(cur->bc_tp, bp);
++	if (!bp) {
++		ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
++		ASSERT(level == cur->bc_nlevels - 1);
++		return 0;
++	}
++
++	if (cur->bc_tp) {
++		if (!xfs_trans_ordered_buf(cur->bc_tp, bp)) {
+ 			xfs_btree_log_block(cur, bp, XFS_BB_OWNER);
+-		} else {
+-			xfs_buf_delwri_queue(bp, bbcoi->buffer_list);
++			return -EAGAIN;
+ 		}
+ 	} else {
+-		ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
+-		ASSERT(level == cur->bc_nlevels - 1);
++		xfs_buf_delwri_queue(bp, bbcoi->buffer_list);
+ 	}
+ 
+ 	return 0;
+diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h
+index 9c95e965cfe5..f2a88c3b1159 100644
+--- a/fs/xfs/libxfs/xfs_btree.h
++++ b/fs/xfs/libxfs/xfs_btree.h
+@@ -233,7 +233,8 @@ typedef struct xfs_btree_cur
+ 			short		forksize;	/* fork's inode space */
+ 			char		whichfork;	/* data or attr fork */
+ 			char		flags;		/* flags */
+-#define	XFS_BTCUR_BPRV_WASDEL	1			/* was delayed */
++#define	XFS_BTCUR_BPRV_WASDEL		(1<<0)		/* was delayed */
++#define	XFS_BTCUR_BPRV_INVALID_OWNER	(1<<1)		/* for ext swap */
+ 		} b;
+ 	}		bc_private;	/* per-btree type data */
+ } xfs_btree_cur_t;
+diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
+index abf5beaae907..988bb3f31446 100644
+--- a/fs/xfs/libxfs/xfs_ialloc.c
++++ b/fs/xfs/libxfs/xfs_ialloc.c
+@@ -378,8 +378,6 @@ xfs_ialloc_inode_init(
+ 				 * transaction and pin the log appropriately.
+ 				 */
+ 				xfs_trans_ordered_buf(tp, fbuf);
+-				xfs_trans_log_buf(tp, fbuf, 0,
+-						  BBTOB(fbuf->b_length) - 1);
+ 			}
+ 		} else {
+ 			fbuf->b_flags |= XBF_DONE;
+@@ -1133,6 +1131,7 @@ xfs_dialloc_ag_inobt(
+ 	int			error;
+ 	int			offset;
+ 	int			i, j;
++	int			searchdistance = 10;
+ 
+ 	pag = xfs_perag_get(mp, agno);
+ 
+@@ -1159,7 +1158,6 @@ xfs_dialloc_ag_inobt(
+ 	if (pagno == agno) {
+ 		int		doneleft;	/* done, to the left */
+ 		int		doneright;	/* done, to the right */
+-		int		searchdistance = 10;
+ 
+ 		error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i);
+ 		if (error)
+@@ -1220,21 +1218,9 @@ xfs_dialloc_ag_inobt(
+ 		/*
+ 		 * Loop until we find an inode chunk with a free inode.
+ 		 */
+-		while (!doneleft || !doneright) {
++		while (--searchdistance > 0 && (!doneleft || !doneright)) {
+ 			int	useleft;  /* using left inode chunk this time */
+ 
+-			if (!--searchdistance) {
+-				/*
+-				 * Not in range - save last search
+-				 * location and allocate a new inode
+-				 */
+-				xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
+-				pag->pagl_leftrec = trec.ir_startino;
+-				pag->pagl_rightrec = rec.ir_startino;
+-				pag->pagl_pagino = pagino;
+-				goto newino;
+-			}
+-
+ 			/* figure out the closer block if both are valid. */
+ 			if (!doneleft && !doneright) {
+ 				useleft = pagino -
+@@ -1278,26 +1264,37 @@ xfs_dialloc_ag_inobt(
+ 				goto error1;
+ 		}
+ 
+-		/*
+-		 * We've reached the end of the btree. because
+-		 * we are only searching a small chunk of the
+-		 * btree each search, there is obviously free
+-		 * inodes closer to the parent inode than we
+-		 * are now. restart the search again.
+-		 */
+-		pag->pagl_pagino = NULLAGINO;
+-		pag->pagl_leftrec = NULLAGINO;
+-		pag->pagl_rightrec = NULLAGINO;
+-		xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
+-		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
+-		goto restart_pagno;
++		if (searchdistance <= 0) {
++			/*
++			 * Not in range - save last search
++			 * location and allocate a new inode
++			 */
++			xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
++			pag->pagl_leftrec = trec.ir_startino;
++			pag->pagl_rightrec = rec.ir_startino;
++			pag->pagl_pagino = pagino;
++
++		} else {
++			/*
++			 * We've reached the end of the btree. because
++			 * we are only searching a small chunk of the
++			 * btree each search, there is obviously free
++			 * inodes closer to the parent inode than we
++			 * are now. restart the search again.
++			 */
++			pag->pagl_pagino = NULLAGINO;
++			pag->pagl_leftrec = NULLAGINO;
++			pag->pagl_rightrec = NULLAGINO;
++			xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
++			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
++			goto restart_pagno;
++		}
+ 	}
+ 
+ 	/*
+ 	 * In a different AG from the parent.
+ 	 * See if the most recently allocated block has any free.
+ 	 */
+-newino:
+ 	if (agi->agi_newino != cpu_to_be32(NULLAGINO)) {
+ 		error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
+ 					 XFS_LOOKUP_EQ, &i);
+diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
+index 0e80f34fe97c..5eb165555934 100644
+--- a/fs/xfs/libxfs/xfs_inode_fork.c
++++ b/fs/xfs/libxfs/xfs_inode_fork.c
+@@ -1499,14 +1499,11 @@ xfs_iext_realloc_indirect(
+ 	xfs_ifork_t	*ifp,		/* inode fork pointer */
+ 	int		new_size)	/* new indirection array size */
+ {
+-	int		nlists;		/* number of irec's (ex lists) */
+-	int		size;		/* current indirection array size */
+-
+ 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
+-	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
+-	size = nlists * sizeof(xfs_ext_irec_t);
+ 	ASSERT(ifp->if_real_bytes);
+-	ASSERT((new_size >= 0) && (new_size != size));
++	ASSERT((new_size >= 0) &&
++	       (new_size != ((ifp->if_real_bytes / XFS_IEXT_BUFSZ) *
++			     sizeof(xfs_ext_irec_t))));
+ 	if (new_size == 0) {
+ 		xfs_iext_destroy(ifp);
+ 	} else {
+diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
+index 6bf120bb1a17..f9efd67f6fa1 100644
+--- a/fs/xfs/xfs_aops.c
++++ b/fs/xfs/xfs_aops.c
+@@ -85,11 +85,11 @@ xfs_find_bdev_for_inode(
+  * associated buffer_heads, paying attention to the start and end offsets that
+  * we need to process on the page.
+  *
+- * Landmine Warning: bh->b_end_io() will call end_page_writeback() on the last
+- * buffer in the IO. Once it does this, it is unsafe to access the bufferhead or
+- * the page at all, as we may be racing with memory reclaim and it can free both
+- * the bufferhead chain and the page as it will see the page as clean and
+- * unused.
++ * Note that we open code the action in end_buffer_async_write here so that we
++ * only have to iterate over the buffers attached to the page once.  This is not
++ * only more efficient, but also ensures that we only call end_page_writeback
++ * at the end of the iteration, and thus avoids the pitfall of having the page
++ * and buffers potentially freed after every call to end_buffer_async_write.
+  */
+ static void
+ xfs_finish_page_writeback(
+@@ -97,29 +97,44 @@ xfs_finish_page_writeback(
+ 	struct bio_vec		*bvec,
+ 	int			error)
+ {
+-	unsigned int		end = bvec->bv_offset + bvec->bv_len - 1;
+-	struct buffer_head	*head, *bh, *next;
++	struct buffer_head	*head = page_buffers(bvec->bv_page), *bh = head;
++	bool			busy = false;
+ 	unsigned int		off = 0;
+-	unsigned int		bsize;
++	unsigned long		flags;
+ 
+ 	ASSERT(bvec->bv_offset < PAGE_SIZE);
+ 	ASSERT((bvec->bv_offset & (i_blocksize(inode) - 1)) == 0);
+-	ASSERT(end < PAGE_SIZE);
++	ASSERT(bvec->bv_offset + bvec->bv_len <= PAGE_SIZE);
+ 	ASSERT((bvec->bv_len & (i_blocksize(inode) - 1)) == 0);
+ 
+-	bh = head = page_buffers(bvec->bv_page);
+-
+-	bsize = bh->b_size;
++	local_irq_save(flags);
++	bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
+ 	do {
+-		if (off > end)
+-			break;
+-		next = bh->b_this_page;
+-		if (off < bvec->bv_offset)
+-			goto next_bh;
+-		bh->b_end_io(bh, !error);
+-next_bh:
+-		off += bsize;
+-	} while ((bh = next) != head);
++		if (off >= bvec->bv_offset &&
++		    off < bvec->bv_offset + bvec->bv_len) {
++			ASSERT(buffer_async_write(bh));
++			ASSERT(bh->b_end_io == NULL);
++
++			if (error) {
++				mark_buffer_write_io_error(bh);
++				clear_buffer_uptodate(bh);
++				SetPageError(bvec->bv_page);
++			} else {
++				set_buffer_uptodate(bh);
++			}
++			clear_buffer_async_write(bh);
++			unlock_buffer(bh);
++		} else if (buffer_async_write(bh)) {
++			ASSERT(buffer_locked(bh));
++			busy = true;
++		}
++		off += bh->b_size;
++	} while ((bh = bh->b_this_page) != head);
++	bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
++	local_irq_restore(flags);
++
++	if (!busy)
++		end_page_writeback(bvec->bv_page);
+ }
+ 
+ /*
+@@ -133,8 +148,10 @@ xfs_destroy_ioend(
+ 	int			error)
+ {
+ 	struct inode		*inode = ioend->io_inode;
+-	struct bio		*last = ioend->io_bio;
+-	struct bio		*bio, *next;
++	struct bio		*bio = &ioend->io_inline_bio;
++	struct bio		*last = ioend->io_bio, *next;
++	u64			start = bio->bi_iter.bi_sector;
++	bool			quiet = bio_flagged(bio, BIO_QUIET);
+ 
+ 	for (bio = &ioend->io_inline_bio; bio; bio = next) {
+ 		struct bio_vec	*bvec;
+@@ -155,6 +172,11 @@ xfs_destroy_ioend(
+ 
+ 		bio_put(bio);
+ 	}
++
++	if (unlikely(error && !quiet)) {
++		xfs_err_ratelimited(XFS_I(inode)->i_mount,
++			"writeback error on sector %llu", start);
++	}
+ }
+ 
+ /*
+@@ -423,7 +445,8 @@ xfs_start_buffer_writeback(
+ 	ASSERT(!buffer_delay(bh));
+ 	ASSERT(!buffer_unwritten(bh));
+ 
+-	mark_buffer_async_write(bh);
++	bh->b_end_io = NULL;
++	set_buffer_async_write(bh);
+ 	set_buffer_uptodate(bh);
+ 	clear_buffer_dirty(bh);
+ }
+diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
+index 93e955262d07..3e9b7a4fb8fd 100644
+--- a/fs/xfs/xfs_bmap_util.c
++++ b/fs/xfs/xfs_bmap_util.c
+@@ -1840,29 +1840,18 @@ xfs_swap_extent_forks(
+ 	}
+ 
+ 	/*
+-	 * Before we've swapped the forks, lets set the owners of the forks
+-	 * appropriately. We have to do this as we are demand paging the btree
+-	 * buffers, and so the validation done on read will expect the owner
+-	 * field to be correctly set. Once we change the owners, we can swap the
+-	 * inode forks.
++	 * Btree format (v3) inodes have the inode number stamped in the bmbt
++	 * block headers. We can't start changing the bmbt blocks until the
++	 * inode owner change is logged so recovery does the right thing in the
++	 * event of a crash. Set the owner change log flags now and leave the
++	 * bmbt scan as the last step.
+ 	 */
+ 	if (ip->i_d.di_version == 3 &&
+-	    ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
++	    ip->i_d.di_format == XFS_DINODE_FMT_BTREE)
+ 		(*target_log_flags) |= XFS_ILOG_DOWNER;
+-		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK,
+-					      tip->i_ino, NULL);
+-		if (error)
+-			return error;
+-	}
+-
+ 	if (tip->i_d.di_version == 3 &&
+-	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
++	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
+ 		(*src_log_flags) |= XFS_ILOG_DOWNER;
+-		error = xfs_bmbt_change_owner(tp, tip, XFS_DATA_FORK,
+-					      ip->i_ino, NULL);
+-		if (error)
+-			return error;
+-	}
+ 
+ 	/*
+ 	 * Swap the data forks of the inodes
+@@ -1940,6 +1929,48 @@ xfs_swap_extent_forks(
+ 	return 0;
+ }
+ 
++/*
++ * Fix up the owners of the bmbt blocks to refer to the current inode. The
++ * change owner scan attempts to order all modified buffers in the current
++ * transaction. In the event of ordered buffer failure, the offending buffer is
++ * physically logged as a fallback and the scan returns -EAGAIN. We must roll
++ * the transaction in this case to replenish the fallback log reservation and
++ * restart the scan. This process repeats until the scan completes.
++ */
++static int
++xfs_swap_change_owner(
++	struct xfs_trans	**tpp,
++	struct xfs_inode	*ip,
++	struct xfs_inode	*tmpip)
++{
++	int			error;
++	struct xfs_trans	*tp = *tpp;
++
++	do {
++		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
++					      NULL);
++		/* success or fatal error */
++		if (error != -EAGAIN)
++			break;
++
++		error = xfs_trans_roll(tpp, NULL);
++		if (error)
++			break;
++		tp = *tpp;
++
++		/*
++		 * Redirty both inodes so they can relog and keep the log tail
++		 * moving forward.
++		 */
++		xfs_trans_ijoin(tp, ip, 0);
++		xfs_trans_ijoin(tp, tmpip, 0);
++		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
++		xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
++	} while (true);
++
++	return error;
++}
++
+ int
+ xfs_swap_extents(
+ 	struct xfs_inode	*ip,	/* target inode */
+@@ -1954,7 +1985,7 @@ xfs_swap_extents(
+ 	int			lock_flags;
+ 	struct xfs_ifork	*cowfp;
+ 	uint64_t		f;
+-	int			resblks;
++	int			resblks = 0;
+ 
+ 	/*
+ 	 * Lock the inodes against other IO, page faults and truncate to
+@@ -2002,11 +2033,8 @@ xfs_swap_extents(
+ 			  XFS_SWAP_RMAP_SPACE_RES(mp,
+ 				XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK),
+ 				XFS_DATA_FORK);
+-		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
+-				0, 0, &tp);
+-	} else
+-		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0,
+-				0, 0, &tp);
++	}
++	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
+ 	if (error)
+ 		goto out_unlock;
+ 
+@@ -2091,6 +2119,23 @@ xfs_swap_extents(
+ 	xfs_trans_log_inode(tp, ip,  src_log_flags);
+ 	xfs_trans_log_inode(tp, tip, target_log_flags);
+ 
++	/*
++	 * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
++	 * have inode number owner values in the bmbt blocks that still refer to
++	 * the old inode. Scan each bmbt to fix up the owner values with the
++	 * inode number of the current inode.
++	 */
++	if (src_log_flags & XFS_ILOG_DOWNER) {
++		error = xfs_swap_change_owner(&tp, ip, tip);
++		if (error)
++			goto out_trans_cancel;
++	}
++	if (target_log_flags & XFS_ILOG_DOWNER) {
++		error = xfs_swap_change_owner(&tp, tip, ip);
++		if (error)
++			goto out_trans_cancel;
++	}
++
+ 	/*
+ 	 * If this is a synchronous mount, make sure that the
+ 	 * transaction goes to disk before returning to the user.
+diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
+index f6a8422e9562..e0a0af0946f2 100644
+--- a/fs/xfs/xfs_buf_item.c
++++ b/fs/xfs/xfs_buf_item.c
+@@ -29,6 +29,7 @@
+ #include "xfs_error.h"
+ #include "xfs_trace.h"
+ #include "xfs_log.h"
++#include "xfs_inode.h"
+ 
+ 
+ kmem_zone_t	*xfs_buf_item_zone;
+@@ -322,6 +323,8 @@ xfs_buf_item_format(
+ 	ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
+ 	       (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF
+ 	        && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));
++	ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED) ||
++	       (bip->bli_flags & XFS_BLI_STALE));
+ 
+ 
+ 	/*
+@@ -346,16 +349,6 @@ xfs_buf_item_format(
+ 		bip->bli_flags &= ~XFS_BLI_INODE_BUF;
+ 	}
+ 
+-	if ((bip->bli_flags & (XFS_BLI_ORDERED|XFS_BLI_STALE)) ==
+-							XFS_BLI_ORDERED) {
+-		/*
+-		 * The buffer has been logged just to order it.  It is not being
+-		 * included in the transaction commit, so don't format it.
+-		 */
+-		trace_xfs_buf_item_format_ordered(bip);
+-		return;
+-	}
+-
+ 	for (i = 0; i < bip->bli_format_count; i++) {
+ 		xfs_buf_item_format_segment(bip, lv, &vecp, offset,
+ 					    &bip->bli_formats[i]);
+@@ -574,26 +567,20 @@ xfs_buf_item_unlock(
+ {
+ 	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
+ 	struct xfs_buf		*bp = bip->bli_buf;
+-	bool			clean;
+-	bool			aborted;
+-	int			flags;
++	bool			aborted = !!(lip->li_flags & XFS_LI_ABORTED);
++	bool			hold = !!(bip->bli_flags & XFS_BLI_HOLD);
++	bool			dirty = !!(bip->bli_flags & XFS_BLI_DIRTY);
++#if defined(DEBUG) || defined(XFS_WARN)
++	bool			ordered = !!(bip->bli_flags & XFS_BLI_ORDERED);
++#endif
+ 
+ 	/* Clear the buffer's association with this transaction. */
+ 	bp->b_transp = NULL;
+ 
+ 	/*
+-	 * If this is a transaction abort, don't return early.  Instead, allow
+-	 * the brelse to happen.  Normally it would be done for stale
+-	 * (cancelled) buffers at unpin time, but we'll never go through the
+-	 * pin/unpin cycle if we abort inside commit.
++	 * The per-transaction state has been copied above so clear it from the
++	 * bli.
+ 	 */
+-	aborted = (lip->li_flags & XFS_LI_ABORTED) ? true : false;
+-	/*
+-	 * Before possibly freeing the buf item, copy the per-transaction state
+-	 * so we can reference it safely later after clearing it from the
+-	 * buffer log item.
+-	 */
+-	flags = bip->bli_flags;
+ 	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);
+ 
+ 	/*
+@@ -601,7 +588,7 @@ xfs_buf_item_unlock(
+ 	 * unlock the buffer and free the buf item when the buffer is unpinned
+ 	 * for the last time.
+ 	 */
+-	if (flags & XFS_BLI_STALE) {
++	if (bip->bli_flags & XFS_BLI_STALE) {
+ 		trace_xfs_buf_item_unlock_stale(bip);
+ 		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
+ 		if (!aborted) {
+@@ -619,20 +606,11 @@ xfs_buf_item_unlock(
+ 	 * regardless of whether it is dirty or not. A dirty abort implies a
+ 	 * shutdown, anyway.
+ 	 *
+-	 * Ordered buffers are dirty but may have no recorded changes, so ensure
+-	 * we only release clean items here.
++	 * The bli dirty state should match whether the blf has logged segments
++	 * except for ordered buffers, where only the bli should be dirty.
+ 	 */
+-	clean = (flags & XFS_BLI_DIRTY) ? false : true;
+-	if (clean) {
+-		int i;
+-		for (i = 0; i < bip->bli_format_count; i++) {
+-			if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
+-				     bip->bli_formats[i].blf_map_size)) {
+-				clean = false;
+-				break;
+-			}
+-		}
+-	}
++	ASSERT((!ordered && dirty == xfs_buf_item_dirty_format(bip)) ||
++	       (ordered && dirty && !xfs_buf_item_dirty_format(bip)));
+ 
+ 	/*
+ 	 * Clean buffers, by definition, cannot be in the AIL. However, aborted
+@@ -651,11 +629,11 @@ xfs_buf_item_unlock(
+ 			ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
+ 			xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
+ 			xfs_buf_item_relse(bp);
+-		} else if (clean)
++		} else if (!dirty)
+ 			xfs_buf_item_relse(bp);
+ 	}
+ 
+-	if (!(flags & XFS_BLI_HOLD))
++	if (!hold)
+ 		xfs_buf_relse(bp);
+ }
+ 
+@@ -945,14 +923,22 @@ xfs_buf_item_log(
+ 
+ 
+ /*
+- * Return 1 if the buffer has been logged or ordered in a transaction (at any
+- * point, not just the current transaction) and 0 if not.
++ * Return true if the buffer has any ranges logged/dirtied by a transaction,
++ * false otherwise.
+  */
+-uint
+-xfs_buf_item_dirty(
+-	xfs_buf_log_item_t	*bip)
++bool
++xfs_buf_item_dirty_format(
++	struct xfs_buf_log_item	*bip)
+ {
+-	return (bip->bli_flags & XFS_BLI_DIRTY);
++	int			i;
++
++	for (i = 0; i < bip->bli_format_count; i++) {
++		if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
++			     bip->bli_formats[i].blf_map_size))
++			return true;
++	}
++
++	return false;
+ }
+ 
+ STATIC void
+@@ -1054,6 +1040,31 @@ xfs_buf_do_callbacks(
+ 	}
+ }
+ 
++/*
++ * Invoke the error state callback for each log item affected by the failed I/O.
++ *
++ * If a metadata buffer write fails with a non-permanent error, the buffer is
++ * eventually resubmitted and so the completion callbacks are not run. The error
++ * state may need to be propagated to the log items attached to the buffer,
++ * however, so the next AIL push of the item knows hot to handle it correctly.
++ */
++STATIC void
++xfs_buf_do_callbacks_fail(
++	struct xfs_buf		*bp)
++{
++	struct xfs_log_item	*next;
++	struct xfs_log_item	*lip = bp->b_fspriv;
++	struct xfs_ail		*ailp = lip->li_ailp;
++
++	spin_lock(&ailp->xa_lock);
++	for (; lip; lip = next) {
++		next = lip->li_bio_list;
++		if (lip->li_ops->iop_error)
++			lip->li_ops->iop_error(lip, bp);
++	}
++	spin_unlock(&ailp->xa_lock);
++}
++
+ static bool
+ xfs_buf_iodone_callback_error(
+ 	struct xfs_buf		*bp)
+@@ -1123,7 +1134,11 @@ xfs_buf_iodone_callback_error(
+ 	if ((mp->m_flags & XFS_MOUNT_UNMOUNTING) && mp->m_fail_unmount)
+ 		goto permanent_error;
+ 
+-	/* still a transient error, higher layers will retry */
++	/*
++	 * Still a transient error, run IO completion failure callbacks and let
++	 * the higher layers retry the buffer.
++	 */
++	xfs_buf_do_callbacks_fail(bp);
+ 	xfs_buf_ioerror(bp, 0);
+ 	xfs_buf_relse(bp);
+ 	return true;
+@@ -1204,3 +1219,31 @@ xfs_buf_iodone(
+ 	xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
+ 	xfs_buf_item_free(BUF_ITEM(lip));
+ }
++
++/*
++ * Requeue a failed buffer for writeback
++ *
++ * Return true if the buffer has been re-queued properly, false otherwise
++ */
++bool
++xfs_buf_resubmit_failed_buffers(
++	struct xfs_buf		*bp,
++	struct xfs_log_item	*lip,
++	struct list_head	*buffer_list)
++{
++	struct xfs_log_item	*next;
++
++	/*
++	 * Clear XFS_LI_FAILED flag from all items before resubmit
++	 *
++	 * XFS_LI_FAILED set/clear is protected by xa_lock, caller  this
++	 * function already have it acquired
++	 */
++	for (; lip; lip = next) {
++		next = lip->li_bio_list;
++		xfs_clear_li_failed(lip);
++	}
++
++	/* Add this buffer back to the delayed write list */
++	return xfs_buf_delwri_queue(bp, buffer_list);
++}
+diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
+index f7eba99d19dd..9690ce62c9a7 100644
+--- a/fs/xfs/xfs_buf_item.h
++++ b/fs/xfs/xfs_buf_item.h
+@@ -64,12 +64,15 @@ typedef struct xfs_buf_log_item {
+ int	xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *);
+ void	xfs_buf_item_relse(struct xfs_buf *);
+ void	xfs_buf_item_log(xfs_buf_log_item_t *, uint, uint);
+-uint	xfs_buf_item_dirty(xfs_buf_log_item_t *);
++bool	xfs_buf_item_dirty_format(struct xfs_buf_log_item *);
+ void	xfs_buf_attach_iodone(struct xfs_buf *,
+ 			      void(*)(struct xfs_buf *, xfs_log_item_t *),
+ 			      xfs_log_item_t *);
+ void	xfs_buf_iodone_callbacks(struct xfs_buf *);
+ void	xfs_buf_iodone(struct xfs_buf *, struct xfs_log_item *);
++bool	xfs_buf_resubmit_failed_buffers(struct xfs_buf *,
++					struct xfs_log_item *,
++					struct list_head *);
+ 
+ extern kmem_zone_t	*xfs_buf_item_zone;
+ 
+diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
+index 0a9e6985a0d0..34227115a5d6 100644
+--- a/fs/xfs/xfs_icache.c
++++ b/fs/xfs/xfs_icache.c
+@@ -1124,11 +1124,11 @@ xfs_reclaim_inode(
+ 	 * Because we use RCU freeing we need to ensure the inode always appears
+ 	 * to be reclaimed with an invalid inode number when in the free state.
+ 	 * We do this as early as possible under the ILOCK so that
+-	 * xfs_iflush_cluster() can be guaranteed to detect races with us here.
+-	 * By doing this, we guarantee that once xfs_iflush_cluster has locked
+-	 * XFS_ILOCK that it will see either a valid, flushable inode that will
+-	 * serialise correctly, or it will see a clean (and invalid) inode that
+-	 * it can skip.
++	 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
++	 * detect races with us here. By doing this, we guarantee that once
++	 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
++	 * it will see either a valid inode that will serialise correctly, or it
++	 * will see an invalid inode that it can skip.
+ 	 */
+ 	spin_lock(&ip->i_flags_lock);
+ 	ip->i_flags = XFS_IRECLAIM;
+diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
+index ff48f0096810..97045e8dfed5 100644
+--- a/fs/xfs/xfs_inode.c
++++ b/fs/xfs/xfs_inode.c
+@@ -2359,11 +2359,24 @@ xfs_ifree_cluster(
+ 			 * already marked stale. If we can't lock it, back off
+ 			 * and retry.
+ 			 */
+-			if (ip != free_ip &&
+-			    !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
+-				rcu_read_unlock();
+-				delay(1);
+-				goto retry;
++			if (ip != free_ip) {
++				if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
++					rcu_read_unlock();
++					delay(1);
++					goto retry;
++				}
++
++				/*
++				 * Check the inode number again in case we're
++				 * racing with freeing in xfs_reclaim_inode().
++				 * See the comments in that function for more
++				 * information as to why the initial check is
++				 * not sufficient.
++				 */
++				if (ip->i_ino != inum + i) {
++					xfs_iunlock(ip, XFS_ILOCK_EXCL);
++					continue;
++				}
+ 			}
+ 			rcu_read_unlock();
+ 
+diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
+index 013cc78d7daf..6d0f74ec31e8 100644
+--- a/fs/xfs/xfs_inode_item.c
++++ b/fs/xfs/xfs_inode_item.c
+@@ -27,6 +27,7 @@
+ #include "xfs_error.h"
+ #include "xfs_trace.h"
+ #include "xfs_trans_priv.h"
++#include "xfs_buf_item.h"
+ #include "xfs_log.h"
+ 
+ 
+@@ -475,6 +476,23 @@ xfs_inode_item_unpin(
+ 		wake_up_bit(&ip->i_flags, __XFS_IPINNED_BIT);
+ }
+ 
++/*
++ * Callback used to mark a buffer with XFS_LI_FAILED when items in the buffer
++ * have been failed during writeback
++ *
++ * This informs the AIL that the inode is already flush locked on the next push,
++ * and acquires a hold on the buffer to ensure that it isn't reclaimed before
++ * dirty data makes it to disk.
++ */
++STATIC void
++xfs_inode_item_error(
++	struct xfs_log_item	*lip,
++	struct xfs_buf		*bp)
++{
++	ASSERT(xfs_isiflocked(INODE_ITEM(lip)->ili_inode));
++	xfs_set_li_failed(lip, bp);
++}
++
+ STATIC uint
+ xfs_inode_item_push(
+ 	struct xfs_log_item	*lip,
+@@ -484,13 +502,28 @@ xfs_inode_item_push(
+ {
+ 	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
+ 	struct xfs_inode	*ip = iip->ili_inode;
+-	struct xfs_buf		*bp = NULL;
++	struct xfs_buf		*bp = lip->li_buf;
+ 	uint			rval = XFS_ITEM_SUCCESS;
+ 	int			error;
+ 
+ 	if (xfs_ipincount(ip) > 0)
+ 		return XFS_ITEM_PINNED;
+ 
++	/*
++	 * The buffer containing this item failed to be written back
++	 * previously. Resubmit the buffer for IO.
++	 */
++	if (lip->li_flags & XFS_LI_FAILED) {
++		if (!xfs_buf_trylock(bp))
++			return XFS_ITEM_LOCKED;
++
++		if (!xfs_buf_resubmit_failed_buffers(bp, lip, buffer_list))
++			rval = XFS_ITEM_FLUSHING;
++
++		xfs_buf_unlock(bp);
++		return rval;
++	}
++
+ 	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
+ 		return XFS_ITEM_LOCKED;
+ 
+@@ -622,7 +655,8 @@ static const struct xfs_item_ops xfs_inode_item_ops = {
+ 	.iop_unlock	= xfs_inode_item_unlock,
+ 	.iop_committed	= xfs_inode_item_committed,
+ 	.iop_push	= xfs_inode_item_push,
+-	.iop_committing = xfs_inode_item_committing
++	.iop_committing = xfs_inode_item_committing,
++	.iop_error	= xfs_inode_item_error
+ };
+ 
+ 
+@@ -710,7 +744,8 @@ xfs_iflush_done(
+ 		 * the AIL lock.
+ 		 */
+ 		iip = INODE_ITEM(blip);
+-		if (iip->ili_logged && blip->li_lsn == iip->ili_flush_lsn)
++		if ((iip->ili_logged && blip->li_lsn == iip->ili_flush_lsn) ||
++		    lip->li_flags & XFS_LI_FAILED)
+ 			need_ail++;
+ 
+ 		blip = next;
+@@ -718,7 +753,8 @@ xfs_iflush_done(
+ 
+ 	/* make sure we capture the state of the initial inode. */
+ 	iip = INODE_ITEM(lip);
+-	if (iip->ili_logged && lip->li_lsn == iip->ili_flush_lsn)
++	if ((iip->ili_logged && lip->li_lsn == iip->ili_flush_lsn) ||
++	    lip->li_flags & XFS_LI_FAILED)
+ 		need_ail++;
+ 
+ 	/*
+@@ -739,6 +775,9 @@ xfs_iflush_done(
+ 			if (INODE_ITEM(blip)->ili_logged &&
+ 			    blip->li_lsn == INODE_ITEM(blip)->ili_flush_lsn)
+ 				mlip_changed |= xfs_ail_delete_one(ailp, blip);
++			else {
++				xfs_clear_li_failed(blip);
++			}
+ 		}
+ 
+ 		if (mlip_changed) {
+diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
+index 9c0c7a920304..5049e8ab6e30 100644
+--- a/fs/xfs/xfs_ioctl.c
++++ b/fs/xfs/xfs_ioctl.c
+@@ -931,16 +931,15 @@ xfs_ioc_fsgetxattr(
+ 	return 0;
+ }
+ 
+-STATIC void
+-xfs_set_diflags(
++STATIC uint16_t
++xfs_flags2diflags(
+ 	struct xfs_inode	*ip,
+ 	unsigned int		xflags)
+ {
+-	unsigned int		di_flags;
+-	uint64_t		di_flags2;
+-
+ 	/* can't set PREALLOC this way, just preserve it */
+-	di_flags = (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC);
++	uint16_t		di_flags =
++		(ip->i_d.di_flags & XFS_DIFLAG_PREALLOC);
++
+ 	if (xflags & FS_XFLAG_IMMUTABLE)
+ 		di_flags |= XFS_DIFLAG_IMMUTABLE;
+ 	if (xflags & FS_XFLAG_APPEND)
+@@ -970,19 +969,24 @@ xfs_set_diflags(
+ 		if (xflags & FS_XFLAG_EXTSIZE)
+ 			di_flags |= XFS_DIFLAG_EXTSIZE;
+ 	}
+-	ip->i_d.di_flags = di_flags;
+ 
+-	/* diflags2 only valid for v3 inodes. */
+-	if (ip->i_d.di_version < 3)
+-		return;
++	return di_flags;
++}
++
++STATIC uint64_t
++xfs_flags2diflags2(
++	struct xfs_inode	*ip,
++	unsigned int		xflags)
++{
++	uint64_t		di_flags2 =
++		(ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK);
+ 
+-	di_flags2 = (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK);
+ 	if (xflags & FS_XFLAG_DAX)
+ 		di_flags2 |= XFS_DIFLAG2_DAX;
+ 	if (xflags & FS_XFLAG_COWEXTSIZE)
+ 		di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
+ 
+-	ip->i_d.di_flags2 = di_flags2;
++	return di_flags2;
+ }
+ 
+ STATIC void
+@@ -1008,11 +1012,12 @@ xfs_diflags_to_linux(
+ 		inode->i_flags |= S_NOATIME;
+ 	else
+ 		inode->i_flags &= ~S_NOATIME;
++#if 0	/* disabled until the flag switching races are sorted out */
+ 	if (xflags & FS_XFLAG_DAX)
+ 		inode->i_flags |= S_DAX;
+ 	else
+ 		inode->i_flags &= ~S_DAX;
+-
++#endif
+ }
+ 
+ static int
+@@ -1022,6 +1027,7 @@ xfs_ioctl_setattr_xflags(
+ 	struct fsxattr		*fa)
+ {
+ 	struct xfs_mount	*mp = ip->i_mount;
++	uint64_t		di_flags2;
+ 
+ 	/* Can't change realtime flag if any extents are allocated. */
+ 	if ((ip->i_d.di_nextents || ip->i_delayed_blks) &&
+@@ -1052,7 +1058,14 @@ xfs_ioctl_setattr_xflags(
+ 	    !capable(CAP_LINUX_IMMUTABLE))
+ 		return -EPERM;
+ 
+-	xfs_set_diflags(ip, fa->fsx_xflags);
++	/* diflags2 only valid for v3 inodes. */
++	di_flags2 = xfs_flags2diflags2(ip, fa->fsx_xflags);
++	if (di_flags2 && ip->i_d.di_version < 3)
++		return -EINVAL;
++
++	ip->i_d.di_flags = xfs_flags2diflags(ip, fa->fsx_xflags);
++	ip->i_d.di_flags2 = di_flags2;
++
+ 	xfs_diflags_to_linux(ip);
+ 	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
+ 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
+index 469c9fa4c178..17081c77ef86 100644
+--- a/fs/xfs/xfs_iops.c
++++ b/fs/xfs/xfs_iops.c
+@@ -817,7 +817,7 @@ xfs_vn_setattr_nonsize(
+  * Caution: The caller of this function is responsible for calling
+  * setattr_prepare() or otherwise verifying the change is fine.
+  */
+-int
++STATIC int
+ xfs_setattr_size(
+ 	struct xfs_inode	*ip,
+ 	struct iattr		*iattr)
+diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
+index 4ebd0bafc914..c5107c7bc4bf 100644
+--- a/fs/xfs/xfs_log.c
++++ b/fs/xfs/xfs_log.c
+@@ -743,10 +743,14 @@ xfs_log_mount_finish(
+ 	struct xfs_mount	*mp)
+ {
+ 	int	error = 0;
++	bool	readonly = (mp->m_flags & XFS_MOUNT_RDONLY);
+ 
+ 	if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
+ 		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
+ 		return 0;
++	} else if (readonly) {
++		/* Allow unlinked processing to proceed */
++		mp->m_flags &= ~XFS_MOUNT_RDONLY;
+ 	}
+ 
+ 	/*
+@@ -757,12 +761,27 @@ xfs_log_mount_finish(
+ 	 * inodes.  Turn it off immediately after recovery finishes
+ 	 * so that we don't leak the quota inodes if subsequent mount
+ 	 * activities fail.
++	 *
++	 * We let all inodes involved in redo item processing end up on
++	 * the LRU instead of being evicted immediately so that if we do
++	 * something to an unlinked inode, the irele won't cause
++	 * premature truncation and freeing of the inode, which results
++	 * in log recovery failure.  We have to evict the unreferenced
++	 * lru inodes after clearing MS_ACTIVE because we don't
++	 * otherwise clean up the lru if there's a subsequent failure in
++	 * xfs_mountfs, which leads to us leaking the inodes if nothing
++	 * else (e.g. quotacheck) references the inodes before the
++	 * mount failure occurs.
+ 	 */
+ 	mp->m_super->s_flags |= MS_ACTIVE;
+ 	error = xlog_recover_finish(mp->m_log);
+ 	if (!error)
+ 		xfs_log_work_queue(mp);
+ 	mp->m_super->s_flags &= ~MS_ACTIVE;
++	evict_inodes(mp->m_super);
++
++	if (readonly)
++		mp->m_flags |= XFS_MOUNT_RDONLY;
+ 
+ 	return error;
+ }
+@@ -812,11 +831,14 @@ xfs_log_unmount_write(xfs_mount_t *mp)
+ 	int		 error;
+ 
+ 	/*
+-	 * Don't write out unmount record on read-only mounts.
++	 * Don't write out unmount record on norecovery mounts or ro devices.
+ 	 * Or, if we are doing a forced umount (typically because of IO errors).
+ 	 */
+-	if (mp->m_flags & XFS_MOUNT_RDONLY)
++	if (mp->m_flags & XFS_MOUNT_NORECOVERY ||
++	    xfs_readonly_buftarg(log->l_mp->m_logdev_targp)) {
++		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
+ 		return 0;
++	}
+ 
+ 	error = _xfs_log_force(mp, XFS_LOG_SYNC, NULL);
+ 	ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log)));
+@@ -3353,8 +3375,6 @@ _xfs_log_force(
+ 		 */
+ 		if (iclog->ic_state & XLOG_STATE_IOERROR)
+ 			return -EIO;
+-		if (log_flushed)
+-			*log_flushed = 1;
+ 	} else {
+ 
+ no_sleep:
+@@ -3458,8 +3478,6 @@ _xfs_log_force_lsn(
+ 
+ 				xlog_wait(&iclog->ic_prev->ic_write_wait,
+ 							&log->l_icloglock);
+-				if (log_flushed)
+-					*log_flushed = 1;
+ 				already_slept = 1;
+ 				goto try_again;
+ 			}
+@@ -3493,9 +3511,6 @@ _xfs_log_force_lsn(
+ 			 */
+ 			if (iclog->ic_state & XLOG_STATE_IOERROR)
+ 				return -EIO;
+-
+-			if (log_flushed)
+-				*log_flushed = 1;
+ 		} else {		/* just return */
+ 			spin_unlock(&log->l_icloglock);
+ 		}
+diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
+index 9549188f5a36..093ee8289057 100644
+--- a/fs/xfs/xfs_log_recover.c
++++ b/fs/xfs/xfs_log_recover.c
+@@ -1029,61 +1029,106 @@ xlog_seek_logrec_hdr(
+ }
+ 
+ /*
+- * Check the log tail for torn writes. This is required when torn writes are
+- * detected at the head and the head had to be walked back to a previous record.
+- * The tail of the previous record must now be verified to ensure the torn
+- * writes didn't corrupt the previous tail.
++ * Calculate distance from head to tail (i.e., unused space in the log).
++ */
++static inline int
++xlog_tail_distance(
++	struct xlog	*log,
++	xfs_daddr_t	head_blk,
++	xfs_daddr_t	tail_blk)
++{
++	if (head_blk < tail_blk)
++		return tail_blk - head_blk;
++
++	return tail_blk + (log->l_logBBsize - head_blk);
++}
++
++/*
++ * Verify the log tail. This is particularly important when torn or incomplete
++ * writes have been detected near the front of the log and the head has been
++ * walked back accordingly.
++ *
++ * We also have to handle the case where the tail was pinned and the head
++ * blocked behind the tail right before a crash. If the tail had been pushed
++ * immediately prior to the crash and the subsequent checkpoint was only
++ * partially written, it's possible it overwrote the last referenced tail in the
++ * log with garbage. This is not a coherency problem because the tail must have
++ * been pushed before it can be overwritten, but appears as log corruption to
++ * recovery because we have no way to know the tail was updated if the
++ * subsequent checkpoint didn't write successfully.
+  *
+- * Return an error if CRC verification fails as recovery cannot proceed.
++ * Therefore, CRC check the log from tail to head. If a failure occurs and the
++ * offending record is within max iclog bufs from the head, walk the tail
++ * forward and retry until a valid tail is found or corruption is detected out
++ * of the range of a possible overwrite.
+  */
+ STATIC int
+ xlog_verify_tail(
+ 	struct xlog		*log,
+ 	xfs_daddr_t		head_blk,
+-	xfs_daddr_t		tail_blk)
++	xfs_daddr_t		*tail_blk,
++	int			hsize)
+ {
+ 	struct xlog_rec_header	*thead;
+ 	struct xfs_buf		*bp;
+ 	xfs_daddr_t		first_bad;
+-	int			count;
+ 	int			error = 0;
+ 	bool			wrapped;
+-	xfs_daddr_t		tmp_head;
++	xfs_daddr_t		tmp_tail;
++	xfs_daddr_t		orig_tail = *tail_blk;
+ 
+ 	bp = xlog_get_bp(log, 1);
+ 	if (!bp)
+ 		return -ENOMEM;
+ 
+ 	/*
+-	 * Seek XLOG_MAX_ICLOGS + 1 records past the current tail record to get
+-	 * a temporary head block that points after the last possible
+-	 * concurrently written record of the tail.
++	 * Make sure the tail points to a record (returns positive count on
++	 * success).
+ 	 */
+-	count = xlog_seek_logrec_hdr(log, head_blk, tail_blk,
+-				     XLOG_MAX_ICLOGS + 1, bp, &tmp_head, &thead,
+-				     &wrapped);
+-	if (count < 0) {
+-		error = count;
++	error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, bp,
++			&tmp_tail, &thead, &wrapped);
++	if (error < 0)
+ 		goto out;
+-	}
+-
+-	/*
+-	 * If the call above didn't find XLOG_MAX_ICLOGS + 1 records, we ran
+-	 * into the actual log head. tmp_head points to the start of the record
+-	 * so update it to the actual head block.
+-	 */
+-	if (count < XLOG_MAX_ICLOGS + 1)
+-		tmp_head = head_blk;
++	if (*tail_blk != tmp_tail)
++		*tail_blk = tmp_tail;
+ 
+ 	/*
+-	 * We now have a tail and temporary head block that covers at least
+-	 * XLOG_MAX_ICLOGS records from the tail. We need to verify that these
+-	 * records were completely written. Run a CRC verification pass from
+-	 * tail to head and return the result.
++	 * Run a CRC check from the tail to the head. We can't just check
++	 * MAX_ICLOGS records past the tail because the tail may point to stale
++	 * blocks cleared during the search for the head/tail. These blocks are
++	 * overwritten with zero-length records and thus record count is not a
++	 * reliable indicator of the iclog state before a crash.
+ 	 */
+-	error = xlog_do_recovery_pass(log, tmp_head, tail_blk,
++	first_bad = 0;
++	error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
+ 				      XLOG_RECOVER_CRCPASS, &first_bad);
++	while ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
++		int	tail_distance;
++
++		/*
++		 * Is corruption within range of the head? If so, retry from
++		 * the next record. Otherwise return an error.
++		 */
++		tail_distance = xlog_tail_distance(log, head_blk, first_bad);
++		if (tail_distance > BTOBB(XLOG_MAX_ICLOGS * hsize))
++			break;
+ 
++		/* skip to the next record; returns positive count on success */
++		error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2, bp,
++				&tmp_tail, &thead, &wrapped);
++		if (error < 0)
++			goto out;
++
++		*tail_blk = tmp_tail;
++		first_bad = 0;
++		error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
++					      XLOG_RECOVER_CRCPASS, &first_bad);
++	}
++
++	if (!error && *tail_blk != orig_tail)
++		xfs_warn(log->l_mp,
++		"Tail block (0x%llx) overwrite detected. Updated to 0x%llx",
++			 orig_tail, *tail_blk);
+ out:
+ 	xlog_put_bp(bp);
+ 	return error;
+@@ -1143,7 +1188,7 @@ xlog_verify_head(
+ 	 */
+ 	error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
+ 				      XLOG_RECOVER_CRCPASS, &first_bad);
+-	if (error == -EFSBADCRC) {
++	if ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
+ 		/*
+ 		 * We've hit a potential torn write. Reset the error and warn
+ 		 * about it.
+@@ -1183,31 +1228,12 @@ xlog_verify_head(
+ 			ASSERT(0);
+ 			return 0;
+ 		}
+-
+-		/*
+-		 * Now verify the tail based on the updated head. This is
+-		 * required because the torn writes trimmed from the head could
+-		 * have been written over the tail of a previous record. Return
+-		 * any errors since recovery cannot proceed if the tail is
+-		 * corrupt.
+-		 *
+-		 * XXX: This leaves a gap in truly robust protection from torn
+-		 * writes in the log. If the head is behind the tail, the tail
+-		 * pushes forward to create some space and then a crash occurs
+-		 * causing the writes into the previous record's tail region to
+-		 * tear, log recovery isn't able to recover.
+-		 *
+-		 * How likely is this to occur? If possible, can we do something
+-		 * more intelligent here? Is it safe to push the tail forward if
+-		 * we can determine that the tail is within the range of the
+-		 * torn write (e.g., the kernel can only overwrite the tail if
+-		 * it has actually been pushed forward)? Alternatively, could we
+-		 * somehow prevent this condition at runtime?
+-		 */
+-		error = xlog_verify_tail(log, *head_blk, *tail_blk);
+ 	}
++	if (error)
++		return error;
+ 
+-	return error;
++	return xlog_verify_tail(log, *head_blk, tail_blk,
++				be32_to_cpu((*rhead)->h_size));
+ }
+ 
+ /*
+@@ -4801,12 +4827,16 @@ xlog_recover_process_intents(
+ 	int			error = 0;
+ 	struct xfs_ail_cursor	cur;
+ 	struct xfs_ail		*ailp;
++#if defined(DEBUG) || defined(XFS_WARN)
+ 	xfs_lsn_t		last_lsn;
++#endif
+ 
+ 	ailp = log->l_ailp;
+ 	spin_lock(&ailp->xa_lock);
+ 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
++#if defined(DEBUG) || defined(XFS_WARN)
+ 	last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
++#endif
+ 	while (lip != NULL) {
+ 		/*
+ 		 * We're done when we see something other than an intent.
+@@ -5218,7 +5248,7 @@ xlog_do_recovery_pass(
+ 	xfs_daddr_t		*first_bad)	/* out: first bad log rec */
+ {
+ 	xlog_rec_header_t	*rhead;
+-	xfs_daddr_t		blk_no;
++	xfs_daddr_t		blk_no, rblk_no;
+ 	xfs_daddr_t		rhead_blk;
+ 	char			*offset;
+ 	xfs_buf_t		*hbp, *dbp;
+@@ -5231,7 +5261,7 @@ xlog_do_recovery_pass(
+ 	LIST_HEAD		(buffer_list);
+ 
+ 	ASSERT(head_blk != tail_blk);
+-	rhead_blk = 0;
++	blk_no = rhead_blk = tail_blk;
+ 
+ 	for (i = 0; i < XLOG_RHASH_SIZE; i++)
+ 		INIT_HLIST_HEAD(&rhash[i]);
+@@ -5309,7 +5339,6 @@ xlog_do_recovery_pass(
+ 	}
+ 
+ 	memset(rhash, 0, sizeof(rhash));
+-	blk_no = rhead_blk = tail_blk;
+ 	if (tail_blk > head_blk) {
+ 		/*
+ 		 * Perform recovery around the end of the physical log.
+@@ -5371,9 +5400,19 @@ xlog_do_recovery_pass(
+ 			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
+ 			blk_no += hblks;
+ 
+-			/* Read in data for log record */
+-			if (blk_no + bblks <= log->l_logBBsize) {
+-				error = xlog_bread(log, blk_no, bblks, dbp,
++			/*
++			 * Read the log record data in multiple reads if it
++			 * wraps around the end of the log. Note that if the
++			 * header already wrapped, blk_no could point past the
++			 * end of the log. The record data is contiguous in
++			 * that case.
++			 */
++			if (blk_no + bblks <= log->l_logBBsize ||
++			    blk_no >= log->l_logBBsize) {
++				/* mod blk_no in case the header wrapped and
++				 * pushed it beyond the end of the log */
++				rblk_no = do_mod(blk_no, log->l_logBBsize);
++				error = xlog_bread(log, rblk_no, bblks, dbp,
+ 						   &offset);
+ 				if (error)
+ 					goto bread_err2;
+diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
+index 38aaacdbb8b3..c1c4c2ea1014 100644
+--- a/fs/xfs/xfs_super.c
++++ b/fs/xfs/xfs_super.c
+@@ -1220,7 +1220,7 @@ xfs_test_remount_options(
+ 	tmp_mp->m_super = sb;
+ 	error = xfs_parseargs(tmp_mp, options);
+ 	xfs_free_fsname(tmp_mp);
+-	kfree(tmp_mp);
++	kmem_free(tmp_mp);
+ 
+ 	return error;
+ }
+diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
+index bcc3cdf8e1c5..bb0099708827 100644
+--- a/fs/xfs/xfs_trace.h
++++ b/fs/xfs/xfs_trace.h
+@@ -517,7 +517,6 @@ DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size);
+ DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size_ordered);
+ DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size_stale);
+ DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format);
+-DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format_ordered);
+ DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format_stale);
+ DEFINE_BUF_ITEM_EVENT(xfs_buf_item_ordered);
+ DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pin);
+diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
+index 6bdad6f58934..4709823e04b9 100644
+--- a/fs/xfs/xfs_trans.h
++++ b/fs/xfs/xfs_trans.h
+@@ -49,6 +49,7 @@ typedef struct xfs_log_item {
+ 	struct xfs_ail			*li_ailp;	/* ptr to AIL */
+ 	uint				li_type;	/* item type */
+ 	uint				li_flags;	/* misc flags */
++	struct xfs_buf			*li_buf;	/* real buffer pointer */
+ 	struct xfs_log_item		*li_bio_list;	/* buffer item list */
+ 	void				(*li_cb)(struct xfs_buf *,
+ 						 struct xfs_log_item *);
+@@ -64,11 +65,13 @@ typedef struct xfs_log_item {
+ } xfs_log_item_t;
+ 
+ #define	XFS_LI_IN_AIL	0x1
+-#define XFS_LI_ABORTED	0x2
++#define	XFS_LI_ABORTED	0x2
++#define	XFS_LI_FAILED	0x4
+ 
+ #define XFS_LI_FLAGS \
+ 	{ XFS_LI_IN_AIL,	"IN_AIL" }, \
+-	{ XFS_LI_ABORTED,	"ABORTED" }
++	{ XFS_LI_ABORTED,	"ABORTED" }, \
++	{ XFS_LI_FAILED,	"FAILED" }
+ 
+ struct xfs_item_ops {
+ 	void (*iop_size)(xfs_log_item_t *, int *, int *);
+@@ -79,6 +82,7 @@ struct xfs_item_ops {
+ 	void (*iop_unlock)(xfs_log_item_t *);
+ 	xfs_lsn_t (*iop_committed)(xfs_log_item_t *, xfs_lsn_t);
+ 	void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t);
++	void (*iop_error)(xfs_log_item_t *, xfs_buf_t *);
+ };
+ 
+ void	xfs_log_item_init(struct xfs_mount *mp, struct xfs_log_item *item,
+@@ -208,12 +212,14 @@ void		xfs_trans_bhold_release(xfs_trans_t *, struct xfs_buf *);
+ void		xfs_trans_binval(xfs_trans_t *, struct xfs_buf *);
+ void		xfs_trans_inode_buf(xfs_trans_t *, struct xfs_buf *);
+ void		xfs_trans_stale_inode_buf(xfs_trans_t *, struct xfs_buf *);
+-void		xfs_trans_ordered_buf(xfs_trans_t *, struct xfs_buf *);
++bool		xfs_trans_ordered_buf(xfs_trans_t *, struct xfs_buf *);
+ void		xfs_trans_dquot_buf(xfs_trans_t *, struct xfs_buf *, uint);
+ void		xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *);
+ void		xfs_trans_ichgtime(struct xfs_trans *, struct xfs_inode *, int);
+ void		xfs_trans_ijoin(struct xfs_trans *, struct xfs_inode *, uint);
+-void		xfs_trans_log_buf(xfs_trans_t *, struct xfs_buf *, uint, uint);
++void		xfs_trans_log_buf(struct xfs_trans *, struct xfs_buf *, uint,
++				  uint);
++void		xfs_trans_dirty_buf(struct xfs_trans *, struct xfs_buf *);
+ void		xfs_trans_log_inode(xfs_trans_t *, struct xfs_inode *, uint);
+ 
+ void		xfs_extent_free_init_defer_op(void);
+diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
+index 9056c0f34a3c..70f5ab017323 100644
+--- a/fs/xfs/xfs_trans_ail.c
++++ b/fs/xfs/xfs_trans_ail.c
+@@ -687,12 +687,13 @@ xfs_trans_ail_update_bulk(
+ bool
+ xfs_ail_delete_one(
+ 	struct xfs_ail		*ailp,
+-	struct xfs_log_item 	*lip)
++	struct xfs_log_item	*lip)
+ {
+ 	struct xfs_log_item	*mlip = xfs_ail_min(ailp);
+ 
+ 	trace_xfs_ail_delete(lip, mlip->li_lsn, lip->li_lsn);
+ 	xfs_ail_delete(ailp, lip);
++	xfs_clear_li_failed(lip);
+ 	lip->li_flags &= ~XFS_LI_IN_AIL;
+ 	lip->li_lsn = 0;
+ 
+diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
+index 86987d823d76..3ba7a96a8abd 100644
+--- a/fs/xfs/xfs_trans_buf.c
++++ b/fs/xfs/xfs_trans_buf.c
+@@ -435,7 +435,7 @@ xfs_trans_brelse(xfs_trans_t	*tp,
+ 	if (XFS_FORCED_SHUTDOWN(tp->t_mountp) && freed) {
+ 		xfs_trans_ail_remove(&bip->bli_item, SHUTDOWN_LOG_IO_ERROR);
+ 		xfs_buf_item_relse(bp);
+-	} else if (!xfs_buf_item_dirty(bip)) {
++	} else if (!(bip->bli_flags & XFS_BLI_DIRTY)) {
+ /***
+ 		ASSERT(bp->b_pincount == 0);
+ ***/
+@@ -493,25 +493,17 @@ xfs_trans_bhold_release(xfs_trans_t	*tp,
+ }
+ 
+ /*
+- * This is called to mark bytes first through last inclusive of the given
+- * buffer as needing to be logged when the transaction is committed.
+- * The buffer must already be associated with the given transaction.
+- *
+- * First and last are numbers relative to the beginning of this buffer,
+- * so the first byte in the buffer is numbered 0 regardless of the
+- * value of b_blkno.
++ * Mark a buffer dirty in the transaction.
+  */
+ void
+-xfs_trans_log_buf(xfs_trans_t	*tp,
+-		  xfs_buf_t	*bp,
+-		  uint		first,
+-		  uint		last)
++xfs_trans_dirty_buf(
++	struct xfs_trans	*tp,
++	struct xfs_buf		*bp)
+ {
+-	xfs_buf_log_item_t	*bip = bp->b_fspriv;
++	struct xfs_buf_log_item	*bip = bp->b_fspriv;
+ 
+ 	ASSERT(bp->b_transp == tp);
+ 	ASSERT(bip != NULL);
+-	ASSERT(first <= last && last < BBTOB(bp->b_length));
+ 	ASSERT(bp->b_iodone == NULL ||
+ 	       bp->b_iodone == xfs_buf_iodone_callbacks);
+ 
+@@ -531,8 +523,6 @@ xfs_trans_log_buf(xfs_trans_t	*tp,
+ 	bp->b_iodone = xfs_buf_iodone_callbacks;
+ 	bip->bli_item.li_cb = xfs_buf_iodone;
+ 
+-	trace_xfs_trans_log_buf(bip);
+-
+ 	/*
+ 	 * If we invalidated the buffer within this transaction, then
+ 	 * cancel the invalidation now that we're dirtying the buffer
+@@ -545,17 +535,37 @@ xfs_trans_log_buf(xfs_trans_t	*tp,
+ 		bp->b_flags &= ~XBF_STALE;
+ 		bip->__bli_format.blf_flags &= ~XFS_BLF_CANCEL;
+ 	}
++	bip->bli_flags |= XFS_BLI_DIRTY | XFS_BLI_LOGGED;
+ 
+ 	tp->t_flags |= XFS_TRANS_DIRTY;
+ 	bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
++}
+ 
+-	/*
+-	 * If we have an ordered buffer we are not logging any dirty range but
+-	 * it still needs to be marked dirty and that it has been logged.
+-	 */
+-	bip->bli_flags |= XFS_BLI_DIRTY | XFS_BLI_LOGGED;
+-	if (!(bip->bli_flags & XFS_BLI_ORDERED))
+-		xfs_buf_item_log(bip, first, last);
++/*
++ * This is called to mark bytes first through last inclusive of the given
++ * buffer as needing to be logged when the transaction is committed.
++ * The buffer must already be associated with the given transaction.
++ *
++ * First and last are numbers relative to the beginning of this buffer,
++ * so the first byte in the buffer is numbered 0 regardless of the
++ * value of b_blkno.
++ */
++void
++xfs_trans_log_buf(
++	struct xfs_trans	*tp,
++	struct xfs_buf		*bp,
++	uint			first,
++	uint			last)
++{
++	struct xfs_buf_log_item	*bip = bp->b_fspriv;
++
++	ASSERT(first <= last && last < BBTOB(bp->b_length));
++	ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED));
++
++	xfs_trans_dirty_buf(tp, bp);
++
++	trace_xfs_trans_log_buf(bip);
++	xfs_buf_item_log(bip, first, last);
+ }
+ 
+ 
+@@ -708,14 +718,13 @@ xfs_trans_inode_alloc_buf(
+ }
+ 
+ /*
+- * Mark the buffer as ordered for this transaction. This means
+- * that the contents of the buffer are not recorded in the transaction
+- * but it is tracked in the AIL as though it was. This allows us
+- * to record logical changes in transactions rather than the physical
+- * changes we make to the buffer without changing writeback ordering
+- * constraints of metadata buffers.
++ * Mark the buffer as ordered for this transaction. This means that the contents
++ * of the buffer are not recorded in the transaction but it is tracked in the
++ * AIL as though it was. This allows us to record logical changes in
++ * transactions rather than the physical changes we make to the buffer without
++ * changing writeback ordering constraints of metadata buffers.
+  */
+-void
++bool
+ xfs_trans_ordered_buf(
+ 	struct xfs_trans	*tp,
+ 	struct xfs_buf		*bp)
+@@ -726,8 +735,18 @@ xfs_trans_ordered_buf(
+ 	ASSERT(bip != NULL);
+ 	ASSERT(atomic_read(&bip->bli_refcount) > 0);
+ 
++	if (xfs_buf_item_dirty_format(bip))
++		return false;
++
+ 	bip->bli_flags |= XFS_BLI_ORDERED;
+ 	trace_xfs_buf_item_ordered(bip);
++
++	/*
++	 * We don't log a dirty range of an ordered buffer but it still needs
++	 * to be marked dirty and that it has been logged.
++	 */
++	xfs_trans_dirty_buf(tp, bp);
++	return true;
+ }
+ 
+ /*
+diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
+index d91706c56c63..b317a3644c00 100644
+--- a/fs/xfs/xfs_trans_priv.h
++++ b/fs/xfs/xfs_trans_priv.h
+@@ -164,4 +164,35 @@ xfs_trans_ail_copy_lsn(
+ 	*dst = *src;
+ }
+ #endif
++
++static inline void
++xfs_clear_li_failed(
++	struct xfs_log_item	*lip)
++{
++	struct xfs_buf	*bp = lip->li_buf;
++
++	ASSERT(lip->li_flags & XFS_LI_IN_AIL);
++	lockdep_assert_held(&lip->li_ailp->xa_lock);
++
++	if (lip->li_flags & XFS_LI_FAILED) {
++		lip->li_flags &= ~XFS_LI_FAILED;
++		lip->li_buf = NULL;
++		xfs_buf_rele(bp);
++	}
++}
++
++static inline void
++xfs_set_li_failed(
++	struct xfs_log_item	*lip,
++	struct xfs_buf		*bp)
++{
++	lockdep_assert_held(&lip->li_ailp->xa_lock);
++
++	if (!(lip->li_flags & XFS_LI_FAILED)) {
++		xfs_buf_hold(bp);
++		lip->li_flags |= XFS_LI_FAILED;
++		lip->li_buf = bp;
++	}
++}
++
+ #endif	/* __XFS_TRANS_PRIV_H__ */
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index cbfe127bccf8..d0c0ca8ea8c1 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2831,6 +2831,7 @@ static inline void lockdep_annotate_inode_mutex_key(struct inode *inode) { };
+ #endif
+ extern void unlock_new_inode(struct inode *);
+ extern unsigned int get_next_ino(void);
++extern void evict_inodes(struct super_block *sb);
+ 
+ extern void __iget(struct inode * inode);
+ extern void iget_failed(struct inode *);
+diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
+index e030a68ead7e..25438b2b6f22 100644
+--- a/include/linux/mm_inline.h
++++ b/include/linux/mm_inline.h
+@@ -126,4 +126,10 @@ static __always_inline enum lru_list page_lru(struct page *page)
+ 
+ #define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
+ 
++#ifdef arch_unmap_kpfn
++extern void arch_unmap_kpfn(unsigned long pfn);
++#else
++static __always_inline void arch_unmap_kpfn(unsigned long pfn) { }
++#endif
++
+ #endif
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index d67a8182e5eb..63df75ae70ee 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -885,7 +885,7 @@ void kfree_skb(struct sk_buff *skb);
+ void kfree_skb_list(struct sk_buff *segs);
+ void skb_tx_error(struct sk_buff *skb);
+ void consume_skb(struct sk_buff *skb);
+-void consume_stateless_skb(struct sk_buff *skb);
++void __consume_stateless_skb(struct sk_buff *skb);
+ void  __kfree_skb(struct sk_buff *skb);
+ extern struct kmem_cache *skbuff_head_cache;
+ 
+diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
+index 6fdcd2427776..fc59e0775e00 100644
+--- a/include/net/inet_frag.h
++++ b/include/net/inet_frag.h
+@@ -1,14 +1,9 @@
+ #ifndef __NET_FRAG_H__
+ #define __NET_FRAG_H__
+ 
+-#include <linux/percpu_counter.h>
+-
+ struct netns_frags {
+-	/* The percpu_counter "mem" need to be cacheline aligned.
+-	 *  mem.count must not share cacheline with other writers
+-	 */
+-	struct percpu_counter   mem ____cacheline_aligned_in_smp;
+-
++	/* Keep atomic mem on separate cachelines in structs that include it */
++	atomic_t		mem ____cacheline_aligned_in_smp;
+ 	/* sysctls */
+ 	int			timeout;
+ 	int			high_thresh;
+@@ -108,15 +103,10 @@ struct inet_frags {
+ int inet_frags_init(struct inet_frags *);
+ void inet_frags_fini(struct inet_frags *);
+ 
+-static inline int inet_frags_init_net(struct netns_frags *nf)
+-{
+-	return percpu_counter_init(&nf->mem, 0, GFP_KERNEL);
+-}
+-static inline void inet_frags_uninit_net(struct netns_frags *nf)
++static inline void inet_frags_init_net(struct netns_frags *nf)
+ {
+-	percpu_counter_destroy(&nf->mem);
++	atomic_set(&nf->mem, 0);
+ }
+-
+ void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
+ 
+ void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
+@@ -140,31 +130,24 @@ static inline bool inet_frag_evicting(struct inet_frag_queue *q)
+ 
+ /* Memory Tracking Functions. */
+ 
+-/* The default percpu_counter batch size is not big enough to scale to
+- * fragmentation mem acct sizes.
+- * The mem size of a 64K fragment is approx:
+- *  (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
+- */
+-static unsigned int frag_percpu_counter_batch = 130000;
+-
+ static inline int frag_mem_limit(struct netns_frags *nf)
+ {
+-	return percpu_counter_read(&nf->mem);
++	return atomic_read(&nf->mem);
+ }
+ 
+ static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
+ {
+-	percpu_counter_add_batch(&nf->mem, -i, frag_percpu_counter_batch);
++	atomic_sub(i, &nf->mem);
+ }
+ 
+ static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
+ {
+-	percpu_counter_add_batch(&nf->mem, i, frag_percpu_counter_batch);
++	atomic_add(i, &nf->mem);
+ }
+ 
+-static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
++static inline int sum_frag_mem_limit(struct netns_frags *nf)
+ {
+-	return percpu_counter_sum_positive(&nf->mem);
++	return atomic_read(&nf->mem);
+ }
+ 
+ /* RFC 3168 support :
+diff --git a/lib/idr.c b/lib/idr.c
+index b13682bb0a1c..20c2779e8d12 100644
+--- a/lib/idr.c
++++ b/lib/idr.c
+@@ -154,7 +154,7 @@ void *idr_replace(struct idr *idr, void *ptr, int id)
+ 	void __rcu **slot = NULL;
+ 	void *entry;
+ 
+-	if (WARN_ON_ONCE(id < 0))
++	if (id < 0)
+ 		return ERR_PTR(-EINVAL);
+ 	if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
+ 		return ERR_PTR(-EINVAL);
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 1cd3b3569af8..88366626c0b7 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1146,6 +1146,8 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
+ 		return 0;
+ 	}
+ 
++	arch_unmap_kpfn(pfn);
++
+ 	orig_head = hpage = compound_head(p);
+ 	num_poisoned_pages_inc();
+ 
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index e07556606284..72eb23d2426f 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -753,14 +753,11 @@ EXPORT_SYMBOL(consume_skb);
+  *	consume_stateless_skb - free an skbuff, assuming it is stateless
+  *	@skb: buffer to free
+  *
+- *	Works like consume_skb(), but this variant assumes that all the head
+- *	states have been already dropped.
++ *	Alike consume_skb(), but this variant assumes that this is the last
++ *	skb reference and all the head states have been already dropped
+  */
+-void consume_stateless_skb(struct sk_buff *skb)
++void __consume_stateless_skb(struct sk_buff *skb)
+ {
+-	if (!skb_unref(skb))
+-		return;
+-
+ 	trace_consume_skb(skb);
+ 	if (likely(skb->head))
+ 		skb_release_data(skb);
+diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
+index 30d875dff6b5..f85b08baff16 100644
+--- a/net/ieee802154/6lowpan/reassembly.c
++++ b/net/ieee802154/6lowpan/reassembly.c
+@@ -580,19 +580,14 @@ static int __net_init lowpan_frags_init_net(struct net *net)
+ {
+ 	struct netns_ieee802154_lowpan *ieee802154_lowpan =
+ 		net_ieee802154_lowpan(net);
+-	int res;
+ 
+ 	ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
+ 	ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
+ 	ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
+ 
+-	res = inet_frags_init_net(&ieee802154_lowpan->frags);
+-	if (res)
+-		return res;
+-	res = lowpan_frags_ns_sysctl_register(net);
+-	if (res)
+-		inet_frags_uninit_net(&ieee802154_lowpan->frags);
+-	return res;
++	inet_frags_init_net(&ieee802154_lowpan->frags);
++
++	return lowpan_frags_ns_sysctl_register(net);
+ }
+ 
+ static void __net_exit lowpan_frags_exit_net(struct net *net)
+diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
+index 96e95e83cc61..af74d0433453 100644
+--- a/net/ipv4/inet_fragment.c
++++ b/net/ipv4/inet_fragment.c
+@@ -234,10 +234,8 @@ void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
+ 	cond_resched();
+ 
+ 	if (read_seqretry(&f->rnd_seqlock, seq) ||
+-	    percpu_counter_sum(&nf->mem))
++	    sum_frag_mem_limit(nf))
+ 		goto evict_again;
+-
+-	percpu_counter_destroy(&nf->mem);
+ }
+ EXPORT_SYMBOL(inet_frags_exit_net);
+ 
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index 9a8cfac503dc..46408c220d9d 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -844,8 +844,6 @@ static void __init ip4_frags_ctl_register(void)
+ 
+ static int __net_init ipv4_frags_init_net(struct net *net)
+ {
+-	int res;
+-
+ 	/* Fragment cache limits.
+ 	 *
+ 	 * The fragment memory accounting code, (tries to) account for
+@@ -871,13 +869,9 @@ static int __net_init ipv4_frags_init_net(struct net *net)
+ 
+ 	net->ipv4.frags.max_dist = 64;
+ 
+-	res = inet_frags_init_net(&net->ipv4.frags);
+-	if (res)
+-		return res;
+-	res = ip4_frags_ns_ctl_register(net);
+-	if (res)
+-		inet_frags_uninit_net(&net->ipv4.frags);
+-	return res;
++	inet_frags_init_net(&net->ipv4.frags);
++
++	return ip4_frags_ns_ctl_register(net);
+ }
+ 
+ static void __net_exit ipv4_frags_exit_net(struct net *net)
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 129d1a3616f8..e1856bfa753d 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -618,8 +618,8 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, u8 proto)
+ 		ip_rt_put(rt);
+ 		goto tx_dropped;
+ 	}
+-	iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, key->tos,
+-		      key->ttl, df, !net_eq(tunnel->net, dev_net(dev)));
++	iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, tos, ttl,
++		      df, !net_eq(tunnel->net, dev_net(dev)));
+ 	return;
+ tx_error:
+ 	dev->stats.tx_errors++;
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index e9252c7df809..21022db7a2a6 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1722,9 +1722,9 @@ int tcp_v4_rcv(struct sk_buff *skb)
+ 		 */
+ 		sock_hold(sk);
+ 		refcounted = true;
+-		if (tcp_filter(sk, skb))
+-			goto discard_and_relse;
+-		nsk = tcp_check_req(sk, skb, req, false);
++		nsk = NULL;
++		if (!tcp_filter(sk, skb))
++			nsk = tcp_check_req(sk, skb, req, false);
+ 		if (!nsk) {
+ 			reqsk_put(req);
+ 			goto discard_and_relse;
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 62344804baae..979e4d8526ba 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1386,12 +1386,15 @@ void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
+ 		unlock_sock_fast(sk, slow);
+ 	}
+ 
++	if (!skb_unref(skb))
++		return;
++
+ 	/* In the more common cases we cleared the head states previously,
+ 	 * see __udp_queue_rcv_skb().
+ 	 */
+ 	if (unlikely(udp_skb_has_head_state(skb)))
+ 		skb_release_head_state(skb);
+-	consume_stateless_skb(skb);
++	__consume_stateless_skb(skb);
+ }
+ EXPORT_SYMBOL_GPL(skb_consume_udp);
+ 
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index e1c85bb4eac0..1792bbfd80e1 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -198,6 +198,12 @@ static void rt6_release(struct rt6_info *rt)
+ 	}
+ }
+ 
++static void fib6_free_table(struct fib6_table *table)
++{
++	inetpeer_invalidate_tree(&table->tb6_peers);
++	kfree(table);
++}
++
+ static void fib6_link_table(struct net *net, struct fib6_table *tb)
+ {
+ 	unsigned int h;
+@@ -1915,15 +1921,22 @@ static int __net_init fib6_net_init(struct net *net)
+ 
+ static void fib6_net_exit(struct net *net)
+ {
++	unsigned int i;
++
+ 	rt6_ifdown(net, NULL);
+ 	del_timer_sync(&net->ipv6.ip6_fib_timer);
+ 
+-#ifdef CONFIG_IPV6_MULTIPLE_TABLES
+-	inetpeer_invalidate_tree(&net->ipv6.fib6_local_tbl->tb6_peers);
+-	kfree(net->ipv6.fib6_local_tbl);
+-#endif
+-	inetpeer_invalidate_tree(&net->ipv6.fib6_main_tbl->tb6_peers);
+-	kfree(net->ipv6.fib6_main_tbl);
++	for (i = 0; i < FIB6_TABLE_HASHSZ; i++) {
++		struct hlist_head *head = &net->ipv6.fib_table_hash[i];
++		struct hlist_node *tmp;
++		struct fib6_table *tb;
++
++		hlist_for_each_entry_safe(tb, tmp, head, tb6_hlist) {
++			hlist_del(&tb->tb6_hlist);
++			fib6_free_table(tb);
++		}
++	}
++
+ 	kfree(net->ipv6.fib_table_hash);
+ 	kfree(net->ipv6.rt6_stats);
+ }
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 67ff2aaf5dcb..b7a72d409334 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -432,7 +432,9 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ 		}
+ 		break;
+ 	case ICMPV6_PKT_TOOBIG:
+-		mtu = be32_to_cpu(info) - offset;
++		mtu = be32_to_cpu(info) - offset - t->tun_hlen;
++		if (t->dev->type == ARPHRD_ETHER)
++			mtu -= ETH_HLEN;
+ 		if (mtu < IPV6_MIN_MTU)
+ 			mtu = IPV6_MIN_MTU;
+ 		t->dev->mtu = mtu;
+diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
+index 986d4ca38832..b263bf3a19f7 100644
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -622,18 +622,12 @@ EXPORT_SYMBOL_GPL(nf_ct_frag6_gather);
+ 
+ static int nf_ct_net_init(struct net *net)
+ {
+-	int res;
+-
+ 	net->nf_frag.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
+ 	net->nf_frag.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
+ 	net->nf_frag.frags.timeout = IPV6_FRAG_TIMEOUT;
+-	res = inet_frags_init_net(&net->nf_frag.frags);
+-	if (res)
+-		return res;
+-	res = nf_ct_frag6_sysctl_register(net);
+-	if (res)
+-		inet_frags_uninit_net(&net->nf_frag.frags);
+-	return res;
++	inet_frags_init_net(&net->nf_frag.frags);
++
++	return nf_ct_frag6_sysctl_register(net);
+ }
+ 
+ static void nf_ct_net_exit(struct net *net)
+diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
+index e1da5b888cc4..846012eae526 100644
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -714,19 +714,13 @@ static void ip6_frags_sysctl_unregister(void)
+ 
+ static int __net_init ipv6_frags_init_net(struct net *net)
+ {
+-	int res;
+-
+ 	net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
+ 	net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
+ 	net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;
+ 
+-	res = inet_frags_init_net(&net->ipv6.frags);
+-	if (res)
+-		return res;
+-	res = ip6_frags_ns_sysctl_register(net);
+-	if (res)
+-		inet_frags_uninit_net(&net->ipv6.frags);
+-	return res;
++	inet_frags_init_net(&net->ipv6.frags);
++
++	return ip6_frags_ns_sysctl_register(net);
+ }
+ 
+ static void __net_exit ipv6_frags_exit_net(struct net *net)
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 206210125fd7..660b9b2a8a25 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1456,9 +1456,9 @@ static int tcp_v6_rcv(struct sk_buff *skb)
+ 		}
+ 		sock_hold(sk);
+ 		refcounted = true;
+-		if (tcp_filter(sk, skb))
+-			goto discard_and_relse;
+-		nsk = tcp_check_req(sk, skb, req, false);
++		nsk = NULL;
++		if (!tcp_filter(sk, skb))
++			nsk = tcp_check_req(sk, skb, req, false);
+ 		if (!nsk) {
+ 			reqsk_put(req);
+ 			goto discard_and_relse;
+diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
+index 0225d62a869f..a71be33f3afe 100644
+--- a/net/sctp/ulpqueue.c
++++ b/net/sctp/ulpqueue.c
+@@ -265,7 +265,8 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
+ 		sctp_ulpq_clear_pd(ulpq);
+ 
+ 	if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
+-		sp->data_ready_signalled = 1;
++		if (!sock_owned_by_user(sk))
++			sp->data_ready_signalled = 1;
+ 		sk->sk_data_ready(sk);
+ 	}
+ 	return 1;


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.13 commit in: /
@ 2017-09-13 22:32 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2017-09-13 22:32 UTC (permalink / raw
  To: gentoo-commits

commit:     1551942fe422bb69beee4c3c557ec040823de693
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 13 22:32:05 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Sep 13 22:32:05 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1551942f

Remove redundant patch

 0000_README                             |   4 -
 2400_BT-check-L2CAP-buffer-length.patch | 357 --------------------------------
 2 files changed, 361 deletions(-)

diff --git a/0000_README b/0000_README
index 9dfe3ef..cd98c21 100644
--- a/0000_README
+++ b/0000_README
@@ -63,10 +63,6 @@ Patch:  2300_enable-poweroff-on-Mac-Pro-11.patch
 From:   http://kernel.ubuntu.com/git/ubuntu/ubuntu-xenial.git/patch/drivers/pci/quirks.c?id=5080ff61a438f3dd80b88b423e1a20791d8a774c
 Desc:   Workaround to enable poweroff on Mac Pro 11. See bug #601964.
 
-Patch:  2400_BT-check-L2CAP-buffer-length.patch
-From:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=e860d2c904d1a9f38a24eb44c9f34b8f915a6ea3
-Desc:   Validate the output buffer length for L2CAP config reqs and resps to avoid stack buffer overflowing. CVE-2017-1000251. See bug #630840
-
 Patch:  2600_enable-key-swapping-for-apple-mac.patch
 From:   https://github.com/free5lot/hid-apple-patched
 Desc:   This hid-apple patch enables swapping of the FN and left Control keys and some additional on some apple keyboards. See bug #622902

diff --git a/2400_BT-check-L2CAP-buffer-length.patch b/2400_BT-check-L2CAP-buffer-length.patch
deleted file mode 100644
index c6bfdf7..0000000
--- a/2400_BT-check-L2CAP-buffer-length.patch
+++ /dev/null
@@ -1,357 +0,0 @@
-From e860d2c904d1a9f38a24eb44c9f34b8f915a6ea3 Mon Sep 17 00:00:00 2001
-From: Ben Seri <ben@armis.com>
-Date: Sat, 9 Sep 2017 23:15:59 +0200
-Subject: Bluetooth: Properly check L2CAP config option output buffer length
-
-Validate the output buffer length for L2CAP config requests and responses
-to avoid overflowing the stack buffer used for building the option blocks.
-
-Cc: stable@vger.kernel.org
-Signed-off-by: Ben Seri <ben@armis.com>
-Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
-Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
----
- net/bluetooth/l2cap_core.c | 80 +++++++++++++++++++++++++---------------------
- 1 file changed, 43 insertions(+), 37 deletions(-)
-
-diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
-index 303c779..43ba91c 100644
---- a/net/bluetooth/l2cap_core.c
-+++ b/net/bluetooth/l2cap_core.c
-@@ -58,7 +58,7 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
- 				       u8 code, u8 ident, u16 dlen, void *data);
- static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
- 			   void *data);
--static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
-+static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
- static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
- 
- static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
-@@ -1473,7 +1473,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
- 
- 			set_bit(CONF_REQ_SENT, &chan->conf_state);
- 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
--				       l2cap_build_conf_req(chan, buf), buf);
-+				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
- 			chan->num_conf_req++;
- 		}
- 
-@@ -2987,12 +2987,15 @@ static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
- 	return len;
- }
- 
--static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
-+static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
- {
- 	struct l2cap_conf_opt *opt = *ptr;
- 
- 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
- 
-+	if (size < L2CAP_CONF_OPT_SIZE + len)
-+		return;
-+
- 	opt->type = type;
- 	opt->len  = len;
- 
-@@ -3017,7 +3020,7 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
- 	*ptr += L2CAP_CONF_OPT_SIZE + len;
- }
- 
--static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
-+static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
- {
- 	struct l2cap_conf_efs efs;
- 
-@@ -3045,7 +3048,7 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
- 	}
- 
- 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
--			   (unsigned long) &efs);
-+			   (unsigned long) &efs, size);
- }
- 
- static void l2cap_ack_timeout(struct work_struct *work)
-@@ -3191,11 +3194,12 @@ static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
- 	chan->ack_win = chan->tx_win;
- }
- 
--static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
-+static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
- {
- 	struct l2cap_conf_req *req = data;
- 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
- 	void *ptr = req->data;
-+	void *endptr = data + data_size;
- 	u16 size;
- 
- 	BT_DBG("chan %p", chan);
-@@ -3220,7 +3224,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
- 
- done:
- 	if (chan->imtu != L2CAP_DEFAULT_MTU)
--		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
-+		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
- 
- 	switch (chan->mode) {
- 	case L2CAP_MODE_BASIC:
-@@ -3239,7 +3243,7 @@ done:
- 		rfc.max_pdu_size    = 0;
- 
- 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
--				   (unsigned long) &rfc);
-+				   (unsigned long) &rfc, endptr - ptr);
- 		break;
- 
- 	case L2CAP_MODE_ERTM:
-@@ -3259,21 +3263,21 @@ done:
- 				       L2CAP_DEFAULT_TX_WINDOW);
- 
- 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
--				   (unsigned long) &rfc);
-+				   (unsigned long) &rfc, endptr - ptr);
- 
- 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
--			l2cap_add_opt_efs(&ptr, chan);
-+			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
- 
- 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
- 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
--					   chan->tx_win);
-+					   chan->tx_win, endptr - ptr);
- 
- 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
- 			if (chan->fcs == L2CAP_FCS_NONE ||
- 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
- 				chan->fcs = L2CAP_FCS_NONE;
- 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
--						   chan->fcs);
-+						   chan->fcs, endptr - ptr);
- 			}
- 		break;
- 
-@@ -3291,17 +3295,17 @@ done:
- 		rfc.max_pdu_size = cpu_to_le16(size);
- 
- 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
--				   (unsigned long) &rfc);
-+				   (unsigned long) &rfc, endptr - ptr);
- 
- 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
--			l2cap_add_opt_efs(&ptr, chan);
-+			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
- 
- 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
- 			if (chan->fcs == L2CAP_FCS_NONE ||
- 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
- 				chan->fcs = L2CAP_FCS_NONE;
- 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
--						   chan->fcs);
-+						   chan->fcs, endptr - ptr);
- 			}
- 		break;
- 	}
-@@ -3312,10 +3316,11 @@ done:
- 	return ptr - data;
- }
- 
--static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
-+static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
- {
- 	struct l2cap_conf_rsp *rsp = data;
- 	void *ptr = rsp->data;
-+	void *endptr = data + data_size;
- 	void *req = chan->conf_req;
- 	int len = chan->conf_len;
- 	int type, hint, olen;
-@@ -3417,7 +3422,7 @@ done:
- 			return -ECONNREFUSED;
- 
- 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
--				   (unsigned long) &rfc);
-+				   (unsigned long) &rfc, endptr - ptr);
- 	}
- 
- 	if (result == L2CAP_CONF_SUCCESS) {
-@@ -3430,7 +3435,7 @@ done:
- 			chan->omtu = mtu;
- 			set_bit(CONF_MTU_DONE, &chan->conf_state);
- 		}
--		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
-+		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
- 
- 		if (remote_efs) {
- 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
-@@ -3444,7 +3449,7 @@ done:
- 
- 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
- 						   sizeof(efs),
--						   (unsigned long) &efs);
-+						   (unsigned long) &efs, endptr - ptr);
- 			} else {
- 				/* Send PENDING Conf Rsp */
- 				result = L2CAP_CONF_PENDING;
-@@ -3477,7 +3482,7 @@ done:
- 			set_bit(CONF_MODE_DONE, &chan->conf_state);
- 
- 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
--					   sizeof(rfc), (unsigned long) &rfc);
-+					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
- 
- 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
- 				chan->remote_id = efs.id;
-@@ -3491,7 +3496,7 @@ done:
- 					le32_to_cpu(efs.sdu_itime);
- 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
- 						   sizeof(efs),
--						   (unsigned long) &efs);
-+						   (unsigned long) &efs, endptr - ptr);
- 			}
- 			break;
- 
-@@ -3505,7 +3510,7 @@ done:
- 			set_bit(CONF_MODE_DONE, &chan->conf_state);
- 
- 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
--					   (unsigned long) &rfc);
-+					   (unsigned long) &rfc, endptr - ptr);
- 
- 			break;
- 
-@@ -3527,10 +3532,11 @@ done:
- }
- 
- static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
--				void *data, u16 *result)
-+				void *data, size_t size, u16 *result)
- {
- 	struct l2cap_conf_req *req = data;
- 	void *ptr = req->data;
-+	void *endptr = data + size;
- 	int type, olen;
- 	unsigned long val;
- 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
-@@ -3548,13 +3554,13 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
- 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
- 			} else
- 				chan->imtu = val;
--			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
-+			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
- 			break;
- 
- 		case L2CAP_CONF_FLUSH_TO:
- 			chan->flush_to = val;
- 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
--					   2, chan->flush_to);
-+					   2, chan->flush_to, endptr - ptr);
- 			break;
- 
- 		case L2CAP_CONF_RFC:
-@@ -3568,13 +3574,13 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
- 			chan->fcs = 0;
- 
- 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
--					   sizeof(rfc), (unsigned long) &rfc);
-+					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
- 			break;
- 
- 		case L2CAP_CONF_EWS:
- 			chan->ack_win = min_t(u16, val, chan->ack_win);
- 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
--					   chan->tx_win);
-+					   chan->tx_win, endptr - ptr);
- 			break;
- 
- 		case L2CAP_CONF_EFS:
-@@ -3587,7 +3593,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
- 				return -ECONNREFUSED;
- 
- 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
--					   (unsigned long) &efs);
-+					   (unsigned long) &efs, endptr - ptr);
- 			break;
- 
- 		case L2CAP_CONF_FCS:
-@@ -3692,7 +3698,7 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
- 		return;
- 
- 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
--		       l2cap_build_conf_req(chan, buf), buf);
-+		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
- 	chan->num_conf_req++;
- }
- 
-@@ -3900,7 +3906,7 @@ sendresp:
- 		u8 buf[128];
- 		set_bit(CONF_REQ_SENT, &chan->conf_state);
- 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
--			       l2cap_build_conf_req(chan, buf), buf);
-+			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
- 		chan->num_conf_req++;
- 	}
- 
-@@ -3978,7 +3984,7 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
- 			break;
- 
- 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
--			       l2cap_build_conf_req(chan, req), req);
-+			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
- 		chan->num_conf_req++;
- 		break;
- 
-@@ -4090,7 +4096,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
- 	}
- 
- 	/* Complete config. */
--	len = l2cap_parse_conf_req(chan, rsp);
-+	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
- 	if (len < 0) {
- 		l2cap_send_disconn_req(chan, ECONNRESET);
- 		goto unlock;
-@@ -4124,7 +4130,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
- 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
- 		u8 buf[64];
- 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
--			       l2cap_build_conf_req(chan, buf), buf);
-+			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
- 		chan->num_conf_req++;
- 	}
- 
-@@ -4184,7 +4190,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
- 			char buf[64];
- 
- 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
--						   buf, &result);
-+						   buf, sizeof(buf), &result);
- 			if (len < 0) {
- 				l2cap_send_disconn_req(chan, ECONNRESET);
- 				goto done;
-@@ -4214,7 +4220,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
- 			/* throw out any old stored conf requests */
- 			result = L2CAP_CONF_SUCCESS;
- 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
--						   req, &result);
-+						   req, sizeof(req), &result);
- 			if (len < 0) {
- 				l2cap_send_disconn_req(chan, ECONNRESET);
- 				goto done;
-@@ -4791,7 +4797,7 @@ static void l2cap_do_create(struct l2cap_chan *chan, int result,
- 			set_bit(CONF_REQ_SENT, &chan->conf_state);
- 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
- 				       L2CAP_CONF_REQ,
--				       l2cap_build_conf_req(chan, buf), buf);
-+				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
- 			chan->num_conf_req++;
- 		}
- 	}
-@@ -7465,7 +7471,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
- 				set_bit(CONF_REQ_SENT, &chan->conf_state);
- 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
- 					       L2CAP_CONF_REQ,
--					       l2cap_build_conf_req(chan, buf),
-+					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
- 					       buf);
- 				chan->num_conf_req++;
- 			}
--- 
-cgit v1.1
-


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.13 commit in: /
@ 2017-09-13 22:29 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2017-09-13 22:29 UTC (permalink / raw
  To: gentoo-commits

commit:     be6a2f6493f1092a99b39aa67ea703337e1ad45a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 13 22:29:45 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Sep 13 22:29:45 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=be6a2f64

Linux patch 4.13.2

 0000_README             |    4 +
 1001_linux-4.13.2.patch | 1342 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1346 insertions(+)

diff --git a/0000_README b/0000_README
index 2c91dfe..9dfe3ef 100644
--- a/0000_README
+++ b/0000_README
@@ -47,6 +47,10 @@ Patch:  1000_linux-4.13.1.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.13.1
 
+Patch:  1001_linux-4.13.2.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.13.2
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1001_linux-4.13.2.patch b/1001_linux-4.13.2.patch
new file mode 100644
index 0000000..e79b722
--- /dev/null
+++ b/1001_linux-4.13.2.patch
@@ -0,0 +1,1342 @@
+diff --git a/Documentation/driver-api/firmware/request_firmware.rst b/Documentation/driver-api/firmware/request_firmware.rst
+index 1c2c4967cd43..cc0aea880824 100644
+--- a/Documentation/driver-api/firmware/request_firmware.rst
++++ b/Documentation/driver-api/firmware/request_firmware.rst
+@@ -44,17 +44,6 @@ request_firmware_nowait
+ .. kernel-doc:: drivers/base/firmware_class.c
+    :functions: request_firmware_nowait
+ 
+-Considerations for suspend and resume
+-=====================================
+-
+-During suspend and resume only the built-in firmware and the firmware cache
+-elements of the firmware API can be used. This is managed by fw_pm_notify().
+-
+-fw_pm_notify
+-------------
+-.. kernel-doc:: drivers/base/firmware_class.c
+-   :functions: fw_pm_notify
+-
+ request firmware API expected driver use
+ ========================================
+ 
+diff --git a/Makefile b/Makefile
+index 41a976854cad..8aad6bc50d52 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 13
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
+index ff8b0aa2dfde..42f585379e19 100644
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -315,8 +315,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ 	 * signal first. We do not need to release the mmap_sem because
+ 	 * it would already be released in __lock_page_or_retry in
+ 	 * mm/filemap.c. */
+-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
++	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
++		if (!user_mode(regs))
++			goto no_context;
+ 		return 0;
++	}
+ 
+ 	/*
+ 	 * Major/minor page fault accounting is only done on the
+diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+index 51763d674050..a92ac63addf0 100644
+--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
++++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+@@ -323,6 +323,7 @@
+ 				interrupt-controller;
+ 				reg = <0x1d00000 0x10000>, /* GICD */
+ 				      <0x1d40000 0x40000>; /* GICR */
++				interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ 			};
+ 		};
+ 
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index 9b1dd114956a..56e68dfac974 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -4839,7 +4839,8 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
+ 	 * Note: AMD only (since it supports the PFERR_GUEST_PAGE_MASK used
+ 	 *       in PFERR_NEXT_GUEST_PAGE)
+ 	 */
+-	if (error_code == PFERR_NESTED_GUEST_PAGE) {
++	if (vcpu->arch.mmu.direct_map &&
++		error_code == PFERR_NESTED_GUEST_PAGE) {
+ 		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2));
+ 		return 1;
+ 	}
+diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
+index bfbe1e154128..19b63d20f5d3 100644
+--- a/drivers/base/firmware_class.c
++++ b/drivers/base/firmware_class.c
+@@ -256,38 +256,6 @@ static int fw_cache_piggyback_on_request(const char *name);
+  * guarding for corner cases a global lock should be OK */
+ static DEFINE_MUTEX(fw_lock);
+ 
+-static bool __enable_firmware = false;
+-
+-static void enable_firmware(void)
+-{
+-	mutex_lock(&fw_lock);
+-	__enable_firmware = true;
+-	mutex_unlock(&fw_lock);
+-}
+-
+-static void disable_firmware(void)
+-{
+-	mutex_lock(&fw_lock);
+-	__enable_firmware = false;
+-	mutex_unlock(&fw_lock);
+-}
+-
+-/*
+- * When disabled only the built-in firmware and the firmware cache will be
+- * used to look for firmware.
+- */
+-static bool firmware_enabled(void)
+-{
+-	bool enabled = false;
+-
+-	mutex_lock(&fw_lock);
+-	if (__enable_firmware)
+-		enabled = true;
+-	mutex_unlock(&fw_lock);
+-
+-	return enabled;
+-}
+-
+ static struct firmware_cache fw_cache;
+ 
+ static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
+@@ -1239,12 +1207,6 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
+ 	if (ret <= 0) /* error or already assigned */
+ 		goto out;
+ 
+-	if (!firmware_enabled()) {
+-		WARN(1, "firmware request while host is not available\n");
+-		ret = -EHOSTDOWN;
+-		goto out;
+-	}
+-
+ 	ret = fw_get_filesystem_firmware(device, fw->priv);
+ 	if (ret) {
+ 		if (!(opt_flags & FW_OPT_NO_WARN))
+@@ -1755,62 +1717,6 @@ static void device_uncache_fw_images_delay(unsigned long delay)
+ 			   msecs_to_jiffies(delay));
+ }
+ 
+-/**
+- * fw_pm_notify - notifier for suspend/resume
+- * @notify_block: unused
+- * @mode: mode we are switching to
+- * @unused: unused
+- *
+- * Used to modify the firmware_class state as we move in between states.
+- * The firmware_class implements a firmware cache to enable device driver
+- * to fetch firmware upon resume before the root filesystem is ready. We
+- * disable API calls which do not use the built-in firmware or the firmware
+- * cache when we know these calls will not work.
+- *
+- * The inner logic behind all this is a bit complex so it is worth summarizing
+- * the kernel's own suspend/resume process with context and focus on how this
+- * can impact the firmware API.
+- *
+- * First a review on how we go to suspend::
+- *
+- *	pm_suspend() --> enter_state() -->
+- *	sys_sync()
+- *	suspend_prepare() -->
+- *		__pm_notifier_call_chain(PM_SUSPEND_PREPARE, ...);
+- *		suspend_freeze_processes() -->
+- *			freeze_processes() -->
+- *				__usermodehelper_set_disable_depth(UMH_DISABLED);
+- *				freeze all tasks ...
+- *			freeze_kernel_threads()
+- *	suspend_devices_and_enter() -->
+- *		dpm_suspend_start() -->
+- *				dpm_prepare()
+- *				dpm_suspend()
+- *		suspend_enter()  -->
+- *			platform_suspend_prepare()
+- *			dpm_suspend_late()
+- *			freeze_enter()
+- *			syscore_suspend()
+- *
+- * When we resume we bail out of a loop from suspend_devices_and_enter() and
+- * unwind back out to the caller enter_state() where we were before as follows::
+- *
+- * 	enter_state() -->
+- *	suspend_devices_and_enter() --> (bail from loop)
+- *		dpm_resume_end() -->
+- *			dpm_resume()
+- *			dpm_complete()
+- *	suspend_finish() -->
+- *		suspend_thaw_processes() -->
+- *			thaw_processes() -->
+- *				__usermodehelper_set_disable_depth(UMH_FREEZING);
+- *				thaw_workqueues();
+- *				thaw all processes ...
+- *				usermodehelper_enable();
+- *		pm_notifier_call_chain(PM_POST_SUSPEND);
+- *
+- * fw_pm_notify() works through pm_notifier_call_chain().
+- */
+ static int fw_pm_notify(struct notifier_block *notify_block,
+ 			unsigned long mode, void *unused)
+ {
+@@ -1824,7 +1730,6 @@ static int fw_pm_notify(struct notifier_block *notify_block,
+ 		 */
+ 		kill_pending_fw_fallback_reqs(true);
+ 		device_cache_fw_images();
+-		disable_firmware();
+ 		break;
+ 
+ 	case PM_POST_SUSPEND:
+@@ -1837,7 +1742,6 @@ static int fw_pm_notify(struct notifier_block *notify_block,
+ 		mutex_lock(&fw_lock);
+ 		fw_cache.state = FW_LOADER_NO_CACHE;
+ 		mutex_unlock(&fw_lock);
+-		enable_firmware();
+ 
+ 		device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
+ 		break;
+@@ -1886,7 +1790,6 @@ static void __init fw_cache_init(void)
+ static int fw_shutdown_notify(struct notifier_block *unused1,
+ 			      unsigned long unused2, void *unused3)
+ {
+-	disable_firmware();
+ 	/*
+ 	 * Kill all pending fallback requests to avoid both stalling shutdown,
+ 	 * and avoid a deadlock with the usermode_lock.
+@@ -1902,7 +1805,6 @@ static struct notifier_block fw_shutdown_nb = {
+ 
+ static int __init firmware_class_init(void)
+ {
+-	enable_firmware();
+ 	fw_cache_init();
+ 	register_reboot_notifier(&fw_shutdown_nb);
+ #ifdef CONFIG_FW_LOADER_USER_HELPER
+@@ -1914,7 +1816,6 @@ static int __init firmware_class_init(void)
+ 
+ static void __exit firmware_class_exit(void)
+ {
+-	disable_firmware();
+ #ifdef CONFIG_PM_SLEEP
+ 	unregister_syscore_ops(&fw_syscore_ops);
+ 	unregister_pm_notifier(&fw_cache.pm_notify);
+diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
+index a764d5ca7536..5bedf7bc3d88 100644
+--- a/drivers/mtd/nand/mxc_nand.c
++++ b/drivers/mtd/nand/mxc_nand.c
+@@ -876,6 +876,8 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
+ 	}
+ }
+ 
++#define MXC_V1_ECCBYTES		5
++
+ static int mxc_v1_ooblayout_ecc(struct mtd_info *mtd, int section,
+ 				struct mtd_oob_region *oobregion)
+ {
+@@ -885,7 +887,7 @@ static int mxc_v1_ooblayout_ecc(struct mtd_info *mtd, int section,
+ 		return -ERANGE;
+ 
+ 	oobregion->offset = (section * 16) + 6;
+-	oobregion->length = nand_chip->ecc.bytes;
++	oobregion->length = MXC_V1_ECCBYTES;
+ 
+ 	return 0;
+ }
+@@ -907,8 +909,7 @@ static int mxc_v1_ooblayout_free(struct mtd_info *mtd, int section,
+ 			oobregion->length = 4;
+ 		}
+ 	} else {
+-		oobregion->offset = ((section - 1) * 16) +
+-				    nand_chip->ecc.bytes + 6;
++		oobregion->offset = ((section - 1) * 16) + MXC_V1_ECCBYTES + 6;
+ 		if (section < nand_chip->ecc.steps)
+ 			oobregion->length = (section * 16) + 6 -
+ 					    oobregion->offset;
+diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
+index c6c18b82f8f4..c05cf874cbb8 100644
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -3993,10 +3993,13 @@ static void nand_manufacturer_detect(struct nand_chip *chip)
+ 	 * nand_decode_ext_id() otherwise.
+ 	 */
+ 	if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
+-	    chip->manufacturer.desc->ops->detect)
++	    chip->manufacturer.desc->ops->detect) {
++		/* The 3rd id byte holds MLC / multichip data */
++		chip->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
+ 		chip->manufacturer.desc->ops->detect(chip);
+-	else
++	} else {
+ 		nand_decode_ext_id(chip);
++	}
+ }
+ 
+ /*
+diff --git a/drivers/mtd/nand/nand_hynix.c b/drivers/mtd/nand/nand_hynix.c
+index b12dc7325378..bd9a6e343848 100644
+--- a/drivers/mtd/nand/nand_hynix.c
++++ b/drivers/mtd/nand/nand_hynix.c
+@@ -477,7 +477,7 @@ static void hynix_nand_extract_ecc_requirements(struct nand_chip *chip,
+ 		 * The ECC requirements field meaning depends on the
+ 		 * NAND technology.
+ 		 */
+-		u8 nand_tech = chip->id.data[5] & 0x3;
++		u8 nand_tech = chip->id.data[5] & 0x7;
+ 
+ 		if (nand_tech < 3) {
+ 			/* > 26nm, reference: H27UBG8T2A datasheet */
+@@ -533,7 +533,7 @@ static void hynix_nand_extract_scrambling_requirements(struct nand_chip *chip,
+ 		if (nand_tech > 0)
+ 			chip->options |= NAND_NEED_SCRAMBLING;
+ 	} else {
+-		nand_tech = chip->id.data[5] & 0x3;
++		nand_tech = chip->id.data[5] & 0x7;
+ 
+ 		/* < 32nm */
+ 		if (nand_tech > 2)
+diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c
+index 88af7145a51a..8928500b5bde 100644
+--- a/drivers/mtd/nand/qcom_nandc.c
++++ b/drivers/mtd/nand/qcom_nandc.c
+@@ -109,7 +109,11 @@
+ #define	READ_ADDR			0
+ 
+ /* NAND_DEV_CMD_VLD bits */
+-#define	READ_START_VLD			0
++#define	READ_START_VLD			BIT(0)
++#define	READ_STOP_VLD			BIT(1)
++#define	WRITE_START_VLD			BIT(2)
++#define	ERASE_START_VLD			BIT(3)
++#define	SEQ_READ_START_VLD		BIT(4)
+ 
+ /* NAND_EBI2_ECC_BUF_CFG bits */
+ #define	NUM_STEPS			0
+@@ -148,6 +152,10 @@
+ #define	FETCH_ID			0xb
+ #define	RESET_DEVICE			0xd
+ 
++/* Default Value for NAND_DEV_CMD_VLD */
++#define NAND_DEV_CMD_VLD_VAL		(READ_START_VLD | WRITE_START_VLD | \
++					 ERASE_START_VLD | SEQ_READ_START_VLD)
++
+ /*
+  * the NAND controller performs reads/writes with ECC in 516 byte chunks.
+  * the driver calls the chunks 'step' or 'codeword' interchangeably
+@@ -672,8 +680,7 @@ static int nandc_param(struct qcom_nand_host *host)
+ 
+ 	/* configure CMD1 and VLD for ONFI param probing */
+ 	nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
+-		      (nandc->vld & ~(1 << READ_START_VLD))
+-		      | 0 << READ_START_VLD);
++		      (nandc->vld & ~READ_START_VLD));
+ 	nandc_set_reg(nandc, NAND_DEV_CMD1,
+ 		      (nandc->cmd1 & ~(0xFF << READ_ADDR))
+ 		      | NAND_CMD_PARAM << READ_ADDR);
+@@ -1893,7 +1900,7 @@ static int qcom_nand_host_setup(struct qcom_nand_host *host)
+ 				| wide_bus << WIDE_FLASH
+ 				| 1 << DEV0_CFG1_ECC_DISABLE;
+ 
+-	host->ecc_bch_cfg = host->bch_enabled << ECC_CFG_ECC_DISABLE
++	host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
+ 				| 0 << ECC_SW_RESET
+ 				| host->cw_data << ECC_NUM_DATA_BYTES
+ 				| 1 << ECC_FORCE_CLK_OPEN
+@@ -1972,13 +1979,14 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
+ {
+ 	/* kill onenand */
+ 	nandc_write(nandc, SFLASHC_BURST_CFG, 0);
++	nandc_write(nandc, NAND_DEV_CMD_VLD, NAND_DEV_CMD_VLD_VAL);
+ 
+ 	/* enable ADM DMA */
+ 	nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
+ 
+ 	/* save the original values of these registers */
+ 	nandc->cmd1 = nandc_read(nandc, NAND_DEV_CMD1);
+-	nandc->vld = nandc_read(nandc, NAND_DEV_CMD_VLD);
++	nandc->vld = NAND_DEV_CMD_VLD_VAL;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+index f1b60740e020..53ae30259989 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+@@ -159,7 +159,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
+ 
+ 	brcmf_feat_firmware_capabilities(ifp);
+ 	memset(&gscan_cfg, 0, sizeof(gscan_cfg));
+-	if (drvr->bus_if->chip != BRCM_CC_43430_CHIP_ID)
++	if (drvr->bus_if->chip != BRCM_CC_43430_CHIP_ID &&
++	    drvr->bus_if->chip != BRCM_CC_4345_CHIP_ID)
+ 		brcmf_feat_iovar_data_set(ifp, BRCMF_FEAT_GSCAN,
+ 					  "pfn_gscan_cfg",
+ 					  &gscan_cfg, sizeof(gscan_cfg));
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+index 0b75def39c6c..d2c289446c00 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
++++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+@@ -3702,7 +3702,10 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
+ 	if (rt2x00_rt(rt2x00dev, RT3572))
+ 		rt2800_rfcsr_write(rt2x00dev, 8, 0);
+ 
+-	tx_pin = rt2800_register_read(rt2x00dev, TX_PIN_CFG);
++	if (rt2x00_rt(rt2x00dev, RT6352))
++		tx_pin = rt2800_register_read(rt2x00dev, TX_PIN_CFG);
++	else
++		tx_pin = 0;
+ 
+ 	switch (rt2x00dev->default_ant.tx_chain_num) {
+ 	case 3:
+diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
+index 31965f0ef69d..e8f07573aed9 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
++++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
+@@ -1183,7 +1183,10 @@ static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist,
+ 		}
+ 
+ 		/* fixed internal switch S1->WiFi, S0->BT */
+-		btcoexist->btc_write_4byte(btcoexist, 0x948, 0x0);
++		if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
++			btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
++		else
++			btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);
+ 
+ 		switch (antpos_type) {
+ 		case BTC_ANT_WIFI_AT_MAIN:
+diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+index e6024b013ca5..00eea3440290 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
++++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+@@ -173,6 +173,16 @@ static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist)
+ 
+ u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv)
+ {
++	struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params;
++
++	/* override ant_num / ant_path */
++	if (mod_params->ant_sel) {
++		rtlpriv->btcoexist.btc_info.ant_num =
++			(mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1);
++
++		rtlpriv->btcoexist.btc_info.single_ant_path =
++			(mod_params->ant_sel == 1 ? 0 : 1);
++	}
+ 	return rtlpriv->btcoexist.btc_info.single_ant_path;
+ }
+ 
+@@ -183,6 +193,7 @@ u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv)
+ 
+ u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
+ {
++	struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params;
+ 	u8 num;
+ 
+ 	if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2)
+@@ -190,6 +201,10 @@ u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
+ 	else
+ 		num = 1;
+ 
++	/* override ant_num / ant_path */
++	if (mod_params->ant_sel)
++		num = (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1) + 1;
++
+ 	return num;
+ }
+ 
+@@ -861,7 +876,7 @@ bool exhalbtc_bind_bt_coex_withadapter(void *adapter)
+ {
+ 	struct btc_coexist *btcoexist = &gl_bt_coexist;
+ 	struct rtl_priv *rtlpriv = adapter;
+-	u8 ant_num = 2, chip_type, single_ant_path = 0;
++	u8 ant_num = 2, chip_type;
+ 
+ 	if (btcoexist->binded)
+ 		return false;
+@@ -896,12 +911,6 @@ bool exhalbtc_bind_bt_coex_withadapter(void *adapter)
+ 	ant_num = rtl_get_hwpg_ant_num(rtlpriv);
+ 	exhalbtc_set_ant_num(rtlpriv, BT_COEX_ANT_TYPE_PG, ant_num);
+ 
+-	/* set default antenna position to main  port */
+-	btcoexist->board_info.btdm_ant_pos = BTC_ANTENNA_AT_MAIN_PORT;
+-
+-	single_ant_path = rtl_get_hwpg_single_ant_path(rtlpriv);
+-	exhalbtc_set_single_ant_path(single_ant_path);
+-
+ 	if (rtl_get_hwpg_package_type(rtlpriv) == 0)
+ 		btcoexist->board_info.tfbga_package = false;
+ 	else if (rtl_get_hwpg_package_type(rtlpriv) == 1)
+diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
+index 5f5cd306f76d..ffa7191ddfa5 100644
+--- a/drivers/nvme/host/fabrics.c
++++ b/drivers/nvme/host/fabrics.c
+@@ -75,7 +75,7 @@ static struct nvmf_host *nvmf_host_default(void)
+ 
+ 	kref_init(&host->ref);
+ 	snprintf(host->nqn, NVMF_NQN_SIZE,
+-		"nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUb", &host->id);
++		"nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id);
+ 
+ 	mutex_lock(&nvmf_hosts_mutex);
+ 	list_add_tail(&host->list, &nvmf_hosts);
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 12540b6104b5..1618dac7bf74 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -1814,6 +1814,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
+ 			goto restore;
+ 		}
+ 
++		btrfs_qgroup_rescan_resume(fs_info);
++
+ 		if (!fs_info->uuid_root) {
+ 			btrfs_info(fs_info, "creating UUID tree");
+ 			ret = btrfs_create_uuid_tree(fs_info);
+diff --git a/fs/nfs/file.c b/fs/nfs/file.c
+index af330c31f627..a85d1cf9b4a8 100644
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -631,11 +631,11 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
+ 	if (result <= 0)
+ 		goto out;
+ 
+-	result = generic_write_sync(iocb, result);
+-	if (result < 0)
+-		goto out;
+ 	written = result;
+ 	iocb->ki_pos += written;
++	result = generic_write_sync(iocb, written);
++	if (result < 0)
++		goto out;
+ 
+ 	/* Return error values */
+ 	if (nfs_need_check_write(file, inode)) {
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index dc456416d2be..68cc22083639 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -251,7 +251,6 @@ int nfs_iocounter_wait(struct nfs_lock_context *l_ctx);
+ extern const struct nfs_pageio_ops nfs_pgio_rw_ops;
+ struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *);
+ void nfs_pgio_header_free(struct nfs_pgio_header *);
+-void nfs_pgio_data_destroy(struct nfs_pgio_header *);
+ int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
+ int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
+ 		      struct rpc_cred *cred, const struct nfs_rpc_ops *rpc_ops,
+diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
+index de9066a92c0d..d291e6e72573 100644
+--- a/fs/nfs/pagelist.c
++++ b/fs/nfs/pagelist.c
+@@ -530,16 +530,6 @@ struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
+ }
+ EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);
+ 
+-/*
+- * nfs_pgio_header_free - Free a read or write header
+- * @hdr: The header to free
+- */
+-void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
+-{
+-	hdr->rw_ops->rw_free_header(hdr);
+-}
+-EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
+-
+ /**
+  * nfs_pgio_data_destroy - make @hdr suitable for reuse
+  *
+@@ -548,14 +538,24 @@ EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
+  *
+  * @hdr: A header that has had nfs_generic_pgio called
+  */
+-void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
++static void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
+ {
+ 	if (hdr->args.context)
+ 		put_nfs_open_context(hdr->args.context);
+ 	if (hdr->page_array.pagevec != hdr->page_array.page_array)
+ 		kfree(hdr->page_array.pagevec);
+ }
+-EXPORT_SYMBOL_GPL(nfs_pgio_data_destroy);
++
++/*
++ * nfs_pgio_header_free - Free a read or write header
++ * @hdr: The header to free
++ */
++void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
++{
++	nfs_pgio_data_destroy(hdr);
++	hdr->rw_ops->rw_free_header(hdr);
++}
++EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
+ 
+ /**
+  * nfs_pgio_rpcsetup - Set up arguments for a pageio call
+@@ -669,7 +669,6 @@ EXPORT_SYMBOL_GPL(nfs_initiate_pgio);
+ static void nfs_pgio_error(struct nfs_pgio_header *hdr)
+ {
+ 	set_bit(NFS_IOHDR_REDO, &hdr->flags);
+-	nfs_pgio_data_destroy(hdr);
+ 	hdr->completion_ops->completion(hdr);
+ }
+ 
+@@ -680,7 +679,6 @@ static void nfs_pgio_error(struct nfs_pgio_header *hdr)
+ static void nfs_pgio_release(void *calldata)
+ {
+ 	struct nfs_pgio_header *hdr = calldata;
+-	nfs_pgio_data_destroy(hdr);
+ 	hdr->completion_ops->completion(hdr);
+ }
+ 
+@@ -714,9 +712,6 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
+ 		     int io_flags,
+ 		     gfp_t gfp_flags)
+ {
+-	struct nfs_pgio_mirror *new;
+-	int i;
+-
+ 	desc->pg_moreio = 0;
+ 	desc->pg_inode = inode;
+ 	desc->pg_ops = pg_ops;
+@@ -732,21 +727,9 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
+ 	desc->pg_mirror_count = 1;
+ 	desc->pg_mirror_idx = 0;
+ 
+-	if (pg_ops->pg_get_mirror_count) {
+-		/* until we have a request, we don't have an lseg and no
+-		 * idea how many mirrors there will be */
+-		new = kcalloc(NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX,
+-			      sizeof(struct nfs_pgio_mirror), gfp_flags);
+-		desc->pg_mirrors_dynamic = new;
+-		desc->pg_mirrors = new;
+-
+-		for (i = 0; i < NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX; i++)
+-			nfs_pageio_mirror_init(&desc->pg_mirrors[i], bsize);
+-	} else {
+-		desc->pg_mirrors_dynamic = NULL;
+-		desc->pg_mirrors = desc->pg_mirrors_static;
+-		nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
+-	}
++	desc->pg_mirrors_dynamic = NULL;
++	desc->pg_mirrors = desc->pg_mirrors_static;
++	nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
+ }
+ EXPORT_SYMBOL_GPL(nfs_pageio_init);
+ 
+@@ -865,32 +848,52 @@ static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
+ 	return ret;
+ }
+ 
++static struct nfs_pgio_mirror *
++nfs_pageio_alloc_mirrors(struct nfs_pageio_descriptor *desc,
++		unsigned int mirror_count)
++{
++	struct nfs_pgio_mirror *ret;
++	unsigned int i;
++
++	kfree(desc->pg_mirrors_dynamic);
++	desc->pg_mirrors_dynamic = NULL;
++	if (mirror_count == 1)
++		return desc->pg_mirrors_static;
++	ret = kmalloc_array(mirror_count, sizeof(*ret), GFP_NOFS);
++	if (ret != NULL) {
++		for (i = 0; i < mirror_count; i++)
++			nfs_pageio_mirror_init(&ret[i], desc->pg_bsize);
++		desc->pg_mirrors_dynamic = ret;
++	}
++	return ret;
++}
++
+ /*
+  * nfs_pageio_setup_mirroring - determine if mirroring is to be used
+  *				by calling the pg_get_mirror_count op
+  */
+-static int nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
++static void nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
+ 				       struct nfs_page *req)
+ {
+-	int mirror_count = 1;
++	unsigned int mirror_count = 1;
+ 
+-	if (!pgio->pg_ops->pg_get_mirror_count)
+-		return 0;
+-
+-	mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
+-
+-	if (pgio->pg_error < 0)
+-		return pgio->pg_error;
+-
+-	if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX)
+-		return -EINVAL;
++	if (pgio->pg_ops->pg_get_mirror_count)
++		mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
++	if (mirror_count == pgio->pg_mirror_count || pgio->pg_error < 0)
++		return;
+ 
+-	if (WARN_ON_ONCE(!pgio->pg_mirrors_dynamic))
+-		return -EINVAL;
++	if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX) {
++		pgio->pg_error = -EINVAL;
++		return;
++	}
+ 
++	pgio->pg_mirrors = nfs_pageio_alloc_mirrors(pgio, mirror_count);
++	if (pgio->pg_mirrors == NULL) {
++		pgio->pg_error = -ENOMEM;
++		pgio->pg_mirrors = pgio->pg_mirrors_static;
++		mirror_count = 1;
++	}
+ 	pgio->pg_mirror_count = mirror_count;
+-
+-	return 0;
+ }
+ 
+ /*
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index c383d0913b54..64bb20130edf 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -2274,7 +2274,6 @@ pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
+ 		nfs_pageio_reset_write_mds(desc);
+ 		mirror->pg_recoalesce = 1;
+ 	}
+-	nfs_pgio_data_destroy(hdr);
+ 	hdr->release(hdr);
+ }
+ 
+@@ -2398,7 +2397,6 @@ pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
+ 		nfs_pageio_reset_read_mds(desc);
+ 		mirror->pg_recoalesce = 1;
+ 	}
+-	nfs_pgio_data_destroy(hdr);
+ 	hdr->release(hdr);
+ }
+ 
+diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
+index 9301c5a6060b..dcd1292664b3 100644
+--- a/fs/xfs/xfs_linux.h
++++ b/fs/xfs/xfs_linux.h
+@@ -270,7 +270,14 @@ static inline uint64_t howmany_64(uint64_t x, uint32_t y)
+ #endif /* DEBUG */
+ 
+ #ifdef CONFIG_XFS_RT
+-#define XFS_IS_REALTIME_INODE(ip) ((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME)
++
++/*
++ * make sure we ignore the inode flag if the filesystem doesn't have a
++ * configured realtime device.
++ */
++#define XFS_IS_REALTIME_INODE(ip)			\
++	(((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME) &&	\
++	 (ip)->i_mount->m_rtdev_targp)
+ #else
+ #define XFS_IS_REALTIME_INODE(ip) (0)
+ #endif
+diff --git a/lib/radix-tree.c b/lib/radix-tree.c
+index 898e87998417..79a804f1aab9 100644
+--- a/lib/radix-tree.c
++++ b/lib/radix-tree.c
+@@ -463,7 +463,7 @@ radix_tree_node_free(struct radix_tree_node *node)
+  * To make use of this facility, the radix tree must be initialised without
+  * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
+  */
+-static int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
++static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
+ {
+ 	struct radix_tree_preload *rtp;
+ 	struct radix_tree_node *node;
+@@ -2103,7 +2103,8 @@ EXPORT_SYMBOL(radix_tree_tagged);
+  */
+ void idr_preload(gfp_t gfp_mask)
+ {
+-	__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE);
++	if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
++		preempt_disable();
+ }
+ EXPORT_SYMBOL(idr_preload);
+ 
+@@ -2117,13 +2118,13 @@ EXPORT_SYMBOL(idr_preload);
+  */
+ int ida_pre_get(struct ida *ida, gfp_t gfp)
+ {
+-	__radix_tree_preload(gfp, IDA_PRELOAD_SIZE);
+ 	/*
+ 	 * The IDA API has no preload_end() equivalent.  Instead,
+ 	 * ida_get_new() can return -EAGAIN, prompting the caller
+ 	 * to return to the ida_pre_get() step.
+ 	 */
+-	preempt_enable();
++	if (!__radix_tree_preload(gfp, IDA_PRELOAD_SIZE))
++		preempt_enable();
+ 
+ 	if (!this_cpu_read(ida_bitmap)) {
+ 		struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp);
+diff --git a/mm/memory.c b/mm/memory.c
+index 56e48e4593cb..274547075486 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3888,6 +3888,11 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
+ 	/* do counter updates before entering really critical section. */
+ 	check_sync_rss_stat(current);
+ 
++	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
++					    flags & FAULT_FLAG_INSTRUCTION,
++					    flags & FAULT_FLAG_REMOTE))
++		return VM_FAULT_SIGSEGV;
++
+ 	/*
+ 	 * Enable the memcg OOM handling for faults triggered in user
+ 	 * space.  Kernel faults are handled more gracefully.
+@@ -3895,11 +3900,6 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
+ 	if (flags & FAULT_FLAG_USER)
+ 		mem_cgroup_oom_enable();
+ 
+-	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
+-					    flags & FAULT_FLAG_INSTRUCTION,
+-					    flags & FAULT_FLAG_REMOTE))
+-		return VM_FAULT_SIGSEGV;
+-
+ 	if (unlikely(is_vm_hugetlb_page(vma)))
+ 		ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
+ 	else
+diff --git a/mm/sparse.c b/mm/sparse.c
+index 7b4be3fd5cac..cdce7a7bb3f3 100644
+--- a/mm/sparse.c
++++ b/mm/sparse.c
+@@ -630,7 +630,7 @@ void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
+ 	unsigned long pfn;
+ 
+ 	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
+-		unsigned long section_nr = pfn_to_section_nr(start_pfn);
++		unsigned long section_nr = pfn_to_section_nr(pfn);
+ 		struct mem_section *ms;
+ 
+ 		/* onlining code should never touch invalid ranges */
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index 6ba4aab2db0b..a8952b6563c6 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -3052,7 +3052,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
+ 	p->flags = 0;
+ 	spin_unlock(&swap_lock);
+ 	vfree(swap_map);
+-	vfree(cluster_info);
++	kvfree(cluster_info);
++	kvfree(frontswap_map);
+ 	if (swap_file) {
+ 		if (inode && S_ISREG(inode->i_mode)) {
+ 			inode_unlock(inode);
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 303c779bfe38..43ba91c440bc 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -58,7 +58,7 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
+ 				       u8 code, u8 ident, u16 dlen, void *data);
+ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
+ 			   void *data);
+-static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
++static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
+ static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
+ 
+ static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
+@@ -1473,7 +1473,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
+ 
+ 			set_bit(CONF_REQ_SENT, &chan->conf_state);
+ 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+-				       l2cap_build_conf_req(chan, buf), buf);
++				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ 			chan->num_conf_req++;
+ 		}
+ 
+@@ -2987,12 +2987,15 @@ static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
+ 	return len;
+ }
+ 
+-static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
++static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
+ {
+ 	struct l2cap_conf_opt *opt = *ptr;
+ 
+ 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
+ 
++	if (size < L2CAP_CONF_OPT_SIZE + len)
++		return;
++
+ 	opt->type = type;
+ 	opt->len  = len;
+ 
+@@ -3017,7 +3020,7 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
+ 	*ptr += L2CAP_CONF_OPT_SIZE + len;
+ }
+ 
+-static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
++static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
+ {
+ 	struct l2cap_conf_efs efs;
+ 
+@@ -3045,7 +3048,7 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
+ 	}
+ 
+ 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
+-			   (unsigned long) &efs);
++			   (unsigned long) &efs, size);
+ }
+ 
+ static void l2cap_ack_timeout(struct work_struct *work)
+@@ -3191,11 +3194,12 @@ static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
+ 	chan->ack_win = chan->tx_win;
+ }
+ 
+-static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
++static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
+ {
+ 	struct l2cap_conf_req *req = data;
+ 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
+ 	void *ptr = req->data;
++	void *endptr = data + data_size;
+ 	u16 size;
+ 
+ 	BT_DBG("chan %p", chan);
+@@ -3220,7 +3224,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
+ 
+ done:
+ 	if (chan->imtu != L2CAP_DEFAULT_MTU)
+-		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
++		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
+ 
+ 	switch (chan->mode) {
+ 	case L2CAP_MODE_BASIC:
+@@ -3239,7 +3243,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
+ 		rfc.max_pdu_size    = 0;
+ 
+ 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+-				   (unsigned long) &rfc);
++				   (unsigned long) &rfc, endptr - ptr);
+ 		break;
+ 
+ 	case L2CAP_MODE_ERTM:
+@@ -3259,21 +3263,21 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
+ 				       L2CAP_DEFAULT_TX_WINDOW);
+ 
+ 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+-				   (unsigned long) &rfc);
++				   (unsigned long) &rfc, endptr - ptr);
+ 
+ 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
+-			l2cap_add_opt_efs(&ptr, chan);
++			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
+ 
+ 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
+-					   chan->tx_win);
++					   chan->tx_win, endptr - ptr);
+ 
+ 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
+ 			if (chan->fcs == L2CAP_FCS_NONE ||
+ 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
+ 				chan->fcs = L2CAP_FCS_NONE;
+ 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
+-						   chan->fcs);
++						   chan->fcs, endptr - ptr);
+ 			}
+ 		break;
+ 
+@@ -3291,17 +3295,17 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
+ 		rfc.max_pdu_size = cpu_to_le16(size);
+ 
+ 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+-				   (unsigned long) &rfc);
++				   (unsigned long) &rfc, endptr - ptr);
+ 
+ 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
+-			l2cap_add_opt_efs(&ptr, chan);
++			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
+ 
+ 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
+ 			if (chan->fcs == L2CAP_FCS_NONE ||
+ 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
+ 				chan->fcs = L2CAP_FCS_NONE;
+ 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
+-						   chan->fcs);
++						   chan->fcs, endptr - ptr);
+ 			}
+ 		break;
+ 	}
+@@ -3312,10 +3316,11 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
+ 	return ptr - data;
+ }
+ 
+-static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
++static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
+ {
+ 	struct l2cap_conf_rsp *rsp = data;
+ 	void *ptr = rsp->data;
++	void *endptr = data + data_size;
+ 	void *req = chan->conf_req;
+ 	int len = chan->conf_len;
+ 	int type, hint, olen;
+@@ -3417,7 +3422,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
+ 			return -ECONNREFUSED;
+ 
+ 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+-				   (unsigned long) &rfc);
++				   (unsigned long) &rfc, endptr - ptr);
+ 	}
+ 
+ 	if (result == L2CAP_CONF_SUCCESS) {
+@@ -3430,7 +3435,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
+ 			chan->omtu = mtu;
+ 			set_bit(CONF_MTU_DONE, &chan->conf_state);
+ 		}
+-		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
++		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
+ 
+ 		if (remote_efs) {
+ 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
+@@ -3444,7 +3449,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
+ 
+ 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+ 						   sizeof(efs),
+-						   (unsigned long) &efs);
++						   (unsigned long) &efs, endptr - ptr);
+ 			} else {
+ 				/* Send PENDING Conf Rsp */
+ 				result = L2CAP_CONF_PENDING;
+@@ -3477,7 +3482,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
+ 			set_bit(CONF_MODE_DONE, &chan->conf_state);
+ 
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+-					   sizeof(rfc), (unsigned long) &rfc);
++					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
+ 
+ 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
+ 				chan->remote_id = efs.id;
+@@ -3491,7 +3496,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
+ 					le32_to_cpu(efs.sdu_itime);
+ 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+ 						   sizeof(efs),
+-						   (unsigned long) &efs);
++						   (unsigned long) &efs, endptr - ptr);
+ 			}
+ 			break;
+ 
+@@ -3505,7 +3510,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
+ 			set_bit(CONF_MODE_DONE, &chan->conf_state);
+ 
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+-					   (unsigned long) &rfc);
++					   (unsigned long) &rfc, endptr - ptr);
+ 
+ 			break;
+ 
+@@ -3527,10 +3532,11 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
+ }
+ 
+ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+-				void *data, u16 *result)
++				void *data, size_t size, u16 *result)
+ {
+ 	struct l2cap_conf_req *req = data;
+ 	void *ptr = req->data;
++	void *endptr = data + size;
+ 	int type, olen;
+ 	unsigned long val;
+ 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
+@@ -3548,13 +3554,13 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+ 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
+ 			} else
+ 				chan->imtu = val;
+-			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
++			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
+ 			break;
+ 
+ 		case L2CAP_CONF_FLUSH_TO:
+ 			chan->flush_to = val;
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
+-					   2, chan->flush_to);
++					   2, chan->flush_to, endptr - ptr);
+ 			break;
+ 
+ 		case L2CAP_CONF_RFC:
+@@ -3568,13 +3574,13 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+ 			chan->fcs = 0;
+ 
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+-					   sizeof(rfc), (unsigned long) &rfc);
++					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
+ 			break;
+ 
+ 		case L2CAP_CONF_EWS:
+ 			chan->ack_win = min_t(u16, val, chan->ack_win);
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
+-					   chan->tx_win);
++					   chan->tx_win, endptr - ptr);
+ 			break;
+ 
+ 		case L2CAP_CONF_EFS:
+@@ -3587,7 +3593,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+ 				return -ECONNREFUSED;
+ 
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
+-					   (unsigned long) &efs);
++					   (unsigned long) &efs, endptr - ptr);
+ 			break;
+ 
+ 		case L2CAP_CONF_FCS:
+@@ -3692,7 +3698,7 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
+ 		return;
+ 
+ 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+-		       l2cap_build_conf_req(chan, buf), buf);
++		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ 	chan->num_conf_req++;
+ }
+ 
+@@ -3900,7 +3906,7 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
+ 		u8 buf[128];
+ 		set_bit(CONF_REQ_SENT, &chan->conf_state);
+ 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+-			       l2cap_build_conf_req(chan, buf), buf);
++			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ 		chan->num_conf_req++;
+ 	}
+ 
+@@ -3978,7 +3984,7 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
+ 			break;
+ 
+ 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+-			       l2cap_build_conf_req(chan, req), req);
++			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
+ 		chan->num_conf_req++;
+ 		break;
+ 
+@@ -4090,7 +4096,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
+ 	}
+ 
+ 	/* Complete config. */
+-	len = l2cap_parse_conf_req(chan, rsp);
++	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
+ 	if (len < 0) {
+ 		l2cap_send_disconn_req(chan, ECONNRESET);
+ 		goto unlock;
+@@ -4124,7 +4130,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
+ 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
+ 		u8 buf[64];
+ 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+-			       l2cap_build_conf_req(chan, buf), buf);
++			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ 		chan->num_conf_req++;
+ 	}
+ 
+@@ -4184,7 +4190,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
+ 			char buf[64];
+ 
+ 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
+-						   buf, &result);
++						   buf, sizeof(buf), &result);
+ 			if (len < 0) {
+ 				l2cap_send_disconn_req(chan, ECONNRESET);
+ 				goto done;
+@@ -4214,7 +4220,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
+ 			/* throw out any old stored conf requests */
+ 			result = L2CAP_CONF_SUCCESS;
+ 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
+-						   req, &result);
++						   req, sizeof(req), &result);
+ 			if (len < 0) {
+ 				l2cap_send_disconn_req(chan, ECONNRESET);
+ 				goto done;
+@@ -4791,7 +4797,7 @@ static void l2cap_do_create(struct l2cap_chan *chan, int result,
+ 			set_bit(CONF_REQ_SENT, &chan->conf_state);
+ 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
+ 				       L2CAP_CONF_REQ,
+-				       l2cap_build_conf_req(chan, buf), buf);
++				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ 			chan->num_conf_req++;
+ 		}
+ 	}
+@@ -7465,7 +7471,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
+ 				set_bit(CONF_REQ_SENT, &chan->conf_state);
+ 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
+ 					       L2CAP_CONF_REQ,
+-					       l2cap_build_conf_req(chan, buf),
++					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
+ 					       buf);
+ 				chan->num_conf_req++;
+ 			}
+diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile
+index a9b86133b9b3..dfa916e651fb 100644
+--- a/tools/testing/selftests/timers/Makefile
++++ b/tools/testing/selftests/timers/Makefile
+@@ -14,20 +14,20 @@ TEST_GEN_PROGS_EXTENDED = alarmtimer-suspend valid-adjtimex adjtick change_skew
+ 
+ include ../lib.mk
+ 
++define RUN_DESTRUCTIVE_TESTS
++	@for TEST in $(TEST_GEN_PROGS_EXTENDED); do \
++		BASENAME_TEST=`basename $$TEST`;	\
++		if [ ! -x $$BASENAME_TEST ]; then	\
++			echo "selftests: Warning: file $$BASENAME_TEST is not executable, correct this.";\
++			echo "selftests: $$BASENAME_TEST [FAIL]"; \
++		else					\
++			cd `dirname $$TEST`; (./$$BASENAME_TEST && echo "selftests: $$BASENAME_TEST [PASS]") || echo "selftests:  $$BASENAME_TEST [FAIL]"; cd -;\
++		fi;					\
++	done;
++endef
++
+ # these tests require escalated privileges
+ # and may modify the system time or trigger
+ # other behavior like suspend
+ run_destructive_tests: run_tests
+-	./alarmtimer-suspend
+-	./valid-adjtimex
+-	./adjtick
+-	./change_skew
+-	./skew_consistency
+-	./clocksource-switch
+-	./freq-step
+-	./leap-a-day -s -i 10
+-	./leapcrash
+-	./set-tz
+-	./set-tai
+-	./set-2038
+-
++	$(RUN_DESTRUCTIVE_TESTS)
+diff --git a/tools/testing/selftests/timers/leap-a-day.c b/tools/testing/selftests/timers/leap-a-day.c
+index fb46ad6ac92c..067017634057 100644
+--- a/tools/testing/selftests/timers/leap-a-day.c
++++ b/tools/testing/selftests/timers/leap-a-day.c
+@@ -190,18 +190,18 @@ int main(int argc, char **argv)
+ 	struct sigevent se;
+ 	struct sigaction act;
+ 	int signum = SIGRTMAX;
+-	int settime = 0;
++	int settime = 1;
+ 	int tai_time = 0;
+ 	int insert = 1;
+-	int iterations = -1;
++	int iterations = 10;
+ 	int opt;
+ 
+ 	/* Process arguments */
+ 	while ((opt = getopt(argc, argv, "sti:")) != -1) {
+ 		switch (opt) {
+-		case 's':
+-			printf("Setting time to speed up testing\n");
+-			settime = 1;
++		case 'w':
++			printf("Only setting leap-flag, not changing time. It could take up to a day for leap to trigger.\n");
++			settime = 0;
+ 			break;
+ 		case 'i':
+ 			iterations = atoi(optarg);
+@@ -210,9 +210,10 @@ int main(int argc, char **argv)
+ 			tai_time = 1;
+ 			break;
+ 		default:
+-			printf("Usage: %s [-s] [-i <iterations>]\n", argv[0]);
+-			printf("	-s: Set time to right before leap second each iteration\n");
+-			printf("	-i: Number of iterations\n");
++			printf("Usage: %s [-w] [-i <iterations>]\n", argv[0]);
++			printf("	-w: Set flag and wait for leap second each iteration");
++			printf("	    (default sets time to right before leapsecond)\n");
++			printf("	-i: Number of iterations (-1 = infinite, default is 10)\n");
+ 			printf("	-t: Print TAI time\n");
+ 			exit(-1);
+ 		}
+diff --git a/tools/testing/selftests/x86/fsgsbase.c b/tools/testing/selftests/x86/fsgsbase.c
+index b4967d875236..f249e042b3b5 100644
+--- a/tools/testing/selftests/x86/fsgsbase.c
++++ b/tools/testing/selftests/x86/fsgsbase.c
+@@ -285,9 +285,12 @@ static void *threadproc(void *ctx)
+ 	}
+ }
+ 
+-static void set_gs_and_switch_to(unsigned long local, unsigned long remote)
++static void set_gs_and_switch_to(unsigned long local,
++				 unsigned short force_sel,
++				 unsigned long remote)
+ {
+ 	unsigned long base;
++	unsigned short sel_pre_sched, sel_post_sched;
+ 
+ 	bool hard_zero = false;
+ 	if (local == HARD_ZERO) {
+@@ -297,6 +300,8 @@ static void set_gs_and_switch_to(unsigned long local, unsigned long remote)
+ 
+ 	printf("[RUN]\tARCH_SET_GS(0x%lx)%s, then schedule to 0x%lx\n",
+ 	       local, hard_zero ? " and clear gs" : "", remote);
++	if (force_sel)
++		printf("\tBefore schedule, set selector to 0x%hx\n", force_sel);
+ 	if (syscall(SYS_arch_prctl, ARCH_SET_GS, local) != 0)
+ 		err(1, "ARCH_SET_GS");
+ 	if (hard_zero)
+@@ -307,18 +312,35 @@ static void set_gs_and_switch_to(unsigned long local, unsigned long remote)
+ 		printf("[FAIL]\tGSBASE wasn't set as expected\n");
+ 	}
+ 
++	if (force_sel) {
++		asm volatile ("mov %0, %%gs" : : "rm" (force_sel));
++		sel_pre_sched = force_sel;
++		local = read_base(GS);
++
++		/*
++		 * Signal delivery seems to mess up weird selectors.  Put it
++		 * back.
++		 */
++		asm volatile ("mov %0, %%gs" : : "rm" (force_sel));
++	} else {
++		asm volatile ("mov %%gs, %0" : "=rm" (sel_pre_sched));
++	}
++
+ 	remote_base = remote;
+ 	ftx = 1;
+ 	syscall(SYS_futex, &ftx, FUTEX_WAKE, 0, NULL, NULL, 0);
+ 	while (ftx != 0)
+ 		syscall(SYS_futex, &ftx, FUTEX_WAIT, 1, NULL, NULL, 0);
+ 
++	asm volatile ("mov %%gs, %0" : "=rm" (sel_post_sched));
+ 	base = read_base(GS);
+-	if (base == local) {
+-		printf("[OK]\tGSBASE remained 0x%lx\n", local);
++	if (base == local && sel_pre_sched == sel_post_sched) {
++		printf("[OK]\tGS/BASE remained 0x%hx/0x%lx\n",
++		       sel_pre_sched, local);
+ 	} else {
+ 		nerrs++;
+-		printf("[FAIL]\tGSBASE changed to 0x%lx\n", base);
++		printf("[FAIL]\tGS/BASE changed from 0x%hx/0x%lx to 0x%hx/0x%lx\n",
++		       sel_pre_sched, local, sel_post_sched, base);
+ 	}
+ }
+ 
+@@ -381,8 +403,15 @@ int main()
+ 
+ 	for (int local = 0; local < 4; local++) {
+ 		for (int remote = 0; remote < 4; remote++) {
+-			set_gs_and_switch_to(bases_with_hard_zero[local],
+-					     bases_with_hard_zero[remote]);
++			for (unsigned short s = 0; s < 5; s++) {
++				unsigned short sel = s;
++				if (s == 4)
++					asm ("mov %%ss, %0" : "=rm" (sel));
++				set_gs_and_switch_to(
++					bases_with_hard_zero[local],
++					sel,
++					bases_with_hard_zero[remote]);
++			}
+ 		}
+ 	}
+ 


* [gentoo-commits] proj/linux-patches:4.13 commit in: /
@ 2017-09-13 11:52 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2017-09-13 11:52 UTC (permalink / raw
  To: gentoo-commits

commit:     19f6558c89a32f9f47cc7aaa54c619c4cd68a1e8
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 13 11:52:33 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Sep 13 11:52:33 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=19f6558c

Validate the output buffer length for L2CAP config reqs and resps to avoid overflowing the stack buffer. CVE-2017-1000251. See bug #630840
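
For context, the core of the fix is a bounds check before every option
write: each helper now receives the remaining room in the output buffer
and silently drops any option that would not fit. A minimal standalone
sketch of that pattern follows (OPT_HDR_SIZE and add_opt_bounded are
illustrative names, not the kernel's; the real helper is
l2cap_add_conf_opt() in net/bluetooth/l2cap_core.c):

#include <stdint.h>
#include <string.h>

#define OPT_HDR_SIZE 2  /* one type byte + one length byte */

/* Append a type/length/value option only if it fits in 'room' bytes. */
static void add_opt_bounded(uint8_t **ptr, size_t room,
                            uint8_t type, uint8_t len, const void *val)
{
        /* Drop the option rather than overflow the caller's buffer. */
        if (room < (size_t)OPT_HDR_SIZE + len)
                return;

        (*ptr)[0] = type;
        (*ptr)[1] = len;
        memcpy(*ptr + OPT_HDR_SIZE, val, len);
        *ptr += OPT_HDR_SIZE + len;
}

Callers compute 'room' as endptr - ptr, which is how the patch below
threads the buffer end through every l2cap_add_conf_opt() call site.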

 0000_README                             |   4 +
 2400_BT-check-L2CAP-buffer-length.patch | 357 ++++++++++++++++++++++++++++++++
 2 files changed, 361 insertions(+)

diff --git a/0000_README b/0000_README
index 6a8a687..2c91dfe 100644
--- a/0000_README
+++ b/0000_README
@@ -59,6 +59,10 @@ Patch:  2300_enable-poweroff-on-Mac-Pro-11.patch
 From:   http://kernel.ubuntu.com/git/ubuntu/ubuntu-xenial.git/patch/drivers/pci/quirks.c?id=5080ff61a438f3dd80b88b423e1a20791d8a774c
 Desc:   Workaround to enable poweroff on Mac Pro 11. See bug #601964.
 
+Patch:  2400_BT-check-L2CAP-buffer-length.patch
+From:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=e860d2c904d1a9f38a24eb44c9f34b8f915a6ea3
+Desc:   Validate the output buffer length for L2CAP config reqs and resps to avoid stack buffer overflowing. CVE-2017-1000251. See bug #630840
+
 Patch:  2600_enable-key-swapping-for-apple-mac.patch
 From:   https://github.com/free5lot/hid-apple-patched
 Desc:   This hid-apple patch enables swapping of the FN and left Control keys and provides some additional tweaks on some Apple keyboards. See bug #622902

diff --git a/2400_BT-check-L2CAP-buffer-length.patch b/2400_BT-check-L2CAP-buffer-length.patch
new file mode 100644
index 0000000..c6bfdf7
--- /dev/null
+++ b/2400_BT-check-L2CAP-buffer-length.patch
@@ -0,0 +1,357 @@
+From e860d2c904d1a9f38a24eb44c9f34b8f915a6ea3 Mon Sep 17 00:00:00 2001
+From: Ben Seri <ben@armis.com>
+Date: Sat, 9 Sep 2017 23:15:59 +0200
+Subject: Bluetooth: Properly check L2CAP config option output buffer length
+
+Validate the output buffer length for L2CAP config requests and responses
+to avoid overflowing the stack buffer used for building the option blocks.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Ben Seri <ben@armis.com>
+Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+ net/bluetooth/l2cap_core.c | 80 +++++++++++++++++++++++++---------------------
+ 1 file changed, 43 insertions(+), 37 deletions(-)
+
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 303c779..43ba91c 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -58,7 +58,7 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
+ 				       u8 code, u8 ident, u16 dlen, void *data);
+ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
+ 			   void *data);
+-static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
++static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
+ static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
+ 
+ static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
+@@ -1473,7 +1473,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
+ 
+ 			set_bit(CONF_REQ_SENT, &chan->conf_state);
+ 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+-				       l2cap_build_conf_req(chan, buf), buf);
++				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ 			chan->num_conf_req++;
+ 		}
+ 
+@@ -2987,12 +2987,15 @@ static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
+ 	return len;
+ }
+ 
+-static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
++static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
+ {
+ 	struct l2cap_conf_opt *opt = *ptr;
+ 
+ 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
+ 
++	if (size < L2CAP_CONF_OPT_SIZE + len)
++		return;
++
+ 	opt->type = type;
+ 	opt->len  = len;
+ 
+@@ -3017,7 +3020,7 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
+ 	*ptr += L2CAP_CONF_OPT_SIZE + len;
+ }
+ 
+-static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
++static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
+ {
+ 	struct l2cap_conf_efs efs;
+ 
+@@ -3045,7 +3048,7 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
+ 	}
+ 
+ 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
+-			   (unsigned long) &efs);
++			   (unsigned long) &efs, size);
+ }
+ 
+ static void l2cap_ack_timeout(struct work_struct *work)
+@@ -3191,11 +3194,12 @@ static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
+ 	chan->ack_win = chan->tx_win;
+ }
+ 
+-static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
++static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
+ {
+ 	struct l2cap_conf_req *req = data;
+ 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
+ 	void *ptr = req->data;
++	void *endptr = data + data_size;
+ 	u16 size;
+ 
+ 	BT_DBG("chan %p", chan);
+@@ -3220,7 +3224,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
+ 
+ done:
+ 	if (chan->imtu != L2CAP_DEFAULT_MTU)
+-		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
++		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
+ 
+ 	switch (chan->mode) {
+ 	case L2CAP_MODE_BASIC:
+@@ -3239,7 +3243,7 @@ done:
+ 		rfc.max_pdu_size    = 0;
+ 
+ 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+-				   (unsigned long) &rfc);
++				   (unsigned long) &rfc, endptr - ptr);
+ 		break;
+ 
+ 	case L2CAP_MODE_ERTM:
+@@ -3259,21 +3263,21 @@ done:
+ 				       L2CAP_DEFAULT_TX_WINDOW);
+ 
+ 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+-				   (unsigned long) &rfc);
++				   (unsigned long) &rfc, endptr - ptr);
+ 
+ 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
+-			l2cap_add_opt_efs(&ptr, chan);
++			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
+ 
+ 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
+-					   chan->tx_win);
++					   chan->tx_win, endptr - ptr);
+ 
+ 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
+ 			if (chan->fcs == L2CAP_FCS_NONE ||
+ 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
+ 				chan->fcs = L2CAP_FCS_NONE;
+ 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
+-						   chan->fcs);
++						   chan->fcs, endptr - ptr);
+ 			}
+ 		break;
+ 
+@@ -3291,17 +3295,17 @@ done:
+ 		rfc.max_pdu_size = cpu_to_le16(size);
+ 
+ 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+-				   (unsigned long) &rfc);
++				   (unsigned long) &rfc, endptr - ptr);
+ 
+ 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
+-			l2cap_add_opt_efs(&ptr, chan);
++			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
+ 
+ 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
+ 			if (chan->fcs == L2CAP_FCS_NONE ||
+ 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
+ 				chan->fcs = L2CAP_FCS_NONE;
+ 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
+-						   chan->fcs);
++						   chan->fcs, endptr - ptr);
+ 			}
+ 		break;
+ 	}
+@@ -3312,10 +3316,11 @@ done:
+ 	return ptr - data;
+ }
+ 
+-static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
++static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
+ {
+ 	struct l2cap_conf_rsp *rsp = data;
+ 	void *ptr = rsp->data;
++	void *endptr = data + data_size;
+ 	void *req = chan->conf_req;
+ 	int len = chan->conf_len;
+ 	int type, hint, olen;
+@@ -3417,7 +3422,7 @@ done:
+ 			return -ECONNREFUSED;
+ 
+ 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+-				   (unsigned long) &rfc);
++				   (unsigned long) &rfc, endptr - ptr);
+ 	}
+ 
+ 	if (result == L2CAP_CONF_SUCCESS) {
+@@ -3430,7 +3435,7 @@ done:
+ 			chan->omtu = mtu;
+ 			set_bit(CONF_MTU_DONE, &chan->conf_state);
+ 		}
+-		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
++		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
+ 
+ 		if (remote_efs) {
+ 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
+@@ -3444,7 +3449,7 @@ done:
+ 
+ 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+ 						   sizeof(efs),
+-						   (unsigned long) &efs);
++						   (unsigned long) &efs, endptr - ptr);
+ 			} else {
+ 				/* Send PENDING Conf Rsp */
+ 				result = L2CAP_CONF_PENDING;
+@@ -3477,7 +3482,7 @@ done:
+ 			set_bit(CONF_MODE_DONE, &chan->conf_state);
+ 
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+-					   sizeof(rfc), (unsigned long) &rfc);
++					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
+ 
+ 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
+ 				chan->remote_id = efs.id;
+@@ -3491,7 +3496,7 @@ done:
+ 					le32_to_cpu(efs.sdu_itime);
+ 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+ 						   sizeof(efs),
+-						   (unsigned long) &efs);
++						   (unsigned long) &efs, endptr - ptr);
+ 			}
+ 			break;
+ 
+@@ -3505,7 +3510,7 @@ done:
+ 			set_bit(CONF_MODE_DONE, &chan->conf_state);
+ 
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+-					   (unsigned long) &rfc);
++					   (unsigned long) &rfc, endptr - ptr);
+ 
+ 			break;
+ 
+@@ -3527,10 +3532,11 @@ done:
+ }
+ 
+ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+-				void *data, u16 *result)
++				void *data, size_t size, u16 *result)
+ {
+ 	struct l2cap_conf_req *req = data;
+ 	void *ptr = req->data;
++	void *endptr = data + size;
+ 	int type, olen;
+ 	unsigned long val;
+ 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
+@@ -3548,13 +3554,13 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+ 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
+ 			} else
+ 				chan->imtu = val;
+-			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
++			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
+ 			break;
+ 
+ 		case L2CAP_CONF_FLUSH_TO:
+ 			chan->flush_to = val;
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
+-					   2, chan->flush_to);
++					   2, chan->flush_to, endptr - ptr);
+ 			break;
+ 
+ 		case L2CAP_CONF_RFC:
+@@ -3568,13 +3574,13 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+ 			chan->fcs = 0;
+ 
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+-					   sizeof(rfc), (unsigned long) &rfc);
++					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
+ 			break;
+ 
+ 		case L2CAP_CONF_EWS:
+ 			chan->ack_win = min_t(u16, val, chan->ack_win);
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
+-					   chan->tx_win);
++					   chan->tx_win, endptr - ptr);
+ 			break;
+ 
+ 		case L2CAP_CONF_EFS:
+@@ -3587,7 +3593,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+ 				return -ECONNREFUSED;
+ 
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
+-					   (unsigned long) &efs);
++					   (unsigned long) &efs, endptr - ptr);
+ 			break;
+ 
+ 		case L2CAP_CONF_FCS:
+@@ -3692,7 +3698,7 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
+ 		return;
+ 
+ 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+-		       l2cap_build_conf_req(chan, buf), buf);
++		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ 	chan->num_conf_req++;
+ }
+ 
+@@ -3900,7 +3906,7 @@ sendresp:
+ 		u8 buf[128];
+ 		set_bit(CONF_REQ_SENT, &chan->conf_state);
+ 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+-			       l2cap_build_conf_req(chan, buf), buf);
++			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ 		chan->num_conf_req++;
+ 	}
+ 
+@@ -3978,7 +3984,7 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
+ 			break;
+ 
+ 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+-			       l2cap_build_conf_req(chan, req), req);
++			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
+ 		chan->num_conf_req++;
+ 		break;
+ 
+@@ -4090,7 +4096,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
+ 	}
+ 
+ 	/* Complete config. */
+-	len = l2cap_parse_conf_req(chan, rsp);
++	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
+ 	if (len < 0) {
+ 		l2cap_send_disconn_req(chan, ECONNRESET);
+ 		goto unlock;
+@@ -4124,7 +4130,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
+ 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
+ 		u8 buf[64];
+ 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+-			       l2cap_build_conf_req(chan, buf), buf);
++			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ 		chan->num_conf_req++;
+ 	}
+ 
+@@ -4184,7 +4190,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
+ 			char buf[64];
+ 
+ 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
+-						   buf, &result);
++						   buf, sizeof(buf), &result);
+ 			if (len < 0) {
+ 				l2cap_send_disconn_req(chan, ECONNRESET);
+ 				goto done;
+@@ -4214,7 +4220,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
+ 			/* throw out any old stored conf requests */
+ 			result = L2CAP_CONF_SUCCESS;
+ 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
+-						   req, &result);
++						   req, sizeof(req), &result);
+ 			if (len < 0) {
+ 				l2cap_send_disconn_req(chan, ECONNRESET);
+ 				goto done;
+@@ -4791,7 +4797,7 @@ static void l2cap_do_create(struct l2cap_chan *chan, int result,
+ 			set_bit(CONF_REQ_SENT, &chan->conf_state);
+ 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
+ 				       L2CAP_CONF_REQ,
+-				       l2cap_build_conf_req(chan, buf), buf);
++				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ 			chan->num_conf_req++;
+ 		}
+ 	}
+@@ -7465,7 +7471,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
+ 				set_bit(CONF_REQ_SENT, &chan->conf_state);
+ 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
+ 					       L2CAP_CONF_REQ,
+-					       l2cap_build_conf_req(chan, buf),
++					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
+ 					       buf);
+ 				chan->num_conf_req++;
+ 			}
+-- 
+cgit v1.1
+


* [gentoo-commits] proj/linux-patches:4.13 commit in: /
@ 2017-09-10 14:15 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2017-09-10 14:15 UTC (permalink / raw
  To: gentoo-commits

commit:     6dd097e14861a422f31161e9738304b9f8c46e42
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Sep 10 14:14:56 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Sep 10 14:14:56 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=6dd097e1

Linux patch 4.13.1

 0000_README             |    5 +
 1000_linux-4.13.1.patch | 1735 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1740 insertions(+)

diff --git a/0000_README b/0000_README
index b6b8110..6a8a687 100644
--- a/0000_README
+++ b/0000_README
@@ -43,6 +43,10 @@ EXPERIMENTAL
 Individual Patch Descriptions:
 --------------------------------------------------------------------------
 
+Patch:  1000_linux-4.13.1.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.13.1
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.
@@ -78,3 +82,4 @@ Desc:   Add Gentoo Linux support config settings and defaults.
 Patch:  5010_enable-additional-cpu-optimizations-for-gcc.patch
 From:   https://github.com/graysky2/kernel_gcc_patch/
 Desc:   Kernel patch enables gcc >= v4.9 optimizations for additional CPUs.
+

diff --git a/1000_linux-4.13.1.patch b/1000_linux-4.13.1.patch
new file mode 100644
index 0000000..6c6a08e
--- /dev/null
+++ b/1000_linux-4.13.1.patch
@@ -0,0 +1,1735 @@
+diff --git a/Makefile b/Makefile
+index ed65d7278bb3..41a976854cad 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 13
+-SUBLEVEL = 0
++SUBLEVEL = 1
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
+index 57057fb1cc07..65ab11d654e1 100644
+--- a/arch/s390/include/asm/pgtable.h
++++ b/arch/s390/include/asm/pgtable.h
+@@ -505,7 +505,7 @@ static inline int mm_alloc_pgste(struct mm_struct *mm)
+  * In the case that a guest uses storage keys
+  * faults should no longer be backed by zero pages
+  */
+-#define mm_forbids_zeropage mm_use_skey
++#define mm_forbids_zeropage mm_has_pgste
+ static inline int mm_use_skey(struct mm_struct *mm)
+ {
+ #ifdef CONFIG_PGSTE
+diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
+index 4fb3d3cdb370..ff84fb214bf9 100644
+--- a/arch/s390/mm/gmap.c
++++ b/arch/s390/mm/gmap.c
+@@ -2120,6 +2120,37 @@ static inline void thp_split_mm(struct mm_struct *mm)
+ #endif
+ }
+ 
++/*
++ * Remove all empty zero pages from the mapping for lazy refaulting
++ * - This must be called after mm->context.has_pgste is set, to avoid
++ *   future creation of zero pages
++ * - This must be called after THP was enabled
++ */
++static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
++			   unsigned long end, struct mm_walk *walk)
++{
++	unsigned long addr;
++
++	for (addr = start; addr != end; addr += PAGE_SIZE) {
++		pte_t *ptep;
++		spinlock_t *ptl;
++
++		ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
++		if (is_zero_pfn(pte_pfn(*ptep)))
++			ptep_xchg_direct(walk->mm, addr, ptep, __pte(_PAGE_INVALID));
++		pte_unmap_unlock(ptep, ptl);
++	}
++	return 0;
++}
++
++static inline void zap_zero_pages(struct mm_struct *mm)
++{
++	struct mm_walk walk = { .pmd_entry = __zap_zero_pages };
++
++	walk.mm = mm;
++	walk_page_range(0, TASK_SIZE, &walk);
++}
++
+ /*
+  * switch on pgstes for its userspace process (for kvm)
+  */
+@@ -2137,6 +2168,7 @@ int s390_enable_sie(void)
+ 	mm->context.has_pgste = 1;
+ 	/* split thp mappings and disable thp for future mappings */
+ 	thp_split_mm(mm);
++	zap_zero_pages(mm);
+ 	up_write(&mm->mmap_sem);
+ 	return 0;
+ }
+@@ -2149,13 +2181,6 @@ EXPORT_SYMBOL_GPL(s390_enable_sie);
+ static int __s390_enable_skey(pte_t *pte, unsigned long addr,
+ 			      unsigned long next, struct mm_walk *walk)
+ {
+-	/*
+-	 * Remove all zero page mappings,
+-	 * after establishing a policy to forbid zero page mappings
+-	 * following faults for that page will get fresh anonymous pages
+-	 */
+-	if (is_zero_pfn(pte_pfn(*pte)))
+-		ptep_xchg_direct(walk->mm, addr, pte, __pte(_PAGE_INVALID));
+ 	/* Clear storage key */
+ 	ptep_zap_key(walk->mm, addr, pte);
+ 	return 0;
+diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
+index 832e885349b1..4d4cdc1a6e25 100644
+--- a/drivers/android/Kconfig
++++ b/drivers/android/Kconfig
+@@ -22,7 +22,7 @@ config ANDROID_BINDER_IPC
+ config ANDROID_BINDER_DEVICES
+ 	string "Android Binder devices"
+ 	depends on ANDROID_BINDER_IPC
+-	default "binder,hwbinder"
++	default "binder,hwbinder,vndbinder"
+ 	---help---
+ 	  Default value for the binder.devices parameter.
+ 
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 831cdd7d197d..3db96b79d122 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -4215,7 +4215,7 @@ static int __init init_binder_device(const char *name)
+ static int __init binder_init(void)
+ {
+ 	int ret;
+-	char *device_name, *device_names;
++	char *device_name, *device_names, *device_tmp;
+ 	struct binder_device *device;
+ 	struct hlist_node *tmp;
+ 
+@@ -4263,7 +4263,8 @@ static int __init binder_init(void)
+ 	}
+ 	strcpy(device_names, binder_devices_param);
+ 
+-	while ((device_name = strsep(&device_names, ","))) {
++	device_tmp = device_names;
++	while ((device_name = strsep(&device_tmp, ","))) {
+ 		ret = init_binder_device(device_name);
+ 		if (ret)
+ 			goto err_init_binder_device_failed;
+@@ -4277,6 +4278,9 @@ static int __init binder_init(void)
+ 		hlist_del(&device->hlist);
+ 		kfree(device);
+ 	}
++
++	kfree(device_names);
++
+ err_alloc_device_names_failed:
+ 	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
+ 
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 5a5fd0b404eb..cb9b0e9090e3 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -1469,7 +1469,14 @@ static void ahci_remap_check(struct pci_dev *pdev, int bar,
+ 		return;
+ 
+ 	dev_warn(&pdev->dev, "Found %d remapped NVMe devices.\n", count);
+-	dev_warn(&pdev->dev, "Switch your BIOS from RAID to AHCI mode to use them.\n");
++	dev_warn(&pdev->dev,
++		 "Switch your BIOS from RAID to AHCI mode to use them.\n");
++
++	/*
++	 * Don't rely on the msi-x capability in the remap case,
++	 * share the legacy interrupt across ahci and remapped devices.
++	 */
++	hpriv->flags |= AHCI_HFLAG_NO_MSI;
+ }
+ 
+ static int ahci_get_irq_vector(struct ata_host *host, int port)
+diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
+index 8d4d959a821c..8706533db57b 100644
+--- a/drivers/ata/pata_amd.c
++++ b/drivers/ata/pata_amd.c
+@@ -616,6 +616,7 @@ static const struct pci_device_id amd[] = {
+ 	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE),	8 },
+ 	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE),	8 },
+ 	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_CS5536_IDE),		9 },
++	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_CS5536_DEV_IDE),	9 },
+ 
+ 	{ },
+ };
+diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
+index 6c15a554efbe..dc1255294628 100644
+--- a/drivers/ata/pata_cs5536.c
++++ b/drivers/ata/pata_cs5536.c
+@@ -289,6 +289,7 @@ static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+ 
+ static const struct pci_device_id cs5536[] = {
+ 	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_CS5536_IDE), },
++	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_CS5536_DEV_IDE), },
+ 	{ },
+ };
+ 
+diff --git a/drivers/base/bus.c b/drivers/base/bus.c
+index e162c9a789ba..22a64fd3309b 100644
+--- a/drivers/base/bus.c
++++ b/drivers/base/bus.c
+@@ -698,7 +698,7 @@ int bus_add_driver(struct device_driver *drv)
+ 
+ out_unregister:
+ 	kobject_put(&priv->kobj);
+-	kfree(drv->p);
++	/* drv->p is freed in driver_release()  */
+ 	drv->p = NULL;
+ out_put_bus:
+ 	bus_put(bus);
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index fa24d693af24..e331e212f5fc 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -357,6 +357,7 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3410), .driver_info = BTUSB_REALTEK },
+ 	{ USB_DEVICE(0x13d3, 0x3416), .driver_info = BTUSB_REALTEK },
+ 	{ USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK },
++	{ USB_DEVICE(0x13d3, 0x3494), .driver_info = BTUSB_REALTEK },
+ 
+ 	/* Additional Realtek 8821AE Bluetooth devices */
+ 	{ USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
+diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
+index 0488b7f81dcf..54f3b375a453 100644
+--- a/drivers/crypto/caam/caamalg.c
++++ b/drivers/crypto/caam/caamalg.c
+@@ -81,40 +81,6 @@
+ #define debug(format, arg...)
+ #endif
+ 
+-#ifdef DEBUG
+-#include <linux/highmem.h>
+-
+-static void dbg_dump_sg(const char *level, const char *prefix_str,
+-			int prefix_type, int rowsize, int groupsize,
+-			struct scatterlist *sg, size_t tlen, bool ascii)
+-{
+-	struct scatterlist *it;
+-	void *it_page;
+-	size_t len;
+-	void *buf;
+-
+-	for (it = sg; it != NULL && tlen > 0 ; it = sg_next(sg)) {
+-		/*
+-		 * make sure the scatterlist's page
+-		 * has a valid virtual memory mapping
+-		 */
+-		it_page = kmap_atomic(sg_page(it));
+-		if (unlikely(!it_page)) {
+-			printk(KERN_ERR "dbg_dump_sg: kmap failed\n");
+-			return;
+-		}
+-
+-		buf = it_page + it->offset;
+-		len = min_t(size_t, tlen, it->length);
+-		print_hex_dump(level, prefix_str, prefix_type, rowsize,
+-			       groupsize, buf, len, ascii);
+-		tlen -= len;
+-
+-		kunmap_atomic(it_page);
+-	}
+-}
+-#endif
+-
+ static struct list_head alg_list;
+ 
+ struct caam_alg_entry {
+@@ -898,10 +864,10 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ 	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
+ 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ 		       edesc->src_nents > 1 ? 100 : ivsize, 1);
+-	dbg_dump_sg(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
+-		    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+-		    edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+ #endif
++	caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
++		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
++		     edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+ 
+ 	ablkcipher_unmap(jrdev, edesc, req);
+ 
+@@ -937,10 +903,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ 	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
+ 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ 		       ivsize, 1);
+-	dbg_dump_sg(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
+-		    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+-		    edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+ #endif
++	caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
++		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
++		     edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+ 
+ 	ablkcipher_unmap(jrdev, edesc, req);
+ 
+@@ -1107,10 +1073,10 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
+ 		       ivsize, 1);
+ 	pr_err("asked=%d, nbytes%d\n",
+ 	       (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes);
+-	dbg_dump_sg(KERN_ERR, "src    @"__stringify(__LINE__)": ",
+-		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+-		    edesc->src_nents > 1 ? 100 : req->nbytes, 1);
+ #endif
++	caam_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__)": ",
++		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
++		     edesc->src_nents > 1 ? 100 : req->nbytes, 1);
+ 
+ 	len = desc_len(sh_desc);
+ 	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+@@ -1164,10 +1130,10 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
+ 	print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
+ 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ 		       ivsize, 1);
+-	dbg_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__) ": ",
+-		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+-		    edesc->src_nents > 1 ? 100 : req->nbytes, 1);
+ #endif
++	caam_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__) ": ",
++		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
++		     edesc->src_nents > 1 ? 100 : req->nbytes, 1);
+ 
+ 	len = desc_len(sh_desc);
+ 	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+@@ -1449,11 +1415,9 @@ static int aead_decrypt(struct aead_request *req)
+ 	u32 *desc;
+ 	int ret = 0;
+ 
+-#ifdef DEBUG
+-	dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
+-		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+-		    req->assoclen + req->cryptlen, 1);
+-#endif
++	caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ",
++		     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
++		     req->assoclen + req->cryptlen, 1);
+ 
+ 	/* allocate extended descriptor */
+ 	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
+diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
+index 78c4c0485c58..3425f2d9a2a1 100644
+--- a/drivers/crypto/caam/caamalg_qi.c
++++ b/drivers/crypto/caam/caamalg_qi.c
+@@ -791,9 +791,9 @@ static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
+ 	print_hex_dump(KERN_ERR, "dstiv  @" __stringify(__LINE__)": ",
+ 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ 		       edesc->src_nents > 1 ? 100 : ivsize, 1);
+-	dbg_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
+-		    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+-		    edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
++	caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
++		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
++		     edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+ #endif
+ 
+ 	ablkcipher_unmap(qidev, edesc, req);
+diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
+index 6f44ccb55c63..3d639f3b45aa 100644
+--- a/drivers/crypto/caam/error.c
++++ b/drivers/crypto/caam/error.c
+@@ -9,6 +9,46 @@
+ #include "desc.h"
+ #include "error.h"
+ 
++#ifdef DEBUG
++#include <linux/highmem.h>
++
++void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
++		  int rowsize, int groupsize, struct scatterlist *sg,
++		  size_t tlen, bool ascii)
++{
++	struct scatterlist *it;
++	void *it_page;
++	size_t len;
++	void *buf;
++
++	for (it = sg; it && tlen > 0 ; it = sg_next(sg)) {
++		/*
++		 * make sure the scatterlist's page
++		 * has a valid virtual memory mapping
++		 */
++		it_page = kmap_atomic(sg_page(it));
++		if (unlikely(!it_page)) {
++			pr_err("caam_dump_sg: kmap failed\n");
++			return;
++		}
++
++		buf = it_page + it->offset;
++		len = min_t(size_t, tlen, it->length);
++		print_hex_dump(level, prefix_str, prefix_type, rowsize,
++			       groupsize, buf, len, ascii);
++		tlen -= len;
++
++		kunmap_atomic(it_page);
++	}
++}
++#else
++void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
++		  int rowsize, int groupsize, struct scatterlist *sg,
++		  size_t tlen, bool ascii)
++{}
++#endif /* DEBUG */
++EXPORT_SYMBOL(caam_dump_sg);
++
+ static const struct {
+ 	u8 value;
+ 	const char *error_text;
+diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
+index b6350b0d9153..250e1a21c473 100644
+--- a/drivers/crypto/caam/error.h
++++ b/drivers/crypto/caam/error.h
+@@ -8,4 +8,8 @@
+ #define CAAM_ERROR_H
+ #define CAAM_ERROR_STR_MAX 302
+ void caam_jr_strstatus(struct device *jrdev, u32 status);
++
++void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
++		  int rowsize, int groupsize, struct scatterlist *sg,
++		  size_t tlen, bool ascii);
+ #endif /* CAAM_ERROR_H */
+diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
+index 1990ed460c46..53aed5816416 100644
+--- a/drivers/crypto/caam/qi.c
++++ b/drivers/crypto/caam/qi.c
+@@ -55,6 +55,7 @@ struct caam_qi_pcpu_priv {
+ } ____cacheline_aligned;
+ 
+ static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);
++static DEFINE_PER_CPU(int, last_cpu);
+ 
+ /*
+  * caam_qi_priv - CAAM QI backend private params
+@@ -392,7 +393,6 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
+ 	dma_addr_t hwdesc;
+ 	struct caam_drv_ctx *drv_ctx;
+ 	const cpumask_t *cpus = qman_affine_cpus();
+-	static DEFINE_PER_CPU(int, last_cpu);
+ 
+ 	num_words = desc_len(sh_desc);
+ 	if (num_words > MAX_SDLEN) {
+diff --git a/drivers/fpga/altera-hps2fpga.c b/drivers/fpga/altera-hps2fpga.c
+index 3066b805f2d0..08c0ecb7d109 100644
+--- a/drivers/fpga/altera-hps2fpga.c
++++ b/drivers/fpga/altera-hps2fpga.c
+@@ -66,7 +66,7 @@ static int alt_hps2fpga_enable_show(struct fpga_bridge *bridge)
+ 
+ /* The L3 REMAP register is write only, so keep a cached value. */
+ static unsigned int l3_remap_shadow;
+-static spinlock_t l3_remap_lock;
++static DEFINE_SPINLOCK(l3_remap_lock);
+ 
+ static int _alt_hps2fpga_enable_set(struct altera_hps2fpga_data *priv,
+ 				    bool enable)
+@@ -171,8 +171,6 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev)
+ 		return -EBUSY;
+ 	}
+ 
+-	spin_lock_init(&l3_remap_lock);
+-
+ 	if (!of_property_read_u32(dev->of_node, "bridge-enable", &enable)) {
+ 		if (enable > 1) {
+ 			dev_warn(dev, "invalid bridge-enable %u > 1\n", enable);
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index ae5f06895562..da2745b3d0a7 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -2540,8 +2540,8 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
+ 	int req_slots;
+ 
+ 	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
+-	if (topology_state == NULL)
+-		return -ENOMEM;
++	if (IS_ERR(topology_state))
++		return PTR_ERR(topology_state);
+ 
+ 	port = drm_dp_get_validated_port_ref(mgr, port);
+ 	if (port == NULL)
+@@ -2580,8 +2580,8 @@ int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
+ 	struct drm_dp_mst_topology_state *topology_state;
+ 
+ 	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
+-	if (topology_state == NULL)
+-		return -ENOMEM;
++	if (IS_ERR(topology_state))
++		return PTR_ERR(topology_state);
+ 
+ 	/* We cannot rely on port->vcpi.num_slots to update
+ 	 * topology_state->avail_slots as the port may not exist if the parent
+diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
+index 2bc0dc985214..3d35ea3e95db 100644
+--- a/drivers/gpu/drm/nouveau/nv50_display.c
++++ b/drivers/gpu/drm/nouveau/nv50_display.c
+@@ -4134,7 +4134,7 @@ nv50_disp_atomic_commit(struct drm_device *dev,
+ 	if (!nonblock) {
+ 		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
+ 		if (ret)
+-			goto done;
++			goto err_cleanup;
+ 	}
+ 
+ 	for_each_plane_in_state(state, plane, plane_state, i) {
+@@ -4162,7 +4162,7 @@ nv50_disp_atomic_commit(struct drm_device *dev,
+ 		if (crtc->state->enable) {
+ 			if (!drm->have_disp_power_ref) {
+ 				drm->have_disp_power_ref = true;
+-				return ret;
++				return 0;
+ 			}
+ 			active = true;
+ 			break;
+@@ -4174,6 +4174,9 @@ nv50_disp_atomic_commit(struct drm_device *dev,
+ 		drm->have_disp_power_ref = false;
+ 	}
+ 
++err_cleanup:
++	if (ret)
++		drm_atomic_helper_cleanup_planes(dev, state);
+ done:
+ 	pm_runtime_put_autosuspend(dev->dev);
+ 	return ret;
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+index eb9b278198b2..a4cb82495cee 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+@@ -192,6 +192,10 @@ nvkm_pci_new_(const struct nvkm_pci_func *func, struct nvkm_device *device,
+ 		}
+ 	}
+ 
++#ifdef __BIG_ENDIAN
++	pci->msi = false;
++#endif
++
+ 	pci->msi = nvkm_boolopt(device->cfgopt, "NvMSI", pci->msi);
+ 	if (pci->msi && func->msi_rearm) {
+ 		pci->msi = pci_enable_msi(pci->pdev) == 0;
+diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+index eeddc1e48409..871599826773 100644
+--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+@@ -615,7 +615,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
+ 		} else {
+ 			pr_err("Failed to fill pool (%p)\n", pool);
+ 			/* If we have any pages left put them to the pool. */
+-			list_for_each_entry(p, &pool->list, lru) {
++			list_for_each_entry(p, &new_pages, lru) {
+ 				++cpages;
+ 			}
+ 			list_splice(&new_pages, &pool->list);
+diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
+index 18f401b442c2..c938af8c40cf 100644
+--- a/drivers/gpu/drm/vgem/vgem_drv.c
++++ b/drivers/gpu/drm/vgem/vgem_drv.c
+@@ -52,6 +52,7 @@ static void vgem_gem_free_object(struct drm_gem_object *obj)
+ 	struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
+ 
+ 	kvfree(vgem_obj->pages);
++	mutex_destroy(&vgem_obj->pages_lock);
+ 
+ 	if (obj->import_attach)
+ 		drm_prime_gem_destroy(obj, vgem_obj->table);
+@@ -76,11 +77,15 @@ static int vgem_gem_fault(struct vm_fault *vmf)
+ 	if (page_offset > num_pages)
+ 		return VM_FAULT_SIGBUS;
+ 
++	ret = -ENOENT;
++	mutex_lock(&obj->pages_lock);
+ 	if (obj->pages) {
+ 		get_page(obj->pages[page_offset]);
+ 		vmf->page = obj->pages[page_offset];
+ 		ret = 0;
+-	} else {
++	}
++	mutex_unlock(&obj->pages_lock);
++	if (ret) {
+ 		struct page *page;
+ 
+ 		page = shmem_read_mapping_page(
+@@ -161,6 +166,8 @@ static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev,
+ 		return ERR_PTR(ret);
+ 	}
+ 
++	mutex_init(&obj->pages_lock);
++
+ 	return obj;
+ }
+ 
+@@ -274,37 +281,66 @@ static const struct file_operations vgem_driver_fops = {
+ 	.release	= drm_release,
+ };
+ 
++static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
++{
++	mutex_lock(&bo->pages_lock);
++	if (bo->pages_pin_count++ == 0) {
++		struct page **pages;
++
++		pages = drm_gem_get_pages(&bo->base);
++		if (IS_ERR(pages)) {
++			bo->pages_pin_count--;
++			mutex_unlock(&bo->pages_lock);
++			return pages;
++		}
++
++		bo->pages = pages;
++	}
++	mutex_unlock(&bo->pages_lock);
++
++	return bo->pages;
++}
++
++static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
++{
++	mutex_lock(&bo->pages_lock);
++	if (--bo->pages_pin_count == 0) {
++		drm_gem_put_pages(&bo->base, bo->pages, true, true);
++		bo->pages = NULL;
++	}
++	mutex_unlock(&bo->pages_lock);
++}
++
+ static int vgem_prime_pin(struct drm_gem_object *obj)
+ {
++	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
+ 	long n_pages = obj->size >> PAGE_SHIFT;
+ 	struct page **pages;
+ 
+-	/* Flush the object from the CPU cache so that importers can rely
+-	 * on coherent indirect access via the exported dma-address.
+-	 */
+-	pages = drm_gem_get_pages(obj);
++	pages = vgem_pin_pages(bo);
+ 	if (IS_ERR(pages))
+ 		return PTR_ERR(pages);
+ 
++	/* Flush the object from the CPU cache so that importers can rely
++	 * on coherent indirect access via the exported dma-address.
++	 */
+ 	drm_clflush_pages(pages, n_pages);
+-	drm_gem_put_pages(obj, pages, true, false);
+ 
+ 	return 0;
+ }
+ 
+-static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
++static void vgem_prime_unpin(struct drm_gem_object *obj)
+ {
+-	struct sg_table *st;
+-	struct page **pages;
++	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
+ 
+-	pages = drm_gem_get_pages(obj);
+-	if (IS_ERR(pages))
+-		return ERR_CAST(pages);
++	vgem_unpin_pages(bo);
++}
+ 
+-	st = drm_prime_pages_to_sg(pages, obj->size >> PAGE_SHIFT);
+-	drm_gem_put_pages(obj, pages, false, false);
++static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
++{
++	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
+ 
+-	return st;
++	return drm_prime_pages_to_sg(bo->pages, bo->base.size >> PAGE_SHIFT);
+ }
+ 
+ static struct drm_gem_object* vgem_prime_import(struct drm_device *dev,
+@@ -333,6 +369,8 @@ static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
+ 		__vgem_gem_destroy(obj);
+ 		return ERR_PTR(-ENOMEM);
+ 	}
++
++	obj->pages_pin_count++; /* perma-pinned */
+ 	drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL,
+ 					npages);
+ 	return &obj->base;
+@@ -340,23 +378,23 @@ static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
+ 
+ static void *vgem_prime_vmap(struct drm_gem_object *obj)
+ {
++	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
+ 	long n_pages = obj->size >> PAGE_SHIFT;
+ 	struct page **pages;
+-	void *addr;
+ 
+-	pages = drm_gem_get_pages(obj);
++	pages = vgem_pin_pages(bo);
+ 	if (IS_ERR(pages))
+ 		return NULL;
+ 
+-	addr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
+-	drm_gem_put_pages(obj, pages, false, false);
+-
+-	return addr;
++	return vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
+ }
+ 
+ static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+ {
++	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
++
+ 	vunmap(vaddr);
++	vgem_unpin_pages(bo);
+ }
+ 
+ static int vgem_prime_mmap(struct drm_gem_object *obj,
+@@ -409,6 +447,7 @@ static struct drm_driver vgem_driver = {
+ 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ 	.gem_prime_pin = vgem_prime_pin,
++	.gem_prime_unpin = vgem_prime_unpin,
+ 	.gem_prime_import = vgem_prime_import,
+ 	.gem_prime_export = drm_gem_prime_export,
+ 	.gem_prime_import_sg_table = vgem_prime_import_sg_table,
+diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h
+index 1aae01419112..5c8f6d619ff3 100644
+--- a/drivers/gpu/drm/vgem/vgem_drv.h
++++ b/drivers/gpu/drm/vgem/vgem_drv.h
+@@ -43,7 +43,11 @@ struct vgem_file {
+ #define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
+ struct drm_vgem_gem_object {
+ 	struct drm_gem_object base;
++
+ 	struct page **pages;
++	unsigned int pages_pin_count;
++	struct mutex pages_lock;
++
+ 	struct sg_table *table;
+ };
+ 
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 9f940293ede4..bb17d7bbefd3 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -1846,7 +1846,13 @@ static void wacom_wac_pad_usage_mapping(struct hid_device *hdev,
+ 		features->device_type |= WACOM_DEVICETYPE_PAD;
+ 		break;
+ 	case WACOM_HID_WD_TOUCHRINGSTATUS:
+-		wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0);
++		/*
++		 * Only set up type/code association. Completely mapping
++		 * this usage may overwrite the axis resolution and range.
++		 */
++		usage->type = EV_ABS;
++		usage->code = ABS_WHEEL;
++		set_bit(EV_ABS, input->evbit);
+ 		features->device_type |= WACOM_DEVICETYPE_PAD;
+ 		break;
+ 	case WACOM_HID_WD_BUTTONCONFIG:
+diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
+index 590cf90dd21a..da40df2ff27d 100644
+--- a/drivers/hwtracing/intel_th/pci.c
++++ b/drivers/hwtracing/intel_th/pci.c
+@@ -95,6 +95,16 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
+ 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
+ 		.driver_data = (kernel_ulong_t)0,
+ 	},
++	{
++		/* Cannon Lake H */
++		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa326),
++		.driver_data = (kernel_ulong_t)0,
++	},
++	{
++		/* Cannon Lake LP */
++		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9da6),
++		.driver_data = (kernel_ulong_t)0,
++	},
+ 	{ 0 },
+ };
+ 
+diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
+index 884b8e461b17..9c501e5042fb 100644
+--- a/drivers/iio/adc/ti-ads1015.c
++++ b/drivers/iio/adc/ti-ads1015.c
+@@ -81,18 +81,12 @@ static const unsigned int ads1115_data_rate[] = {
+ 	8, 16, 32, 64, 128, 250, 475, 860
+ };
+ 
+-static const struct {
+-	int scale;
+-	int uscale;
+-} ads1015_scale[] = {
+-	{3, 0},
+-	{2, 0},
+-	{1, 0},
+-	{0, 500000},
+-	{0, 250000},
+-	{0, 125000},
+-	{0, 125000},
+-	{0, 125000},
++/*
++ * Translation from PGA bits to full-scale positive and negative input voltage
++ * range in mV
++ */
++static int ads1015_fullscale_range[] = {
++	6144, 4096, 2048, 1024, 512, 256, 256, 256
+ };
+ 
+ #define ADS1015_V_CHAN(_chan, _addr) {				\
+@@ -183,6 +177,12 @@ struct ads1015_data {
+ 	struct ads1015_channel_data channel_data[ADS1015_CHANNELS];
+ 
+ 	unsigned int *data_rate;
++	/*
++	 * Set to true when the ADC is switched to the continuous-conversion
++	 * mode and exits from a power-down state.  This flag is used to avoid
++	 * getting the stale result from the conversion register.
++	 */
++	bool conv_invalid;
+ };
+ 
+ static bool ads1015_is_writeable_reg(struct device *dev, unsigned int reg)
+@@ -235,33 +235,43 @@ static int ads1015_set_power_state(struct ads1015_data *data, bool on)
+ 		ret = pm_runtime_put_autosuspend(dev);
+ 	}
+ 
+-	return ret;
++	return ret < 0 ? ret : 0;
+ }
+ 
+ static
+ int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val)
+ {
+ 	int ret, pga, dr, conv_time;
+-	bool change;
++	unsigned int old, mask, cfg;
+ 
+ 	if (chan < 0 || chan >= ADS1015_CHANNELS)
+ 		return -EINVAL;
+ 
++	ret = regmap_read(data->regmap, ADS1015_CFG_REG, &old);
++	if (ret)
++		return ret;
++
+ 	pga = data->channel_data[chan].pga;
+ 	dr = data->channel_data[chan].data_rate;
++	mask = ADS1015_CFG_MUX_MASK | ADS1015_CFG_PGA_MASK |
++		ADS1015_CFG_DR_MASK;
++	cfg = chan << ADS1015_CFG_MUX_SHIFT | pga << ADS1015_CFG_PGA_SHIFT |
++		dr << ADS1015_CFG_DR_SHIFT;
+ 
+-	ret = regmap_update_bits_check(data->regmap, ADS1015_CFG_REG,
+-				       ADS1015_CFG_MUX_MASK |
+-				       ADS1015_CFG_PGA_MASK,
+-				       chan << ADS1015_CFG_MUX_SHIFT |
+-				       pga << ADS1015_CFG_PGA_SHIFT,
+-				       &change);
+-	if (ret < 0)
++	cfg = (old & ~mask) | (cfg & mask);
++
++	ret = regmap_write(data->regmap, ADS1015_CFG_REG, cfg);
++	if (ret)
+ 		return ret;
+ 
+-	if (change) {
+-		conv_time = DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr]);
++	if (old != cfg || data->conv_invalid) {
++		int dr_old = (old & ADS1015_CFG_DR_MASK) >>
++				ADS1015_CFG_DR_SHIFT;
++
++		conv_time = DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr_old]);
++		conv_time += DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr]);
+ 		usleep_range(conv_time, conv_time + 1);
++		data->conv_invalid = false;
+ 	}
+ 
+ 	return regmap_read(data->regmap, ADS1015_CONV_REG, val);
+@@ -298,17 +308,20 @@ static irqreturn_t ads1015_trigger_handler(int irq, void *p)
+ 	return IRQ_HANDLED;
+ }
+ 
+-static int ads1015_set_scale(struct ads1015_data *data, int chan,
++static int ads1015_set_scale(struct ads1015_data *data,
++			     struct iio_chan_spec const *chan,
+ 			     int scale, int uscale)
+ {
+ 	int i, ret, rindex = -1;
++	int fullscale = div_s64((scale * 1000000LL + uscale) <<
++				(chan->scan_type.realbits - 1), 1000000);
+ 
+-	for (i = 0; i < ARRAY_SIZE(ads1015_scale); i++)
+-		if (ads1015_scale[i].scale == scale &&
+-		    ads1015_scale[i].uscale == uscale) {
++	for (i = 0; i < ARRAY_SIZE(ads1015_fullscale_range); i++) {
++		if (ads1015_fullscale_range[i] == fullscale) {
+ 			rindex = i;
+ 			break;
+ 		}
++	}
+ 	if (rindex < 0)
+ 		return -EINVAL;
+ 
+@@ -318,32 +331,23 @@ static int ads1015_set_scale(struct ads1015_data *data, int chan,
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	data->channel_data[chan].pga = rindex;
++	data->channel_data[chan->address].pga = rindex;
+ 
+ 	return 0;
+ }
+ 
+ static int ads1015_set_data_rate(struct ads1015_data *data, int chan, int rate)
+ {
+-	int i, ret, rindex = -1;
++	int i;
+ 
+-	for (i = 0; i < ARRAY_SIZE(ads1015_data_rate); i++)
++	for (i = 0; i < ARRAY_SIZE(ads1015_data_rate); i++) {
+ 		if (data->data_rate[i] == rate) {
+-			rindex = i;
+-			break;
++			data->channel_data[chan].data_rate = i;
++			return 0;
+ 		}
+-	if (rindex < 0)
+-		return -EINVAL;
+-
+-	ret = regmap_update_bits(data->regmap, ADS1015_CFG_REG,
+-				 ADS1015_CFG_DR_MASK,
+-				 rindex << ADS1015_CFG_DR_SHIFT);
+-	if (ret < 0)
+-		return ret;
+-
+-	data->channel_data[chan].data_rate = rindex;
++	}
+ 
+-	return 0;
++	return -EINVAL;
+ }
+ 
+ static int ads1015_read_raw(struct iio_dev *indio_dev,
+@@ -385,9 +389,9 @@ static int ads1015_read_raw(struct iio_dev *indio_dev,
+ 	}
+ 	case IIO_CHAN_INFO_SCALE:
+ 		idx = data->channel_data[chan->address].pga;
+-		*val = ads1015_scale[idx].scale;
+-		*val2 = ads1015_scale[idx].uscale;
+-		ret = IIO_VAL_INT_PLUS_MICRO;
++		*val = ads1015_fullscale_range[idx];
++		*val2 = chan->scan_type.realbits - 1;
++		ret = IIO_VAL_FRACTIONAL_LOG2;
+ 		break;
+ 	case IIO_CHAN_INFO_SAMP_FREQ:
+ 		idx = data->channel_data[chan->address].data_rate;
+@@ -414,7 +418,7 @@ static int ads1015_write_raw(struct iio_dev *indio_dev,
+ 	mutex_lock(&data->lock);
+ 	switch (mask) {
+ 	case IIO_CHAN_INFO_SCALE:
+-		ret = ads1015_set_scale(data, chan->address, val, val2);
++		ret = ads1015_set_scale(data, chan, val, val2);
+ 		break;
+ 	case IIO_CHAN_INFO_SAMP_FREQ:
+ 		ret = ads1015_set_data_rate(data, chan->address, val);
+@@ -446,7 +450,10 @@ static const struct iio_buffer_setup_ops ads1015_buffer_setup_ops = {
+ 	.validate_scan_mask = &iio_validate_scan_mask_onehot,
+ };
+ 
+-static IIO_CONST_ATTR(scale_available, "3 2 1 0.5 0.25 0.125");
++static IIO_CONST_ATTR_NAMED(ads1015_scale_available, scale_available,
++	"3 2 1 0.5 0.25 0.125");
++static IIO_CONST_ATTR_NAMED(ads1115_scale_available, scale_available,
++	"0.1875 0.125 0.0625 0.03125 0.015625 0.007813");
+ 
+ static IIO_CONST_ATTR_NAMED(ads1015_sampling_frequency_available,
+ 	sampling_frequency_available, "128 250 490 920 1600 2400 3300");
+@@ -454,7 +461,7 @@ static IIO_CONST_ATTR_NAMED(ads1115_sampling_frequency_available,
+ 	sampling_frequency_available, "8 16 32 64 128 250 475 860");
+ 
+ static struct attribute *ads1015_attributes[] = {
+-	&iio_const_attr_scale_available.dev_attr.attr,
++	&iio_const_attr_ads1015_scale_available.dev_attr.attr,
+ 	&iio_const_attr_ads1015_sampling_frequency_available.dev_attr.attr,
+ 	NULL,
+ };
+@@ -464,7 +471,7 @@ static const struct attribute_group ads1015_attribute_group = {
+ };
+ 
+ static struct attribute *ads1115_attributes[] = {
+-	&iio_const_attr_scale_available.dev_attr.attr,
++	&iio_const_attr_ads1115_scale_available.dev_attr.attr,
+ 	&iio_const_attr_ads1115_sampling_frequency_available.dev_attr.attr,
+ 	NULL,
+ };
+@@ -630,6 +637,15 @@ static int ads1015_probe(struct i2c_client *client,
+ 		dev_err(&client->dev, "iio triggered buffer setup failed\n");
+ 		return ret;
+ 	}
++
++	ret = regmap_update_bits(data->regmap, ADS1015_CFG_REG,
++				ADS1015_CFG_MOD_MASK,
++				ADS1015_CONTINUOUS << ADS1015_CFG_MOD_SHIFT);
++	if (ret)
++		return ret;
++
++	data->conv_invalid = true;
++
+ 	ret = pm_runtime_set_active(&client->dev);
+ 	if (ret)
+ 		goto err_buffer_cleanup;
+@@ -685,10 +701,15 @@ static int ads1015_runtime_resume(struct device *dev)
+ {
+ 	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ 	struct ads1015_data *data = iio_priv(indio_dev);
++	int ret;
+ 
+-	return regmap_update_bits(data->regmap, ADS1015_CFG_REG,
++	ret = regmap_update_bits(data->regmap, ADS1015_CFG_REG,
+ 				  ADS1015_CFG_MOD_MASK,
+ 				  ADS1015_CONTINUOUS << ADS1015_CFG_MOD_SHIFT);
++	if (!ret)
++		data->conv_invalid = true;
++
++	return ret;
+ }
+ #endif
+ 
+diff --git a/drivers/mcb/mcb-lpc.c b/drivers/mcb/mcb-lpc.c
+index d072c088ce73..945091a88354 100644
+--- a/drivers/mcb/mcb-lpc.c
++++ b/drivers/mcb/mcb-lpc.c
+@@ -114,6 +114,12 @@ static struct resource sc24_fpga_resource = {
+ 	.flags = IORESOURCE_MEM,
+ };
+ 
++static struct resource sc31_fpga_resource = {
++	.start = 0xf000e000,
++	.end = 0xf000e000 + CHAM_HEADER_SIZE,
++	.flags = IORESOURCE_MEM,
++};
++
+ static struct platform_driver mcb_lpc_driver = {
+ 	.driver		= {
+ 		.name = "mcb-lpc",
+@@ -132,6 +138,15 @@ static const struct dmi_system_id mcb_lpc_dmi_table[] = {
+ 		.driver_data = (void *)&sc24_fpga_resource,
+ 		.callback = mcb_lpc_create_platform_device,
+ 	},
++	{
++		.ident = "SC31",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "MEN"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "14SC31"),
++		},
++		.driver_data = (void *)&sc31_fpga_resource,
++		.callback = mcb_lpc_create_platform_device,
++	},
+ 	{}
+ };
+ MODULE_DEVICE_TABLE(dmi, mcb_lpc_dmi_table);
+diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
+index 75c5c903c8a6..53fa5b7380ff 100644
+--- a/drivers/net/wireless/ath/ath10k/core.c
++++ b/drivers/net/wireless/ath/ath10k/core.c
+@@ -2055,6 +2055,12 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
+ 		goto err_wmi_detach;
+ 	}
+ 
++	/* If firmware indicates Full Rx Reorder support it must be used in a
++	 * slightly different manner. Let HTT code know.
++	 */
++	ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER,
++						ar->wmi.svc_map));
++
+ 	status = ath10k_htt_rx_alloc(&ar->htt);
+ 	if (status) {
+ 		ath10k_err(ar, "failed to alloc htt rx: %d\n", status);
+@@ -2167,12 +2173,6 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
+ 		}
+ 	}
+ 
+-	/* If firmware indicates Full Rx Reorder support it must be used in a
+-	 * slightly different manner. Let HTT code know.
+-	 */
+-	ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER,
+-						ar->wmi.svc_map));
+-
+ 	status = ath10k_htt_rx_ring_refill(ar);
+ 	if (status) {
+ 		ath10k_err(ar, "failed to refill htt rx ring: %d\n", status);
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+index 84f4ba01e14f..875374e4747d 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+@@ -430,6 +430,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ 	{IWL_PCI_DEVICE(0x095B, 0x520A, iwl7265_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x095A, 0x9000, iwl7265_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x095A, 0x9400, iwl7265_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x095A, 0x9E10, iwl7265_2ac_cfg)},
+ 
+ /* 8000 Series */
+ 	{IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
+diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+index 06ad2d50f9b0..fdfdf2371986 100644
+--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+@@ -4215,7 +4215,7 @@ int mwifiex_init_channel_scan_gap(struct mwifiex_adapter *adapter)
+ 	if (adapter->config_bands & BAND_A)
+ 		n_channels_a = mwifiex_band_5ghz.n_channels;
+ 
+-	adapter->num_in_chan_stats = max_t(u32, n_channels_bg, n_channels_a);
++	adapter->num_in_chan_stats = n_channels_bg + n_channels_a;
+ 	adapter->chan_stats = vmalloc(sizeof(*adapter->chan_stats) *
+ 				      adapter->num_in_chan_stats);
+ 
+diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
+index ae9630b49342..9900855746ac 100644
+--- a/drivers/net/wireless/marvell/mwifiex/scan.c
++++ b/drivers/net/wireless/marvell/mwifiex/scan.c
+@@ -2492,6 +2492,12 @@ mwifiex_update_chan_statistics(struct mwifiex_private *priv,
+ 					      sizeof(struct mwifiex_chan_stats);
+ 
+ 	for (i = 0 ; i < num_chan; i++) {
++		if (adapter->survey_idx >= adapter->num_in_chan_stats) {
++			mwifiex_dbg(adapter, WARN,
++				    "FW reported too many channel results (max %d)\n",
++				    adapter->num_in_chan_stats);
++			return;
++		}
+ 		chan_stats.chan_num = fw_chan_stats->chan_num;
+ 		chan_stats.bandcfg = fw_chan_stats->bandcfg;
+ 		chan_stats.flags = fw_chan_stats->flags;
+diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
+index 032b6317690d..08dc8919ef60 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
++++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
+@@ -2257,7 +2257,7 @@ int rtl_pci_probe(struct pci_dev *pdev,
+ 	/* find adapter */
+ 	if (!_rtl_pci_find_adapter(pdev, hw)) {
+ 		err = -ENODEV;
+-		goto fail3;
++		goto fail2;
+ 	}
+ 
+ 	/* Init IO handler */
+@@ -2318,10 +2318,10 @@ int rtl_pci_probe(struct pci_dev *pdev,
+ 	pci_set_drvdata(pdev, NULL);
+ 	rtl_deinit_core(hw);
+ 
++fail2:
+ 	if (rtlpriv->io.pci_mem_start != 0)
+ 		pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
+ 
+-fail2:
+ 	pci_release_regions(pdev);
+ 	complete(&rtlpriv->firmware_loading_complete);
+ 
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+index 774e72058d24..bddd5a5ebe52 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+@@ -175,6 +175,8 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
+ 				      rtl_fw_cb);
+ 	if (err) {
+ 		pr_info("Failed to request firmware!\n");
++		vfree(rtlpriv->rtlhal.pfirmware);
++		rtlpriv->rtlhal.pfirmware = NULL;
+ 		return 1;
+ 	}
+ 
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+index bcbb0c60f1f1..38f85bfdf0c7 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+@@ -176,6 +176,8 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
+ 				      rtl_fw_cb);
+ 	if (err) {
+ 		pr_err("Failed to request firmware!\n");
++		vfree(rtlpriv->rtlhal.pfirmware);
++		rtlpriv->rtlhal.pfirmware = NULL;
+ 		return 1;
+ 	}
+ 
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+index 96c923b3feb4..e3eb850bb1de 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+@@ -85,6 +85,10 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
+ 	err = request_firmware_nowait(THIS_MODULE, 1,
+ 				      fw_name, rtlpriv->io.dev,
+ 				      GFP_KERNEL, hw, rtl_fw_cb);
++	if (err) {
++		vfree(rtlpriv->rtlhal.pfirmware);
++		rtlpriv->rtlhal.pfirmware = NULL;
++	}
+ 	return err;
+ }
+ 
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
+index 16132c66e5e1..e38d6f7370aa 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
+@@ -183,6 +183,8 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
+ 				      rtl_fw_cb);
+ 	if (err) {
+ 		pr_err("Failed to request firmware!\n");
++		vfree(rtlpriv->rtlhal.pfirmware);
++		rtlpriv->rtlhal.pfirmware = NULL;
+ 		return 1;
+ 	}
+ 
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
+index eaa503b7c4b4..745e9c32655c 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
+@@ -177,6 +177,8 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
+ 				      rtl_fw_cb);
+ 	if (err) {
+ 		pr_err("Failed to request firmware!\n");
++		vfree(rtlpriv->rtlhal.pfirmware);
++		rtlpriv->rtlhal.pfirmware = NULL;
+ 		return 1;
+ 	}
+ 
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+index 2006b09ea74f..1ec20efb9ce1 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+@@ -216,6 +216,8 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
+ 				      rtl92se_fw_cb);
+ 	if (err) {
+ 		pr_err("Failed to request firmware!\n");
++		vfree(rtlpriv->rtlhal.pfirmware);
++		rtlpriv->rtlhal.pfirmware = NULL;
+ 		return 1;
+ 	}
+ 
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+index 7bf9f2557920..aab86667a7f3 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+@@ -184,6 +184,8 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
+ 				      rtl_fw_cb);
+ 	if (err) {
+ 		pr_err("Failed to request firmware!\n");
++		vfree(rtlpriv->rtlhal.pfirmware);
++		rtlpriv->rtlhal.pfirmware = NULL;
+ 		return 1;
+ 	}
+ 	return 0;
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+index f9d10f1e7cf8..f47d839f388d 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+@@ -187,16 +187,10 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
+ 				      rtlpriv->io.dev, GFP_KERNEL, hw,
+ 				      rtl_fw_cb);
+ 	if (err) {
+-		/* Failed to get firmware. Check if old version available */
+-		fw_name = "rtlwifi/rtl8723befw.bin";
+-		pr_info("Using firmware %s\n", fw_name);
+-		err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
+-					      rtlpriv->io.dev, GFP_KERNEL, hw,
+-					      rtl_fw_cb);
+-		if (err) {
+-			pr_err("Failed to request firmware!\n");
+-			return 1;
+-		}
++		pr_err("Failed to request firmware!\n");
++		vfree(rtlpriv->rtlhal.pfirmware);
++		rtlpriv->rtlhal.pfirmware = NULL;
++		return 1;
+ 	}
+ 	return 0;
+ }
+@@ -287,6 +281,7 @@ static const struct rtl_hal_cfg rtl8723be_hal_cfg = {
+ 	.bar_id = 2,
+ 	.write_readback = true,
+ 	.name = "rtl8723be_pci",
++	.alt_fw_name = "rtlwifi/rtl8723befw.bin",
+ 	.ops = &rtl8723be_hal_ops,
+ 	.mod_params = &rtl8723be_mod_params,
+ 	.maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+index d71d2776ca03..5925edf7877f 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+@@ -196,6 +196,8 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
+ 	rtlpriv->rtlhal.wowlan_firmware = vzalloc(0x8000);
+ 	if (!rtlpriv->rtlhal.wowlan_firmware) {
+ 		pr_err("Can't alloc buffer for wowlan fw.\n");
++		vfree(rtlpriv->rtlhal.pfirmware);
++		rtlpriv->rtlhal.pfirmware = NULL;
+ 		return 1;
+ 	}
+ 
+@@ -214,16 +216,10 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
+ 				      rtlpriv->io.dev, GFP_KERNEL, hw,
+ 				      rtl_fw_cb);
+ 	if (err) {
+-		/* Failed to get firmware. Check if old version available */
+-		fw_name = "rtlwifi/rtl8821aefw.bin";
+-		pr_info("Using firmware %s\n", fw_name);
+-		err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
+-					      rtlpriv->io.dev, GFP_KERNEL, hw,
+-					      rtl_fw_cb);
+-		if (err) {
+-			pr_err("Failed to request normal firmware!\n");
+-			return 1;
+-		}
++		pr_err("Failed to request normal firmware!\n");
++		vfree(rtlpriv->rtlhal.wowlan_firmware);
++		vfree(rtlpriv->rtlhal.pfirmware);
++		return 1;
+ 	}
+ 	/*load wowlan firmware*/
+ 	pr_info("Using firmware %s\n", wowlan_fw_name);
+@@ -233,6 +229,8 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
+ 				      rtl_wowlan_fw_cb);
+ 	if (err) {
+ 		pr_err("Failed to request wowlan firmware!\n");
++		vfree(rtlpriv->rtlhal.wowlan_firmware);
++		vfree(rtlpriv->rtlhal.pfirmware);
+ 		return 1;
+ 	}
+ 	return 0;
+@@ -325,6 +323,7 @@ static const struct rtl_hal_cfg rtl8821ae_hal_cfg = {
+ 	.bar_id = 2,
+ 	.write_readback = true,
+ 	.name = "rtl8821ae_pci",
++	.alt_fw_name = "rtlwifi/rtl8821aefw.bin",
+ 	.ops = &rtl8821ae_hal_ops,
+ 	.mod_params = &rtl8821ae_mod_params,
+ 	.maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
+diff --git a/drivers/of/device.c b/drivers/of/device.c
+index e0a28ea341fe..eabfa36383ce 100644
+--- a/drivers/of/device.c
++++ b/drivers/of/device.c
+@@ -274,6 +274,8 @@ ssize_t of_device_modalias(struct device *dev, char *str, ssize_t len)
+ 	ssize_t sl = of_device_get_modalias(dev, str, len - 2);
+ 	if (sl < 0)
+ 		return sl;
++	if (sl > len - 2)
++		return -ENOMEM;
+ 
+ 	str[sl++] = '\n';
+ 	str[sl] = 0;
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index 84e782d8e7c3..aad6ebb51735 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -1233,6 +1233,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
+ 	unsigned long req_sz, len, sa;
+ 	Sg_scatter_hold *rsv_schp;
+ 	int k, length;
++	int ret = 0;
+ 
+ 	if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
+ 		return -ENXIO;
+@@ -1243,8 +1244,11 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
+ 	if (vma->vm_pgoff)
+ 		return -EINVAL;	/* want no offset */
+ 	rsv_schp = &sfp->reserve;
+-	if (req_sz > rsv_schp->bufflen)
+-		return -ENOMEM;	/* cannot map more than reserved buffer */
++	mutex_lock(&sfp->f_mutex);
++	if (req_sz > rsv_schp->bufflen) {
++		ret = -ENOMEM;	/* cannot map more than reserved buffer */
++		goto out;
++	}
+ 
+ 	sa = vma->vm_start;
+ 	length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
+@@ -1258,7 +1262,9 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
+ 	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ 	vma->vm_private_data = sfp;
+ 	vma->vm_ops = &sg_mmap_vm_ops;
+-	return 0;
++out:
++	mutex_unlock(&sfp->f_mutex);
++	return ret;
+ }
+ 
+ static void
+@@ -1735,9 +1741,12 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
+ 		    !sfp->res_in_use) {
+ 			sfp->res_in_use = 1;
+ 			sg_link_reserve(sfp, srp, dxfer_len);
+-		} else if ((hp->flags & SG_FLAG_MMAP_IO) && sfp->res_in_use) {
++		} else if (hp->flags & SG_FLAG_MMAP_IO) {
++			res = -EBUSY; /* sfp->res_in_use == 1 */
++			if (dxfer_len > rsv_schp->bufflen)
++				res = -ENOMEM;
+ 			mutex_unlock(&sfp->f_mutex);
+-			return -EBUSY;
++			return res;
+ 		} else {
+ 			res = sg_build_indirect(req_schp, sfp, dxfer_len);
+ 			if (res) {
+diff --git a/drivers/staging/ccree/ssi_cipher.c b/drivers/staging/ccree/ssi_cipher.c
+index cd2eafc04232..bdc68830389f 100644
+--- a/drivers/staging/ccree/ssi_cipher.c
++++ b/drivers/staging/ccree/ssi_cipher.c
+@@ -23,6 +23,7 @@
+ #include <crypto/aes.h>
+ #include <crypto/ctr.h>
+ #include <crypto/des.h>
++#include <crypto/scatterwalk.h>
+ 
+ #include "ssi_config.h"
+ #include "ssi_driver.h"
+@@ -716,6 +717,7 @@ static int ssi_blkcipher_complete(struct device *dev,
+ {
+ 	int completion_error = 0;
+ 	u32 inflight_counter;
++	struct ablkcipher_request *req = (struct ablkcipher_request *)areq;
+ 
+ 	ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
+ 
+@@ -726,6 +728,22 @@ static int ssi_blkcipher_complete(struct device *dev,
+ 		ctx_p->drvdata->inflight_counter--;
+ 
+ 	if (areq) {
++		/*
++		 * The crypto API expects us to set the req->info to the last
++		 * ciphertext block. For encrypt, simply copy from the result.
++		 * For decrypt, we must copy from a saved buffer since this
++		 * could be an in-place decryption operation and the src is
++		 * lost by this point.
++		 */
++		if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT)  {
++			memcpy(req->info, req_ctx->backup_info, ivsize);
++			kfree(req_ctx->backup_info);
++		} else {
++			scatterwalk_map_and_copy(req->info, req->dst,
++						 (req->nbytes - ivsize),
++						 ivsize, 0);
++		}
++
+ 		ablkcipher_request_complete(areq, completion_error);
+ 		return 0;
+ 	}
+@@ -759,11 +777,13 @@ static int ssi_blkcipher_process(
+ 	if (unlikely(validate_data_size(ctx_p, nbytes))) {
+ 		SSI_LOG_ERR("Unsupported data size %d.\n", nbytes);
+ 		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
+-		return -EINVAL;
++		rc = -EINVAL;
++		goto exit_process;
+ 	}
+ 	if (nbytes == 0) {
+ 		/* No data to process is valid */
+-		return 0;
++		rc = 0;
++		goto exit_process;
+ 	}
+ 	/*For CTS in case of data size aligned to 16 use CBC mode*/
+ 	if (((nbytes % AES_BLOCK_SIZE) == 0) && (ctx_p->cipher_mode == DRV_CIPHER_CBC_CTS)) {
+@@ -842,6 +862,9 @@ static int ssi_blkcipher_process(
+ 	if (cts_restore_flag != 0)
+ 		ctx_p->cipher_mode = DRV_CIPHER_CBC_CTS;
+ 
++	if (rc != -EINPROGRESS)
++		kfree(req_ctx->backup_info);
++
+ 	return rc;
+ }
+ 
+@@ -884,7 +907,6 @@ static int ssi_ablkcipher_encrypt(struct ablkcipher_request *req)
+ 	struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+ 	unsigned int ivsize = crypto_ablkcipher_ivsize(ablk_tfm);
+ 
+-	req_ctx->backup_info = req->info;
+ 	req_ctx->is_giv = false;
+ 
+ 	return ssi_blkcipher_process(tfm, req_ctx, req->dst, req->src, req->nbytes, req->info, ivsize, (void *)req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+@@ -897,8 +919,18 @@ static int ssi_ablkcipher_decrypt(struct ablkcipher_request *req)
+ 	struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+ 	unsigned int ivsize = crypto_ablkcipher_ivsize(ablk_tfm);
+ 
+-	req_ctx->backup_info = req->info;
++	/*
++	 * Allocate and save the last IV sized bytes of the source, which will
++	 * be lost in case of in-place decryption and might be needed for CTS.
++	 */
++	req_ctx->backup_info = kmalloc(ivsize, GFP_KERNEL);
++	if (!req_ctx->backup_info)
++		return -ENOMEM;
++
++	scatterwalk_map_and_copy(req_ctx->backup_info, req->src,
++				 (req->nbytes - ivsize), ivsize, 0);
+ 	req_ctx->is_giv = false;
++
+ 	return ssi_blkcipher_process(tfm, req_ctx, req->dst, req->src, req->nbytes, req->info, ivsize, (void *)req, DRV_CRYPTO_DIRECTION_DECRYPT);
+ }
+ 
+diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
+index e6d28a249fc1..bfbabae1aad8 100644
+--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
+@@ -123,8 +123,8 @@ struct dpaa2_eth_swa {
+ /* Error bits in FD CTRL */
+ #define DPAA2_FD_CTRL_UFD		0x00000004
+ #define DPAA2_FD_CTRL_SBE		0x00000008
+-#define DPAA2_FD_CTRL_FSE		0x00000010
+-#define DPAA2_FD_CTRL_FAERR		0x00000020
++#define DPAA2_FD_CTRL_FSE		0x00000020
++#define DPAA2_FD_CTRL_FAERR		0x00000040
+ 
+ #define DPAA2_FD_RX_ERR_MASK		(DPAA2_FD_CTRL_SBE	| \
+ 					 DPAA2_FD_CTRL_FAERR)
+diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c
+index 36b5a11f21d2..a401b13f5f5e 100644
+--- a/drivers/staging/rts5208/rtsx_scsi.c
++++ b/drivers/staging/rts5208/rtsx_scsi.c
+@@ -414,7 +414,7 @@ void set_sense_data(struct rtsx_chip *chip, unsigned int lun, u8 err_code,
+ 	sense->ascq = ascq;
+ 	if (sns_key_info0 != 0) {
+ 		sense->sns_key_info[0] = SKSV | sns_key_info0;
+-		sense->sns_key_info[1] = (sns_key_info1 & 0xf0) >> 8;
++		sense->sns_key_info[1] = (sns_key_info1 & 0xf0) >> 4;
+ 		sense->sns_key_info[2] = sns_key_info1 & 0x0f;
+ 	}
+ }
+diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
+index 69c0232a22f8..fb40dd0588b9 100644
+--- a/drivers/thunderbolt/ctl.c
++++ b/drivers/thunderbolt/ctl.c
+@@ -804,7 +804,7 @@ struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
+ 	req->request_type = TB_CFG_PKG_RESET;
+ 	req->response = &reply;
+ 	req->response_size = sizeof(reply);
+-	req->response_type = sizeof(TB_CFG_PKG_RESET);
++	req->response_type = TB_CFG_PKG_RESET;
+ 
+ 	res = tb_cfg_request_sync(ctl, req, timeout_msec);
+ 
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index ebe27595c4af..0ff0feddfd1f 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -623,6 +623,8 @@ static void async_completed(struct urb *urb)
+ 	if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET &&
+ 			as->status != -ENOENT)
+ 		cancel_bulk_urbs(ps, as->bulk_addr);
++
++	wake_up(&ps->wait);
+ 	spin_unlock(&ps->lock);
+ 
+ 	if (signr) {
+@@ -630,8 +632,6 @@ static void async_completed(struct urb *urb)
+ 		put_pid(pid);
+ 		put_cred(cred);
+ 	}
+-
+-	wake_up(&ps->wait);
+ }
+ 
+ static void destroy_async(struct usb_dev_state *ps, struct list_head *list)
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 574da2b4529c..82806e311202 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -57,8 +57,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* Microsoft LifeCam-VX700 v2.0 */
+ 	{ USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+-	/* Logitech HD Pro Webcams C920 and C930e */
++	/* Logitech HD Pro Webcams C920, C920-C and C930e */
+ 	{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
++	{ USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
+ 	{ USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
+ 
+ 	/* Logitech ConferenceCam CC3000e */
+@@ -217,6 +218,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x1a0a, 0x0200), .driver_info =
+ 			USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+ 
++	/* Corsair Strafe RGB */
++	{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
++
+ 	/* Acer C120 LED Projector */
+ 	{ USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
+ 
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index c8f38649f749..658d9d1f9ea3 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -142,29 +142,30 @@ static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
+ 			pinfo->sb_type.gen = AMD_CHIPSET_SB700;
+ 		else if (rev >= 0x40 && rev <= 0x4f)
+ 			pinfo->sb_type.gen = AMD_CHIPSET_SB800;
+-	}
+-	pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+-					  0x145c, NULL);
+-	if (pinfo->smbus_dev) {
+-		pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
+ 	} else {
+ 		pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+ 				PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
+ 
+-		if (!pinfo->smbus_dev) {
+-			pinfo->sb_type.gen = NOT_AMD_CHIPSET;
+-			return 0;
++		if (pinfo->smbus_dev) {
++			rev = pinfo->smbus_dev->revision;
++			if (rev >= 0x11 && rev <= 0x14)
++				pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
++			else if (rev >= 0x15 && rev <= 0x18)
++				pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
++			else if (rev >= 0x39 && rev <= 0x3a)
++				pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
++		} else {
++			pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
++							  0x145c, NULL);
++			if (pinfo->smbus_dev) {
++				rev = pinfo->smbus_dev->revision;
++				pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
++			} else {
++				pinfo->sb_type.gen = NOT_AMD_CHIPSET;
++				return 0;
++			}
+ 		}
+-
+-		rev = pinfo->smbus_dev->revision;
+-		if (rev >= 0x11 && rev <= 0x14)
+-			pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
+-		else if (rev >= 0x15 && rev <= 0x18)
+-			pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
+-		else if (rev >= 0x39 && rev <= 0x3a)
+-			pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
+ 	}
+-
+ 	pinfo->sb_type.rev = rev;
+ 	return 1;
+ }
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index 87cbd56cc761..b67692857daf 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -2671,6 +2671,13 @@ static int musb_suspend(struct device *dev)
+ {
+ 	struct musb	*musb = dev_to_musb(dev);
+ 	unsigned long	flags;
++	int ret;
++
++	ret = pm_runtime_get_sync(dev);
++	if (ret < 0) {
++		pm_runtime_put_noidle(dev);
++		return ret;
++	}
+ 
+ 	musb_platform_disable(musb);
+ 	musb_disable_interrupts(musb);
+@@ -2721,14 +2728,6 @@ static int musb_resume(struct device *dev)
+ 	if ((devctl & mask) != (musb->context.devctl & mask))
+ 		musb->port1_status = 0;
+ 
+-	/*
+-	 * The USB HUB code expects the device to be in RPM_ACTIVE once it came
+-	 * out of suspend
+-	 */
+-	pm_runtime_disable(dev);
+-	pm_runtime_set_active(dev);
+-	pm_runtime_enable(dev);
+-
+ 	musb_start(musb);
+ 
+ 	spin_lock_irqsave(&musb->lock, flags);
+@@ -2738,6 +2737,9 @@ static int musb_resume(struct device *dev)
+ 			error);
+ 	spin_unlock_irqrestore(&musb->lock, flags);
+ 
++	pm_runtime_mark_last_busy(dev);
++	pm_runtime_put_autosuspend(dev);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index fe123153b1a5..2a9944326210 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -2023,6 +2023,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) },			/* D-Link DWM-158 */
++	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d0e, 0xff) },			/* D-Link DWM-157 C1 */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff),			/* D-Link DWM-221 B1 */
+ 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff),			/* D-Link DWM-222 */
+diff --git a/fs/dlm/user.c b/fs/dlm/user.c
+index 23488f559cf9..84199151b64b 100644
+--- a/fs/dlm/user.c
++++ b/fs/dlm/user.c
+@@ -355,6 +355,10 @@ static int dlm_device_register(struct dlm_ls *ls, char *name)
+ 	error = misc_register(&ls->ls_device);
+ 	if (error) {
+ 		kfree(ls->ls_device.name);
++		/* this has to be set to NULL
++		 * to avoid a double-free in dlm_device_deregister
++		 */
++		ls->ls_device.name = NULL;
+ 	}
+ fail:
+ 	return error;
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index c71e532da458..4adf6161ec77 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -576,6 +576,7 @@
+ #define PCI_DEVICE_ID_AMD_CS5536_EHC    0x2095
+ #define PCI_DEVICE_ID_AMD_CS5536_UDC    0x2096
+ #define PCI_DEVICE_ID_AMD_CS5536_UOC    0x2097
++#define PCI_DEVICE_ID_AMD_CS5536_DEV_IDE    0x2092
+ #define PCI_DEVICE_ID_AMD_CS5536_IDE    0x209A
+ #define PCI_DEVICE_ID_AMD_LX_VIDEO  0x2081
+ #define PCI_DEVICE_ID_AMD_LX_AES    0x2082
+diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
+index db6dc9dc0482..1c49431f3121 100644
+--- a/include/linux/workqueue.h
++++ b/include/linux/workqueue.h
+@@ -323,8 +323,8 @@ enum {
+ 
+ 	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
+ 	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
+-	__WQ_ORDERED_EXPLICIT	= 1 << 18, /* internal: alloc_ordered_workqueue() */
+ 	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */
++	__WQ_ORDERED_EXPLICIT	= 1 << 19, /* internal: alloc_ordered_workqueue() */
+ 
+ 	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
+ 	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
+diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
+index 51f891fb1b18..7668b5791c91 100644
+--- a/include/uapi/linux/android/binder.h
++++ b/include/uapi/linux/android/binder.h
+@@ -132,6 +132,7 @@ enum {
+ 
+ /* struct binder_fd_array_object - object describing an array of fds in a buffer
+  * @hdr:		common header structure
++ * @pad:		padding to ensure correct alignment
+  * @num_fds:		number of file descriptors in the buffer
+  * @parent:		index in offset array to buffer holding the fd array
+  * @parent_offset:	start offset of fd array in the buffer
+@@ -152,6 +153,7 @@ enum {
+  */
+ struct binder_fd_array_object {
+ 	struct binder_object_header	hdr;
++	__u32				pad;
+ 	binder_size_t			num_fds;
+ 	binder_size_t			parent;
+ 	binder_size_t			parent_offset;
+diff --git a/kernel/configs/android-base.config b/kernel/configs/android-base.config
+index d70829033bb7..d3fd428f4b92 100644
+--- a/kernel/configs/android-base.config
++++ b/kernel/configs/android-base.config
+@@ -10,6 +10,7 @@
+ # CONFIG_USELIB is not set
+ CONFIG_ANDROID=y
+ CONFIG_ANDROID_BINDER_IPC=y
++CONFIG_ANDROID_BINDER_DEVICES=binder,hwbinder,vndbinder
+ CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+ CONFIG_ARMV8_DEPRECATED=y
+ CONFIG_ASHMEM=y


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.13 commit in: /
@ 2017-09-07  0:23 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2017-09-07  0:23 UTC (permalink / raw
  To: gentoo-commits

commit:     73e439923a3fb2f4cf7bd1f1b05256019bddf7e5
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Sep  7 00:23:13 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Sep  7 00:23:13 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=73e43992

Fix for fbcondecor patch. Thanks to juneau_. See bug #629860

 4200_fbcondecor.patch | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/4200_fbcondecor.patch b/4200_fbcondecor.patch
index f7d9879..f853c51 100644
--- a/4200_fbcondecor.patch
+++ b/4200_fbcondecor.patch
@@ -1158,11 +1158,8 @@ index b87f5cf..ce44538 100644
  	fbcon_has_exited = 1;
  }
  
-diff --git a/drivers/video/console/fbcondecor.c b/drivers/video/console/fbcondecor.c
-new file mode 100644
-index 0000000..65cc0d3
---- /dev/null
-+++ b/drivers/video/console/fbcondecor.c
+--- /dev/null	2017-09-04 17:08:58.496102115 -0400
++++ b/drivers/video/console/fbcondecor.c	2017-09-04 18:52:37.309402197 -0400
 @@ -0,0 +1,549 @@
 +/*
 + *  linux/drivers/video/console/fbcondecor.c -- Framebuffer console decorations
@@ -1201,7 +1198,7 @@ index 0000000..65cc0d3
 +#include <linux/fs.h>
 +#include <linux/compat.h>
 +#include <linux/console.h>
-+
++#include <linux/binfmts.h>
 +#include <linux/uaccess.h>
 +#include <asm/irq.h>
 +
@@ -1336,7 +1333,7 @@ index 0000000..65cc0d3
 +	    cfg->ty + cfg->theight > info->var.yres)
 +		return -EINVAL;
 +
-+	len = strlen_user(cfg->theme);
++	len = strnlen_user(cfg->theme, MAX_ARG_STRLEN);
 +	if (!len || len > FBCON_DECOR_THEME_LEN)
 +		return -EINVAL;
 +	tmp = kmalloc(len, GFP_KERNEL);


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.13 commit in: /
@ 2017-08-02 16:51 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2017-08-02 16:51 UTC (permalink / raw
  To: gentoo-commits

commit:     9a50db0860fc67bacfcb9dfde3fa567f7f3c6b4e
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Aug  2 16:51:17 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Aug  2 16:51:17 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9a50db08

Gentoo Linux support config settings and defaults. Patch to add support for namespace user.pax.* on tmpfs. Patch to enable link security restrictions by default. Patch to ensure that /dev/root doesn't appear in /proc/mounts when booting without an initramfs. Patch to enable control of the unaligned access control policy from sysctl. Workaround to enable poweroff on Mac Pro 11. See bug #601964. Kernel patch enables gcc >= v4.9 optimizations for additional CPUs. fbcondecor bootsplash patch.

 0000_README                                        |   32 +
 1500_XATTR_USER_PREFIX.patch                       |   69 +
 ...ble-link-security-restrictions-by-default.patch |   22 +
 2300_enable-poweroff-on-Mac-Pro-11.patch           |   76 +
 2600_enable-key-swapping-for-apple-mac.patch       |  114 ++
 2900_dev-root-proc-mount-fix.patch                 |   38 +
 4200_fbcondecor.patch                              | 2095 ++++++++++++++++++++
 4400_alpha-sysctl-uac.patch                        |  142 ++
 ...able-additional-cpu-optimizations-for-gcc.patch |  530 +++++
 9 files changed, 3118 insertions(+)

diff --git a/0000_README b/0000_README
index 9018993..b6b8110 100644
--- a/0000_README
+++ b/0000_README
@@ -43,6 +43,38 @@ EXPERIMENTAL
 Individual Patch Descriptions:
 --------------------------------------------------------------------------
 
+Patch:  1500_XATTR_USER_PREFIX.patch
+From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
+Desc:   Support for namespace user.pax.* on tmpfs.
+
+Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
+From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
+Desc:   Enable link security restrictions by default.
+
+Patch:  2300_enable-poweroff-on-Mac-Pro-11.patch
+From:   http://kernel.ubuntu.com/git/ubuntu/ubuntu-xenial.git/patch/drivers/pci/quirks.c?id=5080ff61a438f3dd80b88b423e1a20791d8a774c
+Desc:   Workaround to enable poweroff on Mac Pro 11. See bug #601964.
+
+Patch:  2600_enable-key-swapping-for-apple-mac.patch
+From:   https://github.com/free5lot/hid-apple-patched
+Desc:   This hid-apple patch enables swapping of the FN and left Control keys and some additional on some apple keyboards. See bug #622902
+
+Patch:  2900_dev-root-proc-mount-fix.patch
+From:   https://bugs.gentoo.org/show_bug.cgi?id=438380
+Desc:   Ensure that /dev/root doesn't appear in /proc/mounts when bootint without an initramfs.
+
+Patch:  4200_fbcondecor.patch
+From:   http://www.mepiscommunity.org/fbcondecor
+Desc:   Bootsplash ported by Uladzimir Bely. (Bug #596126)
+
+Patch:  4400_alpha-sysctl-uac.patch
+From:   Tobias Klausmann (klausman@gentoo.org) and http://bugs.gentoo.org/show_bug.cgi?id=217323 
+Desc:   Enable control of the unaligned access control policy from sysctl
+
 Patch:  4567_distro-Gentoo-Kconfig.patch
 From:   Tom Wijsman <TomWij@gentoo.org>
 Desc:   Add Gentoo Linux support config settings and defaults.
+
+Patch:  5010_enable-additional-cpu-optimizations-for-gcc.patch
+From:   https://github.com/graysky2/kernel_gcc_patch/
+Desc:   Kernel patch enables gcc >= v4.9 optimizations for additional CPUs.

diff --git a/1500_XATTR_USER_PREFIX.patch b/1500_XATTR_USER_PREFIX.patch
new file mode 100644
index 0000000..bacd032
--- /dev/null
+++ b/1500_XATTR_USER_PREFIX.patch
@@ -0,0 +1,69 @@
+From: Anthony G. Basile <blueness@gentoo.org>
+
+This patch adds support for a restricted user-controlled namespace on
+tmpfs filesystem used to house PaX flags.  The namespace must be of the
+form user.pax.* and its value cannot exceed a size of 8 bytes.
+
+This is needed even on all Gentoo systems so that XATTR_PAX flags
+are preserved for users who might build packages using portage on
+a tmpfs system with a non-hardened kernel and then switch to a
+hardened kernel with XATTR_PAX enabled.
+
+The namespace is added to any user with Extended Attribute support
+enabled for tmpfs.  Users who do not enable xattrs will not have
+the XATTR_PAX flags preserved.
+
+diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
+index 1590c49..5eab462 100644
+--- a/include/uapi/linux/xattr.h
++++ b/include/uapi/linux/xattr.h
+@@ -73,5 +73,9 @@
+ #define XATTR_POSIX_ACL_DEFAULT  "posix_acl_default"
+ #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
+ 
++/* User namespace */
++#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
++#define XATTR_PAX_FLAGS_SUFFIX "flags"
++#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
+ 
+ #endif /* _UAPI_LINUX_XATTR_H */
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 440e2a7..c377172 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -2667,6 +2667,14 @@ static int shmem_xattr_handler_set(const struct xattr_handler *handler,
+ 	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
+ 
+ 	name = xattr_full_name(handler, name);
++
++	if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
++		if (strcmp(name, XATTR_NAME_PAX_FLAGS))
++			return -EOPNOTSUPP;
++		if (size > 8)
++			return -EINVAL;
++	}
++
+ 	return simple_xattr_set(&info->xattrs, name, value, size, flags);
+ }
+ 
+@@ -2682,6 +2690,12 @@ static const struct xattr_handler shmem_trusted_xattr_handler = {
+ 	.set = shmem_xattr_handler_set,
+ };
+ 
++static const struct xattr_handler shmem_user_xattr_handler = {
++	.prefix = XATTR_USER_PREFIX,
++	.get = shmem_xattr_handler_get,
++	.set = shmem_xattr_handler_set,
++};
++
+ static const struct xattr_handler *shmem_xattr_handlers[] = {
+ #ifdef CONFIG_TMPFS_POSIX_ACL
+ 	&posix_acl_access_xattr_handler,
+@@ -2689,6 +2703,7 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
+ #endif
+ 	&shmem_security_xattr_handler,
+ 	&shmem_trusted_xattr_handler,
++	&shmem_user_xattr_handler,
+ 	NULL
+ };
+ 

diff --git a/1510_fs-enable-link-security-restrictions-by-default.patch b/1510_fs-enable-link-security-restrictions-by-default.patch
new file mode 100644
index 0000000..639fb3c
--- /dev/null
+++ b/1510_fs-enable-link-security-restrictions-by-default.patch
@@ -0,0 +1,22 @@
+From: Ben Hutchings <ben@decadent.org.uk>
+Subject: fs: Enable link security restrictions by default
+Date: Fri, 02 Nov 2012 05:32:06 +0000
+Bug-Debian: https://bugs.debian.org/609455
+Forwarded: not-needed
+
+This reverts commit 561ec64ae67ef25cac8d72bb9c4bfc955edfd415
+('VFS: don't do protected {sym,hard}links by default').
+
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -651,8 +651,8 @@ static inline void put_link(struct namei
+ 	path_put(link);
+ }
+ 
+-int sysctl_protected_symlinks __read_mostly = 0;
+-int sysctl_protected_hardlinks __read_mostly = 0;
++int sysctl_protected_symlinks __read_mostly = 1;
++int sysctl_protected_hardlinks __read_mostly = 1;
+ 
+ /**
+  * may_follow_link - Check symlink following for unsafe situations

diff --git a/2300_enable-poweroff-on-Mac-Pro-11.patch b/2300_enable-poweroff-on-Mac-Pro-11.patch
new file mode 100644
index 0000000..063f2a1
--- /dev/null
+++ b/2300_enable-poweroff-on-Mac-Pro-11.patch
@@ -0,0 +1,76 @@
+From 5080ff61a438f3dd80b88b423e1a20791d8a774c Mon Sep 17 00:00:00 2001
+From: Chen Yu <yu.c.chen@intel.com>
+Date: Fri, 19 Aug 2016 10:25:57 -0700
+Subject: UBUNTU: SAUCE: PCI: Workaround to enable poweroff on Mac Pro 11
+
+BugLink: http://bugs.launchpad.net/bugs/1587714
+
+People reported that they can not do a poweroff nor a
+suspend to ram on their Mac Pro 11. After some investigations
+it was found that, once the PCI bridge 0000:00:1c.0 reassigns its
+mm windows to ([mem 0x7fa00000-0x7fbfffff] and
+[mem 0x7fc00000-0x7fdfffff 64bit pref]), the region of ACPI
+io resource 0x1804 becomes unaccessible immediately, where the
+ACPI Sleep register is located, as a result neither poweroff(S5)
+nor suspend to ram(S3) works.
+
+As suggested by Bjorn, further testing shows that, there is an
+unreported device may be (using) conflict with above aperture,
+which brings unpredictable result such as the failure of accessing
+the io port, which blocks the poweroff(S5). Besides if we reassign
+the memory aperture to the other place, the poweroff works again.
+
+As we do not find any resource declared in _CRS which contain above
+memory aperture, and Mac OS does not use this pci bridge neither, we
+choose a simple workaround to clear the hotplug flag(suggested by
+Yinghai Lu), thus do not allocate any resource for this pci bridge,
+and thereby no conflict anymore.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=103211
+Cc: Bjorn Helgaas <bhelgaas@google.com>
+Cc: Rafael J. Wysocki <rafael@kernel.org>
+Cc: Lukas Wunner <lukas@wunner.de>
+Signed-off-by: Chen Yu <yu.c.chen@intel.com>
+Reference: https://patchwork.kernel.org/patch/9289777/
+Signed-off-by: Kamal Mostafa <kamal@canonical.com>
+Acked-by: Brad Figg <brad.figg@canonical.com>
+Acked-by: Stefan Bader <stefan.bader@canonical.com>
+Signed-off-by: Tim Gardner <tim.gardner@canonical.com>
+---
+ drivers/pci/quirks.c | 20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 48cfaa0..23968b6 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -2750,6 +2750,26 @@ static void quirk_hotplug_bridge(struct pci_dev *dev)
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HINT, 0x0020, quirk_hotplug_bridge);
+ 
+ /*
++ * Apple: Avoid programming the memory/io aperture of 00:1c.0
++ *
++ * BIOS does not declare any resource for 00:1c.0, but with
++ * hotplug flag set, thus the OS allocates:
++ * [mem 0x7fa00000 - 0x7fbfffff]
++ * [mem 0x7fc00000-0x7fdfffff 64bit pref]
++ * which is conflict with an unreported device, which
++ * causes unpredictable result such as accessing io port.
++ * So clear the hotplug flag to work around it.
++ */
++static void quirk_apple_mbp_poweroff(struct pci_dev *dev)
++{
++   if (dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,4") ||
++       dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,5"))
++       dev->is_hotplug_bridge = 0;
++}
++
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8c10, quirk_apple_mbp_poweroff);
++
++/*
+  * This is a quirk for the Ricoh MMC controller found as a part of
+  * some mulifunction chips.
+ 
+-- 
+cgit v0.11.2
+

diff --git a/2600_enable-key-swapping-for-apple-mac.patch b/2600_enable-key-swapping-for-apple-mac.patch
new file mode 100644
index 0000000..ab228d3
--- /dev/null
+++ b/2600_enable-key-swapping-for-apple-mac.patch
@@ -0,0 +1,114 @@
+--- a/drivers/hid/hid-apple.c
++++ b/drivers/hid/hid-apple.c
+@@ -52,6 +52,22 @@
+ 		"(For people who want to keep Windows PC keyboard muscle memory. "
+ 		"[0] = as-is, Mac layout. 1 = swapped, Windows layout.)");
+ 
++static unsigned int swap_fn_leftctrl;
++module_param(swap_fn_leftctrl, uint, 0644);
++MODULE_PARM_DESC(swap_fn_leftctrl, "Swap the Fn and left Control keys. "
++		"(For people who want to keep PC keyboard muscle memory. "
++		"[0] = as-is, Mac layout, 1 = swapped, PC layout)");
++
++static unsigned int rightalt_as_rightctrl;
++module_param(rightalt_as_rightctrl, uint, 0644);
++MODULE_PARM_DESC(rightalt_as_rightctrl, "Use the right Alt key as a right Ctrl key. "
++		"[0] = as-is, Mac layout. 1 = Right Alt is right Ctrl");
++
++static unsigned int ejectcd_as_delete;
++module_param(ejectcd_as_delete, uint, 0644);
++MODULE_PARM_DESC(ejectcd_as_delete, "Use Eject-CD key as Delete key. "
++		"([0] = disabled, 1 = enabled)");
++
+ struct apple_sc {
+ 	unsigned long quirks;
+ 	unsigned int fn_on;
+@@ -164,6 +180,21 @@
+ 	{ }
+ };
+ 
++static const struct apple_key_translation swapped_fn_leftctrl_keys[] = {
++	{ KEY_FN, KEY_LEFTCTRL },
++	{ }
++};
++
++static const struct apple_key_translation rightalt_as_rightctrl_keys[] = {
++	{ KEY_RIGHTALT, KEY_RIGHTCTRL },
++	{ }
++};
++
++static const struct apple_key_translation ejectcd_as_delete_keys[] = {
++	{ KEY_EJECTCD,	KEY_DELETE },
++	{ }
++};
++
+ static const struct apple_key_translation *apple_find_translation(
+ 		const struct apple_key_translation *table, u16 from)
+ {
+@@ -183,9 +214,11 @@
+ 	struct apple_sc *asc = hid_get_drvdata(hid);
+ 	const struct apple_key_translation *trans, *table;
+ 
+-	if (usage->code == KEY_FN) {
++	u16 fn_keycode = (swap_fn_leftctrl) ? (KEY_LEFTCTRL) : (KEY_FN);
++
++	if (usage->code == fn_keycode) {
+ 		asc->fn_on = !!value;
+-		input_event(input, usage->type, usage->code, value);
++		input_event(input, usage->type, KEY_FN, value);
+ 		return 1;
+ 	}
+ 
+@@ -264,6 +297,30 @@
+ 		}
+ 	}
+ 
++	if (swap_fn_leftctrl) {
++		trans = apple_find_translation(swapped_fn_leftctrl_keys, usage->code);
++		if (trans) {
++			input_event(input, usage->type, trans->to, value);
++			return 1;
++		}
++	}
++
++	if (ejectcd_as_delete) {
++		trans = apple_find_translation(ejectcd_as_delete_keys, usage->code);
++		if (trans) {
++			input_event(input, usage->type, trans->to, value);
++			return 1;
++		}
++	}
++
++	if (rightalt_as_rightctrl) {
++		trans = apple_find_translation(rightalt_as_rightctrl_keys, usage->code);
++		if (trans) {
++			input_event(input, usage->type, trans->to, value);
++			return 1;
++		}
++	}
++
+ 	return 0;
+ }
+ 
+@@ -327,6 +384,21 @@
+ 
+ 	for (trans = apple_iso_keyboard; trans->from; trans++)
+ 		set_bit(trans->to, input->keybit);
++
++	if (swap_fn_leftctrl) {
++		for (trans = swapped_fn_leftctrl_keys; trans->from; trans++)
++			set_bit(trans->to, input->keybit);
++	}
++
++	if (ejectcd_as_delete) {
++		for (trans = ejectcd_as_delete_keys; trans->from; trans++)
++			set_bit(trans->to, input->keybit);
++	}
++
++        if (rightalt_as_rightctrl) {
++		for (trans = rightalt_as_rightctrl_keys; trans->from; trans++)
++			set_bit(trans->to, input->keybit);
++	}
+ }
+ 
+ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,

diff --git a/2900_dev-root-proc-mount-fix.patch b/2900_dev-root-proc-mount-fix.patch
new file mode 100644
index 0000000..60af1eb
--- /dev/null
+++ b/2900_dev-root-proc-mount-fix.patch
@@ -0,0 +1,38 @@
+--- a/init/do_mounts.c	2015-08-19 10:27:16.753852576 -0400
++++ b/init/do_mounts.c	2015-08-19 10:34:25.473850353 -0400
+@@ -490,7 +490,11 @@ void __init change_floppy(char *fmt, ...
+ 	va_start(args, fmt);
+ 	vsprintf(buf, fmt, args);
+ 	va_end(args);
+-	fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
++	if (saved_root_name[0])
++		fd = sys_open(saved_root_name, O_RDWR | O_NDELAY, 0);
++	else
++		fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
++
+ 	if (fd >= 0) {
+ 		sys_ioctl(fd, FDEJECT, 0);
+ 		sys_close(fd);
+@@ -534,11 +538,17 @@ void __init mount_root(void)
+ #endif
+ #ifdef CONFIG_BLOCK
+ 	{
+-		int err = create_dev("/dev/root", ROOT_DEV);
+-
+-		if (err < 0)
+-			pr_emerg("Failed to create /dev/root: %d\n", err);
+-		mount_block_root("/dev/root", root_mountflags);
++		if (saved_root_name[0] == '/') {
++	       	int err = create_dev(saved_root_name, ROOT_DEV);
++			if (err < 0)
++				pr_emerg("Failed to create %s: %d\n", saved_root_name, err);
++			mount_block_root(saved_root_name, root_mountflags);
++		} else {
++			int err = create_dev("/dev/root", ROOT_DEV);
++			if (err < 0)
++				pr_emerg("Failed to create /dev/root: %d\n", err);
++			mount_block_root("/dev/root", root_mountflags);
++		}
+ 	}
+ #endif
+ }

diff --git a/4200_fbcondecor.patch b/4200_fbcondecor.patch
new file mode 100644
index 0000000..f7d9879
--- /dev/null
+++ b/4200_fbcondecor.patch
@@ -0,0 +1,2095 @@
+diff --git a/Documentation/fb/00-INDEX b/Documentation/fb/00-INDEX
+index fe85e7c..2230930 100644
+--- a/Documentation/fb/00-INDEX
++++ b/Documentation/fb/00-INDEX
+@@ -23,6 +23,8 @@ ep93xx-fb.txt
+ 	- info on the driver for EP93xx LCD controller.
+ fbcon.txt
+ 	- intro to and usage guide for the framebuffer console (fbcon).
++fbcondecor.txt
++	- info on the Framebuffer Console Decoration
+ framebuffer.txt
+ 	- introduction to frame buffer devices.
+ gxfb.txt
+diff --git a/Documentation/fb/fbcondecor.txt b/Documentation/fb/fbcondecor.txt
+new file mode 100644
+index 0000000..637209e
+--- /dev/null
++++ b/Documentation/fb/fbcondecor.txt
+@@ -0,0 +1,207 @@
++What is it?
++-----------
++
++The framebuffer decorations are a kernel feature which allows displaying a
++background picture on selected consoles.
++
++What do I need to get it to work?
++---------------------------------
++
++To get fbcondecor up-and-running you will have to:
++ 1) get a copy of splashutils [1] or a similar program
++ 2) get some fbcondecor themes
++ 3) build the kernel helper program
++ 4) build your kernel with the FB_CON_DECOR option enabled.
++
++To get fbcondecor operational right after fbcon initialization is finished, you
++will have to include a theme and the kernel helper into your initramfs image.
++Please refer to splashutils documentation for instructions on how to do that.
++
++[1] The splashutils package can be downloaded from:
++    http://github.com/alanhaggai/fbsplash
++
++The userspace helper
++--------------------
++
++The userspace fbcondecor helper (by default: /sbin/fbcondecor_helper) is called by the
++kernel whenever an important event occurs and the kernel needs some kind of
++job to be carried out. Important events include console switches and video
++mode switches (the kernel requests background images and configuration
++parameters for the current console). The fbcondecor helper must be accessible at
++all times. If it's not, fbcondecor will be switched off automatically.
++
++It's possible to set path to the fbcondecor helper by writing it to
++/proc/sys/kernel/fbcondecor.
++
++*****************************************************************************
++
++The information below is mostly technical stuff. There's probably no need to
++read it unless you plan to develop a userspace helper.
++
++The fbcondecor protocol
++-----------------------
++
++The fbcondecor protocol defines a communication interface between the kernel and
++the userspace fbcondecor helper.
++
++The kernel side is responsible for:
++
++ * rendering console text, using an image as a background (instead of a
++   standard solid color fbcon uses),
++ * accepting commands from the user via ioctls on the fbcondecor device,
++ * calling the userspace helper to set things up as soon as the fb subsystem
++   is initialized.
++
++The userspace helper is responsible for everything else, including parsing
++configuration files, decompressing the image files whenever the kernel needs
++it, and communicating with the kernel if necessary.
++
++The fbcondecor protocol specifies how communication is done in both ways:
++kernel->userspace and userspace->helper.
++
++Kernel -> Userspace
++-------------------
++
++The kernel communicates with the userspace helper by calling it and specifying
++the task to be done in a series of arguments.
++
++The arguments follow the pattern:
++<fbcondecor protocol version> <command> <parameters>
++
++All commands defined in fbcondecor protocol v2 have the following parameters:
++ virtual console
++ framebuffer number
++ theme
++
++Fbcondecor protocol v1 specified an additional 'fbcondecor mode' after the
++framebuffer number. Fbcondecor protocol v1 is deprecated and should not be used.
++
++Fbcondecor protocol v2 specifies the following commands:
++
++getpic
++------
++ The kernel issues this command to request image data. It's up to the
++ userspace  helper to find a background image appropriate for the specified
++ theme and the current resolution. The userspace helper should respond by
++ issuing the FBIOCONDECOR_SETPIC ioctl.
++
++init
++----
++ The kernel issues this command after the fbcondecor device is created and
++ the fbcondecor interface is initialized. Upon receiving 'init', the userspace
++ helper should parse the kernel command line (/proc/cmdline) or otherwise
++ decide whether fbcondecor is to be activated.
++
++ To activate fbcondecor on the first console the helper should issue the
++ FBIOCONDECOR_SETCFG, FBIOCONDECOR_SETPIC and FBIOCONDECOR_SETSTATE commands,
++ in the above-mentioned order.
++
++ When the userspace helper is called in an early phase of the boot process
++ (right after the initialization of fbcon), no filesystems will be mounted.
++ The helper program should mount sysfs and then create the appropriate
++ framebuffer, fbcondecor and tty0 devices (if they don't already exist) to get
++ current display settings and to be able to communicate with the kernel side.
++ It should probably also mount the procfs to be able to parse the kernel
++ command line parameters.
++
++ Note that the console sem is not held when the kernel calls fbcondecor_helper
++ with the 'init' command. The fbcondecor helper should perform all ioctls with
++ origin set to FBCON_DECOR_IO_ORIG_USER.
++
++modechange
++----------
++ The kernel issues this command on a mode change. The helper's response should
++ be similar to the response to the 'init' command. Note that this time the
++ console sem is held and all ioctls must be performed with origin set to
++ FBCON_DECOR_IO_ORIG_KERNEL.
++
++
++Userspace -> Kernel
++-------------------
++
++Userspace programs can communicate with fbcondecor via ioctls on the
++fbcondecor device. These ioctls are to be used by both the userspace helper
++(called only by the kernel) and userspace configuration tools (run by the users).
++
++The fbcondecor helper should set the origin field to FBCON_DECOR_IO_ORIG_KERNEL
++when doing the appropriate ioctls. All userspace configuration tools should
++use FBCON_DECOR_IO_ORIG_USER. Failure to set the appropriate value in the origin
++field when performing ioctls from the kernel helper will most likely result
++in a console deadlock.
++
++FBCON_DECOR_IO_ORIG_KERNEL instructs fbcondecor not to try to acquire the console
++semaphore. Not surprisingly, FBCON_DECOR_IO_ORIG_USER instructs it to acquire
++the console sem.
++
++The framebuffer console decoration provides the following ioctls (all defined in
++linux/fb.h):
++
++FBIOCONDECOR_SETPIC
++description: loads a background picture for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: struct fb_image*
++notes:
++If called for consoles other than the current foreground one, the picture data
++will be ignored.
++
++If the current virtual console is running in a 8-bpp mode, the cmap substruct
++of fb_image has to be filled appropriately: start should be set to 16 (first
++16 colors are reserved for fbcon), len to a value <= 240 and red, green and
++blue should point to valid cmap data. The transp field is ingored. The fields
++dx, dy, bg_color, fg_color in fb_image are ignored as well.
++
++FBIOCONDECOR_SETCFG
++description: sets the fbcondecor config for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: struct vc_decor*
++notes: The structure has to be filled with valid data.
++
++FBIOCONDECOR_GETCFG
++description: gets the fbcondecor config for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: struct vc_decor*
++
++FBIOCONDECOR_SETSTATE
++description: sets the fbcondecor state for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: unsigned int*
++          values: 0 = disabled, 1 = enabled.
++
++FBIOCONDECOR_GETSTATE
++description: gets the fbcondecor state for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: unsigned int*
++          values: as in FBIOCONDECOR_SETSTATE
++
++Info on used structures:
++
++Definition of struct vc_decor can be found in linux/console_decor.h. It's
++heavily commented. Note that the 'theme' field should point to a string
++no longer than FBCON_DECOR_THEME_LEN. When FBIOCONDECOR_GETCFG call is
++performed, the theme field should point to a char buffer of length
++FBCON_DECOR_THEME_LEN.
++
++Definition of struct fbcon_decor_iowrapper can be found in linux/fb.h.
++The fields in this struct have the following meaning:
++
++vc:
++Virtual console number.
++
++origin:
++Specifies if the ioctl is performed as a response to a kernel request. The
++fbcondecor helper should set this field to FBCON_DECOR_IO_ORIG_KERNEL, userspace
++programs should set it to FBCON_DECOR_IO_ORIG_USER. This field is necessary to
++avoid console semaphore deadlocks.
++
++data:
++Pointer to a data structure appropriate for the performed ioctl. Type of
++the data struct is specified in the ioctls description.
++
++*****************************************************************************
++
++Credit
++------
++
++Original 'bootsplash' project & implementation by:
++  Volker Poplawski <volker@poplawski.de>, Stefan Reinauer <stepan@suse.de>,
++  Steffen Winterfeldt <snwint@suse.de>, Michael Schroeder <mls@suse.de>,
++  Ken Wimer <wimer@suse.de>.
++
++Fbcondecor, fbcondecor protocol design, current implementation & docs by:
++  Michal Januszewski <michalj+fbcondecor@gmail.com>
++
+diff --git a/drivers/Makefile b/drivers/Makefile
+index 53abb4a..1721aee 100644
+--- a/drivers/Makefile
++++ b/drivers/Makefile
+@@ -17,6 +17,10 @@ obj-y				+= pwm/
+ obj-$(CONFIG_PCI)		+= pci/
+ obj-$(CONFIG_PARISC)		+= parisc/
+ obj-$(CONFIG_RAPIDIO)		+= rapidio/
++# tty/ comes before char/ so that the VT console is the boot-time
++# default.
++obj-y				+= tty/
++obj-y				+= char/
+ obj-y				+= video/
+ obj-y				+= idle/
+ 
+@@ -45,11 +49,6 @@ obj-$(CONFIG_REGULATOR)		+= regulator/
+ # reset controllers early, since gpu drivers might rely on them to initialize
+ obj-$(CONFIG_RESET_CONTROLLER)	+= reset/
+ 
+-# tty/ comes before char/ so that the VT console is the boot-time
+-# default.
+-obj-y				+= tty/
+-obj-y				+= char/
+-
+ # iommu/ comes before gpu as gpu are using iommu controllers
+ obj-$(CONFIG_IOMMU_SUPPORT)	+= iommu/
+ 
+diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
+index 38da6e2..fe58152 100644
+--- a/drivers/video/console/Kconfig
++++ b/drivers/video/console/Kconfig
+@@ -130,6 +130,19 @@ config FRAMEBUFFER_CONSOLE_ROTATION
+          such that other users of the framebuffer will remain normally
+          oriented.
+ 
++config FB_CON_DECOR
++	bool "Support for the Framebuffer Console Decorations"
++	depends on FRAMEBUFFER_CONSOLE=y && !FB_TILEBLITTING
++	default n
++	---help---
++	  This option enables support for framebuffer console decorations which
++	  makes it possible to display images in the background of the system
++	  consoles.  Note that userspace utilities are necessary in order to take
++	  advantage of these features. Refer to Documentation/fb/fbcondecor.txt
++	  for more information.
++
++	  If unsure, say N.
++
+ config STI_CONSOLE
+         bool "STI text console"
+         depends on PARISC
+diff --git a/drivers/video/console/Makefile b/drivers/video/console/Makefile
+index 43bfa48..cc104b6 100644
+--- a/drivers/video/console/Makefile
++++ b/drivers/video/console/Makefile
+@@ -16,4 +16,5 @@ obj-$(CONFIG_FRAMEBUFFER_CONSOLE)     += fbcon_rotate.o fbcon_cw.o fbcon_ud.o \
+                                          fbcon_ccw.o
+ endif
+ 
++obj-$(CONFIG_FB_CON_DECOR)     	  += fbcondecor.o cfbcondecor.o
+ obj-$(CONFIG_FB_STI)              += sticore.o
+diff --git a/drivers/video/console/bitblit.c b/drivers/video/console/bitblit.c
+index dbfe4ee..14da307 100644
+--- a/drivers/video/console/bitblit.c
++++ b/drivers/video/console/bitblit.c
+@@ -18,6 +18,7 @@
+ #include <linux/console.h>
+ #include <asm/types.h>
+ #include "fbcon.h"
++#include "fbcondecor.h"
+ 
+ /*
+  * Accelerated handlers.
+@@ -55,6 +56,13 @@ static void bit_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ 	area.height = height * vc->vc_font.height;
+ 	area.width = width * vc->vc_font.width;
+ 
++	if (fbcon_decor_active(info, vc)) {
++		area.sx += vc->vc_decor.tx;
++		area.sy += vc->vc_decor.ty;
++		area.dx += vc->vc_decor.tx;
++		area.dy += vc->vc_decor.ty;
++	}
++
+ 	info->fbops->fb_copyarea(info, &area);
+ }
+ 
+@@ -379,11 +387,15 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
+ 	cursor.image.depth = 1;
+ 	cursor.rop = ROP_XOR;
+ 
+-	if (info->fbops->fb_cursor)
+-		err = info->fbops->fb_cursor(info, &cursor);
++	if (fbcon_decor_active(info, vc)) {
++		fbcon_decor_cursor(info, &cursor);
++	} else {
++		if (info->fbops->fb_cursor)
++			err = info->fbops->fb_cursor(info, &cursor);
+ 
+-	if (err)
+-		soft_cursor(info, &cursor);
++		if (err)
++			soft_cursor(info, &cursor);
++	}
+ 
+ 	ops->cursor_reset = 0;
+ }
+diff --git a/drivers/video/console/cfbcondecor.c b/drivers/video/console/cfbcondecor.c
+new file mode 100644
+index 0000000..c262540
+--- /dev/null
++++ b/drivers/video/console/cfbcondecor.c
+@@ -0,0 +1,473 @@
++/*
++ *  linux/drivers/video/console/cfbcondecor.c -- Framebuffer decor render functions
++ *
++ *  Copyright (C) 2004 Michal Januszewski <michalj+fbcondecor@gmail.com>
++ *
++ *  Code based upon "Bootsplash" (C) 2001-2003
++ *       Volker Poplawski <volker@poplawski.de>,
++ *       Stefan Reinauer <stepan@suse.de>,
++ *       Steffen Winterfeldt <snwint@suse.de>,
++ *       Michael Schroeder <mls@suse.de>,
++ *       Ken Wimer <wimer@suse.de>.
++ *
++ *  This file is subject to the terms and conditions of the GNU General Public
++ *  License.  See the file COPYING in the main directory of this archive for
++ *  more details.
++ */
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/fb.h>
++#include <linux/selection.h>
++#include <linux/slab.h>
++#include <linux/vt_kern.h>
++#include <asm/irq.h>
++
++#include "fbcon.h"
++#include "fbcondecor.h"
++
++#define parse_pixel(shift, bpp, type)						\
++	do {									\
++		if (d & (0x80 >> (shift)))					\
++			dd2[(shift)] = fgx;					\
++		else								\
++			dd2[(shift)] = transparent ? *(type *)decor_src : bgx;	\
++		decor_src += (bpp);						\
++	} while (0)								\
++
++extern int get_color(struct vc_data *vc, struct fb_info *info,
++		     u16 c, int is_fg);
++
++void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc)
++{
++	int i, j, k;
++	int minlen = min(min(info->var.red.length, info->var.green.length),
++			     info->var.blue.length);
++	u32 col;
++
++	for (j = i = 0; i < 16; i++) {
++		k = color_table[i];
++
++		col = ((vc->vc_palette[j++]  >> (8-minlen))
++			<< info->var.red.offset);
++		col |= ((vc->vc_palette[j++] >> (8-minlen))
++			<< info->var.green.offset);
++		col |= ((vc->vc_palette[j++] >> (8-minlen))
++			<< info->var.blue.offset);
++		((u32 *)info->pseudo_palette)[k] = col;
++	}
++}
++
++void fbcon_decor_renderc(struct fb_info *info, int ypos, int xpos, int height,
++		      int width, u8 *src, u32 fgx, u32 bgx, u8 transparent)
++{
++	unsigned int x, y;
++	u32 dd;
++	int bytespp = ((info->var.bits_per_pixel + 7) >> 3);
++	unsigned int d = ypos * info->fix.line_length + xpos * bytespp;
++	unsigned int ds = (ypos * info->var.xres + xpos) * bytespp;
++	u16 dd2[4];
++
++	u8 *decor_src = (u8 *)(info->bgdecor.data + ds);
++	u8 *dst = (u8 *)(info->screen_base + d);
++
++	if ((ypos + height) > info->var.yres || (xpos + width) > info->var.xres)
++		return;
++
++	for (y = 0; y < height; y++) {
++		switch (info->var.bits_per_pixel) {
++
++		case 32:
++			for (x = 0; x < width; x++) {
++
++				if ((x & 7) == 0)
++					d = *src++;
++				if (d & 0x80)
++					dd = fgx;
++				else
++					dd = transparent ?
++					     *(u32 *)decor_src : bgx;
++
++				d <<= 1;
++				decor_src += 4;
++				fb_writel(dd, dst);
++				dst += 4;
++			}
++			break;
++		case 24:
++			for (x = 0; x < width; x++) {
++
++				if ((x & 7) == 0)
++					d = *src++;
++				if (d & 0x80)
++					dd = fgx;
++				else
++					dd = transparent ?
++					     (*(u32 *)decor_src & 0xffffff) : bgx;
++
++				d <<= 1;
++				decor_src += 3;
++#ifdef __LITTLE_ENDIAN
++				fb_writew(dd & 0xffff, dst);
++				dst += 2;
++				fb_writeb((dd >> 16), dst);
++#else
++				fb_writew(dd >> 8, dst);
++				dst += 2;
++				fb_writeb(dd & 0xff, dst);
++#endif
++				dst++;
++			}
++			break;
++		case 16:
++			for (x = 0; x < width; x += 2) {
++				if ((x & 7) == 0)
++					d = *src++;
++
++				parse_pixel(0, 2, u16);
++				parse_pixel(1, 2, u16);
++#ifdef __LITTLE_ENDIAN
++				dd = dd2[0] | (dd2[1] << 16);
++#else
++				dd = dd2[1] | (dd2[0] << 16);
++#endif
++				d <<= 2;
++				fb_writel(dd, dst);
++				dst += 4;
++			}
++			break;
++
++		case 8:
++			for (x = 0; x < width; x += 4) {
++				if ((x & 7) == 0)
++					d = *src++;
++
++				parse_pixel(0, 1, u8);
++				parse_pixel(1, 1, u8);
++				parse_pixel(2, 1, u8);
++				parse_pixel(3, 1, u8);
++
++#ifdef __LITTLE_ENDIAN
++				dd = dd2[0] | (dd2[1] << 8) | (dd2[2] << 16) | (dd2[3] << 24);
++#else
++				dd = dd2[3] | (dd2[2] << 8) | (dd2[1] << 16) | (dd2[0] << 24);
++#endif
++				d <<= 4;
++				fb_writel(dd, dst);
++				dst += 4;
++			}
++		}
++
++		dst += info->fix.line_length - width * bytespp;
++		decor_src += (info->var.xres - width) * bytespp;
++	}
++}
++
++#define cc2cx(a)						\
++	((info->fix.visual == FB_VISUAL_TRUECOLOR ||		\
++		info->fix.visual == FB_VISUAL_DIRECTCOLOR) ?	\
++			((u32 *)info->pseudo_palette)[a] : a)
++
++void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info,
++		   const unsigned short *s, int count, int yy, int xx)
++{
++	unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
++	struct fbcon_ops *ops = info->fbcon_par;
++	int fg_color, bg_color, transparent;
++	u8 *src;
++	u32 bgx, fgx;
++	u16 c = scr_readw(s);
++
++	fg_color = get_color(vc, info, c, 1);
++	bg_color = get_color(vc, info, c, 0);
++
++	/* Don't paint the background image if console is blanked */
++	transparent = ops->blank_state ? 0 :
++		(vc->vc_decor.bg_color == bg_color);
++
++	xx = xx * vc->vc_font.width + vc->vc_decor.tx;
++	yy = yy * vc->vc_font.height + vc->vc_decor.ty;
++
++	fgx = cc2cx(fg_color);
++	bgx = cc2cx(bg_color);
++
++	while (count--) {
++		c = scr_readw(s++);
++		src = vc->vc_font.data + (c & charmask) * vc->vc_font.height *
++		      ((vc->vc_font.width + 7) >> 3);
++
++		fbcon_decor_renderc(info, yy, xx, vc->vc_font.height,
++			       vc->vc_font.width, src, fgx, bgx, transparent);
++		xx += vc->vc_font.width;
++	}
++}
++
++void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor)
++{
++	int i;
++	unsigned int dsize, s_pitch;
++	struct fbcon_ops *ops = info->fbcon_par;
++	struct vc_data *vc;
++	u8 *src;
++
++	/* we really don't need any cursors while the console is blanked */
++	if (info->state != FBINFO_STATE_RUNNING || ops->blank_state)
++		return;
++
++	vc = vc_cons[ops->currcon].d;
++
++	src = kmalloc(64 + sizeof(struct fb_image), GFP_ATOMIC);
++	if (!src)
++		return;
++
++	s_pitch = (cursor->image.width + 7) >> 3;
++	dsize = s_pitch * cursor->image.height;
++	if (cursor->enable) {
++		switch (cursor->rop) {
++		case ROP_XOR:
++			for (i = 0; i < dsize; i++)
++				src[i] = cursor->image.data[i] ^ cursor->mask[i];
++			break;
++		case ROP_COPY:
++		default:
++			for (i = 0; i < dsize; i++)
++				src[i] = cursor->image.data[i] & cursor->mask[i];
++			break;
++		}
++	} else
++		memcpy(src, cursor->image.data, dsize);
++
++	fbcon_decor_renderc(info,
++			cursor->image.dy + vc->vc_decor.ty,
++			cursor->image.dx + vc->vc_decor.tx,
++			cursor->image.height,
++			cursor->image.width,
++			(u8 *)src,
++			cc2cx(cursor->image.fg_color),
++			cc2cx(cursor->image.bg_color),
++			cursor->image.bg_color == vc->vc_decor.bg_color);
++
++	kfree(src);
++}
++
++static void decorset(u8 *dst, int height, int width, int dstbytes,
++				u32 bgx, int bpp)
++{
++	int i;
++
++	if (bpp == 8)
++		bgx |= bgx << 8;
++	if (bpp == 16 || bpp == 8)
++		bgx |= bgx << 16;
++
++	while (height-- > 0) {
++		u8 *p = dst;
++
++		switch (bpp) {
++
++		case 32:
++			for (i = 0; i < width; i++) {
++				fb_writel(bgx, p); p += 4;
++			}
++			break;
++		case 24:
++			for (i = 0; i < width; i++) {
++#ifdef __LITTLE_ENDIAN
++				fb_writew((bgx & 0xffff), (u16 *)p); p += 2;
++				fb_writeb((bgx >> 16), p++);
++#else
++				fb_writew((bgx >> 8), (u16 *)p); p += 2;
++				fb_writeb((bgx & 0xff), p++);
++#endif
++			}
++			break;
++		case 16:
++			for (i = 0; i < width/4; i++) {
++				fb_writel(bgx, p); p += 4;
++				fb_writel(bgx, p); p += 4;
++			}
++			if (width & 2) {
++				fb_writel(bgx, p); p += 4;
++			}
++			if (width & 1)
++				fb_writew(bgx, (u16 *)p);
++			break;
++		case 8:
++			for (i = 0; i < width/4; i++) {
++				fb_writel(bgx, p); p += 4;
++			}
++
++			if (width & 2) {
++				fb_writew(bgx, p); p += 2;
++			}
++			if (width & 1)
++				fb_writeb(bgx, (u8 *)p);
++			break;
++
++		}
++		dst += dstbytes;
++	}
++}
++
++void fbcon_decor_copy(u8 *dst, u8 *src, int height, int width, int linebytes,
++		   int srclinebytes, int bpp)
++{
++	int i;
++
++	while (height-- > 0) {
++		u32 *p = (u32 *)dst;
++		u32 *q = (u32 *)src;
++
++		switch (bpp) {
++
++		case 32:
++			for (i = 0; i < width; i++)
++				fb_writel(*q++, p++);
++			break;
++		case 24:
++			for (i = 0; i < (width * 3 / 4); i++)
++				fb_writel(*q++, p++);
++			if ((width * 3) % 4) {
++				if (width & 2) {
++					fb_writeb(*(u8 *)q, (u8 *)p);
++				} else if (width & 1) {
++					fb_writew(*(u16 *)q, (u16 *)p);
++					fb_writeb(*(u8 *)((u16 *)q + 1),
++							(u8 *)((u16 *)p + 2));
++				}
++			}
++			break;
++		case 16:
++			for (i = 0; i < width/4; i++) {
++				fb_writel(*q++, p++);
++				fb_writel(*q++, p++);
++			}
++			if (width & 2)
++				fb_writel(*q++, p++);
++			if (width & 1)
++				fb_writew(*(u16 *)q, (u16 *)p);
++			break;
++		case 8:
++			for (i = 0; i < width/4; i++)
++				fb_writel(*q++, p++);
++
++			if (width & 2) {
++				fb_writew(*(u16 *)q, (u16 *)p);
++				q = (u32 *) ((u16 *)q + 1);
++				p = (u32 *) ((u16 *)p + 1);
++			}
++			if (width & 1)
++				fb_writeb(*(u8 *)q, (u8 *)p);
++			break;
++		}
++
++		dst += linebytes;
++		src += srclinebytes;
++	}
++}
++
++static void decorfill(struct fb_info *info, int sy, int sx, int height,
++		       int width)
++{
++	int bytespp = ((info->var.bits_per_pixel + 7) >> 3);
++	int d  = sy * info->fix.line_length + sx * bytespp;
++	int ds = (sy * info->var.xres + sx) * bytespp;
++
++	fbcon_decor_copy((u8 *)(info->screen_base + d), (u8 *)(info->bgdecor.data + ds),
++		    height, width, info->fix.line_length, info->var.xres * bytespp,
++		    info->var.bits_per_pixel);
++}
++
++void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx,
++		    int height, int width)
++{
++	int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
++	struct fbcon_ops *ops = info->fbcon_par;
++	u8 *dst;
++	int transparent, bg_color = attr_bgcol_ec(bgshift, vc, info);
++
++	transparent = (vc->vc_decor.bg_color == bg_color);
++	sy = sy * vc->vc_font.height + vc->vc_decor.ty;
++	sx = sx * vc->vc_font.width + vc->vc_decor.tx;
++	height *= vc->vc_font.height;
++	width *= vc->vc_font.width;
++
++	/* Don't paint the background image if console is blanked */
++	if (transparent && !ops->blank_state) {
++		decorfill(info, sy, sx, height, width);
++	} else {
++		dst = (u8 *)(info->screen_base + sy * info->fix.line_length +
++			     sx * ((info->var.bits_per_pixel + 7) >> 3));
++		decorset(dst, height, width, info->fix.line_length, cc2cx(bg_color),
++			  info->var.bits_per_pixel);
++	}
++}
++
++void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info,
++			    int bottom_only)
++{
++	unsigned int tw = vc->vc_cols*vc->vc_font.width;
++	unsigned int th = vc->vc_rows*vc->vc_font.height;
++
++	if (!bottom_only) {
++		/* top margin */
++		decorfill(info, 0, 0, vc->vc_decor.ty, info->var.xres);
++		/* left margin */
++		decorfill(info, vc->vc_decor.ty, 0, th, vc->vc_decor.tx);
++		/* right margin */
++		decorfill(info, vc->vc_decor.ty, vc->vc_decor.tx + tw, th,
++			   info->var.xres - vc->vc_decor.tx - tw);
++	}
++	decorfill(info, vc->vc_decor.ty + th, 0,
++		   info->var.yres - vc->vc_decor.ty - th, info->var.xres);
++}
++
++void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y,
++			   int sx, int dx, int width)
++{
++	u16 *d = (u16 *) (vc->vc_origin + vc->vc_size_row * y + dx * 2);
++	u16 *s = d + (dx - sx);
++	u16 *start = d;
++	u16 *ls = d;
++	u16 *le = d + width;
++	u16 c;
++	int x = dx;
++	u16 attr = 1;
++
++	do {
++		c = scr_readw(d);
++		if (attr != (c & 0xff00)) {
++			attr = c & 0xff00;
++			if (d > start) {
++				fbcon_decor_putcs(vc, info, start, d - start, y, x);
++				x += d - start;
++				start = d;
++			}
++		}
++		if (s >= ls && s < le && c == scr_readw(s)) {
++			if (d > start) {
++				fbcon_decor_putcs(vc, info, start, d - start, y, x);
++				x += d - start + 1;
++				start = d + 1;
++			} else {
++				x++;
++				start++;
++			}
++		}
++		s++;
++		d++;
++	} while (d < le);
++	if (d > start)
++		fbcon_decor_putcs(vc, info, start, d - start, y, x);
++}
++
++void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank)
++{
++	if (blank) {
++		decorset((u8 *)info->screen_base, info->var.yres, info->var.xres,
++			  info->fix.line_length, 0, info->var.bits_per_pixel);
++	} else {
++		update_screen(vc);
++		fbcon_decor_clear_margins(vc, info, 0);
++	}
++}
++
+diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
+index b87f5cf..ce44538 100644
+--- a/drivers/video/console/fbcon.c
++++ b/drivers/video/console/fbcon.c
+@@ -79,6 +79,7 @@
+ #include <asm/irq.h>
+ 
+ #include "fbcon.h"
++#include "../console/fbcondecor.h"
+ 
+ #ifdef FBCONDEBUG
+ #  define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
+@@ -94,7 +95,7 @@ enum {
+ 
+ static struct display fb_display[MAX_NR_CONSOLES];
+ 
+-static signed char con2fb_map[MAX_NR_CONSOLES];
++signed char con2fb_map[MAX_NR_CONSOLES];
+ static signed char con2fb_map_boot[MAX_NR_CONSOLES];
+ 
+ static int logo_lines;
+@@ -282,7 +283,7 @@ static inline int fbcon_is_inactive(struct vc_data *vc, struct fb_info *info)
+ 		!vt_force_oops_output(vc);
+ }
+ 
+-static int get_color(struct vc_data *vc, struct fb_info *info,
++int get_color(struct vc_data *vc, struct fb_info *info,
+ 	      u16 c, int is_fg)
+ {
+ 	int depth = fb_get_color_depth(&info->var, &info->fix);
+@@ -546,6 +547,9 @@ static int do_fbcon_takeover(int show_logo)
+ 		info_idx = -1;
+ 	} else {
+ 		fbcon_has_console_bind = 1;
++#ifdef CONFIG_FB_CON_DECOR
++		fbcon_decor_init();
++#endif
+ 	}
+ 
+ 	return err;
+@@ -1005,6 +1009,12 @@ static const char *fbcon_startup(void)
+ 	rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ 	cols /= vc->vc_font.width;
+ 	rows /= vc->vc_font.height;
++
++	if (fbcon_decor_active(info, vc)) {
++		cols = vc->vc_decor.twidth / vc->vc_font.width;
++		rows = vc->vc_decor.theight / vc->vc_font.height;
++	}
++
+ 	vc_resize(vc, cols, rows);
+ 
+ 	DPRINTK("mode:   %s\n", info->fix.id);
+@@ -1034,7 +1044,7 @@ static void fbcon_init(struct vc_data *vc, int init)
+ 	cap = info->flags;
+ 
+ 	if (vc != svc || logo_shown == FBCON_LOGO_DONTSHOW ||
+-	    (info->fix.type == FB_TYPE_TEXT))
++	    (info->fix.type == FB_TYPE_TEXT) || fbcon_decor_active(info, vc))
+ 		logo = 0;
+ 
+ 	if (var_to_display(p, &info->var, info))
+@@ -1259,6 +1269,11 @@ static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height,
+ 		fbcon_clear_margins(vc, 0);
+ 	}
+ 
++	if (fbcon_decor_active(info, vc)) {
++		fbcon_decor_clear(vc, info, sy, sx, height, width);
++		return;
++	}
++
+ 	/* Split blits that cross physical y_wrap boundary */
+ 
+ 	y_break = p->vrows - p->yscroll;
+@@ -1278,10 +1293,15 @@ static void fbcon_putcs(struct vc_data *vc, const unsigned short *s,
+ 	struct display *p = &fb_display[vc->vc_num];
+ 	struct fbcon_ops *ops = info->fbcon_par;
+ 
+-	if (!fbcon_is_inactive(vc, info))
+-		ops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
+-			   get_color(vc, info, scr_readw(s), 1),
+-			   get_color(vc, info, scr_readw(s), 0));
++	if (!fbcon_is_inactive(vc, info)) {
++
++		if (fbcon_decor_active(info, vc))
++			fbcon_decor_putcs(vc, info, s, count, ypos, xpos);
++		else
++			ops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
++				   get_color(vc, info, scr_readw(s), 1),
++				   get_color(vc, info, scr_readw(s), 0));
++	}
+ }
+ 
+ static void fbcon_putc(struct vc_data *vc, int c, int ypos, int xpos)
+@@ -1297,8 +1317,12 @@ static void fbcon_clear_margins(struct vc_data *vc, int bottom_only)
+ 	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ 	struct fbcon_ops *ops = info->fbcon_par;
+ 
+-	if (!fbcon_is_inactive(vc, info))
+-		ops->clear_margins(vc, info, bottom_only);
++	if (!fbcon_is_inactive(vc, info)) {
++		if (fbcon_decor_active(info, vc))
++			fbcon_decor_clear_margins(vc, info, bottom_only);
++		else
++			ops->clear_margins(vc, info, bottom_only);
++	}
+ }
+ 
+ static void fbcon_cursor(struct vc_data *vc, int mode)
+@@ -1819,7 +1843,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
+ 			count = vc->vc_rows;
+ 		if (softback_top)
+ 			fbcon_softback_note(vc, t, count);
+-		if (logo_shown >= 0)
++		if (logo_shown >= 0 || fbcon_decor_active(info, vc))
+ 			goto redraw_up;
+ 		switch (p->scrollmode) {
+ 		case SCROLL_MOVE:
+@@ -1912,6 +1936,8 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
+ 			count = vc->vc_rows;
+ 		if (logo_shown >= 0)
+ 			goto redraw_down;
++		if (fbcon_decor_active(info, vc))
++			goto redraw_down;
+ 		switch (p->scrollmode) {
+ 		case SCROLL_MOVE:
+ 			fbcon_redraw_blit(vc, info, p, b - 1, b - t - count,
+@@ -2060,6 +2086,13 @@ static void fbcon_bmove_rec(struct vc_data *vc, struct display *p, int sy, int s
+ 		}
+ 		return;
+ 	}
++
++	if (fbcon_decor_active(info, vc) && sy == dy && height == 1) {
++		/* must use slower redraw bmove to keep background pic intact */
++		fbcon_decor_bmove_redraw(vc, info, sy, sx, dx, width);
++		return;
++	}
++
+ 	ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx,
+ 		   height, width);
+ }
+@@ -2130,8 +2163,8 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
+ 	var.yres = virt_h * virt_fh;
+ 	x_diff = info->var.xres - var.xres;
+ 	y_diff = info->var.yres - var.yres;
+-	if (x_diff < 0 || x_diff > virt_fw ||
+-	    y_diff < 0 || y_diff > virt_fh) {
++	if ((x_diff < 0 || x_diff > virt_fw ||
++		y_diff < 0 || y_diff > virt_fh) && !vc->vc_decor.state) {
+ 		const struct fb_videomode *mode;
+ 
+ 		DPRINTK("attempting resize %ix%i\n", var.xres, var.yres);
+@@ -2167,6 +2200,22 @@ static int fbcon_switch(struct vc_data *vc)
+ 
+ 	info = registered_fb[con2fb_map[vc->vc_num]];
+ 	ops = info->fbcon_par;
++	prev_console = ops->currcon;
++	if (prev_console != -1)
++		old_info = registered_fb[con2fb_map[prev_console]];
++
++#ifdef CONFIG_FB_CON_DECOR
++	if (!fbcon_decor_active_vc(vc) && info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
++		struct vc_data *vc_curr = vc_cons[prev_console].d;
++
++		if (vc_curr && fbcon_decor_active_vc(vc_curr)) {
++			// Clear the screen to avoid displaying funky colors
++			// during palette updates.
++			memset((u8 *)info->screen_base + info->fix.line_length * info->var.yoffset,
++			       0, info->var.yres * info->fix.line_length);
++		}
++	}
++#endif
+ 
+ 	if (softback_top) {
+ 		if (softback_lines)
+@@ -2185,9 +2234,6 @@ static int fbcon_switch(struct vc_data *vc)
+ 		logo_shown = FBCON_LOGO_CANSHOW;
+ 	}
+ 
+-	prev_console = ops->currcon;
+-	if (prev_console != -1)
+-		old_info = registered_fb[con2fb_map[prev_console]];
+ 	/*
+ 	 * FIXME: If we have multiple fbdev's loaded, we need to
+ 	 * update all info->currcon.  Perhaps, we can place this
+@@ -2231,6 +2277,18 @@ static int fbcon_switch(struct vc_data *vc)
+ 			fbcon_del_cursor_timer(old_info);
+ 	}
+ 
++	if (fbcon_decor_active_vc(vc)) {
++		struct vc_data *vc_curr = vc_cons[prev_console].d;
++
++		if (!vc_curr->vc_decor.theme ||
++			strcmp(vc->vc_decor.theme, vc_curr->vc_decor.theme) ||
++			(fbcon_decor_active_nores(info, vc_curr) &&
++			 !fbcon_decor_active(info, vc_curr))) {
++			fbcon_decor_disable(vc, 0);
++			fbcon_decor_call_helper("modechange", vc->vc_num);
++		}
++	}
++
+ 	if (fbcon_is_inactive(vc, info) ||
+ 	    ops->blank_state != FB_BLANK_UNBLANK)
+ 		fbcon_del_cursor_timer(info);
+@@ -2339,15 +2397,20 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
+ 		}
+ 	}
+ 
+- 	if (!fbcon_is_inactive(vc, info)) {
++	if (!fbcon_is_inactive(vc, info)) {
+ 		if (ops->blank_state != blank) {
+ 			ops->blank_state = blank;
+ 			fbcon_cursor(vc, blank ? CM_ERASE : CM_DRAW);
+ 			ops->cursor_flash = (!blank);
+ 
+-			if (!(info->flags & FBINFO_MISC_USEREVENT))
+-				if (fb_blank(info, blank))
+-					fbcon_generic_blank(vc, info, blank);
++			if (!(info->flags & FBINFO_MISC_USEREVENT)) {
++				if (fb_blank(info, blank)) {
++					if (fbcon_decor_active(info, vc))
++						fbcon_decor_blank(vc, info, blank);
++					else
++						fbcon_generic_blank(vc, info, blank);
++				}
++			}
+ 		}
+ 
+ 		if (!blank)
+@@ -2522,13 +2585,22 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
+ 	}
+ 
+ 	if (resize) {
++		/* reset wrap/pan */
+ 		int cols, rows;
+ 
+ 		cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
+ 		rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
++
++		if (fbcon_decor_active(info, vc)) {
++			info->var.xoffset = info->var.yoffset = p->yscroll = 0;
++			cols = vc->vc_decor.twidth;
++			rows = vc->vc_decor.theight;
++		}
+ 		cols /= w;
+ 		rows /= h;
++
+ 		vc_resize(vc, cols, rows);
++
+ 		if (con_is_visible(vc) && softback_buf)
+ 			fbcon_update_softback(vc);
+ 	} else if (con_is_visible(vc)
+@@ -2657,7 +2729,11 @@ static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table)
+ 	int i, j, k, depth;
+ 	u8 val;
+ 
+-	if (fbcon_is_inactive(vc, info))
++	if (fbcon_is_inactive(vc, info)
++#ifdef CONFIG_FB_CON_DECOR
++			|| vc->vc_num != fg_console
++#endif
++		)
+ 		return;
+ 
+ 	if (!con_is_visible(vc))
+@@ -2683,7 +2759,47 @@ static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table)
+ 	} else
+ 		fb_copy_cmap(fb_default_cmap(1 << depth), &palette_cmap);
+ 
+-	fb_set_cmap(&palette_cmap, info);
++	if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
++	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
++
++		u16 *red, *green, *blue;
++		int minlen = min(min(info->var.red.length, info->var.green.length),
++				     info->var.blue.length);
++
++		struct fb_cmap cmap = {
++			.start = 0,
++			.len = (1 << minlen),
++			.red = NULL,
++			.green = NULL,
++			.blue = NULL,
++			.transp = NULL
++		};
++
++		red = kmalloc(256 * sizeof(u16) * 3, GFP_KERNEL);
++
++		if (!red)
++			goto out;
++
++		green = red + 256;
++		blue = green + 256;
++		cmap.red = red;
++		cmap.green = green;
++		cmap.blue = blue;
++
++		for (i = 0; i < cmap.len; i++)
++			red[i] = green[i] = blue[i] = (0xffff * i)/(cmap.len-1);
++
++		fb_set_cmap(&cmap, info);
++		fbcon_decor_fix_pseudo_pal(info, vc_cons[fg_console].d);
++		kfree(red);
++
++		return;
++
++	} else if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
++		   info->var.bits_per_pixel == 8 && info->bgdecor.cmap.red != NULL)
++		fb_set_cmap(&info->bgdecor.cmap, info);
++
++out:	fb_set_cmap(&palette_cmap, info);
+ }
+ 
+ static u16 *fbcon_screen_pos(struct vc_data *vc, int offset)
+@@ -2908,7 +3024,14 @@ static void fbcon_modechanged(struct fb_info *info)
+ 		rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ 		cols /= vc->vc_font.width;
+ 		rows /= vc->vc_font.height;
+-		vc_resize(vc, cols, rows);
++
++		if (!fbcon_decor_active_nores(info, vc)) {
++			vc_resize(vc, cols, rows);
++		} else {
++			fbcon_decor_disable(vc, 0);
++			fbcon_decor_call_helper("modechange", vc->vc_num);
++		}
++
+ 		updatescrollmode(p, info, vc);
+ 		scrollback_max = 0;
+ 		scrollback_current = 0;
+@@ -2953,7 +3076,8 @@ static void fbcon_set_all_vcs(struct fb_info *info)
+ 		rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ 		cols /= vc->vc_font.width;
+ 		rows /= vc->vc_font.height;
+-		vc_resize(vc, cols, rows);
++		if (!fbcon_decor_active_nores(info, vc))
++			vc_resize(vc, cols, rows);
+ 	}
+ 
+ 	if (fg != -1)
+@@ -3594,6 +3718,7 @@ static void fbcon_exit(void)
+ 		}
+ 	}
+ 
++	fbcon_decor_exit();
+ 	fbcon_has_exited = 1;
+ }
+ 
+diff --git a/drivers/video/console/fbcondecor.c b/drivers/video/console/fbcondecor.c
+new file mode 100644
+index 0000000..65cc0d3
+--- /dev/null
++++ b/drivers/video/console/fbcondecor.c
+@@ -0,0 +1,549 @@
++/*
++ *  linux/drivers/video/console/fbcondecor.c -- Framebuffer console decorations
++ *
++ *  Copyright (C) 2004-2009 Michal Januszewski <michalj+fbcondecor@gmail.com>
++ *
++ *  Code based upon "Bootsplash" (C) 2001-2003
++ *       Volker Poplawski <volker@poplawski.de>,
++ *       Stefan Reinauer <stepan@suse.de>,
++ *       Steffen Winterfeldt <snwint@suse.de>,
++ *       Michael Schroeder <mls@suse.de>,
++ *       Ken Wimer <wimer@suse.de>.
++ *
++ *  Compat ioctl support by Thorsten Klein <TK@Thorsten-Klein.de>.
++ *
++ *  This file is subject to the terms and conditions of the GNU General Public
++ *  License.  See the file COPYING in the main directory of this archive for
++ *  more details.
++ *
++ */
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/fb.h>
++#include <linux/vt_kern.h>
++#include <linux/vmalloc.h>
++#include <linux/unistd.h>
++#include <linux/syscalls.h>
++#include <linux/init.h>
++#include <linux/proc_fs.h>
++#include <linux/workqueue.h>
++#include <linux/kmod.h>
++#include <linux/miscdevice.h>
++#include <linux/device.h>
++#include <linux/fs.h>
++#include <linux/compat.h>
++#include <linux/console.h>
++
++#include <linux/uaccess.h>
++#include <asm/irq.h>
++
++#include "fbcon.h"
++#include "fbcondecor.h"
++
++extern signed char con2fb_map[];
++static int fbcon_decor_enable(struct vc_data *vc);
++
++static int initialized;
++
++char fbcon_decor_path[KMOD_PATH_LEN] = "/sbin/fbcondecor_helper";
++EXPORT_SYMBOL(fbcon_decor_path);
++
++int fbcon_decor_call_helper(char *cmd, unsigned short vc)
++{
++	char *envp[] = {
++		"HOME=/",
++		"PATH=/sbin:/bin",
++		NULL
++	};
++
++	char tfb[5];
++	char tcons[5];
++	unsigned char fb = (int) con2fb_map[vc];
++
++	char *argv[] = {
++		fbcon_decor_path,
++		"2",
++		cmd,
++		tcons,
++		tfb,
++		vc_cons[vc].d->vc_decor.theme,
++		NULL
++	};
++
++	snprintf(tfb, 5, "%d", fb);
++	snprintf(tcons, 5, "%d", vc);
++
++	return call_usermodehelper(fbcon_decor_path, argv, envp, UMH_WAIT_EXEC);
++}
++
++/* Disables fbcondecor on a virtual console; called with console sem held. */
++int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw)
++{
++	struct fb_info *info;
++
++	if (!vc->vc_decor.state)
++		return -EINVAL;
++
++	info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++	if (info == NULL)
++		return -EINVAL;
++
++	vc->vc_decor.state = 0;
++	vc_resize(vc, info->var.xres / vc->vc_font.width,
++		  info->var.yres / vc->vc_font.height);
++
++	if (fg_console == vc->vc_num && redraw) {
++		redraw_screen(vc, 0);
++		update_region(vc, vc->vc_origin +
++			      vc->vc_size_row * vc->vc_top,
++			      vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
++	}
++
++	printk(KERN_INFO "fbcondecor: switched decor state to 'off' on console %d\n",
++			 vc->vc_num);
++
++	return 0;
++}
++
++/* Enables fbcondecor on a virtual console; called with console sem held. */
++static int fbcon_decor_enable(struct vc_data *vc)
++{
++	struct fb_info *info;
++
++	info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++	if (vc->vc_decor.twidth == 0 || vc->vc_decor.theight == 0 ||
++	    info == NULL || vc->vc_decor.state || (!info->bgdecor.data &&
++	    vc->vc_num == fg_console))
++		return -EINVAL;
++
++	vc->vc_decor.state = 1;
++	vc_resize(vc, vc->vc_decor.twidth / vc->vc_font.width,
++		  vc->vc_decor.theight / vc->vc_font.height);
++
++	if (fg_console == vc->vc_num) {
++		redraw_screen(vc, 0);
++		update_region(vc, vc->vc_origin +
++			      vc->vc_size_row * vc->vc_top,
++			      vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
++		fbcon_decor_clear_margins(vc, info, 0);
++	}
++
++	printk(KERN_INFO "fbcondecor: switched decor state to 'on' on console %d\n",
++			 vc->vc_num);
++
++	return 0;
++}
++
++static inline int fbcon_decor_ioctl_dosetstate(struct vc_data *vc, unsigned int state, unsigned char origin)
++{
++	int ret;
++
++	console_lock();
++	if (!state)
++		ret = fbcon_decor_disable(vc, 1);
++	else
++		ret = fbcon_decor_enable(vc);
++	console_unlock();
++
++	return ret;
++}
++
++static inline void fbcon_decor_ioctl_dogetstate(struct vc_data *vc, unsigned int *state)
++{
++	*state = vc->vc_decor.state;
++}
++
++static int fbcon_decor_ioctl_dosetcfg(struct vc_data *vc, struct vc_decor *cfg, unsigned char origin)
++{
++	struct fb_info *info;
++	int len;
++	char *tmp;
++
++	info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++	if (info == NULL || !cfg->twidth || !cfg->theight ||
++	    cfg->tx + cfg->twidth  > info->var.xres ||
++	    cfg->ty + cfg->theight > info->var.yres)
++		return -EINVAL;
++
++	len = strlen_user(cfg->theme);
++	if (!len || len > FBCON_DECOR_THEME_LEN)
++		return -EINVAL;
++	tmp = kmalloc(len, GFP_KERNEL);
++	if (!tmp)
++		return -ENOMEM;
++	if (copy_from_user(tmp, (void __user *)cfg->theme, len)) {
++		kfree(tmp);
++		return -EFAULT;
++	}
++	cfg->theme = tmp;
++	cfg->state = 0;
++
++	console_lock();
++	if (vc->vc_decor.state)
++		fbcon_decor_disable(vc, 1);
++	kfree(vc->vc_decor.theme);
++	vc->vc_decor = *cfg;
++	console_unlock();
++
++	printk(KERN_INFO "fbcondecor: console %d using theme '%s'\n",
++			 vc->vc_num, vc->vc_decor.theme);
++	return 0;
++}
++
++static int fbcon_decor_ioctl_dogetcfg(struct vc_data *vc,
++					struct vc_decor *decor)
++{
++	char __user *tmp;
++
++	tmp = decor->theme;
++	*decor = vc->vc_decor;
++	decor->theme = tmp;
++
++	if (vc->vc_decor.theme) {
++		if (copy_to_user(tmp, vc->vc_decor.theme,
++					strlen(vc->vc_decor.theme) + 1))
++			return -EFAULT;
++	} else
++		if (put_user(0, tmp))
++			return -EFAULT;
++
++	return 0;
++}
++
++static int fbcon_decor_ioctl_dosetpic(struct vc_data *vc, struct fb_image *img,
++						unsigned char origin)
++{
++	struct fb_info *info;
++	int len;
++	u8 *tmp;
++
++	if (vc->vc_num != fg_console)
++		return -EINVAL;
++
++	info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++	if (info == NULL)
++		return -EINVAL;
++
++	if (img->width != info->var.xres || img->height != info->var.yres) {
++		printk(KERN_ERR "fbcondecor: picture dimensions mismatch\n");
++		printk(KERN_ERR "%dx%d vs %dx%d\n", img->width, img->height,
++				info->var.xres, info->var.yres);
++		return -EINVAL;
++	}
++
++	if (img->depth != info->var.bits_per_pixel) {
++		printk(KERN_ERR "fbcondecor: picture depth mismatch\n");
++		return -EINVAL;
++	}
++
++	if (img->depth == 8) {
++		if (!img->cmap.len || !img->cmap.red || !img->cmap.green ||
++		    !img->cmap.blue)
++			return -EINVAL;
++
++		tmp = vmalloc(img->cmap.len * 3 * 2);
++		if (!tmp)
++			return -ENOMEM;
++
++		if (copy_from_user(tmp,
++				(void __user *)img->cmap.red,
++						(img->cmap.len << 1)) ||
++			copy_from_user(tmp + (img->cmap.len << 1),
++				(void __user *)img->cmap.green,
++						(img->cmap.len << 1)) ||
++			copy_from_user(tmp + (img->cmap.len << 2),
++				(void __user *)img->cmap.blue,
++						(img->cmap.len << 1))) {
++			vfree(tmp);
++			return -EFAULT;
++		}
++
++		img->cmap.transp = NULL;
++		img->cmap.red = (u16 *)tmp;
++		img->cmap.green = img->cmap.red + img->cmap.len;
++		img->cmap.blue = img->cmap.green + img->cmap.len;
++	} else {
++		img->cmap.red = NULL;
++	}
++
++	len = ((img->depth + 7) >> 3) * img->width * img->height;
++
++	/*
++	 * Allocate an additional byte so that we never go outside of the
++	 * buffer boundaries in the rendering functions in a 24 bpp mode.
++	 */
++	tmp = vmalloc(len + 1);
++
++	if (!tmp)
++		goto out;
++
++	if (copy_from_user(tmp, (void __user *)img->data, len))
++		goto out;
++
++	img->data = tmp;
++
++	console_lock();
++
++	if (info->bgdecor.data)
++		vfree((u8 *)info->bgdecor.data);
++	if (info->bgdecor.cmap.red)
++		vfree(info->bgdecor.cmap.red);
++
++	info->bgdecor = *img;
++
++	if (fbcon_decor_active_vc(vc) && fg_console == vc->vc_num) {
++		redraw_screen(vc, 0);
++		update_region(vc, vc->vc_origin +
++			      vc->vc_size_row * vc->vc_top,
++			      vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
++		fbcon_decor_clear_margins(vc, info, 0);
++	}
++
++	console_unlock();
++
++	return 0;
++
++out:
++	if (img->cmap.red)
++		vfree(img->cmap.red);
++
++	if (tmp)
++		vfree(tmp);
++	return -ENOMEM;
++}
++
++static long fbcon_decor_ioctl(struct file *filp, u_int cmd, u_long arg)
++{
++	struct fbcon_decor_iowrapper __user *wrapper = (void __user *) arg;
++	struct vc_data *vc = NULL;
++	unsigned short vc_num = 0;
++	unsigned char origin = 0;
++	void __user *data = NULL;
++
++	if (!access_ok(VERIFY_READ, wrapper,
++			sizeof(struct fbcon_decor_iowrapper)))
++		return -EFAULT;
++
++	__get_user(vc_num, &wrapper->vc);
++	__get_user(origin, &wrapper->origin);
++	__get_user(data, &wrapper->data);
++
++	if (!vc_cons_allocated(vc_num))
++		return -EINVAL;
++
++	vc = vc_cons[vc_num].d;
++
++	switch (cmd) {
++	case FBIOCONDECOR_SETPIC:
++	{
++		struct fb_image img;
++
++		if (copy_from_user(&img, (struct fb_image __user *)data, sizeof(struct fb_image)))
++			return -EFAULT;
++
++		return fbcon_decor_ioctl_dosetpic(vc, &img, origin);
++	}
++	case FBIOCONDECOR_SETCFG:
++	{
++		struct vc_decor cfg;
++
++		if (copy_from_user(&cfg, (struct vc_decor __user *)data, sizeof(struct vc_decor)))
++			return -EFAULT;
++
++		return fbcon_decor_ioctl_dosetcfg(vc, &cfg, origin);
++	}
++	case FBIOCONDECOR_GETCFG:
++	{
++		int rval;
++		struct vc_decor cfg;
++
++		if (copy_from_user(&cfg, (struct vc_decor __user *)data, sizeof(struct vc_decor)))
++			return -EFAULT;
++
++		rval = fbcon_decor_ioctl_dogetcfg(vc, &cfg);
++
++		if (copy_to_user(data, &cfg, sizeof(struct vc_decor)))
++			return -EFAULT;
++		return rval;
++	}
++	case FBIOCONDECOR_SETSTATE:
++	{
++		unsigned int state = 0;
++
++		if (get_user(state, (unsigned int __user *)data))
++			return -EFAULT;
++		return fbcon_decor_ioctl_dosetstate(vc, state, origin);
++	}
++	case FBIOCONDECOR_GETSTATE:
++	{
++		unsigned int state = 0;
++
++		fbcon_decor_ioctl_dogetstate(vc, &state);
++		return put_user(state, (unsigned int __user *)data);
++	}
++
++	default:
++		return -ENOIOCTLCMD;
++	}
++}
++
++#ifdef CONFIG_COMPAT
++
++static long fbcon_decor_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
++{
++	struct fbcon_decor_iowrapper32 __user *wrapper = (void __user *)arg;
++	struct vc_data *vc = NULL;
++	unsigned short vc_num = 0;
++	unsigned char origin = 0;
++	compat_uptr_t data_compat = 0;
++	void __user *data = NULL;
++
++	if (!access_ok(VERIFY_READ, wrapper,
++			sizeof(struct fbcon_decor_iowrapper32)))
++		return -EFAULT;
++
++	__get_user(vc_num, &wrapper->vc);
++	__get_user(origin, &wrapper->origin);
++	__get_user(data_compat, &wrapper->data);
++	data = compat_ptr(data_compat);
++
++	if (!vc_cons_allocated(vc_num))
++		return -EINVAL;
++
++	vc = vc_cons[vc_num].d;
++
++	switch (cmd) {
++	case FBIOCONDECOR_SETPIC32:
++	{
++		struct fb_image32 img_compat;
++		struct fb_image img;
++
++		if (copy_from_user(&img_compat, (struct fb_image32 __user *)data, sizeof(struct fb_image32)))
++			return -EFAULT;
++
++		fb_image_from_compat(img, img_compat);
++
++		return fbcon_decor_ioctl_dosetpic(vc, &img, origin);
++	}
++
++	case FBIOCONDECOR_SETCFG32:
++	{
++		struct vc_decor32 cfg_compat;
++		struct vc_decor cfg;
++
++		if (copy_from_user(&cfg_compat, (struct vc_decor32 __user *)data, sizeof(struct vc_decor32)))
++			return -EFAULT;
++
++		vc_decor_from_compat(cfg, cfg_compat);
++
++		return fbcon_decor_ioctl_dosetcfg(vc, &cfg, origin);
++	}
++
++	case FBIOCONDECOR_GETCFG32:
++	{
++		int rval;
++		struct vc_decor32 cfg_compat;
++		struct vc_decor cfg;
++
++		if (copy_from_user(&cfg_compat, (struct vc_decor32 __user *)data, sizeof(struct vc_decor32)))
++			return -EFAULT;
++		cfg.theme = compat_ptr(cfg_compat.theme);
++
++		rval = fbcon_decor_ioctl_dogetcfg(vc, &cfg);
++
++		vc_decor_to_compat(cfg_compat, cfg);
++
++		if (copy_to_user((struct vc_decor32 __user *)data, &cfg_compat, sizeof(struct vc_decor32)))
++			return -EFAULT;
++		return rval;
++	}
++
++	case FBIOCONDECOR_SETSTATE32:
++	{
++		compat_uint_t state_compat = 0;
++		unsigned int state = 0;
++
++		if (get_user(state_compat, (compat_uint_t __user *)data))
++			return -EFAULT;
++
++		state = (unsigned int)state_compat;
++
++		return fbcon_decor_ioctl_dosetstate(vc, state, origin);
++	}
++
++	case FBIOCONDECOR_GETSTATE32:
++	{
++		compat_uint_t state_compat = 0;
++		unsigned int state = 0;
++
++		fbcon_decor_ioctl_dogetstate(vc, &state);
++		state_compat = (compat_uint_t)state;
++
++		return put_user(state_compat, (compat_uint_t __user *)data);
++	}
++
++	default:
++		return -ENOIOCTLCMD;
++	}
++}
++#else
++  #define fbcon_decor_compat_ioctl NULL
++#endif
++
++static struct file_operations fbcon_decor_ops = {
++	.owner = THIS_MODULE,
++	.unlocked_ioctl = fbcon_decor_ioctl,
++	.compat_ioctl = fbcon_decor_compat_ioctl
++};
++
++static struct miscdevice fbcon_decor_dev = {
++	.minor = MISC_DYNAMIC_MINOR,
++	.name = "fbcondecor",
++	.fops = &fbcon_decor_ops
++};
++
++void fbcon_decor_reset(void)
++{
++	int i;
++
++	for (i = 0; i < num_registered_fb; i++) {
++		registered_fb[i]->bgdecor.data = NULL;
++		registered_fb[i]->bgdecor.cmap.red = NULL;
++	}
++
++	for (i = 0; i < MAX_NR_CONSOLES && vc_cons[i].d; i++) {
++		vc_cons[i].d->vc_decor.state = vc_cons[i].d->vc_decor.twidth =
++						vc_cons[i].d->vc_decor.theight = 0;
++		vc_cons[i].d->vc_decor.theme = NULL;
++	}
++}
++
++int fbcon_decor_init(void)
++{
++	int i;
++
++	fbcon_decor_reset();
++
++	if (initialized)
++		return 0;
++
++	i = misc_register(&fbcon_decor_dev);
++	if (i) {
++		printk(KERN_ERR "fbcondecor: failed to register device\n");
++		return i;
++	}
++
++	fbcon_decor_call_helper("init", 0);
++	initialized = 1;
++	return 0;
++}
++
++int fbcon_decor_exit(void)
++{
++	fbcon_decor_reset();
++	return 0;
++}
+diff --git a/drivers/video/console/fbcondecor.h b/drivers/video/console/fbcondecor.h
+new file mode 100644
+index 0000000..c49386c
+--- /dev/null
++++ b/drivers/video/console/fbcondecor.h
+@@ -0,0 +1,77 @@
++/*
++ *  linux/drivers/video/console/fbcondecor.h -- Framebuffer Console Decoration headers
++ *
++ *  Copyright (C) 2004 Michal Januszewski <michalj+fbcondecor@gmail.com>
++ *
++ */
++
++#ifndef __FBCON_DECOR_H
++#define __FBCON_DECOR_H
++
++#ifndef _LINUX_FB_H
++#include <linux/fb.h>
++#endif
++
++/* This is needed for vc_cons in fbcmap.c */
++#include <linux/vt_kern.h>
++
++struct fb_cursor;
++struct fb_info;
++struct vc_data;
++
++#ifdef CONFIG_FB_CON_DECOR
++/* fbcondecor.c */
++int fbcon_decor_init(void);
++int fbcon_decor_exit(void);
++int fbcon_decor_call_helper(char *cmd, unsigned short cons);
++int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw);
++
++/* cfbcondecor.c */
++void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info, const unsigned short *s, int count, int yy, int xx);
++void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor);
++void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width);
++void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info, int bottom_only);
++void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank);
++void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, int sx, int dx, int width);
++void fbcon_decor_copy(u8 *dst, u8 *src, int height, int width, int linebytes, int srclinesbytes, int bpp);
++void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc);
++
++/* vt.c */
++void acquire_console_sem(void);
++void release_console_sem(void);
++void do_unblank_screen(int entering_gfx);
++
++/* struct vc_data *y */
++#define fbcon_decor_active_vc(y) (y->vc_decor.state && y->vc_decor.theme)
++
++/* struct fb_info *x, struct vc_data *y */
++#define fbcon_decor_active_nores(x, y) (x->bgdecor.data && fbcon_decor_active_vc(y))
++
++/* struct fb_info *x, struct vc_data *y */
++#define fbcon_decor_active(x, y) (fbcon_decor_active_nores(x, y) &&	\
++				x->bgdecor.width == x->var.xres &&	\
++				x->bgdecor.height == x->var.yres &&	\
++				x->bgdecor.depth == x->var.bits_per_pixel)
++
++#else /* CONFIG_FB_CON_DECOR */
++
++static inline void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info, const unsigned short *s, int count, int yy, int xx) {}
++static inline void fbcon_decor_putc(struct vc_data *vc, struct fb_info *info, int c, int ypos, int xpos) {}
++static inline void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor) {}
++static inline void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) {}
++static inline void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info, int bottom_only) {}
++static inline void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank) {}
++static inline void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, int sx, int dx, int width) {}
++static inline void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc) {}
++static inline int fbcon_decor_call_helper(char *cmd, unsigned short cons) { return 0; }
++static inline int fbcon_decor_init(void) { return 0; }
++static inline int fbcon_decor_exit(void) { return 0; }
++static inline int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw) { return 0; }
++
++#define fbcon_decor_active_vc(y) (0)
++#define fbcon_decor_active_nores(x, y) (0)
++#define fbcon_decor_active(x, y) (0)
++
++#endif /* CONFIG_FB_CON_DECOR */
++
++#endif /* __FBCON_DECOR_H */
+diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
+index 88b008f..c84113d 100644
+--- a/drivers/video/fbdev/Kconfig
++++ b/drivers/video/fbdev/Kconfig
+@@ -1216,7 +1216,6 @@ config FB_MATROX
+ 	select FB_CFB_FILLRECT
+ 	select FB_CFB_COPYAREA
+ 	select FB_CFB_IMAGEBLIT
+-	select FB_TILEBLITTING
+ 	select FB_MACMODES if PPC_PMAC
+ 	---help---
+ 	  Say Y here if you have a Matrox Millennium, Matrox Millennium II,
+diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
+index f89245b..c2c12ce 100644
+--- a/drivers/video/fbdev/core/fbcmap.c
++++ b/drivers/video/fbdev/core/fbcmap.c
+@@ -17,6 +17,8 @@
+ #include <linux/slab.h>
+ #include <linux/uaccess.h>
+ 
++#include "../../console/fbcondecor.h"
++
+ static u16 red2[] __read_mostly = {
+     0x0000, 0xaaaa
+ };
+@@ -254,9 +256,12 @@ int fb_set_cmap(struct fb_cmap *cmap, struct fb_info *info)
+ 				break;
+ 		}
+ 	}
+-	if (rc == 0)
++	if (rc == 0) {
+ 		fb_copy_cmap(cmap, &info->cmap);
+-
++		if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
++		    info->fix.visual == FB_VISUAL_DIRECTCOLOR)
++			fbcon_decor_fix_pseudo_pal(info, vc_cons[fg_console].d);
++	}
+ 	return rc;
+ }
+ 
+diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
+index 76c1ad9..fafc0af 100644
+--- a/drivers/video/fbdev/core/fbmem.c
++++ b/drivers/video/fbdev/core/fbmem.c
+@@ -1251,15 +1251,6 @@ struct fb_fix_screeninfo32 {
+ 	u16			reserved[3];
+ };
+ 
+-struct fb_cmap32 {
+-	u32			start;
+-	u32			len;
+-	compat_caddr_t	red;
+-	compat_caddr_t	green;
+-	compat_caddr_t	blue;
+-	compat_caddr_t	transp;
+-};
+-
+ static int fb_getput_cmap(struct fb_info *info, unsigned int cmd,
+ 			  unsigned long arg)
+ {
+diff --git a/include/linux/console_decor.h b/include/linux/console_decor.h
+new file mode 100644
+index 0000000..1514355
+--- /dev/null
++++ b/include/linux/console_decor.h
+@@ -0,0 +1,46 @@
++#ifndef _LINUX_CONSOLE_DECOR_H_
++#define _LINUX_CONSOLE_DECOR_H_ 1
++
++/* A structure used by the framebuffer console decorations (drivers/video/console/fbcondecor.c) */
++struct vc_decor {
++	__u8 bg_color;				/* The color that is to be treated as transparent */
++	__u8 state;				/* Current decor state: 0 = off, 1 = on */
++	__u16 tx, ty;				/* Top left corner coordinates of the text field */
++	__u16 twidth, theight;			/* Width and height of the text field */
++	char *theme;
++};
++
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++#include <linux/compat.h>
++
++struct vc_decor32 {
++	__u8 bg_color;				/* The color that is to be treated as transparent */
++	__u8 state;				/* Current decor state: 0 = off, 1 = on */
++	__u16 tx, ty;				/* Top left corner coordinates of the text field */
++	__u16 twidth, theight;			/* Width and height of the text field */
++	compat_uptr_t theme;
++};
++
++#define vc_decor_from_compat(to, from) \
++	(to).bg_color = (from).bg_color; \
++	(to).state    = (from).state; \
++	(to).tx       = (from).tx; \
++	(to).ty       = (from).ty; \
++	(to).twidth   = (from).twidth; \
++	(to).theight  = (from).theight; \
++	(to).theme    = compat_ptr((from).theme)
++
++#define vc_decor_to_compat(to, from) \
++	(to).bg_color = (from).bg_color; \
++	(to).state    = (from).state; \
++	(to).tx       = (from).tx; \
++	(to).ty       = (from).ty; \
++	(to).twidth   = (from).twidth; \
++	(to).theight  = (from).theight; \
++	(to).theme    = ptr_to_compat((from).theme)
++
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
++#endif
+diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
+index 6fd3c90..c649555 100644
+--- a/include/linux/console_struct.h
++++ b/include/linux/console_struct.h
+@@ -20,6 +20,7 @@ struct vt_struct;
+ struct uni_pagedir;
+ 
+ #define NPAR 16
++#include <linux/console_decor.h>
+ 
+ /*
+  * Example: vc_data of a console that was scrolled 3 lines down.
+@@ -140,6 +141,8 @@ struct vc_data {
+ 	struct uni_pagedir *vc_uni_pagedir;
+ 	struct uni_pagedir **vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */
+ 	bool vc_panic_force_write; /* when oops/panic this VC can accept forced output/blanking */
++
++	struct vc_decor vc_decor;
+ 	/* additional information is in vt_kern.h */
+ };
+ 
+diff --git a/include/linux/fb.h b/include/linux/fb.h
+index a964d07..672cc64 100644
+--- a/include/linux/fb.h
++++ b/include/linux/fb.h
+@@ -238,6 +238,34 @@ struct fb_deferred_io {
+ };
+ #endif
+ 
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++struct fb_image32 {
++	__u32 dx;			/* Where to place image */
++	__u32 dy;
++	__u32 width;			/* Size of image */
++	__u32 height;
++	__u32 fg_color;			/* Only used when a mono bitmap */
++	__u32 bg_color;
++	__u8  depth;			/* Depth of the image */
++	const compat_uptr_t data;	/* Pointer to image data */
++	struct fb_cmap32 cmap;		/* color map info */
++};
++
++#define fb_image_from_compat(to, from) \
++	(to).dx       = (from).dx; \
++	(to).dy       = (from).dy; \
++	(to).width    = (from).width; \
++	(to).height   = (from).height; \
++	(to).fg_color = (from).fg_color; \
++	(to).bg_color = (from).bg_color; \
++	(to).depth    = (from).depth; \
++	(to).data     = compat_ptr((from).data); \
++	fb_cmap_from_compat((to).cmap, (from).cmap)
++
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
+ /*
+  * Frame buffer operations
+  *
+@@ -508,6 +536,9 @@ struct fb_info {
+ #define FBINFO_STATE_SUSPENDED	1
+ 	u32 state;			/* Hardware state i.e suspend */
+ 	void *fbcon_par;                /* fbcon use-only private area */
++
++	struct fb_image bgdecor;
++
+ 	/* From here on everything is device dependent */
+ 	void *par;
+ 	/* we need the PCI or similar aperture base/size not
+diff --git a/include/uapi/linux/fb.h b/include/uapi/linux/fb.h
+index fb795c3..4b57c67 100644
+--- a/include/uapi/linux/fb.h
++++ b/include/uapi/linux/fb.h
+@@ -8,6 +8,23 @@
+ 
+ #define FB_MAX			32	/* sufficient for now */
+ 
++struct fbcon_decor_iowrapper {
++	unsigned short vc;		/* Virtual console */
++	unsigned char origin;		/* Point of origin of the request */
++	void *data;
++};
++
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++#include <linux/compat.h>
++struct fbcon_decor_iowrapper32 {
++	unsigned short vc;		/* Virtual console */
++	unsigned char origin;		/* Point of origin of the request */
++	compat_uptr_t data;
++};
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
+ /* ioctls
+    0x46 is 'F'								*/
+ #define FBIOGET_VSCREENINFO	0x4600
+@@ -35,6 +52,25 @@
+ #define FBIOGET_DISPINFO        0x4618
+ #define FBIO_WAITFORVSYNC	_IOW('F', 0x20, __u32)
+ 
++#define FBIOCONDECOR_SETCFG	_IOWR('F', 0x19, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_GETCFG	_IOR('F', 0x1A, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_SETSTATE	_IOWR('F', 0x1B, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_GETSTATE	_IOR('F', 0x1C, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_SETPIC	_IOWR('F', 0x1D, struct fbcon_decor_iowrapper)
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++#define FBIOCONDECOR_SETCFG32	_IOWR('F', 0x19, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_GETCFG32	_IOR('F', 0x1A, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_SETSTATE32	_IOWR('F', 0x1B, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_GETSTATE32	_IOR('F', 0x1C, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_SETPIC32	_IOWR('F', 0x1D, struct fbcon_decor_iowrapper32)
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
++#define FBCON_DECOR_THEME_LEN		128	/* Maximum length of a theme name */
++#define FBCON_DECOR_IO_ORIG_KERNEL	0	/* Kernel ioctl origin */
++#define FBCON_DECOR_IO_ORIG_USER	1	/* User ioctl origin */
++
+ #define FB_TYPE_PACKED_PIXELS		0	/* Packed Pixels	*/
+ #define FB_TYPE_PLANES			1	/* Non interleaved planes */
+ #define FB_TYPE_INTERLEAVED_PLANES	2	/* Interleaved planes	*/
+@@ -277,6 +313,29 @@ struct fb_var_screeninfo {
+ 	__u32 reserved[4];		/* Reserved for future compatibility */
+ };
+ 
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++struct fb_cmap32 {
++	__u32 start;
++	__u32 len;			/* Number of entries */
++	compat_uptr_t red;		/* Red values	*/
++	compat_uptr_t green;
++	compat_uptr_t blue;
++	compat_uptr_t transp;		/* transparency, can be NULL */
++};
++
++#define fb_cmap_from_compat(to, from) \
++	(to).start  = (from).start; \
++	(to).len    = (from).len; \
++	(to).red    = compat_ptr((from).red); \
++	(to).green  = compat_ptr((from).green); \
++	(to).blue   = compat_ptr((from).blue); \
++	(to).transp = compat_ptr((from).transp)
++
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
++
+ struct fb_cmap {
+ 	__u32 start;			/* First entry	*/
+ 	__u32 len;			/* Number of entries */
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 6ee416e..d2c2425 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -149,6 +149,10 @@ static const int cap_last_cap = CAP_LAST_CAP;
+ static unsigned long hung_task_timeout_max = (LONG_MAX/HZ);
+ #endif
+ 
++#ifdef CONFIG_FB_CON_DECOR
++extern char fbcon_decor_path[];
++#endif
++
+ #ifdef CONFIG_INOTIFY_USER
+ #include <linux/inotify.h>
+ #endif
+@@ -266,6 +270,15 @@ static struct ctl_table sysctl_base_table[] = {
+ 		.mode		= 0555,
+ 		.child		= dev_table,
+ 	},
++#ifdef CONFIG_FB_CON_DECOR
++	{
++		.procname	= "fbcondecor",
++		.data		= &fbcon_decor_path,
++		.maxlen		= KMOD_PATH_LEN,
++		.mode		= 0644,
++		.proc_handler	= &proc_dostring,
++	},
++#endif
+ 	{ }
+ };
+ 

diff --git a/4400_alpha-sysctl-uac.patch b/4400_alpha-sysctl-uac.patch
new file mode 100644
index 0000000..d42b4ed
--- /dev/null
+++ b/4400_alpha-sysctl-uac.patch
@@ -0,0 +1,142 @@
+diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
+index 7f312d8..1eb686b 100644
+--- a/arch/alpha/Kconfig
++++ b/arch/alpha/Kconfig
+@@ -697,6 +697,33 @@ config HZ
+ 	default 1200 if HZ_1200
+ 	default 1024
+
++config ALPHA_UAC_SYSCTL
++       bool "Configure UAC policy via sysctl"
++       depends on SYSCTL
++       default y
++       ---help---
++         Configuring the UAC (unaligned access control) policy on a Linux
++         system usually involves setting a compile-time define. If you say
++         Y here, you will be able to modify the UAC policy at runtime using
++         the /proc interface.
++
++         The UAC policy defines the action Linux should take when an
++         unaligned memory access occurs. The action can include suppressing
++         the warning message (NOPRINT), sending a signal to the offending
++         program to help developers debug their applications (SIGBUS), or
++         disabling the transparent fixing (NOFIX).
++
++         The sysctls will be initialized to the compile-time defined UAC
++         policy. You can change these manually, or with the sysctl(8)
++         userspace utility.
++
++         To disable the warning messages at runtime, you would use
++
++           echo 1 > /proc/sys/debug/uac/noprint
++
++         This is pretty harmless. Say Y if you're not sure.
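++
++         Equivalently, as a sketch (assuming the uac table is registered
++         under debug_table, as in the kernel/sysctl.c hunk below), the same
++         policy can be set with the sysctl(8) utility:
++
++           sysctl -w debug.uac.sigbus=1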
++
++
+ source "drivers/pci/Kconfig"
+ source "drivers/eisa/Kconfig"
+
+diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
+index 74aceea..cb35d80 100644
+--- a/arch/alpha/kernel/traps.c
++++ b/arch/alpha/kernel/traps.c
+@@ -103,6 +103,49 @@ static char * ireg_name[] = {"v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
+ 			   "t10", "t11", "ra", "pv", "at", "gp", "sp", "zero"};
+ #endif
+
++#ifdef CONFIG_ALPHA_UAC_SYSCTL
++
++#include <linux/sysctl.h>
++
++static int enabled_noprint = 0;
++static int enabled_sigbus = 0;
++static int enabled_nofix = 0;
++
++struct ctl_table uac_table[] = {
++       {
++               .procname       = "noprint",
++               .data           = &enabled_noprint,
++               .maxlen         = sizeof (int),
++               .mode           = 0644,
++               .proc_handler = &proc_dointvec,
++       },
++       {
++               .procname       = "sigbus",
++               .data           = &enabled_sigbus,
++               .maxlen         = sizeof (int),
++               .mode           = 0644,
++               .proc_handler = &proc_dointvec,
++       },
++       {
++               .procname       = "nofix",
++               .data           = &enabled_nofix,
++               .maxlen         = sizeof (int),
++               .mode           = 0644,
++               .proc_handler = &proc_dointvec,
++       },
++       { }
++};
++
++static int __init init_uac_sysctl(void)
++{
++   /* Initialize sysctls with the #defined UAC policy */
++   enabled_noprint = (test_thread_flag (TS_UAC_NOPRINT)) ? 1 : 0;
++   enabled_sigbus = (test_thread_flag (TS_UAC_SIGBUS)) ? 1 : 0;
++   enabled_nofix = (test_thread_flag (TS_UAC_NOFIX)) ? 1 : 0;
++   return 0;
++}
++#endif
++
+ static void
+ dik_show_code(unsigned int *pc)
+ {
+@@ -785,7 +828,12 @@ do_entUnaUser(void __user * va, unsigned long opcode,
+ 	/* Check the UAC bits to decide what the user wants us to do
+ 	   with the unaligned access.  */
+
++#ifndef CONFIG_ALPHA_UAC_SYSCTL
+ 	if (!(current_thread_info()->status & TS_UAC_NOPRINT)) {
++#else  /* CONFIG_ALPHA_UAC_SYSCTL */
++	if (!(current_thread_info()->status & TS_UAC_NOPRINT) &&
++	    !(enabled_noprint)) {
++#endif /* CONFIG_ALPHA_UAC_SYSCTL */
+ 		if (__ratelimit(&ratelimit)) {
+ 			printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n",
+ 			       current->comm, task_pid_nr(current),
+@@ -1090,3 +1138,6 @@ trap_init(void)
+ 	wrent(entSys, 5);
+ 	wrent(entDbg, 6);
+ }
++#ifdef CONFIG_ALPHA_UAC_SYSCTL
++       __initcall(init_uac_sysctl);
++#endif
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 87b2fc3..55021a8 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -152,6 +152,11 @@ static unsigned long hung_task_timeout_max = (LONG_MAX/HZ);
+ #ifdef CONFIG_INOTIFY_USER
+ #include <linux/inotify.h>
+ #endif
++
++#ifdef CONFIG_ALPHA_UAC_SYSCTL
++extern struct ctl_table uac_table[];
++#endif
++
+ #ifdef CONFIG_SPARC
+ #endif
+
+@@ -1844,6 +1849,13 @@ static struct ctl_table debug_table[] = {
+ 		.extra2		= &one,
+ 	},
+ #endif
++#ifdef CONFIG_ALPHA_UAC_SYSCTL
++	{
++		.procname	= "uac",
++		.mode		= 0555,
++		.child		= uac_table,
++	},
++#endif /* CONFIG_ALPHA_UAC_SYSCTL */
+ 	{ }
+ };
+

diff --git a/5010_enable-additional-cpu-optimizations-for-gcc.patch b/5010_enable-additional-cpu-optimizations-for-gcc.patch
new file mode 100644
index 0000000..c68d072
--- /dev/null
+++ b/5010_enable-additional-cpu-optimizations-for-gcc.patch
@@ -0,0 +1,530 @@
+WARNING
+This patch works with gcc versions 4.9+ and with kernel versions 3.15+. It should
+NOT be applied when compiling with older versions of gcc, due to the renaming of
+the march flags introduced with the version 4.9 release of gcc.[1]
+
+For older versions of gcc, use the older version of this patch hosted in the
+same github repository.
+
+FEATURES
+This patch adds additional CPU options to the Linux kernel accessible under:
+ Processor type and features  --->
+  Processor family --->
+
+The expanded microarchitectures include:
+* AMD Improved K8-family
+* AMD K10-family
+* AMD Family 10h (Barcelona)
+* AMD Family 14h (Bobcat)
+* AMD Family 16h (Jaguar)
+* AMD Family 15h (Bulldozer)
+* AMD Family 15h (Piledriver)
+* AMD Family 15h (Steamroller)
+* AMD Family 15h (Excavator)
+* AMD Family 17h (Zen)
+* Intel Silvermont low-power processors
+* Intel 1st Gen Core i3/i5/i7 (Nehalem)
+* Intel 1.5 Gen Core i3/i5/i7 (Westmere)
+* Intel 2nd Gen Core i3/i5/i7 (Sandybridge)
+* Intel 3rd Gen Core i3/i5/i7 (Ivybridge)
+* Intel 4th Gen Core i3/i5/i7 (Haswell)
+* Intel 5th Gen Core i3/i5/i7 (Broadwell)
+* Intel 6th Gen Core i3/i5/i7 (Skylake)
+
+It also offers the 'native' option, which "selects the CPU
+to generate code for at compilation time by determining the processor type of
+the compiling machine. Using -march=native enables all instruction subsets
+supported by the local machine and will produce code optimized for the local
+machine under the constraints of the selected instruction set."[3]
+
+MINOR NOTES
+This patch also changes 'atom' to 'bonnell' in accordance with the gcc v4.9
+changes. Note that upstream is using the deprecated 'march=atom' flag when I
+believe it should use the newer 'march=bonnell' flag for atom processors.[2]
+
+It is not recommended to compile on Atom-CPUs with the 'native' option.[4] The
+recommendation is to use the 'atom' option instead.
+
+BENEFITS
+Small but real speed increases are measurable by benchmarking a kernel build,
+comparing a generic kernel to one built with one of the respective microarchs.
+
+See the following experimental evidence supporting this statement:
+https://github.com/graysky2/kernel_gcc_patch
+
+REQUIREMENTS
+linux version >=3.15
+gcc version >=4.9
+
+ACKNOWLEDGMENTS
+This patch builds on the seminal work by Jeroen.[5]
+
+REFERENCES
+1. https://gcc.gnu.org/gcc-4.9/changes.html
+2. https://bugzilla.kernel.org/show_bug.cgi?id=77461
+3. https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html
+4. https://github.com/graysky2/kernel_gcc_patch/issues/15
+5. http://www.linuxforge.net/docs/linux/linux-gcc.php
+
+--- a/arch/x86/include/asm/module.h	2017-08-02 11:41:47.442200461 -0400
++++ b/arch/x86/include/asm/module.h	2017-08-02 12:14:21.204358744 -0400
+@@ -15,6 +15,24 @@
+ #define MODULE_PROC_FAMILY "586MMX "
+ #elif defined CONFIG_MCORE2
+ #define MODULE_PROC_FAMILY "CORE2 "
++#elif defined CONFIG_MNATIVE
++#define MODULE_PROC_FAMILY "NATIVE "
++#elif defined CONFIG_MNEHALEM
++#define MODULE_PROC_FAMILY "NEHALEM "
++#elif defined CONFIG_MWESTMERE
++#define MODULE_PROC_FAMILY "WESTMERE "
++#elif defined CONFIG_MSILVERMONT
++#define MODULE_PROC_FAMILY "SILVERMONT "
++#elif defined CONFIG_MSANDYBRIDGE
++#define MODULE_PROC_FAMILY "SANDYBRIDGE "
++#elif defined CONFIG_MIVYBRIDGE
++#define MODULE_PROC_FAMILY "IVYBRIDGE "
++#elif defined CONFIG_MHASWELL
++#define MODULE_PROC_FAMILY "HASWELL "
++#elif defined CONFIG_MBROADWELL
++#define MODULE_PROC_FAMILY "BROADWELL "
++#elif defined CONFIG_MSKYLAKE
++#define MODULE_PROC_FAMILY "SKYLAKE "
+ #elif defined CONFIG_MATOM
+ #define MODULE_PROC_FAMILY "ATOM "
+ #elif defined CONFIG_M686
+@@ -33,6 +51,26 @@
+ #define MODULE_PROC_FAMILY "K7 "
+ #elif defined CONFIG_MK8
+ #define MODULE_PROC_FAMILY "K8 "
++#elif defined CONFIG_MK8SSE3
++#define MODULE_PROC_FAMILY "K8SSE3 "
++#elif defined CONFIG_MK10
++#define MODULE_PROC_FAMILY "K10 "
++#elif defined CONFIG_MBARCELONA
++#define MODULE_PROC_FAMILY "BARCELONA "
++#elif defined CONFIG_MBOBCAT
++#define MODULE_PROC_FAMILY "BOBCAT "
++#elif defined CONFIG_MBULLDOZER
++#define MODULE_PROC_FAMILY "BULLDOZER "
++#elif defined CONFIG_MPILEDRIVER
++#define MODULE_PROC_FAMILY "PILEDRIVER "
++#elif defined CONFIG_MSTEAMROLLER
++#define MODULE_PROC_FAMILY "STEAMROLLER "
++#elif defined CONFIG_MJAGUAR
++#define MODULE_PROC_FAMILY "JAGUAR "
++#elif defined CONFIG_MEXCAVATOR
++#define MODULE_PROC_FAMILY "EXCAVATOR "
++#elif defined CONFIG_MZEN
++#define MODULE_PROC_FAMILY "ZEN "
+ #elif defined CONFIG_MELAN
+ #define MODULE_PROC_FAMILY "ELAN "
+ #elif defined CONFIG_MCRUSOE
+--- a/arch/x86/Kconfig.cpu	2017-08-02 11:41:47.443200463 -0400
++++ b/arch/x86/Kconfig.cpu	2017-08-02 12:14:37.108956741 -0400
+@@ -115,6 +115,7 @@ config MPENTIUMM
+ config MPENTIUM4
+ 	bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon"
+ 	depends on X86_32
++	select X86_P6_NOP
+ 	---help---
+ 	  Select this for Intel Pentium 4 chips.  This includes the
+ 	  Pentium 4, Pentium D, P4-based Celeron and Xeon, and
+@@ -147,9 +148,8 @@ config MPENTIUM4
+ 		-Paxville
+ 		-Dempsey
+ 
+-
+ config MK6
+-	bool "K6/K6-II/K6-III"
++	bool "AMD K6/K6-II/K6-III"
+ 	depends on X86_32
+ 	---help---
+ 	  Select this for an AMD K6-family processor.  Enables use of
+@@ -157,7 +157,7 @@ config MK6
+ 	  flags to GCC.
+ 
+ config MK7
+-	bool "Athlon/Duron/K7"
++	bool "AMD Athlon/Duron/K7"
+ 	depends on X86_32
+ 	---help---
+ 	  Select this for an AMD Athlon K7-family processor.  Enables use of
+@@ -165,12 +165,83 @@ config MK7
+ 	  flags to GCC.
+ 
+ config MK8
+-	bool "Opteron/Athlon64/Hammer/K8"
++	bool "AMD Opteron/Athlon64/Hammer/K8"
+ 	---help---
+ 	  Select this for an AMD Opteron or Athlon64 Hammer-family processor.
+ 	  Enables use of some extended instructions, and passes appropriate
+ 	  optimization flags to GCC.
+ 
++config MK8SSE3
++	bool "AMD Opteron/Athlon64/Hammer/K8 with SSE3"
++	---help---
++	  Select this for improved AMD Opteron or Athlon64 Hammer-family processors.
++	  Enables use of some extended instructions, and passes appropriate
++	  optimization flags to GCC.
++
++config MK10
++	bool "AMD 61xx/7x50/PhenomX3/X4/II/K10"
++	---help---
++	  Select this for an AMD 61xx Eight-Core Magny-Cours, Athlon X2 7x50,
++		Phenom X3/X4/II, Athlon II X2/X3/X4, or Turion II-family processor.
++	  Enables use of some extended instructions, and passes appropriate
++	  optimization flags to GCC.
++
++config MBARCELONA
++	bool "AMD Barcelona"
++	---help---
++	  Select this for AMD Family 10h Barcelona processors.
++
++	  Enables -march=barcelona
++
++config MBOBCAT
++	bool "AMD Bobcat"
++	---help---
++	  Select this for AMD Family 14h Bobcat processors.
++
++	  Enables -march=btver1
++
++config MJAGUAR
++	bool "AMD Jaguar"
++	---help---
++	  Select this for AMD Family 16h Jaguar processors.
++
++	  Enables -march=btver2
++
++config MBULLDOZER
++	bool "AMD Bulldozer"
++	---help---
++	  Select this for AMD Family 15h Bulldozer processors.
++
++	  Enables -march=bdver1
++
++config MPILEDRIVER
++	bool "AMD Piledriver"
++	---help---
++	  Select this for AMD Family 15h Piledriver processors.
++
++	  Enables -march=bdver2
++
++config MSTEAMROLLER
++	bool "AMD Steamroller"
++	---help---
++	  Select this for AMD Family 15h Steamroller processors.
++
++	  Enables -march=bdver3
++
++config MEXCAVATOR
++	bool "AMD Excavator"
++	---help---
++	  Select this for AMD Family 15h Excavator processors.
++
++	  Enables -march=bdver4
++
++config MZEN
++	bool "AMD Zen"
++	---help---
++	  Select this for AMD Family 17h Zen processors.
++
++	  Enables -march=znver1
++
+ config MCRUSOE
+ 	bool "Crusoe"
+ 	depends on X86_32
+@@ -252,6 +323,7 @@ config MVIAC7
+ 
+ config MPSC
+ 	bool "Intel P4 / older Netburst based Xeon"
++	select X86_P6_NOP
+ 	depends on X86_64
+ 	---help---
+ 	  Optimize for Intel Pentium 4, Pentium D and older Nocona/Dempsey
+@@ -261,8 +333,19 @@ config MPSC
+ 	  using the cpu family field
+ 	  in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
+ 
++config MATOM
++	bool "Intel Atom"
++	select X86_P6_NOP
++	---help---
++
++	  Select this for the Intel Atom platform. Intel Atom CPUs have an
++	  in-order pipelining architecture and thus can benefit from
++	  accordingly optimized code. Use a recent GCC with specific Atom
++	  support in order to fully benefit from selecting this option.
++
+ config MCORE2
+-	bool "Core 2/newer Xeon"
++	bool "Intel Core 2"
++	select X86_P6_NOP
+ 	---help---
+ 
+ 	  Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
+@@ -270,14 +353,79 @@ config MCORE2
+ 	  family in /proc/cpuinfo. Newer ones have 6 and older ones 15
+ 	  (not a typo)
+ 
+-config MATOM
+-	bool "Intel Atom"
++	  Enables -march=core2
++
++config MNEHALEM
++	bool "Intel Nehalem"
++	select X86_P6_NOP
+ 	---help---
+ 
+-	  Select this for the Intel Atom platform. Intel Atom CPUs have an
+-	  in-order pipelining architecture and thus can benefit from
+-	  accordingly optimized code. Use a recent GCC with specific Atom
+-	  support in order to fully benefit from selecting this option.
++	  Select this for 1st Gen Core processors in the Nehalem family.
++
++	  Enables -march=nehalem
++
++config MWESTMERE
++	bool "Intel Westmere"
++	select X86_P6_NOP
++	---help---
++
++	  Select this for the Intel Westmere formerly Nehalem-C family.
++
++	  Enables -march=westmere
++
++config MSILVERMONT
++	bool "Intel Silvermont"
++	select X86_P6_NOP
++	---help---
++
++	  Select this for the Intel Silvermont platform.
++
++	  Enables -march=silvermont
++
++config MSANDYBRIDGE
++	bool "Intel Sandy Bridge"
++	select X86_P6_NOP
++	---help---
++
++	  Select this for 2nd Gen Core processors in the Sandy Bridge family.
++
++	  Enables -march=sandybridge
++
++config MIVYBRIDGE
++	bool "Intel Ivy Bridge"
++	select X86_P6_NOP
++	---help---
++
++	  Select this for 3rd Gen Core processors in the Ivy Bridge family.
++
++	  Enables -march=ivybridge
++
++config MHASWELL
++	bool "Intel Haswell"
++	select X86_P6_NOP
++	---help---
++
++	  Select this for 4th Gen Core processors in the Haswell family.
++
++	  Enables -march=haswell
++
++config MBROADWELL
++	bool "Intel Broadwell"
++	select X86_P6_NOP
++	---help---
++
++	  Select this for 5th Gen Core processors in the Broadwell family.
++
++	  Enables -march=broadwell
++
++config MSKYLAKE
++	bool "Intel Skylake"
++	select X86_P6_NOP
++	---help---
++
++	  Select this for 6th Gen Core processors in the Skylake family.
++
++	  Enables -march=skylake
+ 
+ config GENERIC_CPU
+ 	bool "Generic-x86-64"
+@@ -286,6 +434,19 @@ config GENERIC_CPU
+ 	  Generic x86-64 CPU.
+ 	  Run equally well on all x86-64 CPUs.
+ 
++config MNATIVE
++ bool "Native optimizations autodetected by GCC"
++ ---help---
++
++   GCC 4.2 and above support -march=native, which automatically detects
++   the optimum settings to use based on your processor. -march=native 
++   also detects and applies additional settings beyond -march specific
++   to your CPU (e.g. -msse4). Unless you have a specific reason not to
++   (e.g. distcc cross-compiling), you should probably be using
++   -march=native rather than anything listed below.
++
++   Enables -march=native
++
+ endchoice
+ 
+ config X86_GENERIC
+@@ -310,7 +471,7 @@ config X86_INTERNODE_CACHE_SHIFT
+ config X86_L1_CACHE_SHIFT
+ 	int
+ 	default "7" if MPENTIUM4 || MPSC
+-	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
++	default "6" if MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MJAGUAR || MPENTIUMM || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MNATIVE || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
+ 	default "4" if MELAN || M486 || MGEODEGX1
+ 	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
+ 
+@@ -341,45 +502,46 @@ config X86_ALIGNMENT_16
+ 
+ config X86_INTEL_USERCOPY
+ 	def_bool y
+-	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
++	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK8SSE3 || MK7 || MEFFICEON || MCORE2 || MK10 || MBARCELONA || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MNATIVE
+ 
+ config X86_USE_PPRO_CHECKSUM
+ 	def_bool y
+-	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
++	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MK10 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MATOM || MNATIVE
+ 
+ config X86_USE_3DNOW
+ 	def_bool y
+ 	depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML
+ 
+-#
+-# P6_NOPs are a relatively minor optimization that require a family >=
+-# 6 processor, except that it is broken on certain VIA chips.
+-# Furthermore, AMD chips prefer a totally different sequence of NOPs
+-# (which work on all CPUs).  In addition, it looks like Virtual PC
+-# does not understand them.
+-#
+-# As a result, disallow these if we're not compiling for X86_64 (these
+-# NOPs do work on all x86-64 capable chips); the list of processors in
+-# the right-hand clause are the cores that benefit from this optimization.
+-#
+ config X86_P6_NOP
+-	def_bool y
+-	depends on X86_64
+-	depends on (MCORE2 || MPENTIUM4 || MPSC)
++	default n
++	bool "Support for P6_NOPs on Intel chips"
++	depends on (MCORE2 || MPENTIUM4 || MPSC || MATOM || MNEHALEM || MWESTMERE || MSILVERMONT  || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MNATIVE)
++	---help---
++	P6_NOPs are a relatively minor optimization that require a family >=
++	6 processor, except that it is broken on certain VIA chips.
++	Furthermore, AMD chips prefer a totally different sequence of NOPs
++	(which work on all CPUs).  In addition, it looks like Virtual PC
++	does not understand them.
++
++	As a result, disallow these if we're not compiling for X86_64 (these
++	NOPs do work on all x86-64 capable chips); the list of processors in
++	the right-hand clause are the cores that benefit from this optimization.
++
++	Say Y if you have Intel CPU newer than Pentium Pro, N otherwise.
+ 
+ config X86_TSC
+ 	def_bool y
+-	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
++	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MNATIVE || MATOM) || X86_64
+ 
+ config X86_CMPXCHG64
+ 	def_bool y
+-	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM
++	depends on X86_PAE || X86_64 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM || MNATIVE
+ 
+ # this should be set for all -march=.. options where the compiler
+ # generates cmov.
+ config X86_CMOV
+ 	def_bool y
+-	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
++	depends on (MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MJAGUAR || MK7 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MNATIVE || MATOM || MGEODE_LX)
+ 
+ config X86_MINIMUM_CPU_FAMILY
+ 	int
+--- a/arch/x86/Makefile	2017-08-02 11:41:47.443200463 -0400
++++ b/arch/x86/Makefile	2017-08-02 12:14:46.373727353 -0400
+@@ -121,13 +121,40 @@ else
+ 	KBUILD_CFLAGS += $(call cc-option,-mskip-rax-setup)
+ 
+         # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
++        cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
+         cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
++        cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3,-mtune=k8)
++        cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10)
++        cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona)
++        cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1)
++        cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2)
++        cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1)
++        cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2)
++        cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-march=bdver3)
++        cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-march=bdver4)
++        cflags-$(CONFIG_MZEN) += $(call cc-option,-march=znver1)
+         cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
+ 
+         cflags-$(CONFIG_MCORE2) += \
+-                $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
+-	cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
+-		$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
++                $(call cc-option,-march=core2,$(call cc-option,-mtune=core2))
++        cflags-$(CONFIG_MNEHALEM) += \
++                $(call cc-option,-march=nehalem,$(call cc-option,-mtune=nehalem))
++        cflags-$(CONFIG_MWESTMERE) += \
++                $(call cc-option,-march=westmere,$(call cc-option,-mtune=westmere))
++        cflags-$(CONFIG_MSILVERMONT) += \
++                $(call cc-option,-march=silvermont,$(call cc-option,-mtune=silvermont))
++        cflags-$(CONFIG_MSANDYBRIDGE) += \
++                $(call cc-option,-march=sandybridge,$(call cc-option,-mtune=sandybridge))
++        cflags-$(CONFIG_MIVYBRIDGE) += \
++                $(call cc-option,-march=ivybridge,$(call cc-option,-mtune=ivybridge))
++        cflags-$(CONFIG_MHASWELL) += \
++                $(call cc-option,-march=haswell,$(call cc-option,-mtune=haswell))
++        cflags-$(CONFIG_MBROADWELL) += \
++                $(call cc-option,-march=broadwell,$(call cc-option,-mtune=broadwell))
++        cflags-$(CONFIG_MSKYLAKE) += \
++                $(call cc-option,-march=skylake,$(call cc-option,-mtune=skylake))
++        cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell) \
++                $(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic))
+         cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
+         KBUILD_CFLAGS += $(cflags-y)
+ 
+--- a/arch/x86/Makefile_32.cpu	2017-08-02 11:41:47.444200464 -0400
++++ b/arch/x86/Makefile_32.cpu	2017-08-02 12:23:41.636760695 -0400
+@@ -22,7 +22,18 @@ cflags-$(CONFIG_MK6)		+= -march=k6
+ # Please note, that patches that add -march=athlon-xp and friends are pointless.
+ # They make zero difference whatsosever to performance at this time.
+ cflags-$(CONFIG_MK7)		+= -march=athlon
++cflags-$(CONFIG_MNATIVE) 	+= $(call cc-option,-march=native)
+ cflags-$(CONFIG_MK8)		+= $(call cc-option,-march=k8,-march=athlon)
++cflags-$(CONFIG_MK8SSE3) 	+= $(call cc-option,-march=k8-sse3,-march=athlon)
++cflags-$(CONFIG_MK10) 		+= $(call cc-option,-march=amdfam10,-march=athlon)
++cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona,-march=athlon)
++cflags-$(CONFIG_MBOBCAT) 	+= $(call cc-option,-march=btver1,-march=athlon)
++cflags-$(CONFIG_MJAGUAR) 	+= $(call cc-option,-march=btver2,-march=athlon)
++cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1,-march=athlon)
++cflags-$(CONFIG_MPILEDRIVER) 	+= $(call cc-option,-march=bdver2,-march=athlon)
++cflags-$(CONFIG_MSTEAMROLLER) 	+= $(call cc-option,-march=bdver3,-march=athlon)
++cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-march=bdver4,-march=athlon)
++cflags-$(CONFIG_MZEN) 		+= $(call cc-option,-march=znver1,-march=athlon)
+ cflags-$(CONFIG_MCRUSOE)	+= -march=i686 -falign-functions=0 -falign-jumps=0 -falign-loops=0
+ cflags-$(CONFIG_MEFFICEON)	+= -march=i686 $(call tune,pentium3) -falign-functions=0 -falign-jumps=0 -falign-loops=0
+ cflags-$(CONFIG_MWINCHIPC6)	+= $(call cc-option,-march=winchip-c6,-march=i586)
+@@ -31,9 +42,12 @@ cflags-$(CONFIG_MCYRIXIII)	+= $(call cc-
+ cflags-$(CONFIG_MVIAC3_2)	+= $(call cc-option,-march=c3-2,-march=i686)
+ cflags-$(CONFIG_MVIAC7)		+= -march=i686
+ cflags-$(CONFIG_MCORE2)		+= -march=i686 $(call tune,core2)
+-cflags-$(CONFIG_MATOM)		+= $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \
+-	$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
+-
++cflags-$(CONFIG_MNEHALEM) 	+= -march=i686 $(call tune,nehalem)
++cflags-$(CONFIG_MWESTMERE) 	+= -march=i686 $(call tune,westmere)
++cflags-$(CONFIG_MSILVERMONT) += -march=i686 $(call tune,silvermont)
++cflags-$(CONFIG_MSANDYBRIDGE) += -march=i686 $(call tune,sandybridge)
++cflags-$(CONFIG_MIVYBRIDGE) += -march=i686 $(call tune,ivybridge)
++cflags-$(CONFIG_MHASWELL) 	+= -march=i686 $(call tune,haswell)
+ # AMD Elan support
+ cflags-$(CONFIG_MELAN)		+= -march=i486
+ 


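On choosing among the new Processor family entries in the 5010 patch: gcc's
runtime CPU detection can report which family matches the build host. A
minimal sketch using the x86 built-ins __builtin_cpu_init() and
__builtin_cpu_is() (the arch strings are the ones gcc documents for
__builtin_cpu_is(); "znver1" needs gcc 6+; the CONFIG_* names are simply the
correspondence taken from the Kconfig.cpu hunk above):

#include <stdio.h>

/* Print the matching Kconfig option for each arch gcc recognizes here. */
#define CHECK(arch, kconfig)						\
	do {								\
		if (__builtin_cpu_is(arch))				\
			printf("%s -> %s\n", arch, kconfig);		\
	} while (0)

int main(void)
{
	__builtin_cpu_init();	/* populate gcc's CPU model info */
	CHECK("nehalem",     "CONFIG_MNEHALEM");
	CHECK("westmere",    "CONFIG_MWESTMERE");
	CHECK("silvermont",  "CONFIG_MSILVERMONT");
	CHECK("sandybridge", "CONFIG_MSANDYBRIDGE");
	CHECK("ivybridge",   "CONFIG_MIVYBRIDGE");
	CHECK("haswell",     "CONFIG_MHASWELL");
	CHECK("broadwell",   "CONFIG_MBROADWELL");
	CHECK("skylake",     "CONFIG_MSKYLAKE");
	CHECK("btver1",      "CONFIG_MBOBCAT");
	CHECK("btver2",      "CONFIG_MJAGUAR");
	CHECK("bdver1",      "CONFIG_MBULLDOZER");
	CHECK("bdver2",      "CONFIG_MPILEDRIVER");
	CHECK("bdver3",      "CONFIG_MSTEAMROLLER");
	CHECK("bdver4",      "CONFIG_MEXCAVATOR");
	CHECK("znver1",      "CONFIG_MZEN");
	return 0;
}

Alternatively, "gcc -march=native -Q --help=target | grep march" shows the
march value that 'native' resolves to on the local machine.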