public inbox for gentoo-commits@lists.gentoo.org
 help / color / mirror / Atom feed
* [gentoo-commits] proj/hardened-patchset:master commit in: 4.9.11/, 4.9.10/
@ 2017-02-19 23:07 Anthony G. Basile
  0 siblings, 0 replies; only message in thread
From: Anthony G. Basile @ 2017-02-19 23:07 UTC (permalink / raw)
  To: gentoo-commits

commit:     6e301f34686a1ec00db06b41657207a49403ea2e
Author:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Sun Feb 19 23:06:52 2017 +0000
Commit:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Sun Feb 19 23:06:52 2017 +0000
URL:        https://gitweb.gentoo.org/proj/hardened-patchset.git/commit/?id=6e301f34

grsecurity-3.1-4.9.11-201702181444

 {4.9.10 => 4.9.11}/0000_README                     |    6 +-
 {4.9.10 => 4.9.11}/1009_linux-4.9.10.patch         |    0
 4.9.11/1010_linux-4.9.11.patch                     | 1893 ++++++++++++++++++++
 .../4420_grsecurity-3.1-4.9.11-201702181444.patch  |  143 +-
 {4.9.10 => 4.9.11}/4425_grsec_remove_EI_PAX.patch  |    0
 .../4426_default_XATTR_PAX_FLAGS.patch             |    0
 .../4427_force_XATTR_PAX_tmpfs.patch               |    0
 .../4430_grsec-remove-localversion-grsec.patch     |    0
 {4.9.10 => 4.9.11}/4435_grsec-mute-warnings.patch  |    0
 .../4440_grsec-remove-protected-paths.patch        |    0
 .../4450_grsec-kconfig-default-gids.patch          |    0
 .../4465_selinux-avc_audit-log-curr_ip.patch       |    0
 {4.9.10 => 4.9.11}/4470_disable-compat_vdso.patch  |    0
 {4.9.10 => 4.9.11}/4475_emutramp_default_on.patch  |    0
 14 files changed, 1970 insertions(+), 72 deletions(-)

diff --git a/4.9.10/0000_README b/4.9.11/0000_README
similarity index 93%
rename from 4.9.10/0000_README
rename to 4.9.11/0000_README
index 3bc6d08..27a4c3e 100644
--- a/4.9.10/0000_README
+++ b/4.9.11/0000_README
@@ -6,7 +6,11 @@ Patch:	1009_linux-4.9.10.patch
 From:	http://www.kernel.org
 Desc:	Linux 4.9.10
 
-Patch:	4420_grsecurity-3.1-4.9.10-201702162016.patch
+Patch:	1010_linux-4.9.11.patch
+From:	http://www.kernel.org
+Desc:	Linux 4.9.11
+
+Patch:	4420_grsecurity-3.1-4.9.11-201702181444.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
 

diff --git a/4.9.10/1009_linux-4.9.10.patch b/4.9.11/1009_linux-4.9.10.patch
similarity index 100%
rename from 4.9.10/1009_linux-4.9.10.patch
rename to 4.9.11/1009_linux-4.9.10.patch

diff --git a/4.9.11/1010_linux-4.9.11.patch b/4.9.11/1010_linux-4.9.11.patch
new file mode 100644
index 0000000..59eb5c7
--- /dev/null
+++ b/4.9.11/1010_linux-4.9.11.patch
@@ -0,0 +1,1893 @@
+diff --git a/Makefile b/Makefile
+index d2fe757..18b0c5a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 10
++SUBLEVEL = 11
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+ 
+diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
+index ebb4e95..96d80df 100644
+--- a/arch/x86/kernel/fpu/core.c
++++ b/arch/x86/kernel/fpu/core.c
+@@ -236,7 +236,8 @@ void fpstate_init(union fpregs_state *state)
+ 	 * it will #GP. Make sure it is replaced after the memset().
+ 	 */
+ 	if (static_cpu_has(X86_FEATURE_XSAVES))
+-		state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT;
++		state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT |
++					       xfeatures_mask;
+ 
+ 	if (static_cpu_has(X86_FEATURE_FXSR))
+ 		fpstate_init_fxstate(&state->fxsave);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+index f2e8bed..4d3ddc2 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -507,8 +507,11 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
+ 		return;
+ 
+ 	for (ring = 0; ring < priv->rx_ring_num; ring++) {
+-		if (mlx4_en_is_ring_empty(priv->rx_ring[ring]))
++		if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) {
++			local_bh_disable();
+ 			napi_reschedule(&priv->rx_cq[ring]->napi);
++			local_bh_enable();
++		}
+ 	}
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+index 71382df..81d8e3b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+@@ -765,7 +765,8 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
+ int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);
+ 
+ int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
+-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
++void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
++				    enum mlx5e_traffic_types tt);
+ 
+ int mlx5e_open_locked(struct net_device *netdev);
+ int mlx5e_close_locked(struct net_device *netdev);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index 51c6a57..126cfeb 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -975,15 +975,18 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ 
+ static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
+ {
+-	struct mlx5_core_dev *mdev = priv->mdev;
+ 	void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
+-	int i;
++	struct mlx5_core_dev *mdev = priv->mdev;
++	int ctxlen = MLX5_ST_SZ_BYTES(tirc);
++	int tt;
+ 
+ 	MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
+-	mlx5e_build_tir_ctx_hash(tirc, priv);
+ 
+-	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
+-		mlx5_core_modify_tir(mdev, priv->indir_tir[i].tirn, in, inlen);
++	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
++		memset(tirc, 0, ctxlen);
++		mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
++		mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
++	}
+ }
+ 
+ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 5dc3e24..b3067137 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -1978,8 +1978,23 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
+ 	MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
+ }
+ 
+-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
++void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
++				    enum mlx5e_traffic_types tt)
+ {
++	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
++
++#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
++				 MLX5_HASH_FIELD_SEL_DST_IP)
++
++#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
++				 MLX5_HASH_FIELD_SEL_DST_IP   |\
++				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
++				 MLX5_HASH_FIELD_SEL_L4_DPORT)
++
++#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
++				 MLX5_HASH_FIELD_SEL_DST_IP   |\
++				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
++
+ 	MLX5_SET(tirc, tirc, rx_hash_fn,
+ 		 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
+ 	if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
+@@ -1991,6 +2006,88 @@ void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
+ 		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+ 		memcpy(rss_key, priv->params.toeplitz_hash_key, len);
+ 	}
++
++	switch (tt) {
++	case MLX5E_TT_IPV4_TCP:
++		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
++			 MLX5_L3_PROT_TYPE_IPV4);
++		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
++			 MLX5_L4_PROT_TYPE_TCP);
++		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
++			 MLX5_HASH_IP_L4PORTS);
++		break;
++
++	case MLX5E_TT_IPV6_TCP:
++		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
++			 MLX5_L3_PROT_TYPE_IPV6);
++		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
++			 MLX5_L4_PROT_TYPE_TCP);
++		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
++			 MLX5_HASH_IP_L4PORTS);
++		break;
++
++	case MLX5E_TT_IPV4_UDP:
++		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
++			 MLX5_L3_PROT_TYPE_IPV4);
++		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
++			 MLX5_L4_PROT_TYPE_UDP);
++		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
++			 MLX5_HASH_IP_L4PORTS);
++		break;
++
++	case MLX5E_TT_IPV6_UDP:
++		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
++			 MLX5_L3_PROT_TYPE_IPV6);
++		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
++			 MLX5_L4_PROT_TYPE_UDP);
++		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
++			 MLX5_HASH_IP_L4PORTS);
++		break;
++
++	case MLX5E_TT_IPV4_IPSEC_AH:
++		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
++			 MLX5_L3_PROT_TYPE_IPV4);
++		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
++			 MLX5_HASH_IP_IPSEC_SPI);
++		break;
++
++	case MLX5E_TT_IPV6_IPSEC_AH:
++		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
++			 MLX5_L3_PROT_TYPE_IPV6);
++		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
++			 MLX5_HASH_IP_IPSEC_SPI);
++		break;
++
++	case MLX5E_TT_IPV4_IPSEC_ESP:
++		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
++			 MLX5_L3_PROT_TYPE_IPV4);
++		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
++			 MLX5_HASH_IP_IPSEC_SPI);
++		break;
++
++	case MLX5E_TT_IPV6_IPSEC_ESP:
++		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
++			 MLX5_L3_PROT_TYPE_IPV6);
++		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
++			 MLX5_HASH_IP_IPSEC_SPI);
++		break;
++
++	case MLX5E_TT_IPV4:
++		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
++			 MLX5_L3_PROT_TYPE_IPV4);
++		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
++			 MLX5_HASH_IP);
++		break;
++
++	case MLX5E_TT_IPV6:
++		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
++			 MLX5_L3_PROT_TYPE_IPV6);
++		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
++			 MLX5_HASH_IP);
++		break;
++	default:
++		WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
++	}
+ }
+ 
+ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
+@@ -2360,110 +2457,13 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
+ static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
+ 				      enum mlx5e_traffic_types tt)
+ {
+-	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+-
+ 	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
+ 
+-#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+-				 MLX5_HASH_FIELD_SEL_DST_IP)
+-
+-#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+-				 MLX5_HASH_FIELD_SEL_DST_IP   |\
+-				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
+-				 MLX5_HASH_FIELD_SEL_L4_DPORT)
+-
+-#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+-				 MLX5_HASH_FIELD_SEL_DST_IP   |\
+-				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
+-
+ 	mlx5e_build_tir_ctx_lro(tirc, priv);
+ 
+ 	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
+ 	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
+-	mlx5e_build_tir_ctx_hash(tirc, priv);
+-
+-	switch (tt) {
+-	case MLX5E_TT_IPV4_TCP:
+-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+-			 MLX5_L3_PROT_TYPE_IPV4);
+-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+-			 MLX5_L4_PROT_TYPE_TCP);
+-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+-			 MLX5_HASH_IP_L4PORTS);
+-		break;
+-
+-	case MLX5E_TT_IPV6_TCP:
+-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+-			 MLX5_L3_PROT_TYPE_IPV6);
+-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+-			 MLX5_L4_PROT_TYPE_TCP);
+-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+-			 MLX5_HASH_IP_L4PORTS);
+-		break;
+-
+-	case MLX5E_TT_IPV4_UDP:
+-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+-			 MLX5_L3_PROT_TYPE_IPV4);
+-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+-			 MLX5_L4_PROT_TYPE_UDP);
+-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+-			 MLX5_HASH_IP_L4PORTS);
+-		break;
+-
+-	case MLX5E_TT_IPV6_UDP:
+-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+-			 MLX5_L3_PROT_TYPE_IPV6);
+-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+-			 MLX5_L4_PROT_TYPE_UDP);
+-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+-			 MLX5_HASH_IP_L4PORTS);
+-		break;
+-
+-	case MLX5E_TT_IPV4_IPSEC_AH:
+-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+-			 MLX5_L3_PROT_TYPE_IPV4);
+-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+-			 MLX5_HASH_IP_IPSEC_SPI);
+-		break;
+-
+-	case MLX5E_TT_IPV6_IPSEC_AH:
+-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+-			 MLX5_L3_PROT_TYPE_IPV6);
+-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+-			 MLX5_HASH_IP_IPSEC_SPI);
+-		break;
+-
+-	case MLX5E_TT_IPV4_IPSEC_ESP:
+-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+-			 MLX5_L3_PROT_TYPE_IPV4);
+-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+-			 MLX5_HASH_IP_IPSEC_SPI);
+-		break;
+-
+-	case MLX5E_TT_IPV6_IPSEC_ESP:
+-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+-			 MLX5_L3_PROT_TYPE_IPV6);
+-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+-			 MLX5_HASH_IP_IPSEC_SPI);
+-		break;
+-
+-	case MLX5E_TT_IPV4:
+-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+-			 MLX5_L3_PROT_TYPE_IPV4);
+-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+-			 MLX5_HASH_IP);
+-		break;
+-
+-	case MLX5E_TT_IPV6:
+-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+-			 MLX5_L3_PROT_TYPE_IPV6);
+-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+-			 MLX5_HASH_IP);
+-		break;
+-	default:
+-		WARN_ONCE(true,
+-			  "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
+-	}
++	mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
+ }
+ 
+ static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index 914e546..7e20e4b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -1110,9 +1110,8 @@ static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg,
+ 				return rule;
+ 			}
+ 			rule = add_rule_fte(fte, fg, dest);
+-			unlock_ref_node(&fte->node);
+ 			if (IS_ERR(rule))
+-				goto unlock_fg;
++				goto unlock_fte;
+ 			else
+ 				goto add_rule;
+ 		}
+@@ -1130,6 +1129,7 @@ static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg,
+ 		goto unlock_fg;
+ 	}
+ 	tree_init_node(&fte->node, 0, del_fte);
++	nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
+ 	rule = add_rule_fte(fte, fg, dest);
+ 	if (IS_ERR(rule)) {
+ 		kfree(fte);
+@@ -1142,6 +1142,8 @@ static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg,
+ 	list_add(&fte->node.list, prev);
+ add_rule:
+ 	tree_add_node(&rule->node, &fte->node);
++unlock_fte:
++	unlock_ref_node(&fte->node);
+ unlock_fg:
+ 	unlock_ref_node(&fg->node);
+ 	return rule;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+index 7df4ff1..7d19029 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+@@ -305,8 +305,12 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
+ {
+ 	void __iomem *ioaddr = hw->pcsr;
+ 	u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
++	u32 intr_mask = readl(ioaddr + GMAC_INT_MASK);
+ 	int ret = 0;
+ 
++	/* Discard masked bits */
++	intr_status &= ~intr_mask;
++
+ 	/* Not used events (e.g. MMC interrupts) are not handled. */
+ 	if ((intr_status & GMAC_INT_STATUS_MMCTIS))
+ 		x->mmc_tx_irq_n++;
+diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
+index 6255973..1b65f0f 100644
+--- a/drivers/net/loopback.c
++++ b/drivers/net/loopback.c
+@@ -164,6 +164,7 @@ static void loopback_setup(struct net_device *dev)
+ {
+ 	dev->mtu		= 64 * 1024;
+ 	dev->hard_header_len	= ETH_HLEN;	/* 14	*/
++	dev->min_header_len	= ETH_HLEN;	/* 14	*/
+ 	dev->addr_len		= ETH_ALEN;	/* 6	*/
+ 	dev->type		= ARPHRD_LOOPBACK;	/* 0x0001*/
+ 	dev->flags		= IFF_LOOPBACK;
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 6f38daf..adea6f5 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -682,7 +682,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ 	ssize_t n;
+ 
+ 	if (q->flags & IFF_VNET_HDR) {
+-		vnet_hdr_len = q->vnet_hdr_sz;
++		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
+ 
+ 		err = -EINVAL;
+ 		if (len < vnet_hdr_len)
+@@ -822,7 +822,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
+ 
+ 	if (q->flags & IFF_VNET_HDR) {
+ 		struct virtio_net_hdr vnet_hdr;
+-		vnet_hdr_len = q->vnet_hdr_sz;
++		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
+ 		if (iov_iter_count(iter) < vnet_hdr_len)
+ 			return -EINVAL;
+ 
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 18402d7..b31aca8 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1187,9 +1187,11 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ 	}
+ 
+ 	if (tun->flags & IFF_VNET_HDR) {
+-		if (len < tun->vnet_hdr_sz)
++		int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
++
++		if (len < vnet_hdr_sz)
+ 			return -EINVAL;
+-		len -= tun->vnet_hdr_sz;
++		len -= vnet_hdr_sz;
+ 
+ 		n = copy_from_iter(&gso, sizeof(gso), from);
+ 		if (n != sizeof(gso))
+@@ -1201,7 +1203,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ 
+ 		if (tun16_to_cpu(tun, gso.hdr_len) > len)
+ 			return -EINVAL;
+-		iov_iter_advance(from, tun->vnet_hdr_sz - sizeof(gso));
++		iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
+ 	}
+ 
+ 	if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
+@@ -1348,7 +1350,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
+ 		vlan_hlen = VLAN_HLEN;
+ 
+ 	if (tun->flags & IFF_VNET_HDR)
+-		vnet_hdr_sz = tun->vnet_hdr_sz;
++		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
+ 
+ 	total = skb->len + vlan_hlen + vnet_hdr_sz;
+ 
+diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
+index d9ca05d..4086415 100644
+--- a/drivers/net/usb/catc.c
++++ b/drivers/net/usb/catc.c
+@@ -777,7 +777,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
+ 	struct net_device *netdev;
+ 	struct catc *catc;
+ 	u8 broadcast[ETH_ALEN];
+-	int i, pktsz;
++	int pktsz, ret;
+ 
+ 	if (usb_set_interface(usbdev,
+ 			intf->altsetting->desc.bInterfaceNumber, 1)) {
+@@ -812,12 +812,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
+ 	if ((!catc->ctrl_urb) || (!catc->tx_urb) || 
+ 	    (!catc->rx_urb) || (!catc->irq_urb)) {
+ 		dev_err(&intf->dev, "No free urbs available.\n");
+-		usb_free_urb(catc->ctrl_urb);
+-		usb_free_urb(catc->tx_urb);
+-		usb_free_urb(catc->rx_urb);
+-		usb_free_urb(catc->irq_urb);
+-		free_netdev(netdev);
+-		return -ENOMEM;
++		ret = -ENOMEM;
++		goto fail_free;
+ 	}
+ 
+ 	/* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */
+@@ -845,15 +841,24 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
+                 catc->irq_buf, 2, catc_irq_done, catc, 1);
+ 
+ 	if (!catc->is_f5u011) {
++		u32 *buf;
++		int i;
++
+ 		dev_dbg(dev, "Checking memory size\n");
+ 
+-		i = 0x12345678;
+-		catc_write_mem(catc, 0x7a80, &i, 4);
+-		i = 0x87654321;	
+-		catc_write_mem(catc, 0xfa80, &i, 4);
+-		catc_read_mem(catc, 0x7a80, &i, 4);
++		buf = kmalloc(4, GFP_KERNEL);
++		if (!buf) {
++			ret = -ENOMEM;
++			goto fail_free;
++		}
++
++		*buf = 0x12345678;
++		catc_write_mem(catc, 0x7a80, buf, 4);
++		*buf = 0x87654321;
++		catc_write_mem(catc, 0xfa80, buf, 4);
++		catc_read_mem(catc, 0x7a80, buf, 4);
+ 	  
+-		switch (i) {
++		switch (*buf) {
+ 		case 0x12345678:
+ 			catc_set_reg(catc, TxBufCount, 8);
+ 			catc_set_reg(catc, RxBufCount, 32);
+@@ -868,6 +873,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
+ 			dev_dbg(dev, "32k Memory\n");
+ 			break;
+ 		}
++
++		kfree(buf);
+ 	  
+ 		dev_dbg(dev, "Getting MAC from SEEROM.\n");
+ 	  
+@@ -914,16 +921,21 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
+ 	usb_set_intfdata(intf, catc);
+ 
+ 	SET_NETDEV_DEV(netdev, &intf->dev);
+-	if (register_netdev(netdev) != 0) {
+-		usb_set_intfdata(intf, NULL);
+-		usb_free_urb(catc->ctrl_urb);
+-		usb_free_urb(catc->tx_urb);
+-		usb_free_urb(catc->rx_urb);
+-		usb_free_urb(catc->irq_urb);
+-		free_netdev(netdev);
+-		return -EIO;
+-	}
++	ret = register_netdev(netdev);
++	if (ret)
++		goto fail_clear_intfdata;
++
+ 	return 0;
++
++fail_clear_intfdata:
++	usb_set_intfdata(intf, NULL);
++fail_free:
++	usb_free_urb(catc->ctrl_urb);
++	usb_free_urb(catc->tx_urb);
++	usb_free_urb(catc->rx_urb);
++	usb_free_urb(catc->irq_urb);
++	free_netdev(netdev);
++	return ret;
+ }
+ 
+ static void catc_disconnect(struct usb_interface *intf)
+diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
+index 1434e5d..ee40ac2 100644
+--- a/drivers/net/usb/pegasus.c
++++ b/drivers/net/usb/pegasus.c
+@@ -126,40 +126,61 @@ static void async_ctrl_callback(struct urb *urb)
+ 
+ static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
+ {
++	u8 *buf;
+ 	int ret;
+ 
++	buf = kmalloc(size, GFP_NOIO);
++	if (!buf)
++		return -ENOMEM;
++
+ 	ret = usb_control_msg(pegasus->usb, usb_rcvctrlpipe(pegasus->usb, 0),
+ 			      PEGASUS_REQ_GET_REGS, PEGASUS_REQT_READ, 0,
+-			      indx, data, size, 1000);
++			      indx, buf, size, 1000);
+ 	if (ret < 0)
+ 		netif_dbg(pegasus, drv, pegasus->net,
+ 			  "%s returned %d\n", __func__, ret);
++	else if (ret <= size)
++		memcpy(data, buf, ret);
++	kfree(buf);
+ 	return ret;
+ }
+ 
+-static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
++static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
++			 const void *data)
+ {
++	u8 *buf;
+ 	int ret;
+ 
++	buf = kmemdup(data, size, GFP_NOIO);
++	if (!buf)
++		return -ENOMEM;
++
+ 	ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
+ 			      PEGASUS_REQ_SET_REGS, PEGASUS_REQT_WRITE, 0,
+-			      indx, data, size, 100);
++			      indx, buf, size, 100);
+ 	if (ret < 0)
+ 		netif_dbg(pegasus, drv, pegasus->net,
+ 			  "%s returned %d\n", __func__, ret);
++	kfree(buf);
+ 	return ret;
+ }
+ 
+ static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data)
+ {
++	u8 *buf;
+ 	int ret;
+ 
++	buf = kmemdup(&data, 1, GFP_NOIO);
++	if (!buf)
++		return -ENOMEM;
++
+ 	ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
+ 			      PEGASUS_REQ_SET_REG, PEGASUS_REQT_WRITE, data,
+-			      indx, &data, 1, 1000);
++			      indx, buf, 1, 1000);
+ 	if (ret < 0)
+ 		netif_dbg(pegasus, drv, pegasus->net,
+ 			  "%s returned %d\n", __func__, ret);
++	kfree(buf);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
+index 7c72bfa..dc4f7ea 100644
+--- a/drivers/net/usb/rtl8150.c
++++ b/drivers/net/usb/rtl8150.c
+@@ -155,16 +155,36 @@ static const char driver_name [] = "rtl8150";
+ */
+ static int get_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
+ {
+-	return usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
+-			       RTL8150_REQ_GET_REGS, RTL8150_REQT_READ,
+-			       indx, 0, data, size, 500);
++	void *buf;
++	int ret;
++
++	buf = kmalloc(size, GFP_NOIO);
++	if (!buf)
++		return -ENOMEM;
++
++	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
++			      RTL8150_REQ_GET_REGS, RTL8150_REQT_READ,
++			      indx, 0, buf, size, 500);
++	if (ret > 0 && ret <= size)
++		memcpy(data, buf, ret);
++	kfree(buf);
++	return ret;
+ }
+ 
+-static int set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
++static int set_registers(rtl8150_t * dev, u16 indx, u16 size, const void *data)
+ {
+-	return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+-			       RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE,
+-			       indx, 0, data, size, 500);
++	void *buf;
++	int ret;
++
++	buf = kmemdup(data, size, GFP_NOIO);
++	if (!buf)
++		return -ENOMEM;
++
++	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
++			      RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE,
++			      indx, 0, buf, size, 500);
++	kfree(buf);
++	return ret;
+ }
+ 
+ static void async_set_reg_cb(struct urb *urb)
+diff --git a/include/linux/can/core.h b/include/linux/can/core.h
+index a087500..df08a41 100644
+--- a/include/linux/can/core.h
++++ b/include/linux/can/core.h
+@@ -45,10 +45,9 @@ struct can_proto {
+ extern int  can_proto_register(const struct can_proto *cp);
+ extern void can_proto_unregister(const struct can_proto *cp);
+ 
+-extern int  can_rx_register(struct net_device *dev, canid_t can_id,
+-			    canid_t mask,
+-			    void (*func)(struct sk_buff *, void *),
+-			    void *data, char *ident);
++int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
++		    void (*func)(struct sk_buff *, void *),
++		    void *data, char *ident, struct sock *sk);
+ 
+ extern void can_rx_unregister(struct net_device *dev, canid_t can_id,
+ 			      canid_t mask,
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index d83590e..bb9b102 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1508,6 +1508,7 @@ enum netdev_priv_flags {
+  *	@mtu:		Interface MTU value
+  *	@type:		Interface hardware type
+  *	@hard_header_len: Maximum hardware header length.
++ *	@min_header_len:  Minimum hardware header length
+  *
+  *	@needed_headroom: Extra headroom the hardware may need, but not in all
+  *			  cases can this be guaranteed
+@@ -1728,6 +1729,7 @@ struct net_device {
+ 	unsigned int		mtu;
+ 	unsigned short		type;
+ 	unsigned short		hard_header_len;
++	unsigned short		min_header_len;
+ 
+ 	unsigned short		needed_headroom;
+ 	unsigned short		needed_tailroom;
+@@ -2783,6 +2785,8 @@ static inline bool dev_validate_header(const struct net_device *dev,
+ {
+ 	if (likely(len >= dev->hard_header_len))
+ 		return true;
++	if (len < dev->min_header_len)
++		return false;
+ 
+ 	if (capable(CAP_SYS_RAWIO)) {
+ 		memset(ll_header + len, 0, dev->hard_header_len - len);
+diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
+index 3ebb168..a34b141 100644
+--- a/include/net/cipso_ipv4.h
++++ b/include/net/cipso_ipv4.h
+@@ -309,6 +309,10 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
+ 	}
+ 
+ 	for (opt_iter = 6; opt_iter < opt_len;) {
++		if (opt_iter + 1 == opt_len) {
++			err_offset = opt_iter;
++			goto out;
++		}
+ 		tag_len = opt[opt_iter + 1];
+ 		if ((tag_len == 0) || (tag_len > (opt_len - opt_iter))) {
+ 			err_offset = opt_iter + 1;
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index f11ca83..7f15f95 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -871,7 +871,7 @@ int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
+  *	upper-layer output functions
+  */
+ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
+-	     struct ipv6_txoptions *opt, int tclass);
++	     __u32 mark, struct ipv6_txoptions *opt, int tclass);
+ 
+ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
+ 
+diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
+index fc7c0db..3f40132 100644
+--- a/include/net/lwtunnel.h
++++ b/include/net/lwtunnel.h
+@@ -176,7 +176,10 @@ static inline int lwtunnel_valid_encap_type(u16 encap_type)
+ }
+ static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len)
+ {
+-	return -EOPNOTSUPP;
++	/* return 0 since we are not walking attr looking for
++	 * RTA_ENCAP_TYPE attribute on nexthops.
++	 */
++	return 0;
+ }
+ 
+ static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+diff --git a/net/can/af_can.c b/net/can/af_can.c
+index 1108079..5488e4a 100644
+--- a/net/can/af_can.c
++++ b/net/can/af_can.c
+@@ -445,6 +445,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
+  * @func: callback function on filter match
+  * @data: returned parameter for callback function
+  * @ident: string for calling module identification
++ * @sk: socket pointer (might be NULL)
+  *
+  * Description:
+  *  Invokes the callback function with the received sk_buff and the given
+@@ -468,7 +469,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
+  */
+ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
+ 		    void (*func)(struct sk_buff *, void *), void *data,
+-		    char *ident)
++		    char *ident, struct sock *sk)
+ {
+ 	struct receiver *r;
+ 	struct hlist_head *rl;
+@@ -496,6 +497,7 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
+ 		r->func    = func;
+ 		r->data    = data;
+ 		r->ident   = ident;
++		r->sk      = sk;
+ 
+ 		hlist_add_head_rcu(&r->list, rl);
+ 		d->entries++;
+@@ -520,8 +522,11 @@ EXPORT_SYMBOL(can_rx_register);
+ static void can_rx_delete_receiver(struct rcu_head *rp)
+ {
+ 	struct receiver *r = container_of(rp, struct receiver, rcu);
++	struct sock *sk = r->sk;
+ 
+ 	kmem_cache_free(rcv_cache, r);
++	if (sk)
++		sock_put(sk);
+ }
+ 
+ /**
+@@ -596,8 +601,11 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
+ 	spin_unlock(&can_rcvlists_lock);
+ 
+ 	/* schedule the receiver item for deletion */
+-	if (r)
++	if (r) {
++		if (r->sk)
++			sock_hold(r->sk);
+ 		call_rcu(&r->rcu, can_rx_delete_receiver);
++	}
+ }
+ EXPORT_SYMBOL(can_rx_unregister);
+ 
+diff --git a/net/can/af_can.h b/net/can/af_can.h
+index fca0fe9..b86f512 100644
+--- a/net/can/af_can.h
++++ b/net/can/af_can.h
+@@ -50,13 +50,14 @@
+ 
+ struct receiver {
+ 	struct hlist_node list;
+-	struct rcu_head rcu;
+ 	canid_t can_id;
+ 	canid_t mask;
+ 	unsigned long matches;
+ 	void (*func)(struct sk_buff *, void *);
+ 	void *data;
+ 	char *ident;
++	struct sock *sk;
++	struct rcu_head rcu;
+ };
+ 
+ #define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index 5e9ed5e..e4f694d 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -1225,7 +1225,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
+ 				err = can_rx_register(dev, op->can_id,
+ 						      REGMASK(op->can_id),
+ 						      bcm_rx_handler, op,
+-						      "bcm");
++						      "bcm", sk);
+ 
+ 				op->rx_reg_dev = dev;
+ 				dev_put(dev);
+@@ -1234,7 +1234,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
+ 		} else
+ 			err = can_rx_register(NULL, op->can_id,
+ 					      REGMASK(op->can_id),
+-					      bcm_rx_handler, op, "bcm");
++					      bcm_rx_handler, op, "bcm", sk);
+ 		if (err) {
+ 			/* this bcm rx op is broken -> remove it */
+ 			list_del(&op->list);
+diff --git a/net/can/gw.c b/net/can/gw.c
+index 4551687..77c8af4 100644
+--- a/net/can/gw.c
++++ b/net/can/gw.c
+@@ -442,7 +442,7 @@ static inline int cgw_register_filter(struct cgw_job *gwj)
+ {
+ 	return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id,
+ 			       gwj->ccgw.filter.can_mask, can_can_gw_rcv,
+-			       gwj, "gw");
++			       gwj, "gw", NULL);
+ }
+ 
+ static inline void cgw_unregister_filter(struct cgw_job *gwj)
+diff --git a/net/can/raw.c b/net/can/raw.c
+index b075f02..6dc546a 100644
+--- a/net/can/raw.c
++++ b/net/can/raw.c
+@@ -190,7 +190,7 @@ static int raw_enable_filters(struct net_device *dev, struct sock *sk,
+ 	for (i = 0; i < count; i++) {
+ 		err = can_rx_register(dev, filter[i].can_id,
+ 				      filter[i].can_mask,
+-				      raw_rcv, sk, "raw");
++				      raw_rcv, sk, "raw", sk);
+ 		if (err) {
+ 			/* clean up successfully registered filters */
+ 			while (--i >= 0)
+@@ -211,7 +211,7 @@ static int raw_enable_errfilter(struct net_device *dev, struct sock *sk,
+ 
+ 	if (err_mask)
+ 		err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG,
+-				      raw_rcv, sk, "raw");
++				      raw_rcv, sk, "raw", sk);
+ 
+ 	return err;
+ }
+diff --git a/net/core/dev.c b/net/core/dev.c
+index df51c50..60b0a604 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1696,24 +1696,19 @@ EXPORT_SYMBOL_GPL(net_dec_egress_queue);
+ 
+ static struct static_key netstamp_needed __read_mostly;
+ #ifdef HAVE_JUMP_LABEL
+-/* We are not allowed to call static_key_slow_dec() from irq context
+- * If net_disable_timestamp() is called from irq context, defer the
+- * static_key_slow_dec() calls.
+- */
+ static atomic_t netstamp_needed_deferred;
+-#endif
+-
+-void net_enable_timestamp(void)
++static void netstamp_clear(struct work_struct *work)
+ {
+-#ifdef HAVE_JUMP_LABEL
+ 	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
+ 
+-	if (deferred) {
+-		while (--deferred)
+-			static_key_slow_dec(&netstamp_needed);
+-		return;
+-	}
++	while (deferred--)
++		static_key_slow_dec(&netstamp_needed);
++}
++static DECLARE_WORK(netstamp_work, netstamp_clear);
+ #endif
++
++void net_enable_timestamp(void)
++{
+ 	static_key_slow_inc(&netstamp_needed);
+ }
+ EXPORT_SYMBOL(net_enable_timestamp);
+@@ -1721,12 +1716,12 @@ EXPORT_SYMBOL(net_enable_timestamp);
+ void net_disable_timestamp(void)
+ {
+ #ifdef HAVE_JUMP_LABEL
+-	if (in_interrupt()) {
+-		atomic_inc(&netstamp_needed_deferred);
+-		return;
+-	}
+-#endif
++	/* net_disable_timestamp() can be called from non process context */
++	atomic_inc(&netstamp_needed_deferred);
++	schedule_work(&netstamp_work);
++#else
+ 	static_key_slow_dec(&netstamp_needed);
++#endif
+ }
+ EXPORT_SYMBOL(net_disable_timestamp);
+ 
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index 715e5d1..7506c03 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -227,7 +227,7 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
+ 		opt = ireq->ipv6_opt;
+ 		if (!opt)
+ 			opt = rcu_dereference(np->opt);
+-		err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
++		err = ip6_xmit(sk, skb, &fl6, sk->sk_mark, opt, np->tclass);
+ 		rcu_read_unlock();
+ 		err = net_xmit_eval(err);
+ 	}
+@@ -281,7 +281,7 @@ static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
+ 	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
+ 	if (!IS_ERR(dst)) {
+ 		skb_dst_set(skb, dst);
+-		ip6_xmit(ctl_sk, skb, &fl6, NULL, 0);
++		ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0);
+ 		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
+ 		DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
+ 		return;
+diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
+index da38621..0f99297 100644
+--- a/net/dsa/dsa2.c
++++ b/net/dsa/dsa2.c
+@@ -273,6 +273,7 @@ static int dsa_user_port_apply(struct device_node *port, u32 index,
+ 	if (err) {
+ 		dev_warn(ds->dev, "Failed to create slave %d: %d\n",
+ 			 index, err);
++		ds->ports[index].netdev = NULL;
+ 		return err;
+ 	}
+ 
+diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
+index 02acfff..24d7aff 100644
+--- a/net/ethernet/eth.c
++++ b/net/ethernet/eth.c
+@@ -356,6 +356,7 @@ void ether_setup(struct net_device *dev)
+ 	dev->header_ops		= &eth_header_ops;
+ 	dev->type		= ARPHRD_ETHER;
+ 	dev->hard_header_len 	= ETH_HLEN;
++	dev->min_header_len	= ETH_HLEN;
+ 	dev->mtu		= ETH_DATA_LEN;
+ 	dev->addr_len		= ETH_ALEN;
+ 	dev->tx_queue_len	= 1000;	/* Ethernet wants good queues */
+diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
+index 72d6f05..ae20616 100644
+--- a/net/ipv4/cipso_ipv4.c
++++ b/net/ipv4/cipso_ipv4.c
+@@ -1587,6 +1587,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
+ 				goto validate_return_locked;
+ 			}
+ 
++		if (opt_iter + 1 == opt_len) {
++			err_offset = opt_iter;
++			goto validate_return_locked;
++		}
+ 		tag_len = tag[1];
+ 		if (tag_len > (opt_len - opt_iter)) {
+ 			err_offset = opt_iter + 1;
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index 32a08bc..1bc623d 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -1172,6 +1172,7 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
+ 				psf->sf_crcount = im->crcount;
+ 		}
+ 		in_dev_put(pmc->interface);
++		kfree(pmc);
+ 	}
+ 	spin_unlock_bh(&im->lock);
+ }
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 877bdb0..e5c1dbe 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -1606,6 +1606,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
+ 	sk->sk_protocol = ip_hdr(skb)->protocol;
+ 	sk->sk_bound_dev_if = arg->bound_dev_if;
+ 	sk->sk_sndbuf = sysctl_wmem_default;
++	sk->sk_mark = fl4.flowi4_mark;
+ 	err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
+ 			     len, 0, &ipc, &rt, MSG_DONTWAIT);
+ 	if (unlikely(err)) {
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index f226f408..65336f3 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -1215,7 +1215,14 @@ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
+ 		pktinfo->ipi_ifindex = 0;
+ 		pktinfo->ipi_spec_dst.s_addr = 0;
+ 	}
+-	skb_dst_drop(skb);
++	/* We need to keep the dst for __ip_options_echo()
++	 * We could restrict the test to opt.ts_needtime || opt.srr,
++	 * but the following is good enough as IP options are not often used.
++	 */
++	if (unlikely(IPCB(skb)->opt.optlen))
++		skb_dst_force(skb);
++	else
++		skb_dst_drop(skb);
+ }
+ 
+ int ip_setsockopt(struct sock *sk, int level,
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 96b8e2b..105c074 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -642,6 +642,8 @@ static int ping_v4_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh,
+ {
+ 	struct sk_buff *skb = skb_peek(&sk->sk_write_queue);
+ 
++	if (!skb)
++		return 0;
+ 	pfh->wcheck = csum_partial((char *)&pfh->icmph,
+ 		sizeof(struct icmphdr), pfh->wcheck);
+ 	pfh->icmph.checksum = csum_fold(pfh->wcheck);
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 814af89..6a90a0e 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -772,6 +772,12 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
+ 				ret = -EAGAIN;
+ 				break;
+ 			}
++			/* if __tcp_splice_read() got nothing while we have
++			 * an skb in receive queue, we do not want to loop.
++			 * This might happen with URG data.
++			 */
++			if (!skb_queue_empty(&sk->sk_receive_queue))
++				break;
+ 			sk_wait_data(sk, &timeo, NULL);
+ 			if (signal_pending(current)) {
+ 				ret = sock_intr_errno(timeo);
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 896e9df..65d6189 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2436,9 +2436,11 @@ u32 __tcp_select_window(struct sock *sk)
+ 	int full_space = min_t(int, tp->window_clamp, allowed_space);
+ 	int window;
+ 
+-	if (mss > full_space)
++	if (unlikely(mss > full_space)) {
+ 		mss = full_space;
+-
++		if (mss <= 0)
++			return 0;
++	}
+ 	if (free_space < (full_space >> 1)) {
+ 		icsk->icsk_ack.quick = 0;
+ 
+diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
+index 532c3ef..798a095 100644
+--- a/net/ipv6/inet6_connection_sock.c
++++ b/net/ipv6/inet6_connection_sock.c
+@@ -173,7 +173,7 @@ int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused
+ 	/* Restore final destination back after routing done */
+ 	fl6.daddr = sk->sk_v6_daddr;
+ 
+-	res = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
++	res = ip6_xmit(sk, skb, &fl6, sk->sk_mark, rcu_dereference(np->opt),
+ 		       np->tclass);
+ 	rcu_read_unlock();
+ 	return res;
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index d7d6d3a..0a59220 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -367,35 +367,37 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
+ 
+ 
+ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+-		u8 type, u8 code, int offset, __be32 info)
++		       u8 type, u8 code, int offset, __be32 info)
+ {
+-	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
+-	__be16 *p = (__be16 *)(skb->data + offset);
+-	int grehlen = offset + 4;
++	const struct gre_base_hdr *greh;
++	const struct ipv6hdr *ipv6h;
++	int grehlen = sizeof(*greh);
+ 	struct ip6_tnl *t;
++	int key_off = 0;
+ 	__be16 flags;
++	__be32 key;
+ 
+-	flags = p[0];
+-	if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
+-		if (flags&(GRE_VERSION|GRE_ROUTING))
+-			return;
+-		if (flags&GRE_KEY) {
+-			grehlen += 4;
+-			if (flags&GRE_CSUM)
+-				grehlen += 4;
+-		}
++	if (!pskb_may_pull(skb, offset + grehlen))
++		return;
++	greh = (const struct gre_base_hdr *)(skb->data + offset);
++	flags = greh->flags;
++	if (flags & (GRE_VERSION | GRE_ROUTING))
++		return;
++	if (flags & GRE_CSUM)
++		grehlen += 4;
++	if (flags & GRE_KEY) {
++		key_off = grehlen + offset;
++		grehlen += 4;
+ 	}
+ 
+-	/* If only 8 bytes returned, keyed message will be dropped here */
+-	if (!pskb_may_pull(skb, grehlen))
++	if (!pskb_may_pull(skb, offset + grehlen))
+ 		return;
+ 	ipv6h = (const struct ipv6hdr *)skb->data;
+-	p = (__be16 *)(skb->data + offset);
++	greh = (const struct gre_base_hdr *)(skb->data + offset);
++	key = key_off ? *(__be32 *)(skb->data + key_off) : 0;
+ 
+ 	t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
+-				flags & GRE_KEY ?
+-				*(((__be32 *)p) + (grehlen / 4) - 1) : 0,
+-				p[1]);
++				 key, greh->protocol);
+ 	if (!t)
+ 		return;
+ 
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 59eb4ed..9a87bfb 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -163,7 +163,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+  * which are using proper atomic operations or spinlocks.
+  */
+ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
+-	     struct ipv6_txoptions *opt, int tclass)
++	     __u32 mark, struct ipv6_txoptions *opt, int tclass)
+ {
+ 	struct net *net = sock_net(sk);
+ 	const struct ipv6_pinfo *np = inet6_sk(sk);
+@@ -230,7 +230,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
+ 
+ 	skb->protocol = htons(ETH_P_IPV6);
+ 	skb->priority = sk->sk_priority;
+-	skb->mark = sk->sk_mark;
++	skb->mark = mark;
+ 
+ 	mtu = dst_mtu(dst);
+ 	if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index f95437f..f6ba452 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -400,18 +400,19 @@ ip6_tnl_dev_uninit(struct net_device *dev)
+ 
+ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
+ {
+-	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
+-	__u8 nexthdr = ipv6h->nexthdr;
+-	__u16 off = sizeof(*ipv6h);
++	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
++	unsigned int nhoff = raw - skb->data;
++	unsigned int off = nhoff + sizeof(*ipv6h);
++	u8 next, nexthdr = ipv6h->nexthdr;
+ 
+ 	while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
+-		__u16 optlen = 0;
+ 		struct ipv6_opt_hdr *hdr;
+-		if (raw + off + sizeof(*hdr) > skb->data &&
+-		    !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr)))
++		u16 optlen;
++
++		if (!pskb_may_pull(skb, off + sizeof(*hdr)))
+ 			break;
+ 
+-		hdr = (struct ipv6_opt_hdr *) (raw + off);
++		hdr = (struct ipv6_opt_hdr *)(skb->data + off);
+ 		if (nexthdr == NEXTHDR_FRAGMENT) {
+ 			struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
+ 			if (frag_hdr->frag_off)
+@@ -422,20 +423,29 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
+ 		} else {
+ 			optlen = ipv6_optlen(hdr);
+ 		}
++		/* cache hdr->nexthdr, since pskb_may_pull() might
++		 * invalidate hdr
++		 */
++		next = hdr->nexthdr;
+ 		if (nexthdr == NEXTHDR_DEST) {
+-			__u16 i = off + 2;
++			u16 i = 2;
++
++			/* Remember : hdr is no longer valid at this point. */
++			if (!pskb_may_pull(skb, off + optlen))
++				break;
++
+ 			while (1) {
+ 				struct ipv6_tlv_tnl_enc_lim *tel;
+ 
+ 				/* No more room for encapsulation limit */
+-				if (i + sizeof (*tel) > off + optlen)
++				if (i + sizeof(*tel) > optlen)
+ 					break;
+ 
+-				tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i];
++				tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
+ 				/* return index of option if found and valid */
+ 				if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
+ 				    tel->length == 1)
+-					return i;
++					return i + off - nhoff;
+ 				/* else jump to next option */
+ 				if (tel->type)
+ 					i += tel->length + 2;
+@@ -443,7 +453,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
+ 					i++;
+ 			}
+ 		}
+-		nexthdr = hdr->nexthdr;
++		nexthdr = next;
+ 		off += optlen;
+ 	}
+ 	return 0;
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index 14a3903..1bdc703 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -81,7 +81,7 @@ static void mld_gq_timer_expire(unsigned long data);
+ static void mld_ifc_timer_expire(unsigned long data);
+ static void mld_ifc_event(struct inet6_dev *idev);
+ static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
+-static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *addr);
++static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
+ static void mld_clear_delrec(struct inet6_dev *idev);
+ static bool mld_in_v1_mode(const struct inet6_dev *idev);
+ static int sf_setstate(struct ifmcaddr6 *pmc);
+@@ -692,9 +692,9 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc)
+ 			dev_mc_del(dev, buf);
+ 	}
+ 
+-	if (mc->mca_flags & MAF_NOREPORT)
+-		goto done;
+ 	spin_unlock_bh(&mc->mca_lock);
++	if (mc->mca_flags & MAF_NOREPORT)
++		return;
+ 
+ 	if (!mc->idev->dead)
+ 		igmp6_leave_group(mc);
+@@ -702,8 +702,6 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc)
+ 	spin_lock_bh(&mc->mca_lock);
+ 	if (del_timer(&mc->mca_timer))
+ 		atomic_dec(&mc->mca_refcnt);
+-done:
+-	ip6_mc_clear_src(mc);
+ 	spin_unlock_bh(&mc->mca_lock);
+ }
+ 
+@@ -748,10 +746,11 @@ static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
+ 	spin_unlock_bh(&idev->mc_lock);
+ }
+ 
+-static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca)
++static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
+ {
+ 	struct ifmcaddr6 *pmc, *pmc_prev;
+-	struct ip6_sf_list *psf, *psf_next;
++	struct ip6_sf_list *psf;
++	struct in6_addr *pmca = &im->mca_addr;
+ 
+ 	spin_lock_bh(&idev->mc_lock);
+ 	pmc_prev = NULL;
+@@ -768,14 +767,21 @@ static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca)
+ 	}
+ 	spin_unlock_bh(&idev->mc_lock);
+ 
++	spin_lock_bh(&im->mca_lock);
+ 	if (pmc) {
+-		for (psf = pmc->mca_tomb; psf; psf = psf_next) {
+-			psf_next = psf->sf_next;
+-			kfree(psf);
++		im->idev = pmc->idev;
++		im->mca_crcount = idev->mc_qrv;
++		im->mca_sfmode = pmc->mca_sfmode;
++		if (pmc->mca_sfmode == MCAST_INCLUDE) {
++			im->mca_tomb = pmc->mca_tomb;
++			im->mca_sources = pmc->mca_sources;
++			for (psf = im->mca_sources; psf; psf = psf->sf_next)
++				psf->sf_crcount = im->mca_crcount;
+ 		}
+ 		in6_dev_put(pmc->idev);
+ 		kfree(pmc);
+ 	}
++	spin_unlock_bh(&im->mca_lock);
+ }
+ 
+ static void mld_clear_delrec(struct inet6_dev *idev)
+@@ -904,7 +910,7 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
+ 	mca_get(mc);
+ 	write_unlock_bh(&idev->lock);
+ 
+-	mld_del_delrec(idev, &mc->mca_addr);
++	mld_del_delrec(idev, mc);
+ 	igmp6_group_added(mc);
+ 	ma_put(mc);
+ 	return 0;
+@@ -927,6 +933,7 @@ int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
+ 				write_unlock_bh(&idev->lock);
+ 
+ 				igmp6_group_dropped(ma);
++				ip6_mc_clear_src(ma);
+ 
+ 				ma_put(ma);
+ 				return 0;
+@@ -2501,15 +2508,17 @@ void ipv6_mc_down(struct inet6_dev *idev)
+ 	/* Withdraw multicast list */
+ 
+ 	read_lock_bh(&idev->lock);
+-	mld_ifc_stop_timer(idev);
+-	mld_gq_stop_timer(idev);
+-	mld_dad_stop_timer(idev);
+ 
+ 	for (i = idev->mc_list; i; i = i->next)
+ 		igmp6_group_dropped(i);
+-	read_unlock_bh(&idev->lock);
+ 
+-	mld_clear_delrec(idev);
++	/* Should stop timer after group drop. or we will
++	 * start timer again in mld_ifc_event()
++	 */
++	mld_ifc_stop_timer(idev);
++	mld_gq_stop_timer(idev);
++	mld_dad_stop_timer(idev);
++	read_unlock_bh(&idev->lock);
+ }
+ 
+ static void ipv6_mc_reset(struct inet6_dev *idev)
+@@ -2531,8 +2540,10 @@ void ipv6_mc_up(struct inet6_dev *idev)
+ 
+ 	read_lock_bh(&idev->lock);
+ 	ipv6_mc_reset(idev);
+-	for (i = idev->mc_list; i; i = i->next)
++	for (i = idev->mc_list; i; i = i->next) {
++		mld_del_delrec(idev, i);
+ 		igmp6_group_added(i);
++	}
+ 	read_unlock_bh(&idev->lock);
+ }
+ 
+@@ -2565,6 +2576,7 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev)
+ 
+ 	/* Deactivate timers */
+ 	ipv6_mc_down(idev);
++	mld_clear_delrec(idev);
+ 
+ 	/* Delete all-nodes address. */
+ 	/* We cannot call ipv6_dev_mc_dec() directly, our caller in
+@@ -2579,11 +2591,9 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev)
+ 	write_lock_bh(&idev->lock);
+ 	while ((i = idev->mc_list) != NULL) {
+ 		idev->mc_list = i->next;
+-		write_unlock_bh(&idev->lock);
+ 
+-		igmp6_group_dropped(i);
++		write_unlock_bh(&idev->lock);
+ 		ma_put(i);
+-
+ 		write_lock_bh(&idev->lock);
+ 	}
+ 	write_unlock_bh(&idev->lock);
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index b1cdf80..40d7405 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -1390,6 +1390,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
+ 	err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
+ 	if (err) {
+ 		free_percpu(dev->tstats);
++		dev->tstats = NULL;
+ 		return err;
+ 	}
+ 
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index b9f1fee..6673965 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -467,7 +467,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
+ 		opt = ireq->ipv6_opt;
+ 		if (!opt)
+ 			opt = rcu_dereference(np->opt);
+-		err = ip6_xmit(sk, skb, fl6, opt, np->tclass);
++		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
+ 		rcu_read_unlock();
+ 		err = net_xmit_eval(err);
+ 	}
+@@ -837,7 +837,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
+ 	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
+ 	if (!IS_ERR(dst)) {
+ 		skb_dst_set(buff, dst);
+-		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
++		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
+ 		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
+ 		if (rst)
+ 			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
+@@ -987,6 +987,16 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
+ 	return 0; /* don't send reset */
+ }
+ 
++static void tcp_v6_restore_cb(struct sk_buff *skb)
++{
++	/* We need to move header back to the beginning if xfrm6_policy_check()
++	 * and tcp_v6_fill_cb() are going to be called again.
++	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
++	 */
++	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
++		sizeof(struct inet6_skb_parm));
++}
++
+ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
+ 					 struct request_sock *req,
+ 					 struct dst_entry *dst,
+@@ -1178,8 +1188,10 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
+ 						      sk_gfp_mask(sk, GFP_ATOMIC));
+ 			consume_skb(ireq->pktopts);
+ 			ireq->pktopts = NULL;
+-			if (newnp->pktoptions)
++			if (newnp->pktoptions) {
++				tcp_v6_restore_cb(newnp->pktoptions);
+ 				skb_set_owner_r(newnp->pktoptions, newsk);
++			}
+ 		}
+ 	}
+ 
+@@ -1194,16 +1206,6 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
+ 	return NULL;
+ }
+ 
+-static void tcp_v6_restore_cb(struct sk_buff *skb)
+-{
+-	/* We need to move header back to the beginning if xfrm6_policy_check()
+-	 * and tcp_v6_fill_cb() are going to be called again.
+-	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
+-	 */
+-	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
+-		sizeof(struct inet6_skb_parm));
+-}
+-
+ /* The socket must have it's spinlock held when we get
+  * here, unless it is a TCP_LISTEN socket.
+  *
+diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
+index 2599af6..181e755c 100644
+--- a/net/l2tp/l2tp_core.h
++++ b/net/l2tp/l2tp_core.h
+@@ -273,6 +273,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb,
+ int l2tp_nl_register_ops(enum l2tp_pwtype pw_type,
+ 			 const struct l2tp_nl_cmd_ops *ops);
+ void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
++int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+ 
+ /* Session reference counts. Incremented when code obtains a reference
+  * to a session.
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index 8938b6b..c0f0750 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -11,6 +11,7 @@
+ 
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
++#include <asm/ioctls.h>
+ #include <linux/icmp.h>
+ #include <linux/module.h>
+ #include <linux/skbuff.h>
+@@ -560,6 +561,30 @@ static int l2tp_ip_recvmsg(struct sock *sk, struct msghdr *msg,
+ 	return err ? err : copied;
+ }
+ 
++int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg)
++{
++	struct sk_buff *skb;
++	int amount;
++
++	switch (cmd) {
++	case SIOCOUTQ:
++		amount = sk_wmem_alloc_get(sk);
++		break;
++	case SIOCINQ:
++		spin_lock_bh(&sk->sk_receive_queue.lock);
++		skb = skb_peek(&sk->sk_receive_queue);
++		amount = skb ? skb->len : 0;
++		spin_unlock_bh(&sk->sk_receive_queue.lock);
++		break;
++
++	default:
++		return -ENOIOCTLCMD;
++	}
++
++	return put_user(amount, (int __user *)arg);
++}
++EXPORT_SYMBOL(l2tp_ioctl);
++
+ static struct proto l2tp_ip_prot = {
+ 	.name		   = "L2TP/IP",
+ 	.owner		   = THIS_MODULE,
+@@ -568,7 +593,7 @@ static struct proto l2tp_ip_prot = {
+ 	.bind		   = l2tp_ip_bind,
+ 	.connect	   = l2tp_ip_connect,
+ 	.disconnect	   = l2tp_ip_disconnect,
+-	.ioctl		   = udp_ioctl,
++	.ioctl		   = l2tp_ioctl,
+ 	.destroy	   = l2tp_ip_destroy_sock,
+ 	.setsockopt	   = ip_setsockopt,
+ 	.getsockopt	   = ip_getsockopt,
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index aa821cb..1a65c9a 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -729,7 +729,7 @@ static struct proto l2tp_ip6_prot = {
+ 	.bind		   = l2tp_ip6_bind,
+ 	.connect	   = l2tp_ip6_connect,
+ 	.disconnect	   = l2tp_ip6_disconnect,
+-	.ioctl		   = udp_ioctl,
++	.ioctl		   = l2tp_ioctl,
+ 	.destroy	   = l2tp_ip6_destroy_sock,
+ 	.setsockopt	   = ipv6_setsockopt,
+ 	.getsockopt	   = ipv6_getsockopt,
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 94e4a59..458722b 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2813,7 +2813,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ 	struct virtio_net_hdr vnet_hdr = { 0 };
+ 	int offset = 0;
+ 	struct packet_sock *po = pkt_sk(sk);
+-	int hlen, tlen;
++	int hlen, tlen, linear;
+ 	int extra_len = 0;
+ 
+ 	/*
+@@ -2874,8 +2874,9 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ 	err = -ENOBUFS;
+ 	hlen = LL_RESERVED_SPACE(dev);
+ 	tlen = dev->needed_tailroom;
+-	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len,
+-			       __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len),
++	linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
++	linear = max(linear, min_t(int, len, dev->hard_header_len));
++	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
+ 			       msg->msg_flags & MSG_DONTWAIT, &err);
+ 	if (skb == NULL)
+ 		goto out_unlock;
+diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
+index f935429..b12bc2a 100644
+--- a/net/sched/cls_matchall.c
++++ b/net/sched/cls_matchall.c
+@@ -16,16 +16,11 @@
+ #include <net/sch_generic.h>
+ #include <net/pkt_cls.h>
+ 
+-struct cls_mall_filter {
++struct cls_mall_head {
+ 	struct tcf_exts exts;
+ 	struct tcf_result res;
+ 	u32 handle;
+-	struct rcu_head	rcu;
+ 	u32 flags;
+-};
+-
+-struct cls_mall_head {
+-	struct cls_mall_filter *filter;
+ 	struct rcu_head	rcu;
+ };
+ 
+@@ -33,38 +28,29 @@ static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+ 			 struct tcf_result *res)
+ {
+ 	struct cls_mall_head *head = rcu_dereference_bh(tp->root);
+-	struct cls_mall_filter *f = head->filter;
+ 
+-	if (tc_skip_sw(f->flags))
++	if (tc_skip_sw(head->flags))
+ 		return -1;
+ 
+-	return tcf_exts_exec(skb, &f->exts, res);
++	return tcf_exts_exec(skb, &head->exts, res);
+ }
+ 
+ static int mall_init(struct tcf_proto *tp)
+ {
+-	struct cls_mall_head *head;
+-
+-	head = kzalloc(sizeof(*head), GFP_KERNEL);
+-	if (!head)
+-		return -ENOBUFS;
+-
+-	rcu_assign_pointer(tp->root, head);
+-
+ 	return 0;
+ }
+ 
+-static void mall_destroy_filter(struct rcu_head *head)
++static void mall_destroy_rcu(struct rcu_head *rcu)
+ {
+-	struct cls_mall_filter *f = container_of(head, struct cls_mall_filter, rcu);
++	struct cls_mall_head *head = container_of(rcu, struct cls_mall_head,
++						  rcu);
+ 
+-	tcf_exts_destroy(&f->exts);
+-
+-	kfree(f);
++	tcf_exts_destroy(&head->exts);
++	kfree(head);
+ }
+ 
+ static int mall_replace_hw_filter(struct tcf_proto *tp,
+-				  struct cls_mall_filter *f,
++				  struct cls_mall_head *head,
+ 				  unsigned long cookie)
+ {
+ 	struct net_device *dev = tp->q->dev_queue->dev;
+@@ -74,7 +60,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
+ 	offload.type = TC_SETUP_MATCHALL;
+ 	offload.cls_mall = &mall_offload;
+ 	offload.cls_mall->command = TC_CLSMATCHALL_REPLACE;
+-	offload.cls_mall->exts = &f->exts;
++	offload.cls_mall->exts = &head->exts;
+ 	offload.cls_mall->cookie = cookie;
+ 
+ 	return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
+@@ -82,7 +68,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
+ }
+ 
+ static void mall_destroy_hw_filter(struct tcf_proto *tp,
+-				   struct cls_mall_filter *f,
++				   struct cls_mall_head *head,
+ 				   unsigned long cookie)
+ {
+ 	struct net_device *dev = tp->q->dev_queue->dev;
+@@ -103,29 +89,20 @@ static bool mall_destroy(struct tcf_proto *tp, bool force)
+ {
+ 	struct cls_mall_head *head = rtnl_dereference(tp->root);
+ 	struct net_device *dev = tp->q->dev_queue->dev;
+-	struct cls_mall_filter *f = head->filter;
+ 
+-	if (!force && f)
+-		return false;
++	if (!head)
++		return true;
+ 
+-	if (f) {
+-		if (tc_should_offload(dev, tp, f->flags))
+-			mall_destroy_hw_filter(tp, f, (unsigned long) f);
++	if (tc_should_offload(dev, tp, head->flags))
++		mall_destroy_hw_filter(tp, head, (unsigned long) head);
+ 
+-		call_rcu(&f->rcu, mall_destroy_filter);
+-	}
+-	kfree_rcu(head, rcu);
++	call_rcu(&head->rcu, mall_destroy_rcu);
+ 	return true;
+ }
+ 
+ static unsigned long mall_get(struct tcf_proto *tp, u32 handle)
+ {
+-	struct cls_mall_head *head = rtnl_dereference(tp->root);
+-	struct cls_mall_filter *f = head->filter;
+-
+-	if (f && f->handle == handle)
+-		return (unsigned long) f;
+-	return 0;
++	return 0UL;
+ }
+ 
+ static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
+@@ -134,7 +111,7 @@ static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
+ };
+ 
+ static int mall_set_parms(struct net *net, struct tcf_proto *tp,
+-			  struct cls_mall_filter *f,
++			  struct cls_mall_head *head,
+ 			  unsigned long base, struct nlattr **tb,
+ 			  struct nlattr *est, bool ovr)
+ {
+@@ -147,11 +124,11 @@ static int mall_set_parms(struct net *net, struct tcf_proto *tp,
+ 		return err;
+ 
+ 	if (tb[TCA_MATCHALL_CLASSID]) {
+-		f->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
+-		tcf_bind_filter(tp, &f->res, base);
++		head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
++		tcf_bind_filter(tp, &head->res, base);
+ 	}
+ 
+-	tcf_exts_change(tp, &f->exts, &e);
++	tcf_exts_change(tp, &head->exts, &e);
+ 
+ 	return 0;
+ }
+@@ -162,21 +139,17 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
+ 		       unsigned long *arg, bool ovr)
+ {
+ 	struct cls_mall_head *head = rtnl_dereference(tp->root);
+-	struct cls_mall_filter *fold = (struct cls_mall_filter *) *arg;
+ 	struct net_device *dev = tp->q->dev_queue->dev;
+-	struct cls_mall_filter *f;
+ 	struct nlattr *tb[TCA_MATCHALL_MAX + 1];
++	struct cls_mall_head *new;
+ 	u32 flags = 0;
+ 	int err;
+ 
+ 	if (!tca[TCA_OPTIONS])
+ 		return -EINVAL;
+ 
+-	if (head->filter)
+-		return -EBUSY;
+-
+-	if (fold)
+-		return -EINVAL;
++	if (head)
++		return -EEXIST;
+ 
+ 	err = nla_parse_nested(tb, TCA_MATCHALL_MAX,
+ 			       tca[TCA_OPTIONS], mall_policy);
+@@ -189,23 +162,23 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
+ 			return -EINVAL;
+ 	}
+ 
+-	f = kzalloc(sizeof(*f), GFP_KERNEL);
+-	if (!f)
++	new = kzalloc(sizeof(*new), GFP_KERNEL);
++	if (!new)
+ 		return -ENOBUFS;
+ 
+-	tcf_exts_init(&f->exts, TCA_MATCHALL_ACT, 0);
++	tcf_exts_init(&new->exts, TCA_MATCHALL_ACT, 0);
+ 
+ 	if (!handle)
+ 		handle = 1;
+-	f->handle = handle;
+-	f->flags = flags;
++	new->handle = handle;
++	new->flags = flags;
+ 
+-	err = mall_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr);
++	err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr);
+ 	if (err)
+ 		goto errout;
+ 
+ 	if (tc_should_offload(dev, tp, flags)) {
+-		err = mall_replace_hw_filter(tp, f, (unsigned long) f);
++		err = mall_replace_hw_filter(tp, new, (unsigned long) new);
+ 		if (err) {
+ 			if (tc_skip_sw(flags))
+ 				goto errout;
+@@ -214,39 +187,29 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
+ 		}
+ 	}
+ 
+-	*arg = (unsigned long) f;
+-	rcu_assign_pointer(head->filter, f);
+-
++	*arg = (unsigned long) head;
++	rcu_assign_pointer(tp->root, new);
++	if (head)
++		call_rcu(&head->rcu, mall_destroy_rcu);
+ 	return 0;
+ 
+ errout:
+-	kfree(f);
++	kfree(new);
+ 	return err;
+ }
+ 
+ static int mall_delete(struct tcf_proto *tp, unsigned long arg)
+ {
+-	struct cls_mall_head *head = rtnl_dereference(tp->root);
+-	struct cls_mall_filter *f = (struct cls_mall_filter *) arg;
+-	struct net_device *dev = tp->q->dev_queue->dev;
+-
+-	if (tc_should_offload(dev, tp, f->flags))
+-		mall_destroy_hw_filter(tp, f, (unsigned long) f);
+-
+-	RCU_INIT_POINTER(head->filter, NULL);
+-	tcf_unbind_filter(tp, &f->res);
+-	call_rcu(&f->rcu, mall_destroy_filter);
+-	return 0;
++	return -EOPNOTSUPP;
+ }
+ 
+ static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
+ {
+ 	struct cls_mall_head *head = rtnl_dereference(tp->root);
+-	struct cls_mall_filter *f = head->filter;
+ 
+ 	if (arg->count < arg->skip)
+ 		goto skip;
+-	if (arg->fn(tp, (unsigned long) f, arg) < 0)
++	if (arg->fn(tp, (unsigned long) head, arg) < 0)
+ 		arg->stop = 1;
+ skip:
+ 	arg->count++;
+@@ -255,28 +218,28 @@ static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
+ static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
+ 		     struct sk_buff *skb, struct tcmsg *t)
+ {
+-	struct cls_mall_filter *f = (struct cls_mall_filter *) fh;
++	struct cls_mall_head *head = (struct cls_mall_head *) fh;
+ 	struct nlattr *nest;
+ 
+-	if (!f)
++	if (!head)
+ 		return skb->len;
+ 
+-	t->tcm_handle = f->handle;
++	t->tcm_handle = head->handle;
+ 
+ 	nest = nla_nest_start(skb, TCA_OPTIONS);
+ 	if (!nest)
+ 		goto nla_put_failure;
+ 
+-	if (f->res.classid &&
+-	    nla_put_u32(skb, TCA_MATCHALL_CLASSID, f->res.classid))
++	if (head->res.classid &&
++	    nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
+ 		goto nla_put_failure;
+ 
+-	if (tcf_exts_dump(skb, &f->exts))
++	if (tcf_exts_dump(skb, &head->exts))
+ 		goto nla_put_failure;
+ 
+ 	nla_nest_end(skb, nest);
+ 
+-	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
++	if (tcf_exts_dump_stats(skb, &head->exts) < 0)
+ 		goto nla_put_failure;
+ 
+ 	return skb->len;
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index 176af30..6a2532d 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -222,7 +222,8 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
+ 	SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
+ 
+ 	rcu_read_lock();
+-	res = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt), np->tclass);
++	res = ip6_xmit(sk, skb, fl6, sk->sk_mark, rcu_dereference(np->opt),
++		       np->tclass);
+ 	rcu_read_unlock();
+ 	return res;
+ }
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index ca12aa3..6cbe5bd 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -7427,7 +7427,8 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
+ 		 */
+ 		release_sock(sk);
+ 		current_timeo = schedule_timeout(current_timeo);
+-		BUG_ON(sk != asoc->base.sk);
++		if (sk != asoc->base.sk)
++			goto do_error;
+ 		lock_sock(sk);
+ 
+ 		*timeo_p = current_timeo;

diff --git a/4.9.10/4420_grsecurity-3.1-4.9.10-201702162016.patch b/4.9.11/4420_grsecurity-3.1-4.9.11-201702181444.patch
similarity index 99%
rename from 4.9.10/4420_grsecurity-3.1-4.9.10-201702162016.patch
rename to 4.9.11/4420_grsecurity-3.1-4.9.11-201702181444.patch
index ef4d7f3..91575ee 100644
--- a/4.9.10/4420_grsecurity-3.1-4.9.10-201702162016.patch
+++ b/4.9.11/4420_grsecurity-3.1-4.9.11-201702181444.patch
@@ -419,7 +419,7 @@ index 3d0ae15..84e5412 100644
        cmd_syscalls = $(CONFIG_SHELL) $< $(CC) $(c_flags) $(missing_syscalls_flags)
  
 diff --git a/Makefile b/Makefile
-index d2fe757..92fd198 100644
+index 18b0c5a..54a9fea 100644
 --- a/Makefile
 +++ b/Makefile
 @@ -302,7 +302,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -30440,7 +30440,7 @@ index 04f89ca..43ad7de 100644
  unlock_done:
  	mutex_unlock(&espfix_init_mutex);
 diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
-index ebb4e95..37e51387 100644
+index 96d80df..1932b44 100644
 --- a/arch/x86/kernel/fpu/core.c
 +++ b/arch/x86/kernel/fpu/core.c
 @@ -136,7 +136,7 @@ void __kernel_fpu_end(void)
@@ -30461,7 +30461,7 @@ index ebb4e95..37e51387 100644
  			else
  				fpregs_deactivate(fpu);
  		}
-@@ -261,7 +261,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
+@@ -262,7 +262,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
  	 * leak into the child task:
  	 */
  	if (use_eager_fpu())
@@ -30470,7 +30470,7 @@ index ebb4e95..37e51387 100644
  
  	/*
  	 * Save current FPU registers directly into the child
-@@ -280,11 +280,10 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
+@@ -281,11 +281,10 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
  	 */
  	preempt_disable();
  	if (!copy_fpregs_to_fpstate(dst_fpu)) {
@@ -30484,7 +30484,7 @@ index ebb4e95..37e51387 100644
  		else
  			fpregs_deactivate(src_fpu);
  	}
-@@ -305,7 +304,7 @@ void fpu__activate_curr(struct fpu *fpu)
+@@ -306,7 +305,7 @@ void fpu__activate_curr(struct fpu *fpu)
  	WARN_ON_FPU(fpu != &current->thread.fpu);
  
  	if (!fpu->fpstate_active) {
@@ -30493,7 +30493,7 @@ index ebb4e95..37e51387 100644
  		trace_x86_fpu_init_state(fpu);
  
  		trace_x86_fpu_activate_state(fpu);
-@@ -333,7 +332,7 @@ void fpu__activate_fpstate_read(struct fpu *fpu)
+@@ -334,7 +333,7 @@ void fpu__activate_fpstate_read(struct fpu *fpu)
  		fpu__save(fpu);
  	} else {
  		if (!fpu->fpstate_active) {
@@ -30502,7 +30502,7 @@ index ebb4e95..37e51387 100644
  			trace_x86_fpu_init_state(fpu);
  
  			trace_x86_fpu_activate_state(fpu);
-@@ -368,7 +367,7 @@ void fpu__activate_fpstate_write(struct fpu *fpu)
+@@ -369,7 +368,7 @@ void fpu__activate_fpstate_write(struct fpu *fpu)
  		/* Invalidate any lazy state: */
  		fpu->last_cpu = -1;
  	} else {
@@ -30511,7 +30511,7 @@ index ebb4e95..37e51387 100644
  		trace_x86_fpu_init_state(fpu);
  
  		trace_x86_fpu_activate_state(fpu);
-@@ -431,7 +430,7 @@ void fpu__current_fpstate_write_end(void)
+@@ -432,7 +431,7 @@ void fpu__current_fpstate_write_end(void)
  	 * an XRSTOR if they are active.
  	 */
  	if (fpregs_active())
@@ -30520,7 +30520,7 @@ index ebb4e95..37e51387 100644
  
  	/*
  	 * Our update is done and the fpregs/fpstate are in sync
-@@ -458,7 +457,7 @@ void fpu__restore(struct fpu *fpu)
+@@ -459,7 +458,7 @@ void fpu__restore(struct fpu *fpu)
  	kernel_fpu_disable();
  	trace_x86_fpu_before_restore(fpu);
  	fpregs_activate(fpu);
@@ -30529,7 +30529,7 @@ index ebb4e95..37e51387 100644
  	fpu->counter++;
  	trace_x86_fpu_after_restore(fpu);
  	kernel_fpu_enable();
-@@ -554,11 +553,11 @@ int fpu__exception_code(struct fpu *fpu, int trap_nr)
+@@ -555,11 +554,11 @@ int fpu__exception_code(struct fpu *fpu, int trap_nr)
  		 * fully reproduce the context of the exception.
  		 */
  		if (boot_cpu_has(X86_FEATURE_FXSR)) {
@@ -30545,7 +30545,7 @@ index ebb4e95..37e51387 100644
  		}
  
  		err = swd & ~cwd;
-@@ -572,7 +571,7 @@ int fpu__exception_code(struct fpu *fpu, int trap_nr)
+@@ -573,7 +572,7 @@ int fpu__exception_code(struct fpu *fpu, int trap_nr)
  		unsigned short mxcsr = MXCSR_DEFAULT;
  
  		if (boot_cpu_has(X86_FEATURE_XMM))
@@ -69593,10 +69593,10 @@ index f9db2ce..6cd460c 100644
  }
  
 diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
-index 6255973..7ae59f5 100644
+index 1b65f0f..55b35dc 100644
 --- a/drivers/net/loopback.c
 +++ b/drivers/net/loopback.c
-@@ -216,6 +216,6 @@ static __net_init int loopback_net_init(struct net *net)
+@@ -217,6 +217,6 @@ static __net_init int loopback_net_init(struct net *net)
  }
  
  /* Registered in net/core/dev.c */
@@ -69663,7 +69663,7 @@ index 26d6f0b..af4d2ad 100644
  };
  
 diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
-index 6f38daf..5a5bedd 100644
+index adea6f5..5991765 100644
 --- a/drivers/net/macvtap.c
 +++ b/drivers/net/macvtap.c
 @@ -514,7 +514,7 @@ static void macvtap_setup(struct net_device *dev)
@@ -69854,7 +69854,7 @@ index a380649..fd8fe79c 100644
  };
  
 diff --git a/drivers/net/tun.c b/drivers/net/tun.c
-index 18402d7..61603d0 100644
+index b31aca8..3853488 100644
 --- a/drivers/net/tun.c
 +++ b/drivers/net/tun.c
 @@ -966,7 +966,7 @@ static void tun_set_headroom(struct net_device *dev, int new_hr)
@@ -69866,7 +69866,7 @@ index 18402d7..61603d0 100644
  		new_hr = NET_SKB_PAD;
  
  	tun->align = new_hr;
-@@ -1548,7 +1548,7 @@ static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
+@@ -1550,7 +1550,7 @@ static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
  	return -EINVAL;
  }
  
@@ -69875,7 +69875,7 @@ index 18402d7..61603d0 100644
  	.kind		= DRV_NAME,
  	.priv_size	= sizeof(struct tun_struct),
  	.setup		= tun_setup,
-@@ -1977,7 +1977,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
+@@ -1979,7 +1979,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
  }
  
  static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
@@ -69884,7 +69884,7 @@ index 18402d7..61603d0 100644
  {
  	struct tun_file *tfile = file->private_data;
  	struct tun_struct *tun;
-@@ -1991,6 +1991,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
+@@ -1993,6 +1993,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
  	int le;
  	int ret;
  
@@ -69894,7 +69894,7 @@ index 18402d7..61603d0 100644
  	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
  		if (copy_from_user(&ifr, argp, ifreq_len))
  			return -EFAULT;
-@@ -2506,7 +2509,7 @@ static int tun_device_event(struct notifier_block *unused,
+@@ -2508,7 +2511,7 @@ static int tun_device_event(struct notifier_block *unused,
  	return NOTIFY_DONE;
  }
  
@@ -141755,7 +141755,7 @@ index cd0c8bd..8c20e41 100644
  struct iovec;
  struct kvec;
 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
-index d83590e..7bd7c11 100644
+index bb9b102..1969a64 100644
 --- a/include/linux/netdevice.h
 +++ b/include/linux/netdevice.h
 @@ -1324,6 +1324,7 @@ struct net_device_ops {
@@ -141766,7 +141766,7 @@ index d83590e..7bd7c11 100644
  
  /**
   * enum net_device_priv_flags - &struct net_device priv_flags
-@@ -1651,7 +1652,7 @@ struct net_device {
+@@ -1652,7 +1653,7 @@ struct net_device {
  	unsigned long		base_addr;
  	int			irq;
  
@@ -141775,7 +141775,7 @@ index d83590e..7bd7c11 100644
  
  	/*
  	 *	Some hardware also needs these fields (state,dev_list,
-@@ -1691,9 +1692,9 @@ struct net_device {
+@@ -1692,9 +1693,9 @@ struct net_device {
  
  	struct net_device_stats	stats;
  
@@ -141788,7 +141788,7 @@ index d83590e..7bd7c11 100644
  
  #ifdef CONFIG_WIRELESS_EXT
  	const struct iw_handler_def *wireless_handlers;
-@@ -3364,7 +3365,7 @@ static __always_inline int ____dev_forward_skb(struct net_device *dev,
+@@ -3368,7 +3369,7 @@ static __always_inline int ____dev_forward_skb(struct net_device *dev,
  {
  	if (skb_orphan_frags(skb, GFP_ATOMIC) ||
  	    unlikely(!is_skb_forwardable(dev, skb))) {
@@ -141797,7 +141797,7 @@ index d83590e..7bd7c11 100644
  		kfree_skb(skb);
  		return NET_RX_DROP;
  	}
-@@ -4290,7 +4291,7 @@ static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
+@@ -4294,7 +4295,7 @@ static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
  	return dev->priv_flags & IFF_MACSEC;
  }
  
@@ -145591,7 +145591,7 @@ index cd6018a..996671f 100644
  	struct list_head	est_list;	/* estimator list */
  	spinlock_t		est_lock;
 diff --git a/include/net/ipv6.h b/include/net/ipv6.h
-index f11ca83..bec016e 100644
+index 7f15f95..d381999 100644
 --- a/include/net/ipv6.h
 +++ b/include/net/ipv6.h
 @@ -788,7 +788,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
@@ -163663,10 +163663,10 @@ index 3408ed5..885aab5 100644
  	.priv_size	= sizeof(struct chnl_net),
  	.setup		= ipcaif_net_setup,
 diff --git a/net/can/af_can.c b/net/can/af_can.c
-index 1108079..1871d16 100644
+index 5488e4a..823ad05 100644
 --- a/net/can/af_can.c
 +++ b/net/can/af_can.c
-@@ -890,7 +890,7 @@ static const struct net_proto_family can_family_ops = {
+@@ -898,7 +898,7 @@ static const struct net_proto_family can_family_ops = {
  };
  
  /* notifier block for netdevice event */
@@ -163676,7 +163676,7 @@ index 1108079..1871d16 100644
  };
  
 diff --git a/net/can/bcm.c b/net/can/bcm.c
-index 5e9ed5e..916d38c 100644
+index e4f694d..62ad313 100644
 --- a/net/can/bcm.c
 +++ b/net/can/bcm.c
 @@ -1699,7 +1699,7 @@ static int __init bcm_module_init(void)
@@ -163689,7 +163689,7 @@ index 5e9ed5e..916d38c 100644
  }
  
 diff --git a/net/can/gw.c b/net/can/gw.c
-index 4551687..4e82e9b 100644
+index 77c8af4..7d32a8b 100644
 --- a/net/can/gw.c
 +++ b/net/can/gw.c
 @@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
@@ -163899,10 +163899,10 @@ index b7de71f..808387d 100644
  
  	return err;
 diff --git a/net/core/dev.c b/net/core/dev.c
-index df51c50..26e1440 100644
+index 60b0a604..920cbea 100644
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -3000,7 +3000,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
+@@ -2995,7 +2995,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
  out_kfree_skb:
  	kfree_skb(skb);
  out_null:
@@ -163911,7 +163911,7 @@ index df51c50..26e1440 100644
  	return NULL;
  }
  
-@@ -3411,7 +3411,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
+@@ -3406,7 +3406,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
  	rc = -ENETDOWN;
  	rcu_read_unlock_bh();
  
@@ -163920,7 +163920,7 @@ index df51c50..26e1440 100644
  	kfree_skb_list(skb);
  	return rc;
  out:
-@@ -3764,7 +3764,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
+@@ -3759,7 +3759,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
  
  	local_irq_restore(flags);
  
@@ -163929,7 +163929,7 @@ index df51c50..26e1440 100644
  	kfree_skb(skb);
  	return NET_RX_DROP;
  }
-@@ -3841,7 +3841,7 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -3836,7 +3836,7 @@ int netif_rx_ni(struct sk_buff *skb)
  }
  EXPORT_SYMBOL(netif_rx_ni);
  
@@ -163938,7 +163938,7 @@ index df51c50..26e1440 100644
  {
  	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
  
-@@ -4208,9 +4208,9 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
+@@ -4203,9 +4203,9 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
  	} else {
  drop:
  		if (!deliver_exact)
@@ -163950,7 +163950,7 @@ index df51c50..26e1440 100644
  		kfree_skb(skb);
  		/* Jamal, now you will not able to escape explaining
  		 * me how you were going to use this. :-)
-@@ -5197,7 +5197,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
+@@ -5192,7 +5192,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
  	return work;
  }
  
@@ -163959,7 +163959,7 @@ index df51c50..26e1440 100644
  {
  	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
  	unsigned long time_limit = jiffies + 2;
-@@ -7540,9 +7540,9 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
+@@ -7535,9 +7535,9 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
  	} else {
  		netdev_stats_to_stats64(storage, &dev->stats);
  	}
@@ -163972,7 +163972,7 @@ index df51c50..26e1440 100644
  	return storage;
  }
  EXPORT_SYMBOL(dev_get_stats);
-@@ -8167,7 +8167,7 @@ static void __net_exit netdev_exit(struct net *net)
+@@ -8162,7 +8162,7 @@ static void __net_exit netdev_exit(struct net *net)
  	kfree(net->dev_index_head);
  }
  
@@ -163981,7 +163981,7 @@ index df51c50..26e1440 100644
  	.init = netdev_init,
  	.exit = netdev_exit,
  };
-@@ -8267,7 +8267,7 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
+@@ -8262,7 +8262,7 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
  	rtnl_unlock();
  }
  
@@ -165507,10 +165507,10 @@ index d6feabb..9cb3988 100644
  						  ICMP_PROT_UNREACH, 0);
  				}
 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
-index f226f408..87ab217 100644
+index 65336f3..3c7e4b7 100644
 --- a/net/ipv4/ip_sockglue.c
 +++ b/net/ipv4/ip_sockglue.c
-@@ -1335,7 +1335,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
+@@ -1342,7 +1342,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
  		len = min_t(unsigned int, len, opt->optlen);
  		if (put_user(len, optlen))
  			return -EFAULT;
@@ -165520,7 +165520,7 @@ index f226f408..87ab217 100644
  			return -EFAULT;
  		return 0;
  	}
-@@ -1471,7 +1472,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
+@@ -1478,7 +1479,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
  		if (sk->sk_type != SOCK_STREAM)
  			return -ENOPROTOOPT;
  
@@ -165721,7 +165721,7 @@ index 4a9e6db..06174e1 100644
  		pr_err("Unable to proc dir entry\n");
  		return -ENOMEM;
 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
-index 96b8e2b..fb38f77 100644
+index 105c074..6c5033d 100644
 --- a/net/ipv4/ping.c
 +++ b/net/ipv4/ping.c
 @@ -59,7 +59,7 @@ struct ping_table {
@@ -165760,7 +165760,7 @@ index 96b8e2b..fb38f77 100644
  						   info, (u8 *)icmph);
  #endif
  		}
-@@ -923,10 +923,10 @@ int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
+@@ -925,10 +925,10 @@ int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
  		}
  
  		if (inet6_sk(sk)->rxopt.all)
@@ -165773,7 +165773,7 @@ index 96b8e2b..fb38f77 100644
  		else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
  			ip_cmsg_recv(msg, skb);
  #endif
-@@ -1123,7 +1123,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
+@@ -1125,7 +1125,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
  		from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
  		0, sock_i_ino(sp),
  		atomic_read(&sp->sk_refcnt), sp,
@@ -166770,7 +166770,7 @@ index ef54852..56699bb 100644
  	return new;
  }
 diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
-index d7d6d3a..eaae042 100644
+index 0a59220..645d4d6 100644
 --- a/net/ipv6/ip6_gre.c
 +++ b/net/ipv6/ip6_gre.c
 @@ -71,8 +71,8 @@ struct ip6gre_net {
@@ -166784,7 +166784,7 @@ index d7d6d3a..eaae042 100644
  static int ip6gre_tunnel_init(struct net_device *dev);
  static void ip6gre_tunnel_setup(struct net_device *dev);
  static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
-@@ -1067,7 +1067,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
+@@ -1069,7 +1069,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
  }
  
  
@@ -166793,7 +166793,7 @@ index d7d6d3a..eaae042 100644
  	.handler     = gre_rcv,
  	.err_handler = ip6gre_err,
  	.flags       = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
-@@ -1520,7 +1520,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
+@@ -1522,7 +1522,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
  	[IFLA_GRE_ENCAP_DPORT]  = { .type = NLA_U16 },
  };
  
@@ -166802,7 +166802,7 @@ index d7d6d3a..eaae042 100644
  	.kind		= "ip6gre",
  	.maxtype	= IFLA_GRE_MAX,
  	.policy		= ip6gre_policy,
-@@ -1535,7 +1535,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
+@@ -1537,7 +1537,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
  	.get_link_net	= ip6_tnl_get_link_net,
  };
  
@@ -166812,7 +166812,7 @@ index d7d6d3a..eaae042 100644
  	.maxtype	= IFLA_GRE_MAX,
  	.policy		= ip6gre_policy,
 diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
-index f95437f..9bca401 100644
+index f6ba452..b04707b 100644
 --- a/net/ipv6/ip6_tunnel.c
 +++ b/net/ipv6/ip6_tunnel.c
 @@ -81,7 +81,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
@@ -166824,7 +166824,7 @@ index f95437f..9bca401 100644
  
  static int ip6_tnl_net_id __read_mostly;
  struct ip6_tnl_net {
-@@ -2093,7 +2093,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
+@@ -2103,7 +2103,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
  	[IFLA_IPTUN_COLLECT_METADATA]	= { .type = NLA_FLAG },
  };
  
@@ -167201,7 +167201,7 @@ index bff4460..4551e9a 100644
  	table = kmemdup(ipv6_route_table_template,
  			sizeof(ipv6_route_table_template),
 diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
-index b1cdf80..18a97a9 100644
+index 40d7405..dc61f2b 100644
 --- a/net/ipv6/sit.c
 +++ b/net/ipv6/sit.c
 @@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
@@ -167213,7 +167213,7 @@ index b1cdf80..18a97a9 100644
  
  static int sit_net_id __read_mostly;
  struct sit_net {
-@@ -1735,7 +1735,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
+@@ -1736,7 +1736,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
  		unregister_netdevice_queue(dev, head);
  }
  
@@ -167236,7 +167236,7 @@ index 69c50e7..ec875fa 100644
  	struct ctl_table *ipv6_icmp_table;
  	int err;
 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
-index b9f1fee..3e0f50f 100644
+index 6673965..16ec3de 100644
 --- a/net/ipv6/tcp_ipv6.c
 +++ b/net/ipv6/tcp_ipv6.c
 @@ -101,6 +101,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
@@ -167250,7 +167250,7 @@ index b9f1fee..3e0f50f 100644
  static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
  {
  	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
-@@ -1300,6 +1304,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
+@@ -1302,6 +1306,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
  	return 0;
  
  reset:
@@ -167260,7 +167260,7 @@ index b9f1fee..3e0f50f 100644
  	tcp_v6_send_reset(sk, skb);
  discard:
  	if (opt_skb)
-@@ -1404,12 +1411,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
+@@ -1406,12 +1413,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
  	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
  				th->source, th->dest, inet6_iif(skb),
  				&refcounted);
@@ -167283,7 +167283,7 @@ index b9f1fee..3e0f50f 100644
  
  	if (sk->sk_state == TCP_NEW_SYN_RECV) {
  		struct request_sock *req = inet_reqsk(sk);
-@@ -1499,6 +1514,10 @@ static int tcp_v6_rcv(struct sk_buff *skb)
+@@ -1501,6 +1516,10 @@ static int tcp_v6_rcv(struct sk_buff *skb)
  bad_packet:
  		__TCP_INC_STATS(net, TCP_MIB_INERRS);
  	} else {
@@ -168246,10 +168246,10 @@ index 965f7e3..daa74100 100644
  }
  
 diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
-index 8938b6b..bd9ca60 100644
+index c0f0750..7f2e432 100644
 --- a/net/l2tp/l2tp_ip.c
 +++ b/net/l2tp/l2tp_ip.c
-@@ -616,7 +616,7 @@ static struct inet_protosw l2tp_ip_protosw = {
+@@ -641,7 +641,7 @@ static struct inet_protosw l2tp_ip_protosw = {
  	.ops		= &l2tp_ip_ops,
  };
  
@@ -168259,7 +168259,7 @@ index 8938b6b..bd9ca60 100644
  	.netns_ok	= 1,
  };
 diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
-index aa821cb..045ff78 100644
+index 1a65c9a..46ac95e 100644
 --- a/net/l2tp/l2tp_ip6.c
 +++ b/net/l2tp/l2tp_ip6.c
 @@ -777,7 +777,7 @@ static struct inet_protosw l2tp_ip6_protosw = {
@@ -169936,7 +169936,7 @@ index 7eb955e..479c9a6 100644
  
  static int __init ovs_vxlan_tnl_init(void)
 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
-index 94e4a59..0339089 100644
+index 458722b..5852eae 100644
 --- a/net/packet/af_packet.c
 +++ b/net/packet/af_packet.c
 @@ -278,7 +278,7 @@ static int packet_direct_xmit(struct sk_buff *skb)
@@ -169991,7 +169991,7 @@ index 94e4a59..0339089 100644
  	spin_unlock(&sk->sk_receive_queue.lock);
  
  drop_n_restore:
-@@ -3847,7 +3847,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
+@@ -3848,7 +3848,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
  	case PACKET_HDRLEN:
  		if (len > sizeof(int))
  			len = sizeof(int);
@@ -170000,7 +170000,7 @@ index 94e4a59..0339089 100644
  			return -EFAULT;
  		switch (val) {
  		case TPACKET_V1:
-@@ -3882,9 +3882,9 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
+@@ -3883,9 +3883,9 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
  	case PACKET_ROLLOVER_STATS:
  		if (!po->rollover)
  			return -EINVAL;
@@ -170013,7 +170013,7 @@ index 94e4a59..0339089 100644
  		data = &rstats;
  		lv = sizeof(rstats);
  		break;
-@@ -3902,7 +3902,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
+@@ -3903,7 +3903,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
  		len = lv;
  	if (put_user(len, optlen))
  		return -EFAULT;
@@ -170469,10 +170469,10 @@ index 6cfb6e9..eaa7ef4 100644
  	}
  }
 diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
-index 176af30..585194b 100644
+index 6a2532d..09ce23f 100644
 --- a/net/sctp/ipv6.c
 +++ b/net/sctp/ipv6.c
-@@ -990,7 +990,7 @@ static const struct inet6_protocol sctpv6_protocol = {
+@@ -991,7 +991,7 @@ static const struct inet6_protocol sctpv6_protocol = {
  	.flags        = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
  };
  
@@ -170481,7 +170481,7 @@ index 176af30..585194b 100644
  	.sa_family	   = AF_INET6,
  	.sctp_xmit	   = sctp_v6_xmit,
  	.setsockopt	   = ipv6_setsockopt,
-@@ -1047,7 +1047,7 @@ void sctp_v6_pf_init(void)
+@@ -1048,7 +1048,7 @@ void sctp_v6_pf_init(void)
  
  void sctp_v6_pf_exit(void)
  {
@@ -170560,7 +170560,7 @@ index c345bf1..41a50e5 100644
  		NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname,
  	};
 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
-index ca12aa3..7a691ba 100644
+index 6cbe5bd..04c4e00 100644
 --- a/net/sctp/socket.c
 +++ b/net/sctp/socket.c
 @@ -4719,7 +4719,7 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv
@@ -170581,7 +170581,7 @@ index ca12aa3..7a691ba 100644
  		if (copy_to_user(to, &temp, addrlen))
  			return -EFAULT;
  		to += addrlen;
-@@ -7854,6 +7856,10 @@ struct proto sctp_prot = {
+@@ -7855,6 +7857,10 @@ struct proto sctp_prot = {
  	.unhash      =	sctp_unhash,
  	.get_port    =	sctp_get_port,
  	.obj_size    =  sizeof(struct sctp_sock),
@@ -209399,10 +209399,10 @@ index 0000000..1181e93
 +size_mei_msg_data_65529_fields size mei_msg_data 0 65529 NULL
 diff --git a/scripts/gcc-plugins/size_overflow_plugin/e_fns.data b/scripts/gcc-plugins/size_overflow_plugin/e_fns.data
 new file mode 100644
-index 0000000..0d6ca5e
+index 0000000..75e575c
 --- /dev/null
 +++ b/scripts/gcc-plugins/size_overflow_plugin/e_fns.data
-@@ -0,0 +1,5031 @@
+@@ -0,0 +1,5032 @@
 +logi_dj_recv_query_paired_devices_fndecl_13_fns logi_dj_recv_query_paired_devices fndecl 0 13 NULL
 +response_length_ib_uverbs_ex_destroy_wq_resp_15_fns response_length ib_uverbs_ex_destroy_wq_resp 0 15 NULL
 +kfd_wait_on_events_fndecl_19_fns kfd_wait_on_events fndecl 2 19 NULL
@@ -210555,6 +210555,7 @@ index 0000000..0d6ca5e
 +elem_size_snd_array_15155_fns elem_size snd_array 0 15155 NULL nohasharray
 +chaoskey_read_fndecl_15155_fns chaoskey_read fndecl 3 15155 &elem_size_snd_array_15155_fns
 +walk_hugetlb_range_fndecl_15190_fns walk_hugetlb_range fndecl 0 15190 NULL
++get_registers_fndecl_15192_fns get_registers fndecl 3 15192 NULL
 +max_clk_sdhci_host_15206_fns max_clk sdhci_host 0 15206 NULL
 +nlm_end_grace_read_fndecl_15209_fns nlm_end_grace_read fndecl 3 15209 NULL
 +genwqe_ffdc_buff_size_fndecl_15236_fns genwqe_ffdc_buff_size fndecl 0 15236 NULL

diff --git a/4.9.10/4425_grsec_remove_EI_PAX.patch b/4.9.11/4425_grsec_remove_EI_PAX.patch
similarity index 100%
rename from 4.9.10/4425_grsec_remove_EI_PAX.patch
rename to 4.9.11/4425_grsec_remove_EI_PAX.patch

diff --git a/4.9.10/4426_default_XATTR_PAX_FLAGS.patch b/4.9.11/4426_default_XATTR_PAX_FLAGS.patch
similarity index 100%
rename from 4.9.10/4426_default_XATTR_PAX_FLAGS.patch
rename to 4.9.11/4426_default_XATTR_PAX_FLAGS.patch

diff --git a/4.9.10/4427_force_XATTR_PAX_tmpfs.patch b/4.9.11/4427_force_XATTR_PAX_tmpfs.patch
similarity index 100%
rename from 4.9.10/4427_force_XATTR_PAX_tmpfs.patch
rename to 4.9.11/4427_force_XATTR_PAX_tmpfs.patch

diff --git a/4.9.10/4430_grsec-remove-localversion-grsec.patch b/4.9.11/4430_grsec-remove-localversion-grsec.patch
similarity index 100%
rename from 4.9.10/4430_grsec-remove-localversion-grsec.patch
rename to 4.9.11/4430_grsec-remove-localversion-grsec.patch

diff --git a/4.9.10/4435_grsec-mute-warnings.patch b/4.9.11/4435_grsec-mute-warnings.patch
similarity index 100%
rename from 4.9.10/4435_grsec-mute-warnings.patch
rename to 4.9.11/4435_grsec-mute-warnings.patch

diff --git a/4.9.10/4440_grsec-remove-protected-paths.patch b/4.9.11/4440_grsec-remove-protected-paths.patch
similarity index 100%
rename from 4.9.10/4440_grsec-remove-protected-paths.patch
rename to 4.9.11/4440_grsec-remove-protected-paths.patch

diff --git a/4.9.10/4450_grsec-kconfig-default-gids.patch b/4.9.11/4450_grsec-kconfig-default-gids.patch
similarity index 100%
rename from 4.9.10/4450_grsec-kconfig-default-gids.patch
rename to 4.9.11/4450_grsec-kconfig-default-gids.patch

diff --git a/4.9.10/4465_selinux-avc_audit-log-curr_ip.patch b/4.9.11/4465_selinux-avc_audit-log-curr_ip.patch
similarity index 100%
rename from 4.9.10/4465_selinux-avc_audit-log-curr_ip.patch
rename to 4.9.11/4465_selinux-avc_audit-log-curr_ip.patch

diff --git a/4.9.10/4470_disable-compat_vdso.patch b/4.9.11/4470_disable-compat_vdso.patch
similarity index 100%
rename from 4.9.10/4470_disable-compat_vdso.patch
rename to 4.9.11/4470_disable-compat_vdso.patch

diff --git a/4.9.10/4475_emutramp_default_on.patch b/4.9.11/4475_emutramp_default_on.patch
similarity index 100%
rename from 4.9.10/4475_emutramp_default_on.patch
rename to 4.9.11/4475_emutramp_default_on.patch


^ permalink raw reply related	[flat|nested] only message in thread

only message in thread, other threads:[~2017-02-19 23:07 UTC | newest]

Thread overview: (only message) (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2017-02-19 23:07 [gentoo-commits] proj/hardened-patchset:master commit in: 4.9.11/, 4.9.10/ Anthony G. Basile

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox