* [gentoo-commits] linux-patches r2752 - genpatches-2.6/trunk/3.13
@ 2014-04-15 14:43 Mike Pagano (mpagano)
0 siblings, 0 replies; only message in thread
From: Mike Pagano (mpagano) @ 2014-04-15 14:43 UTC (permalink / raw)
To: gentoo-commits
Author: mpagano
Date: 2014-04-15 14:43:16 +0000 (Tue, 15 Apr 2014)
New Revision: 2752
Added:
genpatches-2.6/trunk/3.13/1009_linux-3.13.10.patch
Modified:
genpatches-2.6/trunk/3.13/0000_README
Log:
Linux patch 3.13.10
Modified: genpatches-2.6/trunk/3.13/0000_README
===================================================================
--- genpatches-2.6/trunk/3.13/0000_README 2014-04-15 14:29:42 UTC (rev 2751)
+++ genpatches-2.6/trunk/3.13/0000_README 2014-04-15 14:43:16 UTC (rev 2752)
@@ -78,6 +78,10 @@
From: http://www.kernel.org
Desc: Linux 3.13.9
+Patch: 1009_linux-3.13.10.patch
+From: http://www.kernel.org
+Desc: Linux 3.13.10
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
Added: genpatches-2.6/trunk/3.13/1009_linux-3.13.10.patch
===================================================================
--- genpatches-2.6/trunk/3.13/1009_linux-3.13.10.patch (rev 0)
+++ genpatches-2.6/trunk/3.13/1009_linux-3.13.10.patch 2014-04-15 14:43:16 UTC (rev 2752)
@@ -0,0 +1,2864 @@
+diff --git a/Documentation/devicetree/bindings/net/micrel-ks8851.txt b/Documentation/devicetree/bindings/net/micrel-ks8851.txt
+index 11ace3c3d805..4fc392763611 100644
+--- a/Documentation/devicetree/bindings/net/micrel-ks8851.txt
++++ b/Documentation/devicetree/bindings/net/micrel-ks8851.txt
+@@ -7,3 +7,4 @@ Required properties:
+
+ Optional properties:
+ - local-mac-address : Ethernet mac address to use
++- vdd-supply: supply for Ethernet mac
+diff --git a/Makefile b/Makefile
+index 652f97296a40..982ade0ad2dc 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 13
+-SUBLEVEL = 9
++SUBLEVEL = 10
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+
+diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
+index ea16d782af58..4f31b2eb5cdf 100644
+--- a/arch/arc/boot/dts/nsimosci.dts
++++ b/arch/arc/boot/dts/nsimosci.dts
+@@ -11,13 +11,16 @@
+
+ / {
+ compatible = "snps,nsimosci";
+- clock-frequency = <80000000>; /* 80 MHZ */
++ clock-frequency = <20000000>; /* 20 MHZ */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&intc>;
+
+ chosen {
+- bootargs = "console=tty0 consoleblank=0";
++ /* this is for console on PGU */
++ /* bootargs = "console=tty0 consoleblank=0"; */
++ /* this is for console on serial */
++ bootargs = "earlycon=uart8250,mmio32,0xc0000000,115200n8 console=ttyS0,115200n8 consoleblank=0 debug";
+ };
+
+ aliases {
+@@ -44,15 +47,14 @@
+ };
+
+ uart0: serial@c0000000 {
+- compatible = "snps,dw-apb-uart";
++ compatible = "ns8250";
+ reg = <0xc0000000 0x2000>;
+ interrupts = <11>;
+- #clock-frequency = <80000000>;
+ clock-frequency = <3686400>;
+ baud = <115200>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+- status = "okay";
++ no-loopback-test = <1>;
+ };
+
+ pgu0: pgu@c9000000 {
+diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
+index 451af30914f6..c01ba35a4eff 100644
+--- a/arch/arc/configs/nsimosci_defconfig
++++ b/arch/arc/configs/nsimosci_defconfig
+@@ -54,6 +54,7 @@ CONFIG_SERIO_ARC_PS2=y
+ CONFIG_SERIAL_8250=y
+ CONFIG_SERIAL_8250_CONSOLE=y
+ CONFIG_SERIAL_8250_DW=y
++CONFIG_SERIAL_OF_PLATFORM=y
+ CONFIG_SERIAL_ARC=y
+ CONFIG_SERIAL_ARC_CONSOLE=y
+ # CONFIG_HW_RANDOM is not set
+diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
+index 75f25a8e3001..44eb27e05a0d 100644
+--- a/arch/m68k/Kconfig
++++ b/arch/m68k/Kconfig
+@@ -17,6 +17,7 @@ config M68K
+ select FPU if MMU
+ select ARCH_WANT_IPC_PARSE_VERSION
+ select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE
++ select HAVE_FUTEX_CMPXCHG if MMU && FUTEX
+ select HAVE_MOD_ARCH_SPECIFIC
+ select MODULES_USE_ELF_REL
+ select MODULES_USE_ELF_RELA
+diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
+index e9f312532526..c8d8283cb20e 100644
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -117,6 +117,7 @@ config S390
+ select HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_FUNCTION_TRACER
+ select HAVE_FUNCTION_TRACE_MCOUNT_TEST
++ select HAVE_FUTEX_CMPXCHG if FUTEX
+ select HAVE_KERNEL_BZIP2
+ select HAVE_KERNEL_GZIP
+ select HAVE_KERNEL_LZ4
+diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
+index 586f41aac361..185fad49d86f 100644
+--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
++++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
+@@ -24,10 +24,6 @@
+ .align 16
+ .Lbswap_mask:
+ .octa 0x000102030405060708090a0b0c0d0e0f
+-.Lpoly:
+- .octa 0xc2000000000000000000000000000001
+-.Ltwo_one:
+- .octa 0x00000001000000000000000000000001
+
+ #define DATA %xmm0
+ #define SHASH %xmm1
+@@ -134,28 +130,3 @@ ENTRY(clmul_ghash_update)
+ .Lupdate_just_ret:
+ ret
+ ENDPROC(clmul_ghash_update)
+-
+-/*
+- * void clmul_ghash_setkey(be128 *shash, const u8 *key);
+- *
+- * Calculate hash_key << 1 mod poly
+- */
+-ENTRY(clmul_ghash_setkey)
+- movaps .Lbswap_mask, BSWAP
+- movups (%rsi), %xmm0
+- PSHUFB_XMM BSWAP %xmm0
+- movaps %xmm0, %xmm1
+- psllq $1, %xmm0
+- psrlq $63, %xmm1
+- movaps %xmm1, %xmm2
+- pslldq $8, %xmm1
+- psrldq $8, %xmm2
+- por %xmm1, %xmm0
+- # reduction
+- pshufd $0b00100100, %xmm2, %xmm1
+- pcmpeqd .Ltwo_one, %xmm1
+- pand .Lpoly, %xmm1
+- pxor %xmm1, %xmm0
+- movups %xmm0, (%rdi)
+- ret
+-ENDPROC(clmul_ghash_setkey)
+diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
+index 6759dd1135be..d785cf2c529c 100644
+--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
++++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
+@@ -30,8 +30,6 @@ void clmul_ghash_mul(char *dst, const be128 *shash);
+ void clmul_ghash_update(char *dst, const char *src, unsigned int srclen,
+ const be128 *shash);
+
+-void clmul_ghash_setkey(be128 *shash, const u8 *key);
+-
+ struct ghash_async_ctx {
+ struct cryptd_ahash *cryptd_tfm;
+ };
+@@ -58,13 +56,23 @@ static int ghash_setkey(struct crypto_shash *tfm,
+ const u8 *key, unsigned int keylen)
+ {
+ struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
++ be128 *x = (be128 *)key;
++ u64 a, b;
+
+ if (keylen != GHASH_BLOCK_SIZE) {
+ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+- clmul_ghash_setkey(&ctx->shash, key);
++ /* perform multiplication by 'x' in GF(2^128) */
++ a = be64_to_cpu(x->a);
++ b = be64_to_cpu(x->b);
++
++ ctx->shash.a = (__be64)((b << 1) | (a >> 63));
++ ctx->shash.b = (__be64)((a << 1) | (b >> 63));
++
++ if (a >> 63)
++ ctx->shash.b ^= cpu_to_be64(0xc2);
+
+ return 0;
+ }
+diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
+index 643e7952cad3..b9a444e358b5 100644
+--- a/drivers/cpufreq/powernow-k6.c
++++ b/drivers/cpufreq/powernow-k6.c
+@@ -26,41 +26,108 @@
+ static unsigned int busfreq; /* FSB, in 10 kHz */
+ static unsigned int max_multiplier;
+
++static unsigned int param_busfreq = 0;
++static unsigned int param_max_multiplier = 0;
++
++module_param_named(max_multiplier, param_max_multiplier, uint, S_IRUGO);
++MODULE_PARM_DESC(max_multiplier, "Maximum multiplier (allowed values: 20 30 35 40 45 50 55 60)");
++
++module_param_named(bus_frequency, param_busfreq, uint, S_IRUGO);
++MODULE_PARM_DESC(bus_frequency, "Bus frequency in kHz");
+
+ /* Clock ratio multiplied by 10 - see table 27 in AMD#23446 */
+ static struct cpufreq_frequency_table clock_ratio[] = {
+- {45, /* 000 -> 4.5x */ 0},
++ {60, /* 110 -> 6.0x */ 0},
++ {55, /* 011 -> 5.5x */ 0},
+ {50, /* 001 -> 5.0x */ 0},
++ {45, /* 000 -> 4.5x */ 0},
+ {40, /* 010 -> 4.0x */ 0},
+- {55, /* 011 -> 5.5x */ 0},
+- {20, /* 100 -> 2.0x */ 0},
+- {30, /* 101 -> 3.0x */ 0},
+- {60, /* 110 -> 6.0x */ 0},
+ {35, /* 111 -> 3.5x */ 0},
++ {30, /* 101 -> 3.0x */ 0},
++ {20, /* 100 -> 2.0x */ 0},
+ {0, CPUFREQ_TABLE_END}
+ };
+
++static const u8 index_to_register[8] = { 6, 3, 1, 0, 2, 7, 5, 4 };
++static const u8 register_to_index[8] = { 3, 2, 4, 1, 7, 6, 0, 5 };
++
++static const struct {
++ unsigned freq;
++ unsigned mult;
++} usual_frequency_table[] = {
++ { 400000, 40 }, // 100 * 4
++ { 450000, 45 }, // 100 * 4.5
++ { 475000, 50 }, // 95 * 5
++ { 500000, 50 }, // 100 * 5
++ { 506250, 45 }, // 112.5 * 4.5
++ { 533500, 55 }, // 97 * 5.5
++ { 550000, 55 }, // 100 * 5.5
++ { 562500, 50 }, // 112.5 * 5
++ { 570000, 60 }, // 95 * 6
++ { 600000, 60 }, // 100 * 6
++ { 618750, 55 }, // 112.5 * 5.5
++ { 660000, 55 }, // 120 * 5.5
++ { 675000, 60 }, // 112.5 * 6
++ { 720000, 60 }, // 120 * 6
++};
++
++#define FREQ_RANGE 3000
+
+ /**
+ * powernow_k6_get_cpu_multiplier - returns the current FSB multiplier
+ *
+- * Returns the current setting of the frequency multiplier. Core clock
++ * Returns the current setting of the frequency multiplier. Core clock
+ * speed is frequency of the Front-Side Bus multiplied with this value.
+ */
+ static int powernow_k6_get_cpu_multiplier(void)
+ {
+- u64 invalue = 0;
++ unsigned long invalue = 0;
+ u32 msrval;
+
++ local_irq_disable();
++
+ msrval = POWERNOW_IOPORT + 0x1;
+ wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
+ invalue = inl(POWERNOW_IOPORT + 0x8);
+ msrval = POWERNOW_IOPORT + 0x0;
+ wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
+
+- return clock_ratio[(invalue >> 5)&7].driver_data;
++ local_irq_enable();
++
++ return clock_ratio[register_to_index[(invalue >> 5)&7]].driver_data;
+ }
+
++static void powernow_k6_set_cpu_multiplier(unsigned int best_i)
++{
++ unsigned long outvalue, invalue;
++ unsigned long msrval;
++ unsigned long cr0;
++
++ /* we now need to transform best_i to the BVC format, see AMD#23446 */
++
++ /*
++ * The processor doesn't respond to inquiry cycles while changing the
++ * frequency, so we must disable cache.
++ */
++ local_irq_disable();
++ cr0 = read_cr0();
++ write_cr0(cr0 | X86_CR0_CD);
++ wbinvd();
++
++ outvalue = (1<<12) | (1<<10) | (1<<9) | (index_to_register[best_i]<<5);
++
++ msrval = POWERNOW_IOPORT + 0x1;
++ wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
++ invalue = inl(POWERNOW_IOPORT + 0x8);
++ invalue = invalue & 0x1f;
++ outvalue = outvalue | invalue;
++ outl(outvalue, (POWERNOW_IOPORT + 0x8));
++ msrval = POWERNOW_IOPORT + 0x0;
++ wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
++
++ write_cr0(cr0);
++ local_irq_enable();
++}
+
+ /**
+ * powernow_k6_target - set the PowerNow! multiplier
+@@ -71,8 +138,6 @@ static int powernow_k6_get_cpu_multiplier(void)
+ static int powernow_k6_target(struct cpufreq_policy *policy,
+ unsigned int best_i)
+ {
+- unsigned long outvalue = 0, invalue = 0;
+- unsigned long msrval;
+ struct cpufreq_freqs freqs;
+
+ if (clock_ratio[best_i].driver_data > max_multiplier) {
+@@ -85,35 +150,63 @@ static int powernow_k6_target(struct cpufreq_policy *policy,
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+
+- /* we now need to transform best_i to the BVC format, see AMD#23446 */
+-
+- outvalue = (1<<12) | (1<<10) | (1<<9) | (best_i<<5);
+-
+- msrval = POWERNOW_IOPORT + 0x1;
+- wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
+- invalue = inl(POWERNOW_IOPORT + 0x8);
+- invalue = invalue & 0xf;
+- outvalue = outvalue | invalue;
+- outl(outvalue , (POWERNOW_IOPORT + 0x8));
+- msrval = POWERNOW_IOPORT + 0x0;
+- wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
++ powernow_k6_set_cpu_multiplier(best_i);
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+
+ return 0;
+ }
+
+-
+ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
+ {
+ unsigned int i, f;
++ unsigned khz;
+
+ if (policy->cpu != 0)
+ return -ENODEV;
+
+- /* get frequencies */
+- max_multiplier = powernow_k6_get_cpu_multiplier();
+- busfreq = cpu_khz / max_multiplier;
++ max_multiplier = 0;
++ khz = cpu_khz;
++ for (i = 0; i < ARRAY_SIZE(usual_frequency_table); i++) {
++ if (khz >= usual_frequency_table[i].freq - FREQ_RANGE &&
++ khz <= usual_frequency_table[i].freq + FREQ_RANGE) {
++ khz = usual_frequency_table[i].freq;
++ max_multiplier = usual_frequency_table[i].mult;
++ break;
++ }
++ }
++ if (param_max_multiplier) {
++ for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
++ if (clock_ratio[i].driver_data == param_max_multiplier) {
++ max_multiplier = param_max_multiplier;
++ goto have_max_multiplier;
++ }
++ }
++ printk(KERN_ERR "powernow-k6: invalid max_multiplier parameter, valid parameters 20, 30, 35, 40, 45, 50, 55, 60\n");
++ return -EINVAL;
++ }
++
++ if (!max_multiplier) {
++ printk(KERN_WARNING "powernow-k6: unknown frequency %u, cannot determine current multiplier\n", khz);
++ printk(KERN_WARNING "powernow-k6: use module parameters max_multiplier and bus_frequency\n");
++ return -EOPNOTSUPP;
++ }
++
++have_max_multiplier:
++ param_max_multiplier = max_multiplier;
++
++ if (param_busfreq) {
++ if (param_busfreq >= 50000 && param_busfreq <= 150000) {
++ busfreq = param_busfreq / 10;
++ goto have_busfreq;
++ }
++ printk(KERN_ERR "powernow-k6: invalid bus_frequency parameter, allowed range 50000 - 150000 kHz\n");
++ return -EINVAL;
++ }
++
++ busfreq = khz / max_multiplier;
++have_busfreq:
++ param_busfreq = busfreq * 10;
+
+ /* table init */
+ for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
+@@ -125,7 +218,7 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
+ }
+
+ /* cpuinfo and default policy values */
+- policy->cpuinfo.transition_latency = 200000;
++ policy->cpuinfo.transition_latency = 500000;
+
+ return cpufreq_table_validate_and_show(policy, clock_ratio);
+ }
+diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
+index 02125e6a9109..5a4da94aefb0 100644
+--- a/drivers/isdn/isdnloop/isdnloop.c
++++ b/drivers/isdn/isdnloop/isdnloop.c
+@@ -518,9 +518,9 @@ static isdnloop_stat isdnloop_cmd_table[] =
+ static void
+ isdnloop_fake_err(isdnloop_card *card)
+ {
+- char buf[60];
++ char buf[64];
+
+- sprintf(buf, "E%s", card->omsg);
++ snprintf(buf, sizeof(buf), "E%s", card->omsg);
+ isdnloop_fake(card, buf, -1);
+ isdnloop_fake(card, "NAK", -1);
+ }
+@@ -903,6 +903,8 @@ isdnloop_parse_cmd(isdnloop_card *card)
+ case 7:
+ /* 0x;EAZ */
+ p += 3;
++ if (strlen(p) >= sizeof(card->eazlist[0]))
++ break;
+ strcpy(card->eazlist[ch - 1], p);
+ break;
+ case 8:
+@@ -1070,6 +1072,12 @@ isdnloop_start(isdnloop_card *card, isdnloop_sdef *sdefp)
+ return -EBUSY;
+ if (copy_from_user((char *) &sdef, (char *) sdefp, sizeof(sdef)))
+ return -EFAULT;
++
++ for (i = 0; i < 3; i++) {
++ if (!memchr(sdef.num[i], 0, sizeof(sdef.num[i])))
++ return -EINVAL;
++ }
++
+ spin_lock_irqsave(&card->isdnloop_lock, flags);
+ switch (sdef.ptype) {
+ case ISDN_PTYPE_EURO:
+@@ -1127,7 +1135,7 @@ isdnloop_command(isdn_ctrl *c, isdnloop_card *card)
+ {
+ ulong a;
+ int i;
+- char cbuf[60];
++ char cbuf[80];
+ isdn_ctrl cmd;
+ isdnloop_cdef cdef;
+
+@@ -1192,7 +1200,6 @@ isdnloop_command(isdn_ctrl *c, isdnloop_card *card)
+ break;
+ if ((c->arg & 255) < ISDNLOOP_BCH) {
+ char *p;
+- char dial[50];
+ char dcode[4];
+
+ a = c->arg;
+@@ -1204,10 +1211,10 @@ isdnloop_command(isdn_ctrl *c, isdnloop_card *card)
+ } else
+ /* Normal Dial */
+ strcpy(dcode, "CAL");
+- strcpy(dial, p);
+- sprintf(cbuf, "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
+- dcode, dial, c->parm.setup.si1,
+- c->parm.setup.si2, c->parm.setup.eazmsn);
++ snprintf(cbuf, sizeof(cbuf),
++ "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
++ dcode, p, c->parm.setup.si1,
++ c->parm.setup.si2, c->parm.setup.eazmsn);
+ i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
+ }
+ break;
+diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
+index 02872405d35d..b215df1bf00e 100644
+--- a/drivers/net/bonding/bond_alb.c
++++ b/drivers/net/bonding/bond_alb.c
+@@ -731,7 +731,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
+ client_info->ntt = 0;
+ }
+
+- if (!vlan_get_tag(skb, &client_info->vlan_id))
++ if (vlan_get_tag(skb, &client_info->vlan_id))
+ client_info->vlan_id = 0;
+
+ if (!client_info->assigned) {
+diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
+index d9980ad00b4b..8409bc142ac4 100644
+--- a/drivers/net/ethernet/broadcom/bnx2.c
++++ b/drivers/net/ethernet/broadcom/bnx2.c
+@@ -2490,6 +2490,7 @@ bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
+
+ bp->fw_wr_seq++;
+ msg_data |= bp->fw_wr_seq;
++ bp->fw_last_msg = msg_data;
+
+ bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
+
+@@ -3982,8 +3983,23 @@ bnx2_setup_wol(struct bnx2 *bp)
+ wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
+ }
+
+- if (!(bp->flags & BNX2_FLAG_NO_WOL))
+- bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0);
++ if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
++ u32 val;
++
++ wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
++ if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
++ bnx2_fw_sync(bp, wol_msg, 1, 0);
++ return;
++ }
++ /* Tell firmware not to power down the PHY yet, otherwise
++ * the chip will take a long time to respond to MMIO reads.
++ */
++ val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
++ bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
++ val | BNX2_PORT_FEATURE_ASF_ENABLED);
++ bnx2_fw_sync(bp, wol_msg, 1, 0);
++ bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
++ }
+
+ }
+
+@@ -4015,9 +4031,22 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
+
+ if (bp->wol)
+ pci_set_power_state(bp->pdev, PCI_D3hot);
+- } else {
+- pci_set_power_state(bp->pdev, PCI_D3hot);
++ break;
++
++ }
++ if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
++ u32 val;
++
++ /* Tell firmware not to power down the PHY yet,
++ * otherwise the other port may not respond to
++ * MMIO reads.
++ */
++ val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
++ val &= ~BNX2_CONDITION_PM_STATE_MASK;
++ val |= BNX2_CONDITION_PM_STATE_UNPREP;
++ bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
+ }
++ pci_set_power_state(bp->pdev, PCI_D3hot);
+
+ /* No more memory access after this point until
+ * device is brought back to D0.
+diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
+index 18cb2d23e56b..0eb2a65c35b4 100644
+--- a/drivers/net/ethernet/broadcom/bnx2.h
++++ b/drivers/net/ethernet/broadcom/bnx2.h
+@@ -6890,6 +6890,7 @@ struct bnx2 {
+
+ u16 fw_wr_seq;
+ u16 fw_drv_pulse_wr_seq;
++ u32 fw_last_msg;
+
+ int rx_max_ring;
+ int rx_ring_size;
+@@ -7396,6 +7397,10 @@ struct bnx2_rv2p_fw_file {
+ #define BNX2_CONDITION_MFW_RUN_NCSI 0x00006000
+ #define BNX2_CONDITION_MFW_RUN_NONE 0x0000e000
+ #define BNX2_CONDITION_MFW_RUN_MASK 0x0000e000
++#define BNX2_CONDITION_PM_STATE_MASK 0x00030000
++#define BNX2_CONDITION_PM_STATE_FULL 0x00030000
++#define BNX2_CONDITION_PM_STATE_PREP 0x00020000
++#define BNX2_CONDITION_PM_STATE_UNPREP 0x00010000
+
+ #define BNX2_BC_STATE_DEBUG_CMD 0x1dc
+ #define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE 0x42440000
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 4abd85edbb39..af88893d2e0b 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -17485,8 +17485,6 @@ static int tg3_init_one(struct pci_dev *pdev,
+
+ tg3_init_bufmgr_config(tp);
+
+- features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+-
+ /* 5700 B0 chips do not support checksumming correctly due
+ * to hardware bugs.
+ */
+@@ -17518,7 +17516,8 @@ static int tg3_init_one(struct pci_dev *pdev,
+ features |= NETIF_F_TSO_ECN;
+ }
+
+- dev->features |= features;
++ dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
++ NETIF_F_HW_VLAN_CTAG_RX;
+ dev->vlan_features |= features;
+
+ /*
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 50bb71c663e2..bf17c8863da8 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -528,13 +528,6 @@ fec_restart(struct net_device *ndev, int duplex)
+ /* Clear any outstanding interrupt. */
+ writel(0xffc00000, fep->hwp + FEC_IEVENT);
+
+- /* Setup multicast filter. */
+- set_multicast_list(ndev);
+-#ifndef CONFIG_M5272
+- writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
+- writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
+-#endif
+-
+ /* Set maximum receive buffer size. */
+ writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
+
+@@ -655,6 +648,13 @@ fec_restart(struct net_device *ndev, int duplex)
+
+ writel(rcntl, fep->hwp + FEC_R_CNTRL);
+
++ /* Setup multicast filter. */
++ set_multicast_list(ndev);
++#ifndef CONFIG_M5272
++ writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
++ writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
++#endif
++
+ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+ /* enable ENET endian swap */
+ ecntl |= (1 << 8);
+diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
+index 727b546a9eb8..e0c92e0e5e1d 100644
+--- a/drivers/net/ethernet/micrel/ks8851.c
++++ b/drivers/net/ethernet/micrel/ks8851.c
+@@ -23,6 +23,7 @@
+ #include <linux/crc32.h>
+ #include <linux/mii.h>
+ #include <linux/eeprom_93cx6.h>
++#include <linux/regulator/consumer.h>
+
+ #include <linux/spi/spi.h>
+
+@@ -83,6 +84,7 @@ union ks8851_tx_hdr {
+ * @rc_rxqcr: Cached copy of KS_RXQCR.
+ * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
+ * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
++ * @vdd_reg: Optional regulator supplying the chip
+ *
+ * The @lock ensures that the chip is protected when certain operations are
+ * in progress. When the read or write packet transfer is in progress, most
+@@ -130,6 +132,7 @@ struct ks8851_net {
+ struct spi_transfer spi_xfer2[2];
+
+ struct eeprom_93cx6 eeprom;
++ struct regulator *vdd_reg;
+ };
+
+ static int msg_enable;
+@@ -1414,6 +1417,21 @@ static int ks8851_probe(struct spi_device *spi)
+ ks->spidev = spi;
+ ks->tx_space = 6144;
+
++ ks->vdd_reg = regulator_get_optional(&spi->dev, "vdd");
++ if (IS_ERR(ks->vdd_reg)) {
++ ret = PTR_ERR(ks->vdd_reg);
++ if (ret == -EPROBE_DEFER)
++ goto err_reg;
++ } else {
++ ret = regulator_enable(ks->vdd_reg);
++ if (ret) {
++ dev_err(&spi->dev, "regulator enable fail: %d\n",
++ ret);
++ goto err_reg_en;
++ }
++ }
++
++
+ mutex_init(&ks->lock);
+ spin_lock_init(&ks->statelock);
+
+@@ -1508,8 +1526,14 @@ static int ks8851_probe(struct spi_device *spi)
+ err_netdev:
+ free_irq(ndev->irq, ks);
+
+-err_id:
+ err_irq:
++err_id:
++ if (!IS_ERR(ks->vdd_reg))
++ regulator_disable(ks->vdd_reg);
++err_reg_en:
++ if (!IS_ERR(ks->vdd_reg))
++ regulator_put(ks->vdd_reg);
++err_reg:
+ free_netdev(ndev);
+ return ret;
+ }
+@@ -1523,6 +1547,10 @@ static int ks8851_remove(struct spi_device *spi)
+
+ unregister_netdev(priv->netdev);
+ free_irq(spi->irq, priv);
++ if (!IS_ERR(priv->vdd_reg)) {
++ regulator_disable(priv->vdd_reg);
++ regulator_put(priv->vdd_reg);
++ }
+ free_netdev(priv->netdev);
+
+ return 0;
+diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
+index 676c3c057bfb..0a54c5c566ae 100644
+--- a/drivers/net/ethernet/sfc/ef10.c
++++ b/drivers/net/ethernet/sfc/ef10.c
+@@ -555,10 +555,17 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
+ * several of each (in fact that's the only option if host
+ * page size is >4K). So we may allocate some extra VIs just
+ * for writing PIO buffers through.
++ *
++ * The UC mapping contains (min_vis - 1) complete VIs and the
++ * first half of the next VI. Then the WC mapping begins with
++ * the second half of this last VI.
+ */
+ uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE +
+ ER_DZ_TX_PIOBUF);
+ if (nic_data->n_piobufs) {
++ /* pio_write_vi_base rounds down to give the number of complete
++ * VIs inside the UC mapping.
++ */
+ pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
+ wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
+ nic_data->n_piobufs) *
+diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
+index fd844b53e385..a3a9f0fcf8e8 100644
+--- a/drivers/net/ethernet/sfc/efx.c
++++ b/drivers/net/ethernet/sfc/efx.c
+@@ -1514,6 +1514,8 @@ static int efx_probe_nic(struct efx_nic *efx)
+ if (rc)
+ goto fail1;
+
++ efx_set_channels(efx);
++
+ rc = efx->type->dimension_resources(efx);
+ if (rc)
+ goto fail2;
+@@ -1524,7 +1526,6 @@ static int efx_probe_nic(struct efx_nic *efx)
+ efx->rx_indir_table[i] =
+ ethtool_rxfh_indir_default(i, efx->rss_spread);
+
+- efx_set_channels(efx);
+ netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
+ netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
+
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index e15ec2b12035..b74f5ffb1121 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -69,7 +69,6 @@ static struct usb_driver cdc_ncm_driver;
+ static int cdc_ncm_setup(struct usbnet *dev)
+ {
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+- struct usb_cdc_ncm_ntb_parameters ncm_parm;
+ u32 val;
+ u8 flags;
+ u8 iface_no;
+@@ -83,22 +82,22 @@ static int cdc_ncm_setup(struct usbnet *dev)
+ err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS,
+ USB_TYPE_CLASS | USB_DIR_IN
+ |USB_RECIP_INTERFACE,
+- 0, iface_no, &ncm_parm,
+- sizeof(ncm_parm));
++ 0, iface_no, &ctx->ncm_parm,
++ sizeof(ctx->ncm_parm));
+ if (err < 0) {
+ dev_err(&dev->intf->dev, "failed GET_NTB_PARAMETERS\n");
+ return err; /* GET_NTB_PARAMETERS is required */
+ }
+
+ /* read correct set of parameters according to device mode */
+- ctx->rx_max = le32_to_cpu(ncm_parm.dwNtbInMaxSize);
+- ctx->tx_max = le32_to_cpu(ncm_parm.dwNtbOutMaxSize);
+- ctx->tx_remainder = le16_to_cpu(ncm_parm.wNdpOutPayloadRemainder);
+- ctx->tx_modulus = le16_to_cpu(ncm_parm.wNdpOutDivisor);
+- ctx->tx_ndp_modulus = le16_to_cpu(ncm_parm.wNdpOutAlignment);
++ ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize);
++ ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize);
++ ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
++ ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor);
++ ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
+ /* devices prior to NCM Errata shall set this field to zero */
+- ctx->tx_max_datagrams = le16_to_cpu(ncm_parm.wNtbOutMaxDatagrams);
+- ntb_fmt_supported = le16_to_cpu(ncm_parm.bmNtbFormatsSupported);
++ ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
++ ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
+
+ /* there are some minor differences in NCM and MBIM defaults */
+ if (cdc_ncm_comm_intf_is_mbim(ctx->control->cur_altsetting)) {
+@@ -147,7 +146,7 @@ static int cdc_ncm_setup(struct usbnet *dev)
+ }
+
+ /* inform device about NTB input size changes */
+- if (ctx->rx_max != le32_to_cpu(ncm_parm.dwNtbInMaxSize)) {
++ if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
+ __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
+
+ err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE,
+@@ -163,14 +162,6 @@ static int cdc_ncm_setup(struct usbnet *dev)
+ dev_dbg(&dev->intf->dev, "Using default maximum transmit length=%d\n",
+ CDC_NCM_NTB_MAX_SIZE_TX);
+ ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX;
+-
+- /* Adding a pad byte here simplifies the handling in
+- * cdc_ncm_fill_tx_frame, by making tx_max always
+- * represent the real skb max size.
+- */
+- if (ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0)
+- ctx->tx_max++;
+-
+ }
+
+ /*
+@@ -440,6 +431,10 @@ advance:
+ goto error2;
+ }
+
++ /* initialize data interface */
++ if (cdc_ncm_setup(dev))
++ goto error2;
++
+ /* configure data interface */
+ temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
+ if (temp) {
+@@ -454,12 +449,6 @@ advance:
+ goto error2;
+ }
+
+- /* initialize data interface */
+- if (cdc_ncm_setup(dev)) {
+- dev_dbg(&intf->dev, "cdc_ncm_setup() failed\n");
+- goto error2;
+- }
+-
+ usb_set_intfdata(ctx->data, dev);
+ usb_set_intfdata(ctx->control, dev);
+
+@@ -476,6 +465,15 @@ advance:
+ dev->hard_mtu = ctx->tx_max;
+ dev->rx_urb_size = ctx->rx_max;
+
++ /* cdc_ncm_setup will override dwNtbOutMaxSize if it is
++ * outside the sane range. Adding a pad byte here if necessary
++ * simplifies the handling in cdc_ncm_fill_tx_frame, making
++ * tx_max always represent the real skb max size.
++ */
++ if (ctx->tx_max != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) &&
++ ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0)
++ ctx->tx_max++;
++
+ return 0;
+
+ error2:
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index a91fa49b81c3..1d4da74595f9 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -753,14 +753,12 @@ EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
+ // precondition: never called in_interrupt
+ static void usbnet_terminate_urbs(struct usbnet *dev)
+ {
+- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
+ DECLARE_WAITQUEUE(wait, current);
+ int temp;
+
+ /* ensure there are no more active urbs */
+- add_wait_queue(&unlink_wakeup, &wait);
++ add_wait_queue(&dev->wait, &wait);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+- dev->wait = &unlink_wakeup;
+ temp = unlink_urbs(dev, &dev->txq) +
+ unlink_urbs(dev, &dev->rxq);
+
+@@ -774,15 +772,14 @@ static void usbnet_terminate_urbs(struct usbnet *dev)
+ "waited for %d urb completions\n", temp);
+ }
+ set_current_state(TASK_RUNNING);
+- dev->wait = NULL;
+- remove_wait_queue(&unlink_wakeup, &wait);
++ remove_wait_queue(&dev->wait, &wait);
+ }
+
+ int usbnet_stop (struct net_device *net)
+ {
+ struct usbnet *dev = netdev_priv(net);
+ struct driver_info *info = dev->driver_info;
+- int retval;
++ int retval, pm;
+
+ clear_bit(EVENT_DEV_OPEN, &dev->flags);
+ netif_stop_queue (net);
+@@ -792,6 +789,8 @@ int usbnet_stop (struct net_device *net)
+ net->stats.rx_packets, net->stats.tx_packets,
+ net->stats.rx_errors, net->stats.tx_errors);
+
++ /* to not race resume */
++ pm = usb_autopm_get_interface(dev->intf);
+ /* allow minidriver to stop correctly (wireless devices to turn off
+ * radio etc) */
+ if (info->stop) {
+@@ -818,6 +817,9 @@ int usbnet_stop (struct net_device *net)
+ dev->flags = 0;
+ del_timer_sync (&dev->delay);
+ tasklet_kill (&dev->bh);
++ if (!pm)
++ usb_autopm_put_interface(dev->intf);
++
+ if (info->manage_power &&
+ !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
+ info->manage_power(dev, 0);
+@@ -1438,11 +1440,12 @@ static void usbnet_bh (unsigned long param)
+ /* restart RX again after disabling due to high error rate */
+ clear_bit(EVENT_RX_KILL, &dev->flags);
+
+- // waiting for all pending urbs to complete?
+- if (dev->wait) {
+- if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
+- wake_up (dev->wait);
+- }
++ /* waiting for all pending urbs to complete?
++ * only then can we forgo submitting anew
++ */
++ if (waitqueue_active(&dev->wait)) {
++ if (dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0)
++ wake_up_all(&dev->wait);
+
+ // or are we maybe short a few urbs?
+ } else if (netif_running (dev->net) &&
+@@ -1581,6 +1584,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
+ dev->driver_name = name;
+ dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
+ | NETIF_MSG_PROBE | NETIF_MSG_LINK);
++ init_waitqueue_head(&dev->wait);
+ skb_queue_head_init (&dev->rxq);
+ skb_queue_head_init (&dev->txq);
+ skb_queue_head_init (&dev->done);
+@@ -1792,9 +1796,10 @@ int usbnet_resume (struct usb_interface *intf)
+ spin_unlock_irq(&dev->txq.lock);
+
+ if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
+- /* handle remote wakeup ASAP */
+- if (!dev->wait &&
+- netif_device_present(dev->net) &&
++ /* handle remote wakeup ASAP
++ * we cannot race against stop
++ */
++ if (netif_device_present(dev->net) &&
+ !timer_pending(&dev->delay) &&
+ !test_bit(EVENT_RX_HALT, &dev->flags))
+ rx_alloc_submit(dev, GFP_NOIO);
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 8eb6b9d75cef..19d96c889aac 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -633,8 +633,7 @@ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
+ } while (rq->vq->num_free);
+ if (unlikely(rq->num > rq->max))
+ rq->max = rq->num;
+- if (unlikely(!virtqueue_kick(rq->vq)))
+- return false;
++ virtqueue_kick(rq->vq);
+ return !oom;
+ }
+
+@@ -840,7 +839,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
+ err = xmit_skb(sq, skb);
+
+ /* This should not happen! */
+- if (unlikely(err) || unlikely(!virtqueue_kick(sq->vq))) {
++ if (unlikely(err)) {
+ dev->stats.tx_fifo_errors++;
+ if (net_ratelimit())
+ dev_warn(&dev->dev,
+@@ -849,6 +848,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
++ virtqueue_kick(sq->vq);
+
+ /* Don't wait up for transmitted skbs to be freed. */
+ skb_orphan(skb);
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 02479739eaa0..fc5d2b7e687f 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -777,6 +777,9 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ if (err)
+ return err;
+
++ if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
++ return -EAFNOSUPPORT;
++
+ spin_lock_bh(&vxlan->hash_lock);
+ err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
+ port, vni, ifindex, ndm->ndm_flags);
+@@ -1208,6 +1211,9 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
+
+ neigh_release(n);
+
++ if (reply == NULL)
++ goto out;
++
+ skb_reset_mac_header(reply);
+ __skb_pull(reply, skb_network_offset(reply));
+ reply->ip_summed = CHECKSUM_UNNECESSARY;
+@@ -1229,15 +1235,103 @@ out:
+ }
+
+ #if IS_ENABLED(CONFIG_IPV6)
++
++static struct sk_buff *vxlan_na_create(struct sk_buff *request,
++ struct neighbour *n, bool isrouter)
++{
++ struct net_device *dev = request->dev;
++ struct sk_buff *reply;
++ struct nd_msg *ns, *na;
++ struct ipv6hdr *pip6;
++ u8 *daddr;
++ int na_olen = 8; /* opt hdr + ETH_ALEN for target */
++ int ns_olen;
++ int i, len;
++
++ if (dev == NULL)
++ return NULL;
++
++ len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
++ sizeof(*na) + na_olen + dev->needed_tailroom;
++ reply = alloc_skb(len, GFP_ATOMIC);
++ if (reply == NULL)
++ return NULL;
++
++ reply->protocol = htons(ETH_P_IPV6);
++ reply->dev = dev;
++ skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
++ skb_push(reply, sizeof(struct ethhdr));
++ skb_set_mac_header(reply, 0);
++
++ ns = (struct nd_msg *)skb_transport_header(request);
++
++ daddr = eth_hdr(request)->h_source;
++ ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns);
++ for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
++ if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
++ daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
++ break;
++ }
++ }
++
++ /* Ethernet header */
++ memcpy(eth_hdr(reply)->h_dest, daddr, ETH_ALEN);
++ memcpy(eth_hdr(reply)->h_source, n->ha, ETH_ALEN);
++ eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
++ reply->protocol = htons(ETH_P_IPV6);
++
++ skb_pull(reply, sizeof(struct ethhdr));
++ skb_set_network_header(reply, 0);
++ skb_put(reply, sizeof(struct ipv6hdr));
++
++ /* IPv6 header */
++
++ pip6 = ipv6_hdr(reply);
++ memset(pip6, 0, sizeof(struct ipv6hdr));
++ pip6->version = 6;
++ pip6->priority = ipv6_hdr(request)->priority;
++ pip6->nexthdr = IPPROTO_ICMPV6;
++ pip6->hop_limit = 255;
++ pip6->daddr = ipv6_hdr(request)->saddr;
++ pip6->saddr = *(struct in6_addr *)n->primary_key;
++
++ skb_pull(reply, sizeof(struct ipv6hdr));
++ skb_set_transport_header(reply, 0);
++
++ na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);
++
++ /* Neighbor Advertisement */
++ memset(na, 0, sizeof(*na)+na_olen);
++ na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
++ na->icmph.icmp6_router = isrouter;
++ na->icmph.icmp6_override = 1;
++ na->icmph.icmp6_solicited = 1;
++ na->target = ns->target;
++ memcpy(&na->opt[2], n->ha, ETH_ALEN);
++ na->opt[0] = ND_OPT_TARGET_LL_ADDR;
++ na->opt[1] = na_olen >> 3;
++
++ na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
++ &pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
++ csum_partial(na, sizeof(*na)+na_olen, 0));
++
++ pip6->payload_len = htons(sizeof(*na)+na_olen);
++
++ skb_push(reply, sizeof(struct ipv6hdr));
++
++ reply->ip_summed = CHECKSUM_UNNECESSARY;
++
++ return reply;
++}
++
+ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
+ {
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+- struct neighbour *n;
+- union vxlan_addr ipa;
++ struct nd_msg *msg;
+ const struct ipv6hdr *iphdr;
+ const struct in6_addr *saddr, *daddr;
+- struct nd_msg *msg;
+- struct inet6_dev *in6_dev = NULL;
++ struct neighbour *n;
++ struct inet6_dev *in6_dev;
+
+ in6_dev = __in6_dev_get(dev);
+ if (!in6_dev)
+@@ -1250,19 +1344,20 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
+ saddr = &iphdr->saddr;
+ daddr = &iphdr->daddr;
+
+- if (ipv6_addr_loopback(daddr) ||
+- ipv6_addr_is_multicast(daddr))
+- goto out;
+-
+ msg = (struct nd_msg *)skb_transport_header(skb);
+ if (msg->icmph.icmp6_code != 0 ||
+ msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
+ goto out;
+
+- n = neigh_lookup(ipv6_stub->nd_tbl, daddr, dev);
++ if (ipv6_addr_loopback(daddr) ||
++ ipv6_addr_is_multicast(&msg->target))
++ goto out;
++
++ n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);
+
+ if (n) {
+ struct vxlan_fdb *f;
++ struct sk_buff *reply;
+
+ if (!(n->nud_state & NUD_CONNECTED)) {
+ neigh_release(n);
+@@ -1276,13 +1371,23 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
+ goto out;
+ }
+
+- ipv6_stub->ndisc_send_na(dev, n, saddr, &msg->target,
+- !!in6_dev->cnf.forwarding,
+- true, false, false);
++ reply = vxlan_na_create(skb, n,
++ !!(f ? f->flags & NTF_ROUTER : 0));
++
+ neigh_release(n);
++
++ if (reply == NULL)
++ goto out;
++
++ if (netif_rx_ni(reply) == NET_RX_DROP)
++ dev->stats.rx_dropped++;
++
+ } else if (vxlan->flags & VXLAN_F_L3MISS) {
+- ipa.sin6.sin6_addr = *daddr;
+- ipa.sa.sa_family = AF_INET6;
++ union vxlan_addr ipa = {
++ .sin6.sin6_addr = msg->target,
++ .sa.sa_family = AF_INET6,
++ };
++
+ vxlan_ip_miss(dev, &ipa);
+ }
+
+@@ -2384,9 +2489,10 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
+ vni = nla_get_u32(data[IFLA_VXLAN_ID]);
+ dst->remote_vni = vni;
+
++ /* Unless IPv6 is explicitly requested, assume IPv4 */
++ dst->remote_ip.sa.sa_family = AF_INET;
+ if (data[IFLA_VXLAN_GROUP]) {
+ dst->remote_ip.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
+- dst->remote_ip.sa.sa_family = AF_INET;
+ } else if (data[IFLA_VXLAN_GROUP6]) {
+ if (!IS_ENABLED(CONFIG_IPV6))
+ return -EPFNOSUPPORT;
+diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
+index c47794b9d42f..6668d03039b6 100644
+--- a/drivers/net/xen-netback/common.h
++++ b/drivers/net/xen-netback/common.h
+@@ -113,6 +113,11 @@ struct xenvif {
+ domid_t domid;
+ unsigned int handle;
+
++ /* Is this interface disabled? True when backend discovers
++ * frontend is rogue.
++ */
++ bool disabled;
++
+ /* Use NAPI for guest TX */
+ struct napi_struct napi;
+ /* When feature-split-event-channels = 0, tx_irq = rx_irq. */
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index fff8cddfed81..a2ef2e6f3266 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -67,6 +67,15 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
+ struct xenvif *vif = container_of(napi, struct xenvif, napi);
+ int work_done;
+
++ /* This vif is rogue, we pretend we've there is nothing to do
++ * for this vif to deschedule it from NAPI. But this interface
++ * will be turned off in thread context later.
++ */
++ if (unlikely(vif->disabled)) {
++ napi_complete(napi);
++ return 0;
++ }
++
+ work_done = xenvif_tx_action(vif, budget);
+
+ if (work_done < budget) {
+@@ -323,6 +332,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
+ vif->ip_csum = 1;
+ vif->dev = dev;
+
++ vif->disabled = false;
++
+ vif->credit_bytes = vif->remaining_credit = ~0UL;
+ vif->credit_usec = 0UL;
+ init_timer(&vif->credit_timeout);
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index 78425554a537..b898c6bffd9e 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -203,8 +203,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
+ * into multiple copies tend to give large frags their
+ * own buffers as before.
+ */
+- if ((offset + size > MAX_BUFFER_OFFSET) &&
+- (size <= MAX_BUFFER_OFFSET) && offset && !head)
++ BUG_ON(size > MAX_BUFFER_OFFSET);
++ if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
+ return true;
+
+ return false;
+@@ -338,7 +338,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
+ struct gnttab_copy *copy_gop;
+ struct xenvif_rx_meta *meta;
+ unsigned long bytes;
+- int gso_type;
++ int gso_type = XEN_NETIF_GSO_TYPE_NONE;
+
+ /* Data must not cross a page boundary. */
+ BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
+@@ -397,12 +397,12 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
+ }
+
+ /* Leave a gap for the GSO descriptor. */
+- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+- gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+- else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+- gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+- else
+- gso_type = XEN_NETIF_GSO_TYPE_NONE;
++ if (skb_is_gso(skb)) {
++ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
++ gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
++ else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
++ gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
++ }
+
+ if (*head && ((1 << gso_type) & vif->gso_mask))
+ vif->rx.req_cons++;
+@@ -436,19 +436,15 @@ static int xenvif_gop_skb(struct sk_buff *skb,
+ int head = 1;
+ int old_meta_prod;
+ int gso_type;
+- int gso_size;
+
+ old_meta_prod = npo->meta_prod;
+
+- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
+- gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+- gso_size = skb_shinfo(skb)->gso_size;
+- } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
+- gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+- gso_size = skb_shinfo(skb)->gso_size;
+- } else {
+- gso_type = XEN_NETIF_GSO_TYPE_NONE;
+- gso_size = 0;
++ gso_type = XEN_NETIF_GSO_TYPE_NONE;
++ if (skb_is_gso(skb)) {
++ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
++ gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
++ else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
++ gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+ }
+
+ /* Set up a GSO prefix descriptor, if necessary */
+@@ -456,7 +452,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
+ req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
+ meta = npo->meta + npo->meta_prod++;
+ meta->gso_type = gso_type;
+- meta->gso_size = gso_size;
++ meta->gso_size = skb_shinfo(skb)->gso_size;
+ meta->size = 0;
+ meta->id = req->id;
+ }
+@@ -466,7 +462,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
+
+ if ((1 << gso_type) & vif->gso_mask) {
+ meta->gso_type = gso_type;
+- meta->gso_size = gso_size;
++ meta->gso_size = skb_shinfo(skb)->gso_size;
+ } else {
+ meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
+ meta->gso_size = 0;
+@@ -756,7 +752,8 @@ static void xenvif_tx_err(struct xenvif *vif,
+ static void xenvif_fatal_tx_err(struct xenvif *vif)
+ {
+ netdev_err(vif->dev, "fatal error; disabling device\n");
+- xenvif_carrier_off(vif);
++ vif->disabled = true;
++ xenvif_kick_thread(vif);
+ }
+
+ static int xenvif_count_requests(struct xenvif *vif,
+@@ -1483,7 +1480,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
+ vif->tx.sring->req_prod, vif->tx.req_cons,
+ XEN_NETIF_TX_RING_SIZE);
+ xenvif_fatal_tx_err(vif);
+- continue;
++ break;
+ }
+
+ work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx);
+@@ -1877,7 +1874,18 @@ int xenvif_kthread(void *data)
+ while (!kthread_should_stop()) {
+ wait_event_interruptible(vif->wq,
+ rx_work_todo(vif) ||
++ vif->disabled ||
+ kthread_should_stop());
++
++ /* This frontend is found to be rogue, disable it in
++ * kthread context. Currently this is only set when
++ * netback finds out frontend sends malformed packet,
++ * but we cannot disable the interface in softirq
++ * context so we defer it here.
++ */
++ if (unlikely(vif->disabled && netif_carrier_ok(vif->dev)))
++ xenvif_carrier_off(vif);
++
+ if (kthread_should_stop())
+ break;
+
+diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
+index b12176f2013c..5264d839474a 100644
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -501,9 +501,13 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
+ r = -ENOBUFS;
+ goto err;
+ }
+- d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
++ r = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
+ ARRAY_SIZE(vq->iov) - seg, &out,
+ &in, log, log_num);
++ if (unlikely(r < 0))
++ goto err;
++
++ d = r;
+ if (d == vq->num) {
+ r = 0;
+ goto err;
+@@ -528,6 +532,12 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
+ *iovcount = seg;
+ if (unlikely(log))
+ *log_num = nlogs;
++
++ /* Detect overrun */
++ if (unlikely(datalen > 0)) {
++ r = UIO_MAXIOV + 1;
++ goto err;
++ }
+ return headcount;
+ err:
+ vhost_discard_vq_desc(vq, headcount);
+@@ -583,6 +593,14 @@ static void handle_rx(struct vhost_net *net)
+ /* On error, stop handling until the next kick. */
+ if (unlikely(headcount < 0))
+ break;
++ /* On overrun, truncate and discard */
++ if (unlikely(headcount > UIO_MAXIOV)) {
++ msg.msg_iovlen = 1;
++ err = sock->ops->recvmsg(NULL, sock, &msg,
++ 1, MSG_DONTWAIT | MSG_TRUNC);
++ pr_debug("Discarded rx packet: len %zd\n", sock_len);
++ continue;
++ }
+ /* OK, now we need to know about added descriptors. */
+ if (!headcount) {
+ if (unlikely(vhost_enable_notify(&net->dev, vq))) {
+diff --git a/include/linux/futex.h b/include/linux/futex.h
+index b0d95cac826e..6435f46d6e13 100644
+--- a/include/linux/futex.h
++++ b/include/linux/futex.h
+@@ -55,7 +55,11 @@ union futex_key {
+ #ifdef CONFIG_FUTEX
+ extern void exit_robust_list(struct task_struct *curr);
+ extern void exit_pi_state_list(struct task_struct *curr);
++#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
++#define futex_cmpxchg_enabled 1
++#else
+ extern int futex_cmpxchg_enabled;
++#endif
+ #else
+ static inline void exit_robust_list(struct task_struct *curr)
+ {
+diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
+index c3fa80745996..2c14d9cdd57a 100644
+--- a/include/linux/usb/cdc_ncm.h
++++ b/include/linux/usb/cdc_ncm.h
+@@ -88,6 +88,7 @@
+ #define cdc_ncm_data_intf_is_mbim(x) ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB)
+
+ struct cdc_ncm_ctx {
++ struct usb_cdc_ncm_ntb_parameters ncm_parm;
+ struct hrtimer tx_timer;
+ struct tasklet_struct bh;
+
+diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
+index e303eef94dd5..0662e98fef72 100644
+--- a/include/linux/usb/usbnet.h
++++ b/include/linux/usb/usbnet.h
+@@ -30,7 +30,7 @@ struct usbnet {
+ struct driver_info *driver_info;
+ const char *driver_name;
+ void *driver_priv;
+- wait_queue_head_t *wait;
++ wait_queue_head_t wait;
+ struct mutex phy_mutex;
+ unsigned char suspend_count;
+ unsigned char pkt_cnt, pkt_err;
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 2ef3c3eca47a..a2b3d4e0cd64 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1474,6 +1474,11 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
+ */
+ #define sock_owned_by_user(sk) ((sk)->sk_lock.owned)
+
++static inline void sock_release_ownership(struct sock *sk)
++{
++ sk->sk_lock.owned = 0;
++}
++
+ /*
+ * Macro so as to not evaluate some arguments when
+ * lockdep is not enabled.
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 9250d6292059..197b02045721 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -480,20 +480,21 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
+ #ifdef CONFIG_SYN_COOKIES
+ #include <linux/ktime.h>
+
+-/* Syncookies use a monotonic timer which increments every 64 seconds.
++/* Syncookies use a monotonic timer which increments every 60 seconds.
+ * This counter is used both as a hash input and partially encoded into
+ * the cookie value. A cookie is only validated further if the delta
+ * between the current counter value and the encoded one is less than this,
+- * i.e. a sent cookie is valid only at most for 128 seconds (or less if
++ * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
+ * the counter advances immediately after a cookie is generated).
+ */
+ #define MAX_SYNCOOKIE_AGE 2
+
+ static inline u32 tcp_cookie_time(void)
+ {
+- struct timespec now;
+- getnstimeofday(&now);
+- return now.tv_sec >> 6; /* 64 seconds granularity */
++ u64 val = get_jiffies_64();
++
++ do_div(val, 60 * HZ);
++ return val;
+ }
+
+ u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
+diff --git a/init/Kconfig b/init/Kconfig
+index 4e5d96ab2034..66e6759e4436 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1399,6 +1399,13 @@ config FUTEX
+ support for "fast userspace mutexes". The resulting kernel may not
+ run glibc-based applications correctly.
+
++config HAVE_FUTEX_CMPXCHG
++ bool
++ help
++ Architectures should select this if futex_atomic_cmpxchg_inatomic()
++ is implemented and always working. This removes a couple of runtime
++ checks.
++
+ config EPOLL
+ bool "Enable eventpoll support" if EXPERT
+ default y
+diff --git a/kernel/futex.c b/kernel/futex.c
+index f6ff0191ecf7..0d74e3aecb0b 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -68,7 +68,9 @@
+
+ #include "locking/rtmutex_common.h"
+
++#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
+ int __read_mostly futex_cmpxchg_enabled;
++#endif
+
+ #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
+
+@@ -2731,10 +2733,10 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
+ return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
+ }
+
+-static int __init futex_init(void)
++static void __init futex_detect_cmpxchg(void)
+ {
++#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
+ u32 curval;
+- int i;
+
+ /*
+ * This will fail and we want it. Some arch implementations do
+@@ -2748,6 +2750,14 @@ static int __init futex_init(void)
+ */
+ if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
+ futex_cmpxchg_enabled = 1;
++#endif
++}
++
++static int __init futex_init(void)
++{
++ int i;
++
++ futex_detect_cmpxchg();
+
+ for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
+ plist_head_init(&futex_queues[i].chain);
+diff --git a/lib/nlattr.c b/lib/nlattr.c
+index 18eca7809b08..fc6754720ced 100644
+--- a/lib/nlattr.c
++++ b/lib/nlattr.c
+@@ -303,9 +303,15 @@ int nla_memcmp(const struct nlattr *nla, const void *data,
+ */
+ int nla_strcmp(const struct nlattr *nla, const char *str)
+ {
+- int len = strlen(str) + 1;
+- int d = nla_len(nla) - len;
++ int len = strlen(str);
++ char *buf = nla_data(nla);
++ int attrlen = nla_len(nla);
++ int d;
+
++ if (attrlen > 0 && buf[attrlen - 1] == '\0')
++ attrlen--;
++
++ d = attrlen - len;
+ if (d == 0)
+ d = memcmp(nla_data(nla), str, len);
+
+diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
+index b3d17d1c49c3..9a87f5aecd1f 100644
+--- a/net/8021q/vlan.c
++++ b/net/8021q/vlan.c
+@@ -307,9 +307,11 @@ static void vlan_sync_address(struct net_device *dev,
+ static void vlan_transfer_features(struct net_device *dev,
+ struct net_device *vlandev)
+ {
++ struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
++
+ vlandev->gso_max_size = dev->gso_max_size;
+
+- if (dev->features & NETIF_F_HW_VLAN_CTAG_TX)
++ if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto))
+ vlandev->hard_header_len = dev->hard_header_len;
+ else
+ vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;
+diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
+index 47c908f1f626..b1c42410fe30 100644
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -538,6 +538,9 @@ static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ struct net_device *real_dev = vlan->real_dev;
+
++ if (saddr == NULL)
++ saddr = dev->dev_addr;
++
+ return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
+ }
+
+@@ -589,7 +592,8 @@ static int vlan_dev_init(struct net_device *dev)
+ #endif
+
+ dev->needed_headroom = real_dev->needed_headroom;
+- if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
++ if (vlan_hw_offload_capable(real_dev->features,
++ vlan_dev_priv(dev)->vlan_proto)) {
+ dev->header_ops = &vlan_passthru_header_ops;
+ dev->hard_header_len = real_dev->hard_header_len;
+ } else {
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index ef66365b7354..93067ecdb9a2 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -1127,9 +1127,10 @@ static void br_multicast_query_received(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct bridge_mcast_querier *querier,
+ int saddr,
++ bool is_general_query,
+ unsigned long max_delay)
+ {
+- if (saddr)
++ if (saddr && is_general_query)
+ br_multicast_update_querier_timer(br, querier, max_delay);
+ else if (timer_pending(&querier->timer))
+ return;
+@@ -1181,8 +1182,16 @@ static int br_ip4_multicast_query(struct net_bridge *br,
+ IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
+ }
+
++ /* RFC2236+RFC3376 (IGMPv2+IGMPv3) require the multicast link layer
++ * all-systems destination addresses (224.0.0.1) for general queries
++ */
++ if (!group && iph->daddr != htonl(INADDR_ALLHOSTS_GROUP)) {
++ err = -EINVAL;
++ goto out;
++ }
++
+ br_multicast_query_received(br, port, &br->ip4_querier, !!iph->saddr,
+- max_delay);
++ !group, max_delay);
+
+ if (!group)
+ goto out;
+@@ -1228,6 +1237,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
+ unsigned long max_delay;
+ unsigned long now = jiffies;
+ const struct in6_addr *group = NULL;
++ bool is_general_query;
+ int err = 0;
+
+ spin_lock(&br->multicast_lock);
+@@ -1235,6 +1245,12 @@ static int br_ip6_multicast_query(struct net_bridge *br,
+ (port && port->state == BR_STATE_DISABLED))
+ goto out;
+
++ /* RFC2710+RFC3810 (MLDv1+MLDv2) require link-local source addresses */
++ if (!(ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_LINKLOCAL)) {
++ err = -EINVAL;
++ goto out;
++ }
++
+ if (skb->len == sizeof(*mld)) {
+ if (!pskb_may_pull(skb, sizeof(*mld))) {
+ err = -EINVAL;
+@@ -1256,8 +1272,19 @@ static int br_ip6_multicast_query(struct net_bridge *br,
+ max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
+ }
+
++ is_general_query = group && ipv6_addr_any(group);
++
++ /* RFC2710+RFC3810 (MLDv1+MLDv2) require the multicast link layer
++ * all-nodes destination address (ff02::1) for general queries
++ */
++ if (is_general_query && !ipv6_addr_is_ll_all_nodes(&ip6h->daddr)) {
++ err = -EINVAL;
++ goto out;
++ }
++
+ br_multicast_query_received(br, port, &br->ip6_querier,
+- !ipv6_addr_any(&ip6h->saddr), max_delay);
++ !ipv6_addr_any(&ip6h->saddr),
++ is_general_query, max_delay);
+
+ if (!group)
+ goto out;
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index 81975f27e24a..9a46671bb5a2 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -742,7 +742,7 @@ static bool pkt_is_ns(struct sk_buff *skb)
+ struct nd_msg *msg;
+ struct ipv6hdr *hdr;
+
+- if (skb->protocol != htons(ETH_P_ARP))
++ if (skb->protocol != htons(ETH_P_IPV6))
+ return false;
+ if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
+ return false;
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index cf67144d3e3c..cc706c981dee 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -2014,12 +2014,13 @@ EXPORT_SYMBOL(rtmsg_ifinfo);
+ static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
+ struct net_device *dev,
+ u8 *addr, u32 pid, u32 seq,
+- int type, unsigned int flags)
++ int type, unsigned int flags,
++ int nlflags)
+ {
+ struct nlmsghdr *nlh;
+ struct ndmsg *ndm;
+
+- nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), NLM_F_MULTI);
++ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
+ if (!nlh)
+ return -EMSGSIZE;
+
+@@ -2057,7 +2058,7 @@ static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, int type)
+ if (!skb)
+ goto errout;
+
+- err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF);
++ err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF, 0);
+ if (err < 0) {
+ kfree_skb(skb);
+ goto errout;
+@@ -2282,7 +2283,8 @@ static int nlmsg_populate_fdb(struct sk_buff *skb,
+
+ err = nlmsg_populate_fdb_fill(skb, dev, ha->addr,
+ portid, seq,
+- RTM_NEWNEIGH, NTF_SELF);
++ RTM_NEWNEIGH, NTF_SELF,
++ NLM_F_MULTI);
+ if (err < 0)
+ return err;
+ skip:
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index deffb3776f93..9a9898e76c92 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -2756,81 +2756,84 @@ EXPORT_SYMBOL_GPL(skb_pull_rcsum);
+
+ /**
+ * skb_segment - Perform protocol segmentation on skb.
+- * @skb: buffer to segment
++ * @head_skb: buffer to segment
+ * @features: features for the output path (see dev->features)
+ *
+ * This function performs segmentation on the given skb. It returns
+ * a pointer to the first in a list of new skbs for the segments.
+ * In case of error it returns ERR_PTR(err).
+ */
+-struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
++struct sk_buff *skb_segment(struct sk_buff *head_skb,
++ netdev_features_t features)
+ {
+ struct sk_buff *segs = NULL;
+ struct sk_buff *tail = NULL;
+- struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
+- skb_frag_t *skb_frag = skb_shinfo(skb)->frags;
+- unsigned int mss = skb_shinfo(skb)->gso_size;
+- unsigned int doffset = skb->data - skb_mac_header(skb);
++ struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
++ skb_frag_t *frag = skb_shinfo(head_skb)->frags;
++ unsigned int mss = skb_shinfo(head_skb)->gso_size;
++ unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
++ struct sk_buff *frag_skb = head_skb;
+ unsigned int offset = doffset;
+- unsigned int tnl_hlen = skb_tnl_header_len(skb);
++ unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
+ unsigned int headroom;
+ unsigned int len;
+ __be16 proto;
+ bool csum;
+ int sg = !!(features & NETIF_F_SG);
+- int nfrags = skb_shinfo(skb)->nr_frags;
++ int nfrags = skb_shinfo(head_skb)->nr_frags;
+ int err = -ENOMEM;
+ int i = 0;
+ int pos;
+
+- proto = skb_network_protocol(skb);
++ proto = skb_network_protocol(head_skb);
+ if (unlikely(!proto))
+ return ERR_PTR(-EINVAL);
+
+ csum = !!can_checksum_protocol(features, proto);
+- __skb_push(skb, doffset);
+- headroom = skb_headroom(skb);
+- pos = skb_headlen(skb);
++ __skb_push(head_skb, doffset);
++ headroom = skb_headroom(head_skb);
++ pos = skb_headlen(head_skb);
+
+ do {
+ struct sk_buff *nskb;
+- skb_frag_t *frag;
++ skb_frag_t *nskb_frag;
+ int hsize;
+ int size;
+
+- len = skb->len - offset;
++ len = head_skb->len - offset;
+ if (len > mss)
+ len = mss;
+
+- hsize = skb_headlen(skb) - offset;
++ hsize = skb_headlen(head_skb) - offset;
+ if (hsize < 0)
+ hsize = 0;
+ if (hsize > len || !sg)
+ hsize = len;
+
+- if (!hsize && i >= nfrags && skb_headlen(fskb) &&
+- (skb_headlen(fskb) == len || sg)) {
+- BUG_ON(skb_headlen(fskb) > len);
++ if (!hsize && i >= nfrags && skb_headlen(list_skb) &&
++ (skb_headlen(list_skb) == len || sg)) {
++ BUG_ON(skb_headlen(list_skb) > len);
+
+ i = 0;
+- nfrags = skb_shinfo(fskb)->nr_frags;
+- skb_frag = skb_shinfo(fskb)->frags;
+- pos += skb_headlen(fskb);
++ nfrags = skb_shinfo(list_skb)->nr_frags;
++ frag = skb_shinfo(list_skb)->frags;
++ frag_skb = list_skb;
++ pos += skb_headlen(list_skb);
+
+ while (pos < offset + len) {
+ BUG_ON(i >= nfrags);
+
+- size = skb_frag_size(skb_frag);
++ size = skb_frag_size(frag);
+ if (pos + size > offset + len)
+ break;
+
+ i++;
+ pos += size;
+- skb_frag++;
++ frag++;
+ }
+
+- nskb = skb_clone(fskb, GFP_ATOMIC);
+- fskb = fskb->next;
++ nskb = skb_clone(list_skb, GFP_ATOMIC);
++ list_skb = list_skb->next;
+
+ if (unlikely(!nskb))
+ goto err;
+@@ -2851,7 +2854,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
+ __skb_push(nskb, doffset);
+ } else {
+ nskb = __alloc_skb(hsize + doffset + headroom,
+- GFP_ATOMIC, skb_alloc_rx_flag(skb),
++ GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
+ NUMA_NO_NODE);
+
+ if (unlikely(!nskb))
+@@ -2867,12 +2870,12 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
+ segs = nskb;
+ tail = nskb;
+
+- __copy_skb_header(nskb, skb);
+- nskb->mac_len = skb->mac_len;
++ __copy_skb_header(nskb, head_skb);
++ nskb->mac_len = head_skb->mac_len;
+
+ skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
+
+- skb_copy_from_linear_data_offset(skb, -tnl_hlen,
++ skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
+ nskb->data - tnl_hlen,
+ doffset + tnl_hlen);
+
+@@ -2881,30 +2884,32 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
+
+ if (!sg) {
+ nskb->ip_summed = CHECKSUM_NONE;
+- nskb->csum = skb_copy_and_csum_bits(skb, offset,
++ nskb->csum = skb_copy_and_csum_bits(head_skb, offset,
+ skb_put(nskb, len),
+ len, 0);
+ continue;
+ }
+
+- frag = skb_shinfo(nskb)->frags;
++ nskb_frag = skb_shinfo(nskb)->frags;
+
+- skb_copy_from_linear_data_offset(skb, offset,
++ skb_copy_from_linear_data_offset(head_skb, offset,
+ skb_put(nskb, hsize), hsize);
+
+- skb_shinfo(nskb)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
++ skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags &
++ SKBTX_SHARED_FRAG;
+
+ while (pos < offset + len) {
+ if (i >= nfrags) {
+- BUG_ON(skb_headlen(fskb));
++ BUG_ON(skb_headlen(list_skb));
+
+ i = 0;
+- nfrags = skb_shinfo(fskb)->nr_frags;
+- skb_frag = skb_shinfo(fskb)->frags;
++ nfrags = skb_shinfo(list_skb)->nr_frags;
++ frag = skb_shinfo(list_skb)->frags;
++ frag_skb = list_skb;
+
+ BUG_ON(!nfrags);
+
+- fskb = fskb->next;
++ list_skb = list_skb->next;
+ }
+
+ if (unlikely(skb_shinfo(nskb)->nr_frags >=
+@@ -2915,27 +2920,30 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
+ goto err;
+ }
+
+- *frag = *skb_frag;
+- __skb_frag_ref(frag);
+- size = skb_frag_size(frag);
++ if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC)))
++ goto err;
++
++ *nskb_frag = *frag;
++ __skb_frag_ref(nskb_frag);
++ size = skb_frag_size(nskb_frag);
+
+ if (pos < offset) {
+- frag->page_offset += offset - pos;
+- skb_frag_size_sub(frag, offset - pos);
++ nskb_frag->page_offset += offset - pos;
++ skb_frag_size_sub(nskb_frag, offset - pos);
+ }
+
+ skb_shinfo(nskb)->nr_frags++;
+
+ if (pos + size <= offset + len) {
+ i++;
+- skb_frag++;
++ frag++;
+ pos += size;
+ } else {
+- skb_frag_size_sub(frag, pos + size - (offset + len));
++ skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
+ goto skip_fraglist;
+ }
+
+- frag++;
++ nskb_frag++;
+ }
+
+ skip_fraglist:
+@@ -2949,15 +2957,12 @@ perform_csum_check:
+ nskb->len - doffset, 0);
+ nskb->ip_summed = CHECKSUM_NONE;
+ }
+- } while ((offset += len) < skb->len);
++ } while ((offset += len) < head_skb->len);
+
+ return segs;
+
+ err:
+- while ((skb = segs)) {
+- segs = skb->next;
+- kfree_skb(skb);
+- }
++ kfree_skb_list(segs);
+ return ERR_PTR(err);
+ }
+ EXPORT_SYMBOL_GPL(skb_segment);
+diff --git a/net/core/sock.c b/net/core/sock.c
+index fbc5cfbc2580..50db7336419c 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -2384,10 +2384,13 @@ void release_sock(struct sock *sk)
+ if (sk->sk_backlog.tail)
+ __release_sock(sk);
+
++ /* Warning : release_cb() might need to release sk ownership,
++ * ie call sock_release_ownership(sk) before us.
++ */
+ if (sk->sk_prot->release_cb)
+ sk->sk_prot->release_cb(sk);
+
+- sk->sk_lock.owned = 0;
++ sock_release_ownership(sk);
+ if (waitqueue_active(&sk->sk_lock.wq))
+ wake_up(&sk->sk_lock.wq);
+ spin_unlock_bh(&sk->sk_lock.slock);
+diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
+index 5893e99e8299..5147dbe8a363 100644
+--- a/net/ipv4/gre_demux.c
++++ b/net/ipv4/gre_demux.c
+@@ -182,6 +182,14 @@ static int gre_cisco_rcv(struct sk_buff *skb)
+ int i;
+ bool csum_err = false;
+
++#ifdef CONFIG_NET_IPGRE_BROADCAST
++ if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
++ /* Looped back packet, drop it! */
++ if (rt_is_output_route(skb_rtable(skb)))
++ goto drop;
++ }
++#endif
++
+ if (parse_gre_header(skb, &tpi, &csum_err) < 0)
+ goto drop;
+
+diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
+index bb075fc9a14f..3b01959bf4bb 100644
+--- a/net/ipv4/inet_fragment.c
++++ b/net/ipv4/inet_fragment.c
+@@ -208,7 +208,7 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
+ }
+
+ work = frag_mem_limit(nf) - nf->low_thresh;
+- while (work > 0) {
++ while (work > 0 || force) {
+ spin_lock(&nf->lru_lock);
+
+ if (list_empty(&nf->lru_list)) {
+@@ -278,9 +278,10 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
+
+ atomic_inc(&qp->refcnt);
+ hlist_add_head(&qp->list, &hb->chain);
++ inet_frag_lru_add(nf, qp);
+ spin_unlock(&hb->chain_lock);
+ read_unlock(&f->lock);
+- inet_frag_lru_add(nf, qp);
++
+ return qp;
+ }
+
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 471c8c1f3b67..1febfa10898d 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -411,9 +411,6 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
+
+ #ifdef CONFIG_NET_IPGRE_BROADCAST
+ if (ipv4_is_multicast(iph->daddr)) {
+- /* Looped back packet, drop it! */
+- if (rt_is_output_route(skb_rtable(skb)))
+- goto drop;
+ tunnel->dev->stats.multicast++;
+ skb->pkt_type = PACKET_BROADCAST;
+ }
+diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
+index f9a146627f35..42ffbc8d65c6 100644
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -109,6 +109,7 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
+ secpath_reset(skb);
+ if (!skb->l4_rxhash)
+ skb->rxhash = 0;
++ skb_dst_drop(skb);
+ skb->vlan_tci = 0;
+ skb_set_queue_mapping(skb, 0);
+ skb->pkt_type = PACKET_HOST;
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index 1672409f5ba5..6fbf3393d842 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -2253,13 +2253,14 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
+ }
+
+ static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+- u32 portid, u32 seq, struct mfc_cache *c, int cmd)
++ u32 portid, u32 seq, struct mfc_cache *c, int cmd,
++ int flags)
+ {
+ struct nlmsghdr *nlh;
+ struct rtmsg *rtm;
+ int err;
+
+- nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), NLM_F_MULTI);
++ nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
+ if (nlh == NULL)
+ return -EMSGSIZE;
+
+@@ -2327,7 +2328,7 @@ static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
+ if (skb == NULL)
+ goto errout;
+
+- err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd);
++ err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
+ if (err < 0)
+ goto errout;
+
+@@ -2366,7 +2367,8 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
+ if (ipmr_fill_mroute(mrt, skb,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+- mfc, RTM_NEWROUTE) < 0)
++ mfc, RTM_NEWROUTE,
++ NLM_F_MULTI) < 0)
+ goto done;
+ next_entry:
+ e++;
+@@ -2380,7 +2382,8 @@ next_entry:
+ if (ipmr_fill_mroute(mrt, skb,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+- mfc, RTM_NEWROUTE) < 0) {
++ mfc, RTM_NEWROUTE,
++ NLM_F_MULTI) < 0) {
+ spin_unlock_bh(&mfc_unres_lock);
+ goto done;
+ }
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 067213924751..1d7b9ddb94d0 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -2629,7 +2629,7 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
+ {
+ __be32 dest, src;
+ __u16 destp, srcp;
+- long delta = tw->tw_ttd - jiffies;
++ s32 delta = tw->tw_ttd - inet_tw_time_stamp();
+
+ dest = tw->tw_daddr;
+ src = tw->tw_rcv_saddr;
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 7ea0aa20b562..99737ed21653 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -765,6 +765,17 @@ void tcp_release_cb(struct sock *sk)
+ if (flags & (1UL << TCP_TSQ_DEFERRED))
+ tcp_tsq_handler(sk);
+
++ /* Here begins the tricky part :
++ * We are called from release_sock() with :
++ * 1) BH disabled
++ * 2) sk_lock.slock spinlock held
++ * 3) socket owned by us (sk->sk_lock.owned == 1)
++ *
++ * But following code is meant to be called from BH handlers,
++ * so we should keep BH disabled, but early release socket ownership
++ */
++ sock_release_ownership(sk);
++
+ if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {
+ tcp_write_timer_handler(sk);
+ __sock_put(sk);
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 9c05d77a20d4..0e8ae698ccce 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -1078,8 +1078,11 @@ retry:
+ * Lifetime is greater than REGEN_ADVANCE time units. In particular,
+ * an implementation must not create a temporary address with a zero
+ * Preferred Lifetime.
++ * Use age calculation as in addrconf_verify to avoid unnecessary
++ * temporary addresses being generated.
+ */
+- if (tmp_prefered_lft <= regen_advance) {
++ age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
++ if (tmp_prefered_lft <= regen_advance + age) {
+ in6_ifa_put(ifp);
+ in6_dev_put(idev);
+ ret = -1;
+diff --git a/net/ipv6/exthdrs_offload.c b/net/ipv6/exthdrs_offload.c
+index cf77f3abfd06..447a7fbd1bb6 100644
+--- a/net/ipv6/exthdrs_offload.c
++++ b/net/ipv6/exthdrs_offload.c
+@@ -25,11 +25,11 @@ int __init ipv6_exthdrs_offload_init(void)
+ int ret;
+
+ ret = inet6_add_offload(&rthdr_offload, IPPROTO_ROUTING);
+- if (!ret)
++ if (ret)
+ goto out;
+
+ ret = inet6_add_offload(&dstopt_offload, IPPROTO_DSTOPTS);
+- if (!ret)
++ if (ret)
+ goto out_rt;
+
+ out:
+diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
+index eef8d945b362..e2c9ff840f63 100644
+--- a/net/ipv6/icmp.c
++++ b/net/ipv6/icmp.c
+@@ -516,7 +516,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
+ np->tclass, NULL, &fl6, (struct rt6_info *)dst,
+ MSG_DONTWAIT, np->dontfrag);
+ if (err) {
+- ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
++ ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
+ ip6_flush_pending_frames(sk);
+ } else {
+ err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 2e5667d557fe..345742fd7d1c 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1088,21 +1088,19 @@ static void ip6_append_data_mtu(unsigned int *mtu,
+ unsigned int fragheaderlen,
+ struct sk_buff *skb,
+ struct rt6_info *rt,
+- bool pmtuprobe)
++ unsigned int orig_mtu)
+ {
+ if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
+ if (skb == NULL) {
+ /* first fragment, reserve header_len */
+- *mtu = *mtu - rt->dst.header_len;
++ *mtu = orig_mtu - rt->dst.header_len;
+
+ } else {
+ /*
+ * this fragment is not first, the headers
+ * space is regarded as data space.
+ */
+- *mtu = min(*mtu, pmtuprobe ?
+- rt->dst.dev->mtu :
+- dst_mtu(rt->dst.path));
++ *mtu = orig_mtu;
+ }
+ *maxfraglen = ((*mtu - fragheaderlen) & ~7)
+ + fragheaderlen - sizeof(struct frag_hdr);
+@@ -1119,7 +1117,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct inet_cork *cork;
+ struct sk_buff *skb, *skb_prev = NULL;
+- unsigned int maxfraglen, fragheaderlen, mtu;
++ unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
+ int exthdrlen;
+ int dst_exthdrlen;
+ int hh_len;
+@@ -1201,6 +1199,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+ dst_exthdrlen = 0;
+ mtu = cork->fragsize;
+ }
++ orig_mtu = mtu;
+
+ hh_len = LL_RESERVED_SPACE(rt->dst.dev);
+
+@@ -1298,8 +1297,7 @@ alloc_new_skb:
+ if (skb == NULL || skb_prev == NULL)
+ ip6_append_data_mtu(&mtu, &maxfraglen,
+ fragheaderlen, skb, rt,
+- np->pmtudisc ==
+- IPV6_PMTUDISC_PROBE);
++ orig_mtu);
+
+ skb_prev = skb;
+
+@@ -1555,8 +1553,8 @@ int ip6_push_pending_frames(struct sock *sk)
+ if (proto == IPPROTO_ICMPV6) {
+ struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
+
+- ICMP6MSGOUT_INC_STATS_BH(net, idev, icmp6_hdr(skb)->icmp6_type);
+- ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
++ ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
++ ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
+ }
+
+ err = ip6_local_out(skb);
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index 0eb4038a4d63..8737400af0a0 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -2349,13 +2349,14 @@ int ip6mr_get_route(struct net *net,
+ }
+
+ static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
+- u32 portid, u32 seq, struct mfc6_cache *c, int cmd)
++ u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
++ int flags)
+ {
+ struct nlmsghdr *nlh;
+ struct rtmsg *rtm;
+ int err;
+
+- nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), NLM_F_MULTI);
++ nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
+ if (nlh == NULL)
+ return -EMSGSIZE;
+
+@@ -2423,7 +2424,7 @@ static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
+ if (skb == NULL)
+ goto errout;
+
+- err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd);
++ err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
+ if (err < 0)
+ goto errout;
+
+@@ -2462,7 +2463,8 @@ static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
+ if (ip6mr_fill_mroute(mrt, skb,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+- mfc, RTM_NEWROUTE) < 0)
++ mfc, RTM_NEWROUTE,
++ NLM_F_MULTI) < 0)
+ goto done;
+ next_entry:
+ e++;
+@@ -2476,7 +2478,8 @@ next_entry:
+ if (ip6mr_fill_mroute(mrt, skb,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+- mfc, RTM_NEWROUTE) < 0) {
++ mfc, RTM_NEWROUTE,
++ NLM_F_MULTI) < 0) {
+ spin_unlock_bh(&mfc_unres_lock);
+ goto done;
+ }
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index d18f9f903db6..d81abd5ba767 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -1620,11 +1620,12 @@ static void mld_sendpack(struct sk_buff *skb)
+ dst_output);
+ out:
+ if (!err) {
+- ICMP6MSGOUT_INC_STATS_BH(net, idev, ICMPV6_MLD2_REPORT);
+- ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
+- IP6_UPD_PO_STATS_BH(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
+- } else
+- IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_OUTDISCARDS);
++ ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
++ ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
++ IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
++ } else {
++ IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
++ }
+
+ rcu_read_unlock();
+ return;
+diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
+index a83243c3d656..3a1f1f32779f 100644
+--- a/net/ipv6/ping.c
++++ b/net/ipv6/ping.c
+@@ -182,8 +182,8 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ MSG_DONTWAIT, np->dontfrag);
+
+ if (err) {
+- ICMP6_INC_STATS_BH(sock_net(sk), rt->rt6i_idev,
+- ICMP6_MIB_OUTERRORS);
++ ICMP6_INC_STATS(sock_net(sk), rt->rt6i_idev,
++ ICMP6_MIB_OUTERRORS);
+ ip6_flush_pending_frames(sk);
+ } else {
+ err = icmpv6_push_pending_frames(sk, &fl6,
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 4b4944c3e4c4..40b6e6910792 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1495,7 +1495,7 @@ int ip6_route_add(struct fib6_config *cfg)
+ if (!table)
+ goto out;
+
+- rt = ip6_dst_alloc(net, NULL, DST_NOCOUNT, table);
++ rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table);
+
+ if (!rt) {
+ err = -ENOMEM;
+diff --git a/net/rds/iw.c b/net/rds/iw.c
+index 7826d46baa70..589935661d66 100644
+--- a/net/rds/iw.c
++++ b/net/rds/iw.c
+@@ -239,7 +239,8 @@ static int rds_iw_laddr_check(__be32 addr)
+ ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
+ /* due to this, we will claim to support IB devices unless we
+ check node_type. */
+- if (ret || cm_id->device->node_type != RDMA_NODE_RNIC)
++ if (ret || !cm_id->device ||
++ cm_id->device->node_type != RDMA_NODE_RNIC)
+ ret = -EADDRNOTAVAIL;
+
+ rdsdebug("addr %pI4 ret %d node type %d\n",
+diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
+index 95d843961907..990aa4946463 100644
+--- a/net/sched/sch_fq.c
++++ b/net/sched/sch_fq.c
+@@ -578,9 +578,11 @@ static void fq_rehash(struct fq_sched_data *q,
+ q->stat_gc_flows += fcnt;
+ }
+
+-static int fq_resize(struct fq_sched_data *q, u32 log)
++static int fq_resize(struct Qdisc *sch, u32 log)
+ {
++ struct fq_sched_data *q = qdisc_priv(sch);
+ struct rb_root *array;
++ void *old_fq_root;
+ u32 idx;
+
+ if (q->fq_root && log == q->fq_trees_log)
+@@ -593,13 +595,19 @@ static int fq_resize(struct fq_sched_data *q, u32 log)
+ for (idx = 0; idx < (1U << log); idx++)
+ array[idx] = RB_ROOT;
+
+- if (q->fq_root) {
+- fq_rehash(q, q->fq_root, q->fq_trees_log, array, log);
+- kfree(q->fq_root);
+- }
++ sch_tree_lock(sch);
++
++ old_fq_root = q->fq_root;
++ if (old_fq_root)
++ fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);
++
+ q->fq_root = array;
+ q->fq_trees_log = log;
+
++ sch_tree_unlock(sch);
++
++ kfree(old_fq_root);
++
+ return 0;
+ }
+
+@@ -675,9 +683,11 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
+ q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
+ }
+
+- if (!err)
+- err = fq_resize(q, fq_log);
+-
++ if (!err) {
++ sch_tree_unlock(sch);
++ err = fq_resize(sch, fq_log);
++ sch_tree_lock(sch);
++ }
+ while (sch->q.qlen > sch->limit) {
+ struct sk_buff *skb = fq_dequeue(sch);
+
+@@ -723,7 +733,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt)
+ if (opt)
+ err = fq_change(sch, opt);
+ else
+- err = fq_resize(q, q->fq_trees_log);
++ err = fq_resize(sch, q->fq_trees_log);
+
+ return err;
+ }
+diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
+index fe690320b1e4..6eee917b1276 100644
+--- a/net/sctp/sm_make_chunk.c
++++ b/net/sctp/sm_make_chunk.c
+@@ -1420,8 +1420,8 @@ static void sctp_chunk_destroy(struct sctp_chunk *chunk)
+ BUG_ON(!list_empty(&chunk->list));
+ list_del_init(&chunk->transmitted_list);
+
+- /* Free the chunk skb data and the SCTP_chunk stub itself. */
+- dev_kfree_skb(chunk->skb);
++ consume_skb(chunk->skb);
++ consume_skb(chunk->auth_chunk);
+
+ SCTP_DBG_OBJCNT_DEC(chunk);
+ kmem_cache_free(sctp_chunk_cachep, chunk);
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index af7be05ff9bf..9f9b1d8dbc87 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -761,7 +761,6 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
+
+ /* Make sure that we and the peer are AUTH capable */
+ if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) {
+- kfree_skb(chunk->auth_chunk);
+ sctp_association_free(new_asoc);
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
+@@ -776,10 +775,6 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
+ auth.transport = chunk->transport;
+
+ ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth);
+-
+- /* We can now safely free the auth_chunk clone */
+- kfree_skb(chunk->auth_chunk);
+-
+ if (ret != SCTP_IERROR_NO_ERROR) {
+ sctp_association_free(new_asoc);
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+diff --git a/net/socket.c b/net/socket.c
+index e83c416708af..dc57dae20a9a 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -1972,6 +1972,10 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
+ {
+ if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
+ return -EFAULT;
++
++ if (kmsg->msg_namelen < 0)
++ return -EINVAL;
++
+ if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
+ kmsg->msg_namelen = sizeof(struct sockaddr_storage);
+ return 0;
+diff --git a/net/tipc/config.c b/net/tipc/config.c
+index c301a9a592d8..5afe633114e0 100644
+--- a/net/tipc/config.c
++++ b/net/tipc/config.c
+@@ -376,7 +376,6 @@ static void cfg_conn_msg_event(int conid, struct sockaddr_tipc *addr,
+ struct tipc_cfg_msg_hdr *req_hdr;
+ struct tipc_cfg_msg_hdr *rep_hdr;
+ struct sk_buff *rep_buf;
+- int ret;
+
+ /* Validate configuration message header (ignore invalid message) */
+ req_hdr = (struct tipc_cfg_msg_hdr *)buf;
+@@ -398,12 +397,8 @@ static void cfg_conn_msg_event(int conid, struct sockaddr_tipc *addr,
+ memcpy(rep_hdr, req_hdr, sizeof(*rep_hdr));
+ rep_hdr->tcm_len = htonl(rep_buf->len);
+ rep_hdr->tcm_flags &= htons(~TCM_F_REQUEST);
+-
+- ret = tipc_conn_sendmsg(&cfgsrv, conid, addr, rep_buf->data,
+- rep_buf->len);
+- if (ret < 0)
+- pr_err("Sending cfg reply message failed, no memory\n");
+-
++ tipc_conn_sendmsg(&cfgsrv, conid, addr, rep_buf->data,
++ rep_buf->len);
+ kfree_skb(rep_buf);
+ }
+ }
+diff --git a/net/tipc/handler.c b/net/tipc/handler.c
+index e4bc8a296744..1fabf160501f 100644
+--- a/net/tipc/handler.c
++++ b/net/tipc/handler.c
+@@ -58,7 +58,6 @@ unsigned int tipc_k_signal(Handler routine, unsigned long argument)
+
+ spin_lock_bh(&qitem_lock);
+ if (!handler_enabled) {
+- pr_err("Signal request ignored by handler\n");
+ spin_unlock_bh(&qitem_lock);
+ return -ENOPROTOOPT;
+ }
+diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
+index 09dcd54b04e1..299e45af7e4e 100644
+--- a/net/tipc/name_table.c
++++ b/net/tipc/name_table.c
+@@ -942,20 +942,51 @@ int tipc_nametbl_init(void)
+ return 0;
+ }
+
++/**
++ * tipc_purge_publications - remove all publications for a given type
++ *
++ * tipc_nametbl_lock must be held when calling this function
++ */
++static void tipc_purge_publications(struct name_seq *seq)
++{
++ struct publication *publ, *safe;
++ struct sub_seq *sseq;
++ struct name_info *info;
++
++ if (!seq->sseqs) {
++ nameseq_delete_empty(seq);
++ return;
++ }
++ sseq = seq->sseqs;
++ info = sseq->info;
++ list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) {
++ tipc_nametbl_remove_publ(publ->type, publ->lower, publ->node,
++ publ->ref, publ->key);
++ }
++}
++
+ void tipc_nametbl_stop(void)
+ {
+ u32 i;
++ struct name_seq *seq;
++ struct hlist_head *seq_head;
++ struct hlist_node *safe;
+
+ if (!table.types)
+ return;
+
+- /* Verify name table is empty, then release it */
++ /* Verify name table is empty and purge any lingering
++ * publications, then release the name table
++ */
+ write_lock_bh(&tipc_nametbl_lock);
+ for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
+ if (hlist_empty(&table.types[i]))
+ continue;
+- pr_err("nametbl_stop(): orphaned hash chain detected\n");
+- break;
++ seq_head = &table.types[i];
++ hlist_for_each_entry_safe(seq, safe, seq_head, ns_list) {
++ tipc_purge_publications(seq);
++ }
++ continue;
+ }
+ kfree(table.types);
+ table.types = NULL;
+diff --git a/net/tipc/server.c b/net/tipc/server.c
+index fd3fa57a410e..bd2336aad0e4 100644
+--- a/net/tipc/server.c
++++ b/net/tipc/server.c
+@@ -87,7 +87,6 @@ static void tipc_clean_outqueues(struct tipc_conn *con);
+ static void tipc_conn_kref_release(struct kref *kref)
+ {
+ struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
+- struct tipc_server *s = con->server;
+
+ if (con->sock) {
+ tipc_sock_release_local(con->sock);
+@@ -95,10 +94,6 @@ static void tipc_conn_kref_release(struct kref *kref)
+ }
+
+ tipc_clean_outqueues(con);
+-
+- if (con->conid)
+- s->tipc_conn_shutdown(con->conid, con->usr_data);
+-
+ kfree(con);
+ }
+
+@@ -181,6 +176,9 @@ static void tipc_close_conn(struct tipc_conn *con)
+ struct tipc_server *s = con->server;
+
+ if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
++ if (con->conid)
++ s->tipc_conn_shutdown(con->conid, con->usr_data);
++
+ spin_lock_bh(&s->idr_lock);
+ idr_remove(&s->conn_idr, con->conid);
+ s->idr_in_use--;
+@@ -429,10 +427,12 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid,
+ list_add_tail(&e->list, &con->outqueue);
+ spin_unlock_bh(&con->outqueue_lock);
+
+- if (test_bit(CF_CONNECTED, &con->flags))
++ if (test_bit(CF_CONNECTED, &con->flags)) {
+ if (!queue_work(s->send_wq, &con->swork))
+ conn_put(con);
+-
++ } else {
++ conn_put(con);
++ }
+ return 0;
+ }
+
+diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
+index d38bb45d82e9..c2a37aa12498 100644
+--- a/net/tipc/subscr.c
++++ b/net/tipc/subscr.c
+@@ -96,20 +96,16 @@ static void subscr_send_event(struct tipc_subscription *sub, u32 found_lower,
+ {
+ struct tipc_subscriber *subscriber = sub->subscriber;
+ struct kvec msg_sect;
+- int ret;
+
+ msg_sect.iov_base = (void *)&sub->evt;
+ msg_sect.iov_len = sizeof(struct tipc_event);
+-
+ sub->evt.event = htohl(event, sub->swap);
+ sub->evt.found_lower = htohl(found_lower, sub->swap);
+ sub->evt.found_upper = htohl(found_upper, sub->swap);
+ sub->evt.port.ref = htohl(port_ref, sub->swap);
+ sub->evt.port.node = htohl(node, sub->swap);
+- ret = tipc_conn_sendmsg(&topsrv, subscriber->conid, NULL,
+- msg_sect.iov_base, msg_sect.iov_len);
+- if (ret < 0)
+- pr_err("Sending subscription event failed, no memory\n");
++ tipc_conn_sendmsg(&topsrv, subscriber->conid, NULL, msg_sect.iov_base,
++ msg_sect.iov_len);
+ }
+
+ /**
+@@ -153,14 +149,6 @@ static void subscr_timeout(struct tipc_subscription *sub)
+ /* The spin lock per subscriber is used to protect its members */
+ spin_lock_bh(&subscriber->lock);
+
+- /* Validate if the connection related to the subscriber is
+- * closed (in case subscriber is terminating)
+- */
+- if (subscriber->conid == 0) {
+- spin_unlock_bh(&subscriber->lock);
+- return;
+- }
+-
+ /* Validate timeout (in case subscription is being cancelled) */
+ if (sub->timeout == TIPC_WAIT_FOREVER) {
+ spin_unlock_bh(&subscriber->lock);
+@@ -215,9 +203,6 @@ static void subscr_release(struct tipc_subscriber *subscriber)
+
+ spin_lock_bh(&subscriber->lock);
+
+- /* Invalidate subscriber reference */
+- subscriber->conid = 0;
+-
+ /* Destroy any existing subscriptions for subscriber */
+ list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
+ subscription_list) {
+@@ -278,9 +263,9 @@ static void subscr_cancel(struct tipc_subscr *s,
+ *
+ * Called with subscriber lock held.
+ */
+-static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
+- struct tipc_subscriber *subscriber)
+-{
++static int subscr_subscribe(struct tipc_subscr *s,
++ struct tipc_subscriber *subscriber,
++ struct tipc_subscription **sub_p) {
+ struct tipc_subscription *sub;
+ int swap;
+
+@@ -291,23 +276,21 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
+ if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
+ s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
+ subscr_cancel(s, subscriber);
+- return NULL;
++ return 0;
+ }
+
+ /* Refuse subscription if global limit exceeded */
+ if (atomic_read(&subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) {
+ pr_warn("Subscription rejected, limit reached (%u)\n",
+ TIPC_MAX_SUBSCRIPTIONS);
+- subscr_terminate(subscriber);
+- return NULL;
++ return -EINVAL;
+ }
+
+ /* Allocate subscription object */
+ sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
+ if (!sub) {
+ pr_warn("Subscription rejected, no memory\n");
+- subscr_terminate(subscriber);
+- return NULL;
++ return -ENOMEM;
+ }
+
+ /* Initialize subscription object */
+@@ -321,8 +304,7 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
+ (sub->seq.lower > sub->seq.upper)) {
+ pr_warn("Subscription rejected, illegal request\n");
+ kfree(sub);
+- subscr_terminate(subscriber);
+- return NULL;
++ return -EINVAL;
+ }
+ INIT_LIST_HEAD(&sub->nameseq_list);
+ list_add(&sub->subscription_list, &subscriber->subscription_list);
+@@ -335,8 +317,8 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
+ (Handler)subscr_timeout, (unsigned long)sub);
+ k_start_timer(&sub->timer, sub->timeout);
+ }
+-
+- return sub;
++ *sub_p = sub;
++ return 0;
+ }
+
+ /* Handle one termination request for the subscriber */
+@@ -350,10 +332,14 @@ static void subscr_conn_msg_event(int conid, struct sockaddr_tipc *addr,
+ void *usr_data, void *buf, size_t len)
+ {
+ struct tipc_subscriber *subscriber = usr_data;
+- struct tipc_subscription *sub;
++ struct tipc_subscription *sub = NULL;
+
+ spin_lock_bh(&subscriber->lock);
+- sub = subscr_subscribe((struct tipc_subscr *)buf, subscriber);
++ if (subscr_subscribe((struct tipc_subscr *)buf, subscriber, &sub) < 0) {
++ spin_unlock_bh(&subscriber->lock);
++ subscr_terminate(subscriber);
++ return;
++ }
+ if (sub)
+ tipc_nametbl_subscribe(sub);
+ spin_unlock_bh(&subscriber->lock);
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index d7c1ac621a90..c3975bcf725f 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1785,8 +1785,11 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
+ goto out;
+
+ err = mutex_lock_interruptible(&u->readlock);
+- if (err) {
+- err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
++ if (unlikely(err)) {
++ /* recvmsg() in non blocking mode is supposed to return -EAGAIN
++ * sk_rcvtimeo is not honored by mutex_lock_interruptible()
++ */
++ err = noblock ? -EAGAIN : -ERESTARTSYS;
+ goto out;
+ }
+
+@@ -1911,6 +1914,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct unix_sock *u = unix_sk(sk);
+ struct sockaddr_un *sunaddr = msg->msg_name;
+ int copied = 0;
++ int noblock = flags & MSG_DONTWAIT;
+ int check_creds = 0;
+ int target;
+ int err = 0;
+@@ -1926,7 +1930,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ goto out;
+
+ target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
+- timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
++ timeo = sock_rcvtimeo(sk, noblock);
+
+ /* Lock the socket to prevent queue disordering
+ * while sleeps in memcpy_tomsg
+@@ -1938,8 +1942,11 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ }
+
+ err = mutex_lock_interruptible(&u->readlock);
+- if (err) {
+- err = sock_intr_errno(timeo);
++ if (unlikely(err)) {
++ /* recvmsg() in non blocking mode is supposed to return -EAGAIN
++ * sk_rcvtimeo is not honored by mutex_lock_interruptible()
++ */
++ err = noblock ? -EAGAIN : -ERESTARTSYS;
+ goto out;
+ }
+
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 57b0b49f4e6e..019749cd0df6 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -1419,15 +1419,33 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
+ isec->sid = sbsec->sid;
+
+ if ((sbsec->flags & SE_SBPROC) && !S_ISLNK(inode->i_mode)) {
+- if (opt_dentry) {
+- isec->sclass = inode_mode_to_security_class(inode->i_mode);
+- rc = selinux_proc_get_sid(opt_dentry,
+- isec->sclass,
+- &sid);
+- if (rc)
+- goto out_unlock;
+- isec->sid = sid;
+- }
++ /* We must have a dentry to determine the label on
++ * procfs inodes */
++ if (opt_dentry)
++ /* Called from d_instantiate or
++ * d_splice_alias. */
++ dentry = dget(opt_dentry);
++ else
++ /* Called from selinux_complete_init, try to
++ * find a dentry. */
++ dentry = d_find_alias(inode);
++ /*
++ * This can be hit on boot when a file is accessed
++ * before the policy is loaded. When we load policy we
++ * may find inodes that have no dentry on the
++ * sbsec->isec_head list. No reason to complain as
++ * these will get fixed up the next time we go through
++ * inode_doinit() with a dentry, before these inodes
++ * could be used again by userspace.
++ */
++ if (!dentry)
++ goto out_unlock;
++ isec->sclass = inode_mode_to_security_class(inode->i_mode);
++ rc = selinux_proc_get_sid(dentry, isec->sclass, &sid);
++ dput(dentry);
++ if (rc)
++ goto out_unlock;
++ isec->sid = sid;
+ }
+ break;
+ }
^ permalink raw reply [flat|nested] only message in thread
only message in thread, other threads:[~2014-04-15 14:43 UTC | newest]
Thread overview: (only message) (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2014-04-15 14:43 [gentoo-commits] linux-patches r2752 - genpatches-2.6/trunk/3.13 Mike Pagano (mpagano)
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox