From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path: 
Received: from lists.gentoo.org (pigeon.gentoo.org [208.92.234.80])
	(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
	(No client certificate requested)
	by finch.gentoo.org (Postfix) with ESMTPS id 84191138335
	for ; Wed, 21 Nov 2018 15:02:31 +0000 (UTC)
Received: from pigeon.gentoo.org (localhost [127.0.0.1])
	by pigeon.gentoo.org (Postfix) with SMTP id 6A4FDE08AA;
	Wed, 21 Nov 2018 15:02:30 +0000 (UTC)
Received: from smtp.gentoo.org (dev.gentoo.org [IPv6:2001:470:ea4a:1:5054:ff:fec7:86e4])
	(using TLSv1.2 with cipher ECDHE-RSA-AES128-GCM-SHA256 (128/128 bits))
	(No client certificate requested)
	by pigeon.gentoo.org (Postfix) with ESMTPS id 27CF1E08AA
	for ; Wed, 21 Nov 2018 15:02:30 +0000 (UTC)
Received: from oystercatcher.gentoo.org (oystercatcher.gentoo.org [148.251.78.52])
	(using TLSv1.2 with cipher ECDHE-RSA-AES128-GCM-SHA256 (128/128 bits))
	(No client certificate requested)
	by smtp.gentoo.org (Postfix) with ESMTPS id EB87F335C6F
	for ; Wed, 21 Nov 2018 15:02:27 +0000 (UTC)
Received: from localhost.localdomain (localhost [IPv6:::1])
	by oystercatcher.gentoo.org (Postfix) with ESMTP id 69BE0477
	for ; Wed, 21 Nov 2018 15:02:26 +0000 (UTC)
From: "Mike Pagano"
To: gentoo-commits@lists.gentoo.org
Content-Transfer-Encoding: 8bit
Content-type: text/plain; charset=UTF-8
Reply-To: gentoo-dev@lists.gentoo.org, "Mike Pagano"
Message-ID: <1542812460.91c9d0b074d39361d5e1acf4a654db751f57cabc.mpagano@gentoo>
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
X-VCS-Repository: proj/linux-patches
X-VCS-Files: 0000_README 1142_linux-4.4.143.patch
X-VCS-Directories: /
X-VCS-Committer: mpagano
X-VCS-Committer-Name: Mike Pagano
X-VCS-Revision: 91c9d0b074d39361d5e1acf4a654db751f57cabc
X-VCS-Branch: 4.4
Date: Wed, 21 Nov 2018 15:02:26 +0000 (UTC)
Precedence: bulk
List-Post: 
List-Help: 
List-Unsubscribe: 
List-Subscribe: 
List-Id: Gentoo Linux mail
X-BeenThere: gentoo-commits@lists.gentoo.org
X-Archives-Salt: 7e64654c-7e77-40e1-88fe-359bd1a0021c
X-Archives-Hash: 9a7e1cbe4d446bfd645e8807b0777c91

commit:     91c9d0b074d39361d5e1acf4a654db751f57cabc
Author:     Mike Pagano gentoo org>
AuthorDate: Sun Jul 22 15:15:27 2018 +0000
Commit:     Mike Pagano gentoo org>
CommitDate: Wed Nov 21 15:01:00 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=91c9d0b0

Linux patch 4.4.143

Signed-off-by: Mike Pagano gentoo.org>

 0000_README              |    4 +
 1142_linux-4.4.143.patch | 1060 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1064 insertions(+)

diff --git a/0000_README b/0000_README
index 3d85018..42e6d1f 100644
--- a/0000_README
+++ b/0000_README
@@ -611,6 +611,10 @@ Patch: 1141_linux-4.4.142.patch
 From: http://www.kernel.org
 Desc: Linux 4.4.142
 
+Patch: 1142_linux-4.4.143.patch
+From: http://www.kernel.org
+Desc: Linux 4.4.143
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1142_linux-4.4.143.patch b/1142_linux-4.4.143.patch
new file mode 100644
index 0000000..022f040
--- /dev/null
+++ b/1142_linux-4.4.143.patch
@@ -0,0 +1,1060 @@
+diff --git a/Makefile b/Makefile
+index 75d6176c8786..54690fee0485 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 142
++SUBLEVEL = 143
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+ 
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index 1ee603d07847..354b99f56c1e 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -629,21 +629,48 @@ unsigned long arch_align_stack(unsigned long sp)
+ 	return sp & ALMASK;
+ }
+ 
++static DEFINE_PER_CPU(struct call_single_data, backtrace_csd);
++static struct cpumask backtrace_csd_busy;
++
+ static void arch_dump_stack(void *info)
+ {
+ 	struct pt_regs *regs;
++	static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
+ 
++	arch_spin_lock(&lock);
+ 	regs = get_irq_regs();
+ 
+ 	if (regs)
+ 		show_regs(regs);
++	else
++		dump_stack();
++	arch_spin_unlock(&lock);
+ 
+-	dump_stack();
++	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
+ }
+ 
+ void arch_trigger_all_cpu_backtrace(bool include_self)
+ {
+-	smp_call_function(arch_dump_stack, NULL, 1);
++	struct call_single_data *csd;
++	int cpu;
++
++	for_each_cpu(cpu, cpu_online_mask) {
++		/*
++		 * If we previously sent an IPI to the target CPU & it hasn't
++		 * cleared its bit in the busy cpumask then it didn't handle
++		 * our previous IPI & it's not safe for us to reuse the
++		 * call_single_data_t.
++		 */
++		if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
++			pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
++				cpu);
++			continue;
++		}
++
++		csd = &per_cpu(backtrace_csd, cpu);
++		csd->func = arch_dump_stack;
++		smp_call_function_single_async(cpu, csd);
++	}
+ }
+ 
+ int mips_get_process_fp_mode(struct task_struct *task)
+diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
+index 31ca2edd7218..1b901218e3ae 100644
+--- a/arch/mips/kernel/traps.c
++++ b/arch/mips/kernel/traps.c
+@@ -344,6 +344,7 @@ static void __show_regs(const struct pt_regs *regs)
+ void show_regs(struct pt_regs *regs)
+ {
+ 	__show_regs((struct pt_regs *)regs);
++	dump_stack();
+ }
+ 
+ void show_registers(struct pt_regs *regs)
+diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
+index 1c79c8add0eb..21e84a31d211 100644
+--- a/arch/x86/include/asm/asm.h
++++ b/arch/x86/include/asm/asm.h
+@@ -44,6 +44,65 @@
+ #define _ASM_SI		__ASM_REG(si)
+ #define _ASM_DI		__ASM_REG(di)
+ 
++#ifndef __x86_64__
++/* 32 bit */
++
++#define _ASM_ARG1	_ASM_AX
++#define _ASM_ARG2	_ASM_DX
++#define _ASM_ARG3	_ASM_CX
++
++#define _ASM_ARG1L	eax
++#define _ASM_ARG2L	edx
++#define _ASM_ARG3L	ecx
++
++#define _ASM_ARG1W	ax
++#define _ASM_ARG2W	dx
++#define _ASM_ARG3W	cx
++
++#define _ASM_ARG1B	al
++#define _ASM_ARG2B	dl
++#define _ASM_ARG3B	cl
++
++#else
++/* 64 bit */
++
++#define _ASM_ARG1	_ASM_DI
++#define _ASM_ARG2	_ASM_SI
++#define _ASM_ARG3	_ASM_DX
++#define _ASM_ARG4	_ASM_CX
++#define _ASM_ARG5	r8
++#define _ASM_ARG6	r9
++
++#define _ASM_ARG1Q	rdi
++#define _ASM_ARG2Q	rsi
++#define _ASM_ARG3Q	rdx
++#define _ASM_ARG4Q	rcx
++#define _ASM_ARG5Q	r8
++#define _ASM_ARG6Q	r9
++
++#define _ASM_ARG1L	edi
++#define _ASM_ARG2L	esi
++#define _ASM_ARG3L	edx
++#define _ASM_ARG4L	ecx
++#define _ASM_ARG5L	r8d
++#define _ASM_ARG6L	r9d
++
++#define _ASM_ARG1W	di
++#define _ASM_ARG2W	si
++#define _ASM_ARG3W	dx
++#define _ASM_ARG4W	cx
++#define _ASM_ARG5W	r8w
++#define _ASM_ARG6W	r9w
++
++#define _ASM_ARG1B	dil
++#define _ASM_ARG2B	sil
++#define _ASM_ARG3B	dl
++#define _ASM_ARG4B	cl
++#define _ASM_ARG5B	r8b
++#define _ASM_ARG6B	r9b
++
++#endif
++
+ /* Exception table entry */
+ #ifdef __ASSEMBLY__
+ # define _ASM_EXTABLE(from,to)					\
+diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
+index c302f47f6323..94712e1c5cf9 100644
+--- a/drivers/atm/zatm.c
++++ b/drivers/atm/zatm.c
+@@ -1481,6 +1481,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
+ 			return -EFAULT;
+ 		if (pool < 0 || pool > ZATM_LAST_POOL)
+ 			return -EINVAL;
++		pool = array_index_nospec(pool,
++					  ZATM_LAST_POOL + 1);
+ 		if (copy_from_user(&info,
+ 		    &((struct zatm_pool_req __user *) arg)->info,
+ 		    sizeof(info))) return -EFAULT;
+diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
+index 58a630e55d5d..78d0722feacb 100644
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -207,7 +207,7 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
+ 				  dev->pdr_pa);
+ 		return -ENOMEM;
+ 	}
+-	memset(dev->pdr, 0,  sizeof(struct ce_pd) * PPC4XX_NUM_PD);
++	memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
+ 	dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
+ 				   256 * PPC4XX_NUM_PD,
+ 				   &dev->shadow_sa_pool_pa,
+@@ -240,13 +240,15 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
+ 
+ static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
+ {
+-	if (dev->pdr != NULL)
++	if (dev->pdr)
+ 		dma_free_coherent(dev->core_dev->device,
+ 				  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
+ 				  dev->pdr, dev->pdr_pa);
++
+ 	if (dev->shadow_sa_pool)
+ 		dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
+ 				  dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
++
+ 	if (dev->shadow_sr_pool)
+ 		dma_free_coherent(dev->core_dev->device,
+ 			sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
+@@ -416,12 +418,12 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
+ 
+ static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
+ {
+-	if (dev->sdr != NULL)
++	if (dev->sdr)
+ 		dma_free_coherent(dev->core_dev->device,
+ 				  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
+ 				  dev->sdr, dev->sdr_pa);
+ 
+-	if (dev->scatter_buffer_va != NULL)
++	if (dev->scatter_buffer_va)
+ 		dma_free_coherent(dev->core_dev->device,
+ 				  dev->scatter_buffer_size * PPC4XX_NUM_SD,
+ 				  dev->scatter_buffer_va,
+@@ -1029,12 +1031,10 @@ int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
+ 			break;
+ 		}
+ 
+-		if (rc) {
+-			list_del(&alg->entry);
++		if (rc)
+ 			kfree(alg);
+-		} else {
++		else
+ 			list_add_tail(&alg->entry, &sec_dev->alg_list);
+-		}
+ 	}
+ 
+ 	return 0;
+@@ -1188,7 +1188,7 @@ static int crypto4xx_probe(struct platform_device *ofdev)
+ 
+ 	rc = crypto4xx_build_gdr(core_dev->dev);
+ 	if (rc)
+-		goto err_build_gdr;
++		goto err_build_pdr;
+ 
+ 	rc = crypto4xx_build_sdr(core_dev->dev);
+ 	if (rc)
+@@ -1230,12 +1230,11 @@ err_iomap:
+ err_request_irq:
+ 	irq_dispose_mapping(core_dev->irq);
+ 	tasklet_kill(&core_dev->tasklet);
+-	crypto4xx_destroy_sdr(core_dev->dev);
+ err_build_sdr:
++	crypto4xx_destroy_sdr(core_dev->dev);
+ 	crypto4xx_destroy_gdr(core_dev->dev);
+-err_build_gdr:
+-	crypto4xx_destroy_pdr(core_dev->dev);
+ err_build_pdr:
++	crypto4xx_destroy_pdr(core_dev->dev);
+ 	kfree(core_dev->dev);
+ err_alloc_dev:
+ 	kfree(core_dev);
+diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+index 8b1929e9f698..ec5834087e4b 100644
+--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
++++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+@@ -1063,7 +1063,8 @@ static int bcm_enet_open(struct net_device *dev)
+ 	val = enet_readl(priv, ENET_CTL_REG);
+ 	val |= ENET_CTL_ENABLE_MASK;
+ 	enet_writel(priv, val, ENET_CTL_REG);
+-	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
++	if (priv->dma_has_sram)
++		enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
+ 	enet_dmac_writel(priv, priv->dma_chan_en_mask,
+ 			 ENETDMAC_CHANCFG, priv->rx_chan);
+ 
+@@ -1787,7 +1788,9 @@ static int bcm_enet_probe(struct platform_device *pdev)
+ 		ret = PTR_ERR(priv->mac_clk);
+ 		goto out;
+ 	}
+-	clk_prepare_enable(priv->mac_clk);
++	ret = clk_prepare_enable(priv->mac_clk);
++	if (ret)
++		goto out_put_clk_mac;
+ 
+ 	/* initialize default and fetch platform data */
+ 	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
+@@ -1819,9 +1822,11 @@ static int bcm_enet_probe(struct platform_device *pdev)
+ 		if (IS_ERR(priv->phy_clk)) {
+ 			ret = PTR_ERR(priv->phy_clk);
+ 			priv->phy_clk = NULL;
+-			goto out_put_clk_mac;
++			goto out_disable_clk_mac;
+ 		}
+-		clk_prepare_enable(priv->phy_clk);
++		ret = clk_prepare_enable(priv->phy_clk);
++		if (ret)
++			goto out_put_clk_phy;
+ 	}
+ 
+ 	/* do minimal hardware init to be able to probe mii bus */
+@@ -1921,13 +1926,16 @@ out_free_mdio:
+ out_uninit_hw:
+ 	/* turn off mdc clock */
+ 	enet_writel(priv, 0, ENET_MIISC_REG);
+-	if (priv->phy_clk) {
++	if (priv->phy_clk)
+ 		clk_disable_unprepare(priv->phy_clk);
++
++out_put_clk_phy:
++	if (priv->phy_clk)
+ 		clk_put(priv->phy_clk);
+-	}
+ 
+-out_put_clk_mac:
++out_disable_clk_mac:
+ 	clk_disable_unprepare(priv->mac_clk);
++out_put_clk_mac:
+ 	clk_put(priv->mac_clk);
+ out:
+ 	free_netdev(dev);
+@@ -2772,7 +2780,9 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
+ 		ret = PTR_ERR(priv->mac_clk);
+ 		goto out_unmap;
+ 	}
+-	clk_enable(priv->mac_clk);
++	ret = clk_prepare_enable(priv->mac_clk);
++	if (ret)
++		goto out_put_clk;
+ 
+ 	priv->rx_chan = 0;
+ 	priv->tx_chan = 1;
+@@ -2793,7 +2803,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
+ 
+ 	ret = register_netdev(dev);
+ 	if (ret)
+-		goto out_put_clk;
++		goto out_disable_clk;
+ 
+ 	netif_carrier_off(dev);
+ 	platform_set_drvdata(pdev, dev);
+@@ -2802,6 +2812,9 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
+ 
+ 	return 0;
+ 
++out_disable_clk:
++	clk_disable_unprepare(priv->mac_clk);
++
+ out_put_clk:
+ 	clk_put(priv->mac_clk);
+ 
+@@ -2833,6 +2846,9 @@ static int bcm_enetsw_remove(struct platform_device *pdev)
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	release_mem_region(res->start, resource_size(res));
+ 
++	clk_disable_unprepare(priv->mac_clk);
++	clk_put(priv->mac_clk);
++
+ 	free_netdev(dev);
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+index 8f7aa53a4c4b..7ae8374bff13 100644
+--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+@@ -50,6 +50,7 @@
+ #include 
+ #include 
+ #include 
++#include 
+ #include 
+ 
+ #include "common.h"
+@@ -2256,6 +2257,7 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
+ 
+ 		if (t.qset_idx >= nqsets)
+ 			return -EINVAL;
++		t.qset_idx = array_index_nospec(t.qset_idx, nqsets);
+ 
+ 		q = &adapter->params.sge.qset[q1 + t.qset_idx];
+ 		t.rspq_size = q->rspq_size;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+index 16bd585365a8..9ac14df0ca3b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+@@ -643,6 +643,7 @@ static void cmd_work_handler(struct work_struct *work)
+ 	struct semaphore *sem;
+ 	unsigned long flags;
+ 	int alloc_ret;
++	int cmd_mode;
+ 
+ 	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
+ 	down(sem);
+@@ -688,6 +689,7 @@ static void cmd_work_handler(struct work_struct *work)
+ 	set_signature(ent, !cmd->checksum_disabled);
+ 	dump_command(dev, ent, 1);
+ 	ent->ts1 = ktime_get_ns();
++	cmd_mode = cmd->mode;
+ 
+ 	/* ring doorbell after the descriptor is valid */
+ 	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
+@@ -695,7 +697,7 @@ static void cmd_work_handler(struct work_struct *work)
+ 	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
+ 	mmiowb();
+ 	/* if not in polling don't use ent after this point */
+-	if (cmd->mode == CMD_MODE_POLLING) {
++	if (cmd_mode == CMD_MODE_POLLING) {
+ 		poll_timeout(ent);
+ 		/* make sure we read the descriptor after ownership is SW */
+ 		rmb();
+@@ -1126,7 +1128,7 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
+ {
+ 	struct mlx5_core_dev *dev = filp->private_data;
+ 	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
+-	char outlen_str[8];
++	char outlen_str[8] = {0};
+ 	int outlen;
+ 	void *ptr;
+ 	int err;
+@@ -1141,8 +1143,6 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
+ 	if (copy_from_user(outlen_str, buf, count))
+ 		return -EFAULT;
+ 
+-	outlen_str[7] = 0;
+-
+ 	err = sscanf(outlen_str, "%d", &outlen);
+ 	if (err < 0)
+ 		return err;
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
+index 174f7341c5c3..688b6da5a9bb 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
+@@ -22,6 +22,7 @@
+ #include 
+ #include 
+ #include 
++#include 
+ 
+ #include "qed.h"
+ #include "qed_sp.h"
+@@ -634,6 +635,14 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
+ 	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
+ 	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
+ 
++	if (is_kdump_kernel()) {
++		DP_INFO(cdev,
++			"Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
++			cdev->int_params.in.min_msix_cnt);
++		cdev->int_params.in.num_vectors =
++			cdev->int_params.in.min_msix_cnt;
++	}
++
+ 	rc = qed_set_int_mode(cdev, false);
+ 	if (rc)  {
+ 		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
+diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
+index e23a642357e7..eb4d8df49399 100644
+--- a/drivers/net/ethernet/sun/sungem.c
++++ b/drivers/net/ethernet/sun/sungem.c
+@@ -60,8 +60,7 @@
+ #include 
+ #include "sungem.h"
+ 
+-/* Stripping FCS is causing problems, disabled for now */
+-#undef STRIP_FCS
++#define STRIP_FCS
+ 
+ #define DEFAULT_MSG	(NETIF_MSG_DRV		| \
+ 			 NETIF_MSG_PROBE	| \
+@@ -435,7 +434,7 @@ static int gem_rxmac_reset(struct gem *gp)
+ 	writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
+ 	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
+ 	val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
+-	       ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
++	       (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
+ 	writel(val, gp->regs + RXDMA_CFG);
+ 	if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
+ 		writel(((5 & RXDMA_BLANK_IPKTS) |
+@@ -760,7 +759,6 @@ static int gem_rx(struct gem *gp, int work_to_do)
+ 	struct net_device *dev = gp->dev;
+ 	int entry, drops, work_done = 0;
+ 	u32 done;
+-	__sum16 csum;
+ 
+ 	if (netif_msg_rx_status(gp))
+ 		printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
+ 
+@@ -855,9 +853,13 @@ static int gem_rx(struct gem *gp, int work_to_do)
+ 			skb = copy_skb;
+ 		}
+ 
+-		csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
+-		skb->csum = csum_unfold(csum);
+-		skb->ip_summed = CHECKSUM_COMPLETE;
++		if (likely(dev->features & NETIF_F_RXCSUM)) {
++			__sum16 csum;
++
++			csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
++			skb->csum = csum_unfold(csum);
++			skb->ip_summed = CHECKSUM_COMPLETE;
++		}
+ 		skb->protocol = eth_type_trans(skb, gp->dev);
+ 
+ 		napi_gro_receive(&gp->napi, skb);
+@@ -1755,7 +1757,7 @@ static void gem_init_dma(struct gem *gp)
+ 	writel(0, gp->regs + TXDMA_KICK);
+ 
+ 	val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
+-	       ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
++	       (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
+ 	writel(val, gp->regs + RXDMA_CFG);
+ 
+ 	writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
+@@ -2973,8 +2975,8 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	pci_set_drvdata(pdev, dev);
+ 
+ 	/* We can do scatter/gather and HW checksum */
+-	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
+-	dev->features |= dev->hw_features | NETIF_F_RXCSUM;
++	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
++	dev->features = dev->hw_features;
+ 	if (pci_using_dac)
+ 		dev->features |= NETIF_F_HIGHDMA;
+ 
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index 2991d7155540..2bb336cb13ee 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -3139,7 +3139,8 @@ static int rtl8152_close(struct net_device *netdev)
+ #ifdef CONFIG_PM_SLEEP
+ 	unregister_pm_notifier(&tp->pm_notifier);
+ #endif
+-	napi_disable(&tp->napi);
++	if (!test_bit(RTL8152_UNPLUG, &tp->flags))
++		napi_disable(&tp->napi);
+ 	clear_bit(WORK_ENABLE, &tp->flags);
+ 	usb_kill_urb(tp->intr_urb);
+ 	cancel_delayed_work_sync(&tp->schedule);
+diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
+index 8b537a5a4b01..8006f0972ad1 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/core.c
++++ b/drivers/net/wireless/realtek/rtlwifi/core.c
+@@ -135,7 +135,6 @@ found_alt:
+ 			 firmware->size);
+ 		rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
+ 	}
+-	rtlpriv->rtlhal.fwsize = firmware->size;
+ 	release_firmware(firmware);
+ }
+ 
+diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
+index 44a5a8777053..645b2197930e 100644
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -955,7 +955,8 @@ err_used:
+ 	if (ubufs)
+ 		vhost_net_ubuf_put_wait_and_free(ubufs);
+ err_ubufs:
+-	sockfd_put(sock);
++	if (sock)
++		sockfd_put(sock);
+ err_vq:
+ 	mutex_unlock(&vq->mutex);
+ err:
+diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c
+index 72afdca3cea7..3c45a9301a09 100644
+--- a/fs/ocfs2/cluster/nodemanager.c
++++ b/fs/ocfs2/cluster/nodemanager.c
+@@ -40,6 +40,9 @@ char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = {
+ 		"panic",	/* O2NM_FENCE_PANIC */
+ };
+ 
++static inline void o2nm_lock_subsystem(void);
++static inline void o2nm_unlock_subsystem(void);
++
+ struct o2nm_node *o2nm_get_node_by_num(u8 node_num)
+ {
+ 	struct o2nm_node *node = NULL;
+@@ -181,7 +184,10 @@ static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node)
+ {
+ 	/* through the first node_set .parent
+ 	 * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */
+-	return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
++	if (node->nd_item.ci_parent)
++		return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
++	else
++		return NULL;
+ }
+ 
+ enum {
+@@ -194,7 +200,7 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
+ 				   size_t count)
+ {
+ 	struct o2nm_node *node = to_o2nm_node(item);
+-	struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
++	struct o2nm_cluster *cluster;
+ 	unsigned long tmp;
+ 	char *p = (char *)page;
+ 	int ret = 0;
+@@ -214,6 +220,13 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
+ 	    !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
+ 		return -EINVAL; /* XXX */
+ 
++	o2nm_lock_subsystem();
++	cluster = to_o2nm_cluster_from_node(node);
++	if (!cluster) {
++		o2nm_unlock_subsystem();
++		return -EINVAL;
++	}
++
+ 	write_lock(&cluster->cl_nodes_lock);
+ 	if (cluster->cl_nodes[tmp])
+ 		ret = -EEXIST;
+@@ -226,6 +239,8 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
+ 		set_bit(tmp, cluster->cl_nodes_bitmap);
+ 	}
+ 	write_unlock(&cluster->cl_nodes_lock);
++	o2nm_unlock_subsystem();
++
+ 	if (ret)
+ 		return ret;
+ 
+@@ -269,7 +284,7 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
+ 					    size_t count)
+ {
+ 	struct o2nm_node *node = to_o2nm_node(item);
+-	struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
++	struct o2nm_cluster *cluster;
+ 	int ret, i;
+ 	struct rb_node **p, *parent;
+ 	unsigned int octets[4];
+@@ -286,6 +301,13 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
+ 		be32_add_cpu(&ipv4_addr, octets[i] << (i * 8));
+ 	}
+ 
++	o2nm_lock_subsystem();
++	cluster = to_o2nm_cluster_from_node(node);
++	if (!cluster) {
++		o2nm_unlock_subsystem();
++		return -EINVAL;
++	}
++
+ 	ret = 0;
+ 	write_lock(&cluster->cl_nodes_lock);
+ 	if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
+@@ -298,6 +320,8 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
+ 		rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree);
+ 	}
+ 	write_unlock(&cluster->cl_nodes_lock);
++	o2nm_unlock_subsystem();
++
+ 	if (ret)
+ 		return ret;
+ 
+@@ -315,7 +339,7 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
+ 				     size_t count)
+ {
+ 	struct o2nm_node *node = to_o2nm_node(item);
+-	struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
++	struct o2nm_cluster *cluster;
+ 	unsigned long tmp;
+ 	char *p = (char *)page;
+ 	ssize_t ret;
+@@ -333,17 +357,26 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
+ 	    !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
+ 		return -EINVAL; /* XXX */
+ 
++	o2nm_lock_subsystem();
++	cluster = to_o2nm_cluster_from_node(node);
++	if (!cluster) {
++		ret = -EINVAL;
++		goto out;
++	}
++
+ 	/* the only failure case is trying to set a new local node
+ 	 * when a different one is already set */
+ 	if (tmp && tmp == cluster->cl_has_local &&
+-	    cluster->cl_local_node != node->nd_num)
+-		return -EBUSY;
++	    cluster->cl_local_node != node->nd_num) {
++		ret = -EBUSY;
++		goto out;
++	}
+ 
+ 	/* bring up the rx thread if we're setting the new local node. */
+ 	if (tmp && !cluster->cl_has_local) {
+ 		ret = o2net_start_listening(node);
+ 		if (ret)
+-			return ret;
++			goto out;
+ 	}
+ 
+ 	if (!tmp && cluster->cl_has_local &&
+@@ -358,7 +391,11 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
+ 		cluster->cl_local_node = node->nd_num;
+ 	}
+ 
+-	return count;
++	ret = count;
++
++out:
++	o2nm_unlock_subsystem();
++	return ret;
+ }
+ 
+ CONFIGFS_ATTR(o2nm_node_, num);
+@@ -750,6 +787,16 @@ static struct o2nm_cluster_group o2nm_cluster_group = {
+ 	},
+ };
+ 
++static inline void o2nm_lock_subsystem(void)
++{
++	mutex_lock(&o2nm_cluster_group.cs_subsys.su_mutex);
++}
++
++static inline void o2nm_unlock_subsystem(void)
++{
++	mutex_unlock(&o2nm_cluster_group.cs_subsys.su_mutex);
++}
++
+ int o2nm_depend_item(struct config_item *item)
+ {
+ 	return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item);
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index 287e698c28de..143d40e8a1ea 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -64,22 +64,41 @@
+ #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
+ #endif
+ 
++/*
++ * Feature detection for gnu_inline (gnu89 extern inline semantics). Either
++ * __GNUC_STDC_INLINE__ is defined (not using gnu89 extern inline semantics,
++ * and we opt in to the gnu89 semantics), or __GNUC_STDC_INLINE__ is not
++ * defined so the gnu89 semantics are the default.
++ */
++#ifdef __GNUC_STDC_INLINE__
++# define __gnu_inline __attribute__((gnu_inline))
++#else
++# define __gnu_inline
++#endif
++
+ /*
+  * Force always-inline if the user requests it so via the .config,
+- * or if gcc is too old:
++ * or if gcc is too old.
++ * GCC does not warn about unused static inline functions for
++ * -Wunused-function. This turns out to avoid the need for complex #ifdef
++ * directives. Suppress the warning in clang as well by using "unused"
++ * function attribute, which is redundant but not harmful for gcc.
++ * Prefer gnu_inline, so that extern inline functions do not emit an
++ * externally visible function. This makes extern inline behave as per gnu89
++ * semantics rather than c99. This prevents multiple symbol definition errors
++ * of extern inline functions at link time.
++ * A lot of inline functions can cause havoc with function tracing.
+ */
+ #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) ||		\
+     !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
+-#define inline		inline		__attribute__((always_inline)) notrace
+-#define __inline__	__inline__	__attribute__((always_inline)) notrace
+-#define __inline	__inline	__attribute__((always_inline)) notrace
++#define inline \
++	inline __attribute__((always_inline, unused)) notrace __gnu_inline
+ #else
+-/* A lot of inline functions can cause havoc with function tracing */
+-#define inline		inline		notrace
+-#define __inline__	__inline__	notrace
+-#define __inline	__inline	notrace
++#define inline inline __attribute__((unused)) notrace __gnu_inline
+ #endif
+ 
++#define __inline__ inline
++#define __inline inline
+ #define __always_inline	inline __attribute__((always_inline))
+ #define  noinline	__attribute__((noinline))
+ 
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index 665fd87cc105..8b8a43fda6ca 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -404,6 +404,12 @@ ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
+ 	watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
+ 	if (IS_ERR(watcher))
+ 		return PTR_ERR(watcher);
++
++	if (watcher->family != NFPROTO_BRIDGE) {
++		module_put(watcher->me);
++		return -ENOENT;
++	}
++
+ 	w->u.watcher = watcher;
+ 
+ 	par->target   = watcher;
+@@ -724,6 +730,13 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
+ 		goto cleanup_watchers;
+ 	}
+ 
++	/* Reject UNSPEC, xtables verdicts/return values are incompatible */
++	if (target->family != NFPROTO_BRIDGE) {
++		module_put(target->me);
++		ret = -ENOENT;
++		goto cleanup_watchers;
++	}
++
+ 	t->u.target = target;
+ 	if (t->u.target == &ebt_standard_target) {
+ 		if (gap < sizeof(struct ebt_standard_target)) {
+diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
+index 119c04317d48..03fcf3ee1534 100644
+--- a/net/dccp/ccids/ccid3.c
++++ b/net/dccp/ccids/ccid3.c
+@@ -599,7 +599,7 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
+ {
+ 	struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
+ 	struct dccp_sock *dp = dccp_sk(sk);
+-	ktime_t now = ktime_get_real();
++	ktime_t now = ktime_get();
+ 	s64 delta = 0;
+ 
+ 	switch (fbtype) {
+@@ -624,15 +624,14 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
+ 	case CCID3_FBACK_PERIODIC:
+ 		delta = ktime_us_delta(now, hc->rx_tstamp_last_feedback);
+ 		if (delta <= 0)
+-			DCCP_BUG("delta (%ld) <= 0", (long)delta);
+-		else
+-			hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
++			delta = 1;
++		hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
+ 		break;
+ 	default:
+ 		return;
+ 	}
+ 
+-	ccid3_pr_debug("Interval %ldusec, X_recv=%u, 1/p=%u\n", (long)delta,
++	ccid3_pr_debug("Interval %lldusec, X_recv=%u, 1/p=%u\n", delta,
+ 		       hc->rx_x_recv, hc->rx_pinv);
+ 
+ 	hc->rx_tstamp_last_feedback = now;
+@@ -679,7 +678,8 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
+ static u32 ccid3_first_li(struct sock *sk)
+ {
+ 	struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
+-	u32 x_recv, p, delta;
++	u32 x_recv, p;
++	s64 delta;
+ 	u64 fval;
+ 
+ 	if (hc->rx_rtt == 0) {
+@@ -687,7 +687,9 @@ static u32 ccid3_first_li(struct sock *sk)
+ 		hc->rx_rtt = DCCP_FALLBACK_RTT;
+ 	}
+ 
+-	delta = ktime_to_us(net_timedelta(hc->rx_tstamp_last_feedback));
++	delta = ktime_us_delta(ktime_get(), hc->rx_tstamp_last_feedback);
++	if (delta <= 0)
++		delta = 1;
+ 	x_recv = scaled_div32(hc->rx_bytes_recv, delta);
+ 	if (x_recv == 0) {		/* would also trigger divide-by-zero */
+ 		DCCP_WARN("X_recv==0\n");
+diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
+index e26df2764e83..1689c7bdf1c9 100644
+--- a/net/dns_resolver/dns_key.c
++++ b/net/dns_resolver/dns_key.c
+@@ -87,35 +87,39 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
+ 		opt++;
+ 		kdebug("options: '%s'", opt);
+ 		do {
++			int opt_len, opt_nlen;
+ 			const char *eq;
+-			int opt_len, opt_nlen, opt_vlen, tmp;
++			char optval[128];
+ 
+ 			next_opt = memchr(opt, '#', end - opt) ?: end;
+ 			opt_len = next_opt - opt;
+-			if (opt_len <= 0 || opt_len > 128) {
++			if (opt_len <= 0 || opt_len > sizeof(optval)) {
+ 				pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
+ 						    opt_len);
+ 				return -EINVAL;
+ 			}
+ 
+-			eq = memchr(opt, '=', opt_len) ?: end;
+-			opt_nlen = eq - opt;
+-			eq++;
+-			opt_vlen = next_opt - eq; /* will be -1 if no value */
++			eq = memchr(opt, '=', opt_len);
++			if (eq) {
++				opt_nlen = eq - opt;
++				eq++;
++				memcpy(optval, eq, next_opt - eq);
++				optval[next_opt - eq] = '\0';
++			} else {
++				opt_nlen = opt_len;
++				optval[0] = '\0';
++			}
+ 
+-			tmp = opt_vlen >= 0 ? opt_vlen : 0;
+-			kdebug("option '%*.*s' val '%*.*s'",
+-			       opt_nlen, opt_nlen, opt, tmp, tmp, eq);
++			kdebug("option '%*.*s' val '%s'",
++			       opt_nlen, opt_nlen, opt, optval);
+ 
+ 			/* see if it's an error number representing a DNS error
+ 			 * that's to be recorded as the result in this key */
+ 			if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 &&
+ 			    memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) {
+ 				kdebug("dns error number option");
+-				if (opt_vlen <= 0)
+-					goto bad_option_value;
+ 
+-				ret = kstrtoul(eq, 10, &derrno);
++				ret = kstrtoul(optval, 10, &derrno);
+ 				if (ret < 0)
+ 					goto bad_option_value;
+ 
+diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
+index 70fb352e317f..75abf978ef30 100644
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -213,8 +213,9 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
+ {
+ 	struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
+ 	struct tcp_fastopen_context *ctxt;
+-	int ret;
+ 	u32  user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
++	__le32 key[4];
++	int ret, i;
+ 
+ 	tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
+ 	if (!tbl.data)
+@@ -223,11 +224,14 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
+ 	rcu_read_lock();
+ 	ctxt = rcu_dereference(tcp_fastopen_ctx);
+ 	if (ctxt)
+-		memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
++		memcpy(key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
+ 	else
+-		memset(user_key, 0, sizeof(user_key));
++		memset(key, 0, sizeof(key));
+ 	rcu_read_unlock();
+ 
++	for (i = 0; i < ARRAY_SIZE(key); i++)
++		user_key[i] = le32_to_cpu(key[i]);
++
+ 	snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x",
+ 		user_key[0], user_key[1], user_key[2], user_key[3]);
+ 	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
+@@ -243,12 +247,16 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
+ 		 * first invocation of tcp_fastopen_cookie_gen
+ 		 */
+ 		tcp_fastopen_init_key_once(false);
+-		tcp_fastopen_reset_cipher(user_key, TCP_FASTOPEN_KEY_LENGTH);
++
++		for (i = 0; i < ARRAY_SIZE(user_key); i++)
++			key[i] = cpu_to_le32(user_key[i]);
++
++		tcp_fastopen_reset_cipher(key, TCP_FASTOPEN_KEY_LENGTH);
+ 	}
+ 
+ bad_key:
+ 	pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n",
+-	       user_key[0], user_key[1], user_key[2], user_key[3],
++		user_key[0], user_key[1], user_key[2], user_key[3],
+ 	       (char *)tbl.data, ret);
+ 	kfree(tbl.data);
+ 	return ret;
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index a9041915afc0..4350ee058441 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3218,6 +3218,15 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
+ 
+ 		if (tcp_is_reno(tp)) {
+ 			tcp_remove_reno_sacks(sk, pkts_acked);
++
++			/* If any of the cumulatively ACKed segments was
++			 * retransmitted, non-SACK case cannot confirm that
++			 * progress was due to original transmission due to
++			 * lack of TCPCB_SACKED_ACKED bits even if some of
++			 * the packets may have been never retransmitted.
++			 */
++			if (flag & FLAG_RETRANS_DATA_ACKED)
++				flag &= ~FLAG_ORIG_SACK_ACKED;
+ 		} else {
+ 			int delta;
+ 
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index dec4e7bda5f3..11282ffca567 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -692,7 +692,6 @@ static int ipip6_rcv(struct sk_buff *skb)
+ 
+ 		if (iptunnel_pull_header(skb, 0, htons(ETH_P_IPV6)))
+ 			goto out;
+-		iph = ip_hdr(skb);
+ 
+ 		err = IP_ECN_decapsulate(iph, skb);
+ 		if (unlikely(err)) {
+diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
+index d25212b135ea..04f060488686 100644
+--- a/net/nfc/llcp_commands.c
++++ b/net/nfc/llcp_commands.c
+@@ -754,11 +754,14 @@ int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap,
+ 		pr_debug("Fragment %zd bytes remaining %zd",
+ 			 frag_len, remaining_len);
+ 
+-		pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT,
++		pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, 0,
+ 					 frag_len + LLCP_HEADER_SIZE, &err);
+ 		if (pdu == NULL) {
+-			pr_err("Could not allocate PDU\n");
+-			continue;
++			pr_err("Could not allocate PDU (error=%d)\n", err);
++			len -= remaining_len;
++			if (len == 0)
++				len = err;
++			break;
+ 		}
+ 
+ 		pdu = llcp_add_header(pdu, dsap, ssap, LLCP_PDU_UI);
+diff --git a/net/rds/loop.c b/net/rds/loop.c
+index 6b12b68541ae..05cab8c5a379 100644
+--- a/net/rds/loop.c
++++ b/net/rds/loop.c
+@@ -191,4 +191,5 @@ struct rds_transport rds_loop_transport = {
+ 	.inc_copy_to_user	= rds_message_inc_copy_to_user,
+ 	.inc_free		= rds_loop_inc_free,
+ 	.t_name			= "loopback",
++	.t_type			= RDS_TRANS_LOOP,
+ };
+diff --git a/net/rds/rds.h b/net/rds/rds.h
+index 4588860f4c3b..254f1345cf7e 100644
+--- a/net/rds/rds.h
++++ b/net/rds/rds.h
+@@ -401,6 +401,11 @@ struct rds_notifier {
+ 	int			n_status;
+ };
+ 
++/* Available as part of RDS core, so doesn't need to participate
++ * in get_preferred transport etc
++ */
++#define	RDS_TRANS_LOOP	3
++
+ /**
+  * struct rds_transport -  transport specific behavioural hooks
+  *
+diff --git a/net/rds/recv.c b/net/rds/recv.c
+index 0514af3ab378..6275de19689c 100644
+--- a/net/rds/recv.c
++++ b/net/rds/recv.c
+@@ -76,6 +76,11 @@ static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
+ 		return;
+ 
+ 	rs->rs_rcv_bytes += delta;
++
++	/* loop transport doesn't send/recv congestion updates */
++	if (rs->rs_transport->t_type == RDS_TRANS_LOOP)
++		return;
++
+ 	now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);
+ 
+ 	rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d "
+diff --git a/net/sched/sch_blackhole.c b/net/sched/sch_blackhole.c
+index 3fee70d9814f..562edd50fa94 100644
+--- a/net/sched/sch_blackhole.c
++++ b/net/sched/sch_blackhole.c
+@@ -20,7 +20,7 @@
+ static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ {
+ 	qdisc_drop(skb, sch);
+-	return NET_XMIT_SUCCESS;
++	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+ }
+ 
+ static struct sk_buff *blackhole_dequeue(struct Qdisc *sch)